idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
17,900
def register_preprocessed_file(self, infile, pmid, extra_annotations):
    """Set up an already preprocessed text file for reading with the ISI reader.

    Copies the file into the preprocessed directory and records the pmid
    and extra annotations under the file's base name without extension.
    """
    base_name = os.path.basename(infile)
    destination = os.path.join(self.preprocessed_dir, base_name)
    shutil.copyfile(infile, destination)
    # The registry key is the file name stripped of its extension
    key = os.path.splitext(base_name)[0]
    self.pmids[key] = pmid
    self.extra_annotations[key] = extra_annotations
Set up already preprocessed text file for reading with ISI reader .
123
14
17,901
def preprocess_plain_text_string(self, text, pmid, extra_annotations):
    """Preprocess a plain text string for use by the ISI reader.

    Splits the text into sentences, writes them one per line into a new
    file in the preprocessed directory, and records the pmid and extra
    annotations under the generated file id.
    """
    file_key = str(self.next_file_id)
    output_file = os.path.join(self.preprocessed_dir,
                               '%s.txt' % self.next_file_id)
    # Tokenize into sentences and write them one per line
    sentences = nltk.sent_tokenize(text)
    with codecs.open(output_file, 'w', encoding='utf-8') as fh:
        fh.write('\n'.join(sentence.rstrip() for sentence in sentences))
    # Store annotations keyed by the id used for the output file
    self.pmids[file_key] = pmid
    self.extra_annotations[file_key] = extra_annotations
    # Increment file id for the next call
    self.next_file_id += 1
Preprocess plain text string for use by ISI reader .
214
11
17,902
def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
    """Preprocess a plain text file for use with the ISI reader.

    Reads the file as UTF-8 and delegates to preprocess_plain_text_string.
    """
    with codecs.open(filename, 'r', encoding='utf-8') as fh:
        text = fh.read()
    self.preprocess_plain_text_string(text, pmid, extra_annotations)
Preprocess a plain text file for use with the ISI reader .
74
13
17,903
def preprocess_nxml_file(self, filename, pmid, extra_annotations):
    """Preprocess an NXML file for use with the ISI reader.

    Converts the NXML file to plain text with the external nxml2txt tool
    (run under a Python 2 interpreter), strips common LaTeX command
    patterns, and hands the result to preprocess_plain_text_string.

    Parameters
    ----------
    filename : str
        Path to the NXML file to convert.
    pmid : str
        PMID recorded with the preprocessed output.
    extra_annotations : dict
        Extra annotations recorded with the preprocessed output.
    """
    # Create a temporary directory for nxml2txt's output
    tmp_dir = tempfile.mkdtemp('indra_isi_nxml2txt_output')
    # Run nxml2txt; both paths come from config/environment and may be unset
    if nxml2txt_path is None:
        logger.error('NXML2TXT_PATH not specified in config file or ' +
                     'environment variable')
        return
    if python2_path is None:
        logger.error('PYTHON2_PATH not specified in config file or ' +
                     'environment variable')
        return
    else:
        txt_out = os.path.join(tmp_dir, 'out.txt')
        so_out = os.path.join(tmp_dir, 'out.so')
        command = [python2_path,
                   os.path.join(nxml2txt_path, 'nxml2txt'),
                   filename,
                   txt_out,
                   so_out]
        ret = subprocess.call(command)
        if ret != 0:
            # Conversion may still have produced usable output; warn only
            logger.warning('nxml2txt returned non-zero error code')
        with open(txt_out, 'r') as f:
            txt_content = f.read()
    # Remove temporary directory
    shutil.rmtree(tmp_dir)
    # We need to remove some common LaTeX commands from the converted text
    # or the reader will get confused
    cmd1 = '[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'
    cmd2 = '[^ \{\}]+\{[^\{\}]+\}'
    txt_content = re.sub(cmd1, '', txt_content)
    txt_content = re.sub(cmd2, '', txt_content)
    # NOTE(review): writing to a fixed 'tmp.txt' in the working directory
    # looks like a leftover debugging aid — confirm whether it can be removed
    with open('tmp.txt', 'w') as f:
        f.write(txt_content)
    # Preprocess text extracted from nxml
    self.preprocess_plain_text_string(txt_content, pmid, extra_annotations)
Preprocess an NXML file for use with the ISI reader .
450
13
17,904
def preprocess_abstract_list(self, abstract_list):
    """Preprocess abstracts in database pickle dump format for the ISI reader.

    Each entry is a dict with keys 'format', 'text_type', 'content'
    (gzip-compressed text), 'tcid' and 'trid'.
    """
    for entry in abstract_list:
        abs_format = entry['format']
        content_type = entry['text_type']
        content_zipped = entry['content']
        annotations = {'tcid': entry['tcid'], 'trid': entry['trid']}
        assert abs_format == 'text'
        assert content_type == 'abstract'
        # Don't worry about pmid for now
        pmid = None
        # Content is gzip-compressed: zlib with a 16 window-bit offset
        content = zlib.decompress(content_zipped,
                                  zlib.MAX_WBITS + 16).decode('utf-8')
        self.preprocess_plain_text_string(content, pmid, annotations)
Preprocess abstracts in database pickle dump format for ISI reader .
203
14
17,905
def process_geneways_files(input_folder=data_folder, get_evidence=True):
    """Read in Geneways data and return a processor holding the statements.

    Parameters
    ----------
    input_folder : str
        Folder containing the Geneways data files (defaults to the
        module-level data_folder).
    get_evidence : bool
        Whether to attach evidence while processing.
    """
    return GenewaysProcessor(input_folder, get_evidence)
Reads in Geneways data and returns a list of statements .
40
13
17,906
def post_flag_create(self, post_id, reason):
    """Flag a post.

    Parameters
    ----------
    post_id :
        Id of the post to flag.
    reason :
        Reason for flagging.
    """
    payload = {'post_flag[post_id]': post_id,
               'post_flag[reason]': reason}
    # Flag creation requires an authenticated POST request
    return self._get('post_flags.json', payload, 'POST', auth=True)
Function to flag a post .
67
6
17,907
def post_versions_list(self, updater_name=None, updater_id=None,
                       post_id=None, start_id=None):
    """Get a list of post versions, optionally filtered."""
    filters = {'updater_name': updater_name,
               'updater_id': updater_id,
               'post_id': post_id,
               'start_id': start_id}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('post_versions.json', params)
Get list of post versions .
109
6
17,908
def artist_list(self, query=None, artist_id=None, creator_name=None,
                creator_id=None, is_active=None, is_banned=None,
                empty_only=None, order=None):
    """Get an artist or a list of artists matching the given filters."""
    filters = {'name': query,
               'id': artist_id,
               'creator_name': creator_name,
               'creator_id': creator_id,
               'is_active': is_active,
               'is_banned': is_banned,
               'empty_only': empty_only,
               'order': order}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('artists.json', params)
Get an artist or a list of artists .
159
9
17,909
def artist_commentary_list(self, text_matches=None, post_id=None,
                           post_tags_match=None, original_present=None,
                           translated_present=None):
    """List artist commentaries matching the given filters."""
    filters = {'text_matches': text_matches,
               'post_id': post_id,
               'post_tags_match': post_tags_match,
               'original_present': original_present,
               'translated_present': translated_present}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('artist_commentaries.json', params)
List artist commentaries .
131
4
17,910
def artist_commentary_versions(self, post_id, updater_id):
    """Return a list of artist commentary versions."""
    filters = {'updater_id': updater_id, 'post_id': post_id}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('artist_commentary_versions.json', params)
Return list of artist commentary versions .
69
7
17,911
def note_list(self, body_matches=None, post_id=None, post_tags_match=None,
              creator_name=None, creator_id=None, is_active=None):
    """Return a list of notes matching the given filters."""
    filters = {'body_matches': body_matches,
               'post_id': post_id,
               'post_tags_match': post_tags_match,
               'creator_name': creator_name,
               'creator_id': creator_id,
               'is_active': is_active}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('notes.json', params)
Return list of notes .
142
5
17,912
def note_versions(self, updater_id=None, post_id=None, note_id=None):
    """Get a list of note versions."""
    filters = {'updater_id': updater_id,
               'post_id': post_id,
               'note_id': note_id}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('note_versions.json', params)
Get list of note versions .
85
6
17,913
def user_list(self, name=None, name_matches=None, min_level=None,
              max_level=None, level=None, user_id=None, order=None):
    """Get a list of users or a specific user."""
    filters = {'name': name,
               'name_matches': name_matches,
               'min_level': min_level,
               'max_level': max_level,
               'level': level,
               'id': user_id,
               'order': order}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('users.json', params)
Function to get a list of users or a specific user .
134
12
17,914
def pool_list(self, name_matches=None, pool_ids=None, category=None,
              description_matches=None, creator_name=None, creator_id=None,
              is_deleted=None, is_active=None, order=None):
    """Get a list of pools matching the given filters."""
    filters = {'name_matches': name_matches,
               'id': pool_ids,
               'description_matches': description_matches,
               'creator_name': creator_name,
               'creator_id': creator_id,
               'is_active': is_active,
               'is_deleted': is_deleted,
               'order': order,
               'category': category}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('pools.json', params)
Get a list of pools .
183
6
17,915
def pool_versions(self, updater_id=None, updater_name=None, pool_id=None):
    """Get a list of pool versions."""
    filters = {'updater_id': updater_id,
               'updater_name': updater_name,
               'pool_id': pool_id}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('pool_versions.json', params)
Get list of pool versions .
89
6
17,916
def tag_aliases(self, name_matches=None, antecedent_name=None, tag_id=None):
    """Get tag aliases matching the given filters."""
    filters = {'name_matches': name_matches,
               'antecedent_name': antecedent_name,
               'id': tag_id}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('tag_aliases.json', params)
Get tags aliases .
91
4
17,917
def tag_implications(self, name_matches=None, antecedent_name=None,
                     tag_id=None):
    """Get tag implications matching the given filters."""
    filters = {'name_matches': name_matches,
               'antecedent_name': antecedent_name,
               'id': tag_id}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('tag_implications.json', params)
Get tags implications .
91
4
17,918
def tag_related(self, query, category=None):
    """Get tags related to the given query, optionally within a category."""
    payload = {'query': query, 'category': category}
    return self._get('related_tag.json', payload)
Get related tags .
45
4
17,919
def wiki_list(self, title=None, creator_id=None, body_matches=None,
              other_names_match=None, creator_name=None, hide_deleted=None,
              other_names_present=None, order=None):
    """Retrieve a list of wiki pages matching the given filters."""
    filters = {'title': title,
               'creator_id': creator_id,
               'body_matches': body_matches,
               'other_names_match': other_names_match,
               'creator_name': creator_name,
               'hide_deleted': hide_deleted,
               'other_names_present': other_names_present,
               'order': order}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('wiki_pages.json', params)
Function to retrieve a list of every wiki page .
177
11
17,920
def wiki_versions_list(self, page_id, updater_id):
    """Return a list of wiki page versions.

    Parameters
    ----------
    page_id :
        Id of the wiki page.
    updater_id :
        Id of the user who updated the page.
    """
    # BUG FIX: the updater key was misspelled 'earch[updater_id]', so the
    # updater filter was silently ignored by the API.
    params = {'search[updater_id]': updater_id,
              'search[wiki_page_id]': page_id}
    return self._get('wiki_page_versions.json', params)
Return a list of wiki page versions .
69
8
17,921
def forum_topic_list(self, title_matches=None, title=None,
                     category_id=None):
    """Get forum topics matching the given filters."""
    filters = {'title_matches': title_matches,
               'title': title,
               'category_id': category_id}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('forum_topics.json', params)
Function to get forum topics .
81
6
17,922
def forum_post_list(self, creator_id=None, creator_name=None, topic_id=None,
                    topic_title_matches=None, topic_category_id=None,
                    body_matches=None):
    """Return a list of forum posts matching the given filters."""
    filters = {'creator_id': creator_id,
               'creator_name': creator_name,
               'topic_id': topic_id,
               'topic_title_matches': topic_title_matches,
               'topic_category_id': topic_category_id,
               'body_matches': body_matches}
    params = {'search[%s]' % key: value for key, value in filters.items()}
    return self._get('forum_posts.json', params)
Return a list of forum posts .
155
7
17,923
def site_name(self, site_name):
    """Set and validate the site name, and set the site url accordingly.

    Raises
    ------
    PybooruError
        If site_name is not a key of SITE_LIST.
    """
    if site_name not in SITE_LIST:
        raise PybooruError(
            "The 'site_name' is not valid, specify a valid 'site_name'.")
    self.__site_name = site_name
    self.__site_url = SITE_LIST[site_name]['url']
Function that sets and checks the site name and set url .
81
12
17,924
def site_url(self, url):
    """URL setter and validator for the site_url property.

    Raises PybooruError when the URL does not use HTTP/HTTPS or is
    otherwise malformed.
    """
    # Regular expression for URL validation
    regex = re.compile(
        r'^(?:http|https)://'  # Scheme only HTTP/HTTPS
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|'
        r'[A-Z0-9-]{2,}(?<!-)\.?)|'  # Domain
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # or ipv6
        r'(?::\d+)?'  # Port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    # Check the scheme first so the error message can be specific
    if not re.match('^(?:http|https)://', url):
        raise PybooruError(
            "Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
    if re.search(regex, url) is None:
        raise PybooruError("Invalid URL: {0}".format(url))
    self.__site_url = url
URL setter and validator for site_url property .
323
12
17,925
def _request(self, url, api_call, request_args, method='GET'):
    """Perform an HTTP request and return the decoded JSON data.

    Parameters
    ----------
    url : str
        Full URL to request.
    api_call : str
        Name of the API call, recorded in self.last_call.
    request_args : dict
        Keyword arguments forwarded to the requests client.
    method : str
        HTTP method (default 'GET').

    Raises
    ------
    PybooruHTTPError
        If the response status code is not a success code.
    PybooruError
        On timeout, or when the response body is not valid JSON.
    """
    try:
        if method != 'GET':
            # Reset content-type for data encoded as a multipart form
            self.client.headers.update({'content-type': None})
        response = self.client.request(method, url, **request_args)
        self.last_call.update({
            'API': api_call,
            'url': response.url,
            'status_code': response.status_code,
            'status': self._get_status(response.status_code),
            'headers': response.headers})
        if response.status_code in (200, 201, 202, 204):
            return response.json()
        raise PybooruHTTPError("In _request", response.status_code,
                               response.url)
    except requests.exceptions.Timeout:
        # BUG FIX: 'response' is unbound when the request itself timed out,
        # so formatting with response.url raised a NameError; report the
        # requested url instead.
        raise PybooruError("Timeout! url: {0}".format(url))
    except ValueError as e:
        raise PybooruError("JSON Error: {0} in line {1} column {2}".format(
            e.msg, e.lineno, e.colno))
Function to request and returning JSON data .
267
8
17,926
def post_update(self, post_id, tags=None, file_=None, rating=None,
                source=None, is_rating_locked=None, is_note_locked=None,
                parent_id=None):
    """Update a specific post.

    When file_ is given it is opened and uploaded as the replacement
    image.
    """
    params = {
        'id': post_id,
        'post[tags]': tags,
        'post[rating]': rating,
        'post[source]': source,
        'post[is_rating_locked]': is_rating_locked,
        'post[is_note_locked]': is_note_locked,
        'post[parent_id]': parent_id}
    if file_ is None:
        return self._get('post/update', params, 'PUT')
    # Upload the replacement image as a multipart file argument
    upload = {'post[file]': open(file_, 'rb')}
    return self._get('post/update', params, 'PUT', upload)
Update a specific post .
198
5
17,927
def site_name(self, site_name):
    """Set the site name and, when known, api_version and hash_string.

    Delegates validation to the base class setter, then looks up the
    site-specific settings in SITE_LIST.
    """
    # Set base class property site_name
    _Pybooru.site_name.fset(self, site_name)
    # BUG FIX: the original test ('api_version' and 'hashed_string') in d
    # short-circuits to 'hashed_string' in d only, since a non-empty string
    # `and` expression evaluates to its last operand. Check both keys.
    site_settings = SITE_LIST[site_name]
    if 'api_version' in site_settings and 'hashed_string' in site_settings:
        self.api_version = site_settings['api_version']
        self.hash_string = site_settings['hashed_string']
Sets api_version and hash_string .
109
10
17,928
def _build_url ( self , api_call ) : if self . api_version in ( '1.13.0' , '1.13.0+update.1' , '1.13.0+update.2' ) : if '/' not in api_call : return "{0}/{1}/index.json" . format ( self . site_url , api_call ) return "{0}/{1}.json" . format ( self . site_url , api_call )
Build request url .
112
4
17,929
def _build_hash_string(self):
    """Build the password hash string used for authenticated calls.

    Formats the password into hash_string, hashes the result with SHA1
    and stores the hexdigest in self.password_hash.

    Raises
    ------
    PybooruError
        If hash_string is unavailable or username/password are missing.
    """
    # Check that we know how to build the hash for this site
    if self.site_name not in SITE_LIST and not self.hash_string:
        raise PybooruError("Specify the 'hash_string' parameter of the Pybooru"
                           " object, for the functions that requires login.")
    if not (self.username and self.password):
        raise PybooruError("Specify the 'username' and 'password' "
                           "parameters of the Pybooru object, for "
                           "setting 'password_hash' attribute.")
    try:
        hash_string = self.hash_string.format(self.password)
    except TypeError:
        raise PybooruError("Pybooru can't add 'password' "
                           "to 'hash_string'")
    # encrypt hashed_string to SHA1 and return hexdigest string
    self.password_hash = hashlib.sha1(
        hash_string.encode('utf-8')).hexdigest()
Function for build password hash string .
225
7
17,930
def _is_autonomous ( indep , exprs ) : if indep is None : return True for expr in exprs : try : in_there = indep in expr . free_symbols except : in_there = expr . has ( indep ) if in_there : return False return True
Whether the expressions for the dependent variables are autonomous .
66
10
17,931
def symmetricsys(dep_tr=None, indep_tr=None, SuperClass=TransformedSys, **kwargs):
    """A factory function for creating symmetrically transformed systems.

    Parameters
    ----------
    dep_tr : pair of callables, optional
        (forward, backward) transformation applied to every dependent
        variable.
    indep_tr : pair of callables, optional
        (forward, backward) transformation applied to the independent
        variable.
    SuperClass : class
        Base class of the generated system class.
    **kwargs :
        Default keyword arguments for the generated class's constructor.

    Returns
    -------
    A subclass of SuperClass applying the same transformation to all
    dependent variables.
    """
    if dep_tr is not None:
        if not callable(dep_tr[0]) or not callable(dep_tr[1]):
            # BUG FIX: error message typo ("Exceptected" -> "Expected")
            raise ValueError("Expected dep_tr to be a pair of callables")
    if indep_tr is not None:
        if not callable(indep_tr[0]) or not callable(indep_tr[1]):
            # BUG FIX: error message typo ("Exceptected" -> "Expected")
            raise ValueError("Expected indep_tr to be a pair of callables")

    class _SymmetricSys(SuperClass):
        def __init__(self, dep_exprs, indep=None, **inner_kwargs):
            new_kwargs = kwargs.copy()
            new_kwargs.update(inner_kwargs)
            dep, exprs = zip(*dep_exprs)
            super(_SymmetricSys, self).__init__(
                zip(dep, exprs), indep,
                dep_transf=list(zip(
                    list(map(dep_tr[0], dep)),
                    list(map(dep_tr[1], dep)))) if dep_tr is not None else None,
                indep_transf=((indep_tr[0](indep), indep_tr[1](indep))
                              if indep_tr is not None else None),
                **new_kwargs)

        @classmethod
        def from_callback(cls, cb, ny=None, nparams=None, **inner_kwargs):
            new_kwargs = kwargs.copy()
            new_kwargs.update(inner_kwargs)
            return SuperClass.from_callback(
                cb, ny, nparams,
                dep_transf_cbs=repeat(dep_tr) if dep_tr is not None else None,
                indep_transf_cbs=indep_tr,
                **new_kwargs)

    return _SymmetricSys
A factory function for creating symmetrically transformed systems .
465
11
17,932
def from_other(cls, ori, **kwargs):
    """Creates a new instance with an existing one as a template.

    Parameters
    ----------
    ori : instance of (a subclass of) cls
        Template instance whose attributes seed the new one.
    **kwargs :
        Overrides; any attribute not given here is copied from ``ori``.

    Returns
    -------
    A new instance of ``cls``.
    """
    # Copy the listed attributes unless explicitly overridden; None values
    # are skipped so the constructor's own defaults apply.
    for k in cls._attrs_to_copy + ('params', 'roots', 'init_indep', 'init_dep'):
        if k not in kwargs:
            val = getattr(ori, k)
            if val is not None:
                kwargs[k] = val
    if 'lower_bounds' not in kwargs and getattr(ori, 'lower_bounds') is not None:
        kwargs['lower_bounds'] = ori.lower_bounds
    if 'upper_bounds' not in kwargs and getattr(ori, 'upper_bounds') is not None:
        kwargs['upper_bounds'] = ori.upper_bounds
    # Processor lists are concatenated rather than replaced. Note the
    # deliberate asymmetry: caller-given pre-processors come before the
    # originals, while original post-processors come before caller-given
    # ones (mirroring the wrap order of pre/post processing).
    if len(ori.pre_processors) > 0:
        if 'pre_processors' not in kwargs:
            kwargs['pre_processors'] = []
        kwargs['pre_processors'] = kwargs['pre_processors'] + ori.pre_processors
    if len(ori.post_processors) > 0:
        if 'post_processors' not in kwargs:
            kwargs['post_processors'] = []
        kwargs['post_processors'] = ori.post_processors + kwargs['post_processors']
    if 'dep_exprs' not in kwargs:
        kwargs['dep_exprs'] = zip(ori.dep, ori.exprs)
    if 'indep' not in kwargs:
        kwargs['indep'] = ori.indep
    instance = cls(**kwargs)
    # Carry over any extra attributes present on ori's class that the
    # target class does not already copy via _attrs_to_copy.
    for attr in ori._attrs_to_copy:
        if attr not in cls._attrs_to_copy:
            setattr(instance, attr, getattr(ori, attr))
    return instance
Creates a new instance with an existing one as a template .
440
13
17,933
def get_jac(self):
    """Derives the jacobian from self.exprs and self.dep.

    Returns False when jacobian generation is disabled, otherwise the
    (cached) jacobian in sparse, banded or dense form.
    """
    if self._jac is False:
        return False
    if self._jac is True:  # sentinel: not yet derived
        if self.sparse is True:
            self._jac, self._colptrs, self._rowvals = \
                self.be.sparse_jacobian_csc(self.exprs, self.dep)
        elif self.band is not None:  # Banded
            self._jac = self.be.banded_jacobian(self.exprs, self.dep,
                                                *self.band)
        else:  # Dense
            f = self.be.Matrix(1, self.ny, self.exprs)
            self._jac = f.jacobian(self.be.Matrix(1, self.ny, self.dep))
    return self._jac
Derives the jacobian from self . exprs and self . dep .
173
17
17,934
def get_jtimes(self):
    """Derive the jacobian-vector product from self.exprs and self.dep."""
    if self._jtimes is False:
        return False
    if self._jtimes is not True:
        # Given explicitly as pairs; return as (v, exprs) tuples
        return tuple(zip(*self._jtimes))
    # Differentiate f(y + r*v) with respect to r at r = 0
    r = self.be.Dummy('r')
    v = tuple(self.be.Dummy('v_{0}'.format(idx)) for idx in range(self.ny))
    f = self.be.Matrix(1, self.ny, self.exprs)
    f = f.subs([(y_i, y_i + r * v_i) for y_i, v_i in zip(self.dep, v)])
    return v, self.be.flatten(f.diff(r).subs(r, 0))
Derive the jacobian - vector product from self . exprs and self . dep
176
19
17,935
def jacobian_singular(self):
    """Returns True if the Jacobian is singular, else False."""
    # cse returns (common subexpressions, [reduced jacobian]); a
    # non-empty nullspace means the jacobian is singular.
    cses, (jac_in_cses,) = self.be.cse(self.get_jac())
    return bool(jac_in_cses.nullspace())
Returns True if Jacobian is singular else False .
61
10
17,936
def get_dfdx(self):
    """Calculates the derivative of self.exprs with respect to self.indep."""
    if self._dfdx is False:
        return False
    if self._dfdx is True:  # sentinel: not yet derived
        if self.indep is None:
            # Autonomous system: the derivative is identically zero;
            # build a typed zero through the backend
            zero = 0 * self.be.Dummy() ** 0
            self._dfdx = self.be.Matrix(1, self.ny, [zero] * self.ny)
        else:
            self._dfdx = self.be.Matrix(
                1, self.ny, [expr.diff(self.indep) for expr in self.exprs])
    return self._dfdx
Calculates 2nd derivatives of self . exprs
126
11
17,937
def get_f_ty_callback(self):
    """Generates a callback for evaluating self.exprs.

    When lower/upper bounds are set, the returned callback clips y into
    bounds and raises RecoverableError if y violates a bound by more
    than ten times the current integration's atol.
    """
    cb = self._callback_factory(self.exprs)
    lb, ub = self.lower_bounds, self.upper_bounds
    if lb is None and ub is None:
        return cb

    def _bounds_wrapper(t, y, p=(), be=None):
        atol = self._current_integration_kwargs['atol']
        if lb is not None:
            if np.any(y < lb - 10 * atol):
                raise RecoverableError
            y = np.array(y)
            y[y < lb] = lb[y < lb]  # clip from below
        if ub is not None:
            if np.any(y > ub + 10 * atol):
                raise RecoverableError
            y = np.array(y)
            y[y > ub] = ub[y > ub]  # clip from above
        return cb(t, y, p, be)
    return _bounds_wrapper
Generates a callback for evaluating self . exprs .
219
11
17,938
def get_j_ty_callback(self):
    """Generates a callback for evaluating the jacobian.

    Returns None when jacobian generation is disabled. In sparse mode
    the callback packs the flat data into a scipy CSC matrix using the
    stored row/column structure.
    """
    j_exprs = self.get_jac()
    if j_exprs is False:
        return None
    cb = self._callback_factory(j_exprs)
    if not self.sparse:
        return cb
    from scipy.sparse import csc_matrix

    def sparse_cb(x, y, p=()):
        data = cb(x, y, p).flatten()
        return csc_matrix((data, self._rowvals, self._colptrs))
    return sparse_cb
Generates a callback for evaluating the jacobian .
131
12
17,939
def get_dfdx_callback(self):
    """Generate a callback for evaluating the derivative of self.exprs.

    Returns None when derivative generation is disabled.
    """
    exprs = self.get_dfdx()
    return None if exprs is False else self._callback_factory(exprs)
Generate a callback for evaluating derivative of self . exprs
54
12
17,940
def get_jtimes_callback(self):
    """Generate a callback for evaluating the jacobian-vector product.

    Returns None when jacobian-vector product generation is disabled.
    """
    jtimes = self.get_jtimes()
    if jtimes is False:
        return None
    v, jtimes_exprs = jtimes
    # The extra symbols in v are appended to the dependent variables
    return _Callback(self.indep, tuple(self.dep) + tuple(v),
                     self.params, jtimes_exprs, Lambdify=self.be.Lambdify)
Generate a callback for evaluating the Jacobian - vector product .
83
15
17,941
def from_linear_invariants(cls, ori_sys, preferred=None, **kwargs):
    """Reformulates the ODE system in fewer variables.

    Uses the linear invariants of ``ori_sys`` to express some dependent
    variables analytically in terms of the others.

    Parameters
    ----------
    ori_sys : system instance
        Original system; must provide ``linear_invariants``.
    preferred : iterable, optional
        Preferred dependent variables (names, indices or symbols) to
        eliminate; defaults to the first rows' worth of variables.
    **kwargs :
        Passed on to the ``cls`` constructor.
    """
    _be = ori_sys.be
    A = _be.Matrix(ori_sys.linear_invariants)
    rA, pivots = A.rref()
    if len(pivots) < A.shape[0]:
        # If the linear system contains rows which a linearly dependent these could be removed.
        # The criterion for removal could be dictated by a user provided callback.
        #
        # An alternative would be to write the matrix in reduced row echelon form, however,
        # this would cause the invariants to become linear combinations of each other and
        # their intuitive meaning (original principles they were formulated from) will be lost.
        # Hence that is not the default behaviour. However, the user may choose to rewrite the
        # equations in reduced row echelon form if they choose to before calling this method.
        raise NotImplementedError("Linear invariants contain linear dependencies.")
    # For each invariant row, the column indices with non-zero coefficients
    per_row_cols = [(ri, [ci for ci in range(A.cols) if A[ri, ci] != 0])
                    for ri in range(A.rows)]
    if preferred is None:
        preferred = ori_sys.names[:A.rows] if ori_sys.dep_by_name else list(range(A.rows))
    # Normalize 'preferred' entries (names / ints / symbols) to indices
    targets = [ori_sys.names.index(dep) if ori_sys.dep_by_name else
               (dep if isinstance(dep, int) else ori_sys.dep.index(dep))
               for dep in preferred]
    # Greedily assign each target variable to an invariant row, starting
    # with the rows involving the fewest variables
    row_tgt = []
    for ri, colids in sorted(per_row_cols, key=lambda k: len(k[1])):
        for tgt in targets:
            if tgt in colids:
                row_tgt.append((ri, tgt))
                targets.remove(tgt)
                break
        if len(targets) == 0:
            break
    else:
        # for/else: only reached when some targets could not be matched
        raise ValueError("Could not find a solutions for: %s" % targets)

    def analytic_factory(x0, y0, p0, be):
        # Express each eliminated variable from its invariant row and the
        # initial values of the remaining variables
        return {
            ori_sys.dep[tgt]: y0[ori_sys.dep[tgt] if ori_sys.dep_by_name else tgt] - sum(
                [A[ri, ci]*(ori_sys.dep[ci] - y0[ori_sys.dep[ci] if
                            ori_sys.dep_by_name else ci])
                 for ci in range(A.cols) if ci != tgt])/A[ri, tgt]
            for ri, tgt in row_tgt
        }

    ori_li_nms = ori_sys.linear_invariant_names or ()
    # Drop the consumed rows/columns from the invariants carried forward
    new_lin_invar = [[cell for ci, cell in enumerate(row)
                      if ci not in list(zip(*row_tgt))[1]]
                     for ri, row in enumerate(A.tolist())
                     if ri not in list(zip(*row_tgt))[0]]
    new_lin_i_nms = [nam for ri, nam in enumerate(ori_li_nms)
                     if ri not in list(zip(*row_tgt))[0]]
    return cls(ori_sys, analytic_factory, linear_invariants=new_lin_invar,
               linear_invariant_names=new_lin_i_nms, **kwargs)
Reformulates the ODE system in fewer variables .
797
11
17,942
def chained_parameter_variation(subject, durations, y0, varied_params,
                                default_params=None, integrate_kwargs=None,
                                x0=None, npoints=1, numpy=None):
    """Integrate an ODE system for a series of durations with some
    parameters changed in-between.

    Parameters
    ----------
    subject : callable or ODESys instance
        The integration entry point (or a system with an ``integrate``
        method).
    durations : iterable of floats
        Length of each segment.
    y0 : dict or array
        Initial conditions for the first segment.
    varied_params : dict
        Maps parameter key to an array of values, one per duration.
    default_params : dict or array, optional
        Default parameter values.
    integrate_kwargs : dict, optional
        Keyword arguments passed on to the integrator.
    x0 : float-like, optional
        First value of the independent variable (default: zero).
    npoints : int
        Number of points per segment (default: 1).
    numpy : module, optional
        numpy-like module (defaults to subject.numpy or numpy).
    """
    assert len(durations) > 0, 'need at least 1 duration (preferably many)'
    assert npoints > 0, 'need at least 1 point per duration'
    for k, v in varied_params.items():
        if len(v) != len(durations):
            # BUG FIX: error message typo ("Mismathced" -> "Mismatched")
            raise ValueError("Mismatched lengths of durations and varied_params")
    if isinstance(subject, ODESys):
        integrate = subject.integrate
        numpy = numpy or subject.numpy
    else:
        integrate = subject
        numpy = numpy or np
    default_params = default_params or {}
    integrate_kwargs = integrate_kwargs or {}

    def _get_idx(cont, idx):
        # Pick the idx:th entry of each array-valued item of a dict,
        # or the idx:th element of a sequence
        if isinstance(cont, dict):
            return {k: (v[idx] if hasattr(v, '__len__') and
                        getattr(v, 'ndim', 1) > 0 else v)
                    for k, v in cont.items()}
        else:
            return cont[idx]

    durations = numpy.cumsum(durations)
    for idx_dur in range(len(durations)):
        params = copy.copy(default_params)
        for k, v in varied_params.items():
            params[k] = v[idx_dur]
        if idx_dur == 0:
            if x0 is None:
                x0 = durations[0] * 0  # zero of matching type
            out = integrate(numpy.linspace(x0, durations[0], npoints + 1),
                            y0, params, **integrate_kwargs)
        else:
            if isinstance(out, Result):
                out.extend_by_integration(durations[idx_dur], params,
                                          npoints=npoints, **integrate_kwargs)
            else:
                # One Result per system; distribute per-index parameters
                for idx_res, r in enumerate(out):
                    r.extend_by_integration(durations[idx_dur],
                                            _get_idx(params, idx_res),
                                            npoints=npoints,
                                            **integrate_kwargs)
    return out
Integrate an ODE system for a series of durations with some parameters changed in-between
508
21
17,943
def pre_process(self, xout, y0, params=()):
    """Transforms input to the internal representation.

    Applies each registered pre-processor in order, then promotes the
    results to (at least) 1-dimensional arrays.
    """
    for processor in self.pre_processors:
        xout, y0, params = processor(xout, y0, params)
    return [self.numpy.atleast_1d(arr) for arr in (xout, y0, params)]
Transforms input to internal values used internally .
80
9
17,944
def post_process(self, xout, yout, params):
    """Transforms internal values to output.

    Applies each registered post-processor in order.
    """
    for processor in self.post_processors:
        xout, yout, params = processor(xout, yout, params)
    return xout, yout, params
Transforms internal values to output used internally .
56
9
17,945
def adaptive(self, y0, x0, xend, params=(), **kwargs):
    """Integrate with integrator-chosen output.

    Convenience wrapper delegating to integrate() with the span
    (x0, xend).
    """
    span = (x0, xend)
    return self.integrate(span, y0, params=params, **kwargs)
Integrate with integrator chosen output .
52
8
17,946
def predefined(self, y0, xout, params=(), **kwargs):
    """Integrate with a user-chosen output grid.

    Returns (yout, info) from integrate() called with
    force_predefined=True.
    """
    _, yout, nfo = self.integrate(xout, y0, params=params,
                                  force_predefined=True, **kwargs)
    return yout, nfo
Integrate with user chosen output .
64
7
17,947
def integrate(self, x, y0, params=(), atol=1e-8, rtol=1e-8, **kwargs):
    """Integrate the system of ordinary differential equations.

    Parameters
    ----------
    x : array-like or pair
        Values of the independent variable (or a (start, end) span).
    y0 : array-like or dict
        Initial values of the dependent variables.
    params : array-like or dict
        Parameter values.
    atol : float or dict
        Absolute tolerance; a dict is resolved per-name via self.names.
    rtol : float
        Relative tolerance.
    **kwargs :
        Passed through to the chosen integrator; 'integrator' selects it
        (default from PYODESYS_INTEGRATOR env var, falling back to 'scipy').

    Returns
    -------
    Result (or list of Result when the pre-processed input is 2-D).
    """
    arrs = self.to_arrays(x, y0, params)
    _x, _y, _p = _arrs = self.pre_process(*arrs)
    # Either everything is 1-D (single integration) or everything is 2-D
    # (a batch of integrations)
    ndims = [a.ndim for a in _arrs]
    if ndims == [1, 1, 1]:
        twodim = False
    elif ndims == [2, 2, 2]:
        twodim = True
    else:
        raise ValueError("Pre-processor made ndims inconsistent?")
    if self.append_iv:
        # Initial values are appended to the parameter vector
        _p = self.numpy.concatenate((_p, _y), axis=-1)
    if hasattr(self, 'ny'):
        if _y.shape[-1] != self.ny:
            raise ValueError("Incorrect shape of intern_y0")
    if isinstance(atol, dict):
        # Per-variable tolerances, ordered like self.names
        kwargs['atol'] = [atol[k] for k in self.names]
    else:
        kwargs['atol'] = atol
    kwargs['rtol'] = rtol
    integrator = kwargs.pop('integrator', None)
    if integrator is None:
        integrator = os.environ.get('PYODESYS_INTEGRATOR', 'scipy')
    args = tuple(map(self.numpy.atleast_2d, (_x, _y, _p)))
    # Stored so callbacks (e.g. bounds checking) can read the tolerances
    self._current_integration_kwargs = kwargs
    if isinstance(integrator, str):
        # Named integrator: dispatch to the matching _integrate_<name> method
        nfo = getattr(self, '_integrate_' + integrator)(*args, **kwargs)
    else:
        # Integrator object exposing adaptive/predefined entry points
        kwargs['with_jacobian'] = getattr(integrator, 'with_jacobian', None)
        nfo = self._integrate(integrator.integrate_adaptive,
                              integrator.integrate_predefined,
                              *args, **kwargs)
    if twodim:
        _xout = [d['internal_xout'] for d in nfo]
        _yout = [d['internal_yout'] for d in nfo]
        _params = [d['internal_params'] for d in nfo]
        res = [Result(*(self.post_process(_xout[i], _yout[i], _params[i]) +
                        (nfo[i], self)))
               for i in range(len(nfo))]
    else:
        _xout = nfo[0]['internal_xout']
        _yout = nfo[0]['internal_yout']
        # Keep a copy of the internal arrays for later inspection
        self._internal = _xout.copy(), _yout.copy(), _p.copy()
        nfo = nfo[0]
        res = Result(*(self.post_process(_xout, _yout, _p) + (nfo, self)))
    return res
Integrate the system of ordinary differential equations .
698
9
17,948
def plot_phase_plane(self, indices=None, **kwargs):
    """Plots a phase portrait from the last integration.

    Delegates to the module-level plot_phase_plane helper through
    self._plot.
    """
    return self._plot(plot_phase_plane, indices=indices, **kwargs)
Plots a phase portrait from last integration .
42
9
17,949
def user_can_edit_news(user):
    """Check if the user may edit any of the registered NewsItem types."""
    newsitem_models = [model.get_newsitem_model()
                       for model in NEWSINDEX_MODEL_CLASSES]
    if user.is_active and user.is_superuser:
        # Admins can edit news iff any news types exist at all
        return bool(newsitem_models)
    return any(
        user.has_perm(perm)
        for NewsItem in newsitem_models
        for perm in format_perms(NewsItem, ['add', 'change', 'delete']))
Check if the user has permission to edit any of the registered NewsItem types .
123
16
17,950
def user_can_edit_newsitem(user, NewsItem):
    """Check if the user may add, change or delete the given NewsItem type."""
    perms = format_perms(NewsItem, ['add', 'change', 'delete'])
    return any(user.has_perm(perm) for perm in perms)
Check if the user has permission to edit a particular NewsItem type .
56
14
17,951
def get_date_or_404(year, month, day):
    """Build a datetime.date from the given inputs, raising Http404 on error.

    Raises
    ------
    Http404
        If the components do not form a valid calendar date.
    """
    try:
        parts = [int(part) for part in (year, month, day)]
        return datetime.date(*parts)
    except ValueError:
        raise Http404
Try to make a date from the given inputs raising Http404 on error
47
15
17,952
def respond(self, request, view, newsitems, extra_context=None):
    """A helper that takes some news items and returns an HttpResponse.

    Builds the base context, merges in pagination data and any extra
    context, and renders with the template chosen for this view.
    """
    # BUG FIX (idiom): the default was a shared mutable dict ({}); use
    # None as the sentinel instead.
    context = self.get_context(request, view=view)
    context.update(self.paginate_newsitems(request, newsitems))
    context.update(extra_context or {})
    template = self.get_template(request, view=view)
    return TemplateResponse(request, template, context)
A helper that takes some news items and returns an HttpResponse
84
13
17,953
def get_newsitem_model(model_string):
    """Get the NewsItem model from a model string.

    Raises ValueError if the model string is invalid or references a
    model that is not a NewsItem.
    """
    try:
        NewsItem = apps.get_model(model_string)
        assert issubclass(NewsItem, AbstractNewsItem)
    except (ValueError, LookupError, AssertionError):
        # BUG FIX: the message had no '{}' placeholder, so .format()
        # silently dropped the offending model string from the error.
        raise ValueError(
            'Invalid news item model string: {}'.format(model_string))
    return NewsItem
Get the NewsItem model from a model string . Raises ValueError if the model string is invalid or references a model that is not a NewsItem .
77
31
17,954
def from_quad_tree(cls, quad_tree):
    """Creates a tile from a Microsoft QuadTree key.

    Each QuadTree digit encodes one zoom level: bit 0 is the x-bit and
    bit 1 is the y-bit of that level.
    """
    assert bool(re.match('^[0-3]*$', quad_tree)), \
        'QuadTree value can only consists of the digits 0, 1, 2 and 3.'
    zoom = len(str(quad_tree))
    offset = int(math.pow(2, zoom)) - 1
    # Split every digit into its (y, x) bit pair, then fold each bit
    # column into an integer coordinate
    bit_pairs = (reversed(divmod(digit, 2))
                 for digit in (int(c) for c in str(quad_tree)))
    google_x, google_y = [
        reduce(lambda result, bit: (result << 1) | bit, bits, 0)
        for bits in zip(*bit_pairs)]
    # TMS counts y from the bottom, Google from the top
    return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom)
Creates a tile from a Microsoft QuadTree
180
9
17,955
def from_google(cls, google_x, google_y, zoom):
    """Creates a tile from Google-format X, Y and zoom."""
    max_tile = (2 ** zoom) - 1
    assert 0 <= google_x <= max_tile, \
        'Google X needs to be a value between 0 and (2^zoom) -1.'
    assert 0 <= google_y <= max_tile, \
        'Google Y needs to be a value between 0 and (2^zoom) -1.'
    # TMS flips the y axis relative to Google
    return cls(tms_x=google_x, tms_y=max_tile - google_y, zoom=zoom)
Creates a tile from Google format X Y and zoom
126
11
17,956
def for_point(cls, point, zoom):
    """Creates the tile covering the given point at the given zoom."""
    latitude, longitude = point.latitude_longitude
    return cls.for_latitude_longitude(latitude=latitude,
                                      longitude=longitude, zoom=zoom)
Creates a tile for given point
50
7
17,957
def quad_tree ( self ) : value = '' tms_x , tms_y = self . tms tms_y = ( 2 ** self . zoom - 1 ) - tms_y for i in range ( self . zoom , 0 , - 1 ) : digit = 0 mask = 1 << ( i - 1 ) if ( tms_x & mask ) != 0 : digit += 1 if ( tms_y & mask ) != 0 : digit += 2 value += str ( digit ) return value
Gets the tile in the Microsoft QuadTree format converted from TMS
109
14
17,958
def google ( self ) : tms_x , tms_y = self . tms return tms_x , ( 2 ** self . zoom - 1 ) - tms_y
Gets the tile in the Google format converted from TMS
40
12
17,959
def bounds ( self ) : google_x , google_y = self . google pixel_x_west , pixel_y_north = google_x * TILE_SIZE , google_y * TILE_SIZE pixel_x_east , pixel_y_south = ( google_x + 1 ) * TILE_SIZE , ( google_y + 1 ) * TILE_SIZE point_min = Point . from_pixel ( pixel_x = pixel_x_west , pixel_y = pixel_y_south , zoom = self . zoom ) point_max = Point . from_pixel ( pixel_x = pixel_x_east , pixel_y = pixel_y_north , zoom = self . zoom ) return point_min , point_max
Gets the bounds of a tile represented as the most west and south point and the most east and north point
163
22
17,960
def read_ix ( ix , * * kwargs ) : if not isinstance ( ix , ixmp . TimeSeries ) : error = 'not recognized as valid ixmp class: {}' . format ( ix ) raise ValueError ( error ) df = ix . timeseries ( iamc = False , * * kwargs ) df [ 'model' ] = ix . model df [ 'scenario' ] = ix . scenario return df , 'year' , [ ]
Read timeseries data from an ixmp object
109
10
17,961
def requires_package ( pkg , msg , error_type = ImportError ) : def _requires_package ( func ) : def wrapper ( * args , * * kwargs ) : if pkg is None : raise error_type ( msg ) return func ( * args , * * kwargs ) return wrapper return _requires_package
Decorator when a function requires an optional dependency
72
10
17,962
def write_sheet ( writer , name , df , index = False ) : if index : df = df . reset_index ( ) df . to_excel ( writer , name , index = False ) worksheet = writer . sheets [ name ] for i , col in enumerate ( df . columns ) : if df . dtypes [ col ] . name . startswith ( ( 'float' , 'int' ) ) : width = len ( str ( col ) ) + 2 else : width = max ( [ df [ col ] . map ( lambda x : len ( str ( x or 'None' ) ) ) . max ( ) , len ( col ) ] ) + 2 xls_col = '{c}:{c}' . format ( c = NUMERIC_TO_STR [ i ] ) worksheet . set_column ( xls_col , width )
Write a pandas DataFrame to an ExcelWriter auto - formatting column width depending on maxwidth of data and colum header
187
25
17,963
def read_pandas ( fname , * args , * * kwargs ) : if not os . path . exists ( fname ) : raise ValueError ( 'no data file `{}` found!' . format ( fname ) ) if fname . endswith ( 'csv' ) : df = pd . read_csv ( fname , * args , * * kwargs ) else : xl = pd . ExcelFile ( fname ) if len ( xl . sheet_names ) > 1 and 'sheet_name' not in kwargs : kwargs [ 'sheet_name' ] = 'data' df = pd . read_excel ( fname , * args , * * kwargs ) return df
Read a file and return a pd . DataFrame
163
11
17,964
def sort_data ( data , cols ) : return data . sort_values ( cols ) [ cols + [ 'value' ] ] . reset_index ( drop = True )
Sort data rows and order columns
40
6
17,965
def _escape_regexp ( s ) : return ( str ( s ) . replace ( '|' , '\\|' ) . replace ( '.' , '\.' ) # `.` has to be replaced before `*` . replace ( '*' , '.*' ) . replace ( '+' , '\+' ) . replace ( '(' , '\(' ) . replace ( ')' , '\)' ) . replace ( '$' , '\\$' ) )
escape characters with specific regexp use
107
7
17,966
def years_match ( data , years ) : years = [ years ] if isinstance ( years , int ) else years dt = datetime . datetime if isinstance ( years , dt ) or isinstance ( years [ 0 ] , dt ) : error_msg = "`year` can only be filtered with ints or lists of ints" raise TypeError ( error_msg ) return data . isin ( years )
matching of year columns for data filtering
92
8
17,967
def hour_match ( data , hours ) : hours = [ hours ] if isinstance ( hours , int ) else hours return data . isin ( hours )
matching of days in time columns for data filtering
33
10
17,968
def datetime_match ( data , dts ) : dts = dts if islistable ( dts ) else [ dts ] if any ( [ not isinstance ( i , datetime . datetime ) for i in dts ] ) : error_msg = ( "`time` can only be filtered by datetimes" ) raise TypeError ( error_msg ) return data . isin ( dts )
matching of datetimes in time columns for data filtering
89
11
17,969
def to_int ( x , index = False ) : _x = x . index if index else x cols = list ( map ( int , _x ) ) error = _x [ cols != _x ] if not error . empty : raise ValueError ( 'invalid values `{}`' . format ( list ( error ) ) ) if index : x . index = cols return x else : return _x
Formatting series or timeseries columns to int and checking validity . If index = False the function works on the pd . Series x ; else the function casts the index of x to int and returns x with a new index .
90
46
17,970
def concat_with_pipe ( x , cols = None ) : cols = cols or x . index return '|' . join ( [ x [ i ] for i in cols if x [ i ] not in [ None , np . nan ] ] )
Concatenate a pd . Series separated by | drop None or np . nan
58
18
17,971
def _make_index ( df , cols = META_IDX ) : return pd . MultiIndex . from_tuples ( pd . unique ( list ( zip ( * [ df [ col ] for col in cols ] ) ) ) , names = tuple ( cols ) )
Create an index from the columns of a dataframe
63
10
17,972
def check_aggregate ( df , variable , components = None , exclude_on_fail = False , multiplier = 1 , * * kwargs ) : fdf = df . filter ( * * kwargs ) if len ( fdf . data ) > 0 : vdf = fdf . check_aggregate ( variable = variable , components = components , exclude_on_fail = exclude_on_fail , multiplier = multiplier ) df . meta [ 'exclude' ] |= fdf . meta [ 'exclude' ] # update if any excluded return vdf
Check whether the timeseries values match the aggregation of sub - categories
121
13
17,973
def filter_by_meta ( data , df , join_meta = False , * * kwargs ) : if not set ( META_IDX ) . issubset ( data . index . names + list ( data . columns ) ) : raise ValueError ( 'missing required index dimensions or columns!' ) meta = pd . DataFrame ( df . meta [ list ( set ( kwargs ) - set ( META_IDX ) ) ] . copy ( ) ) # filter meta by columns keep = np . array ( [ True ] * len ( meta ) ) apply_filter = False for col , values in kwargs . items ( ) : if col in META_IDX and values is not None : _col = meta . index . get_level_values ( 0 if col is 'model' else 1 ) keep &= pattern_match ( _col , values , has_nan = False ) apply_filter = True elif values is not None : keep &= pattern_match ( meta [ col ] , values ) apply_filter |= values is not None meta = meta [ keep ] # set the data index to META_IDX and apply filtered meta index data = data . copy ( ) idx = list ( data . index . names ) if not data . index . names == [ None ] else None data = data . reset_index ( ) . set_index ( META_IDX ) meta = meta . loc [ meta . index . intersection ( data . index ) ] meta . index . names = META_IDX if apply_filter : data = data . loc [ meta . index ] data . index . names = META_IDX # join meta (optional), reset index to format as input arg data = data . join ( meta ) if join_meta else data data = data . reset_index ( ) . set_index ( idx or 'index' ) if idx is None : data . index . name = None return data
Filter by and join meta columns from an IamDataFrame to a pd . DataFrame
420
19
17,974
def compare ( left , right , left_label = 'left' , right_label = 'right' , drop_close = True , * * kwargs ) : ret = pd . concat ( { right_label : right . data . set_index ( right . _LONG_IDX ) , left_label : left . data . set_index ( left . _LONG_IDX ) } , axis = 1 ) ret . columns = ret . columns . droplevel ( 1 ) if drop_close : ret = ret [ ~ np . isclose ( ret [ left_label ] , ret [ right_label ] , * * kwargs ) ] return ret [ [ right_label , left_label ] ]
Compare the data in two IamDataFrames and return a pd . DataFrame
157
17
17,975
def concat ( dfs ) : if isstr ( dfs ) or not hasattr ( dfs , '__iter__' ) : msg = 'Argument must be a non-string iterable (e.g., list or tuple)' raise TypeError ( msg ) _df = None for df in dfs : df = df if isinstance ( df , IamDataFrame ) else IamDataFrame ( df ) if _df is None : _df = copy . deepcopy ( df ) else : _df . append ( df , inplace = True ) return _df
Concatenate a series of pyam . IamDataFrame - like objects together
123
18
17,976
def variables ( self , include_units = False ) : if include_units : return self . data [ [ 'variable' , 'unit' ] ] . drop_duplicates ( ) . reset_index ( drop = True ) . sort_values ( 'variable' ) else : return pd . Series ( self . data . variable . unique ( ) , name = 'variable' )
Get a list of variables
82
5
17,977
def append ( self , other , ignore_meta_conflict = False , inplace = False , * * kwargs ) : if not isinstance ( other , IamDataFrame ) : other = IamDataFrame ( other , * * kwargs ) ignore_meta_conflict = True if self . time_col is not other . time_col : raise ValueError ( 'incompatible time format (years vs. datetime)!' ) ret = copy . deepcopy ( self ) if not inplace else self diff = other . meta . index . difference ( ret . meta . index ) intersect = other . meta . index . intersection ( ret . meta . index ) # merge other.meta columns not in self.meta for existing scenarios if not intersect . empty : # if not ignored, check that overlapping meta dataframes are equal if not ignore_meta_conflict : cols = [ i for i in other . meta . columns if i in ret . meta . columns ] if not ret . meta . loc [ intersect , cols ] . equals ( other . meta . loc [ intersect , cols ] ) : conflict_idx = ( pd . concat ( [ ret . meta . loc [ intersect , cols ] , other . meta . loc [ intersect , cols ] ] ) . drop_duplicates ( ) . index . drop_duplicates ( ) ) msg = 'conflict in `meta` for scenarios {}' . format ( [ i for i in pd . DataFrame ( index = conflict_idx ) . index ] ) raise ValueError ( msg ) cols = [ i for i in other . meta . columns if i not in ret . meta . columns ] _meta = other . meta . loc [ intersect , cols ] ret . meta = ret . meta . merge ( _meta , how = 'outer' , left_index = True , right_index = True ) # join other.meta for new scenarios if not diff . empty : # sorting not supported by ` pd.append()` prior to version 23 sort_kwarg = { } if int ( pd . __version__ . split ( '.' ) [ 1 ] ) < 23 else dict ( sort = False ) ret . meta = ret . meta . append ( other . meta . loc [ diff , : ] , * * sort_kwarg ) # append other.data (verify integrity for no duplicates) _data = ret . data . set_index ( ret . _LONG_IDX ) . append ( other . data . set_index ( other . _LONG_IDX ) , verify_integrity = True ) # merge extra columns in `data` and set `LONG_IDX` ret . 
extra_cols += [ i for i in other . extra_cols if i not in ret . extra_cols ] ret . _LONG_IDX = IAMC_IDX + [ ret . time_col ] + ret . extra_cols ret . data = sort_data ( _data . reset_index ( ) , ret . _LONG_IDX ) if not inplace : return ret
Append any castable object to this IamDataFrame . Columns in other . meta that are not in self . meta are always merged duplicate region - variable - unit - year rows raise a ValueError .
669
43
17,978
def pivot_table ( self , index , columns , values = 'value' , aggfunc = 'count' , fill_value = None , style = None ) : index = [ index ] if isstr ( index ) else index columns = [ columns ] if isstr ( columns ) else columns df = self . data # allow 'aggfunc' to be passed as string for easier user interface if isstr ( aggfunc ) : if aggfunc == 'count' : df = self . data . groupby ( index + columns , as_index = False ) . count ( ) fill_value = 0 elif aggfunc == 'mean' : df = self . data . groupby ( index + columns , as_index = False ) . mean ( ) . round ( 2 ) aggfunc = np . sum fill_value = 0 if style == 'heatmap' else "" elif aggfunc == 'sum' : aggfunc = np . sum fill_value = 0 if style == 'heatmap' else "" df = df . pivot_table ( values = values , index = index , columns = columns , aggfunc = aggfunc , fill_value = fill_value ) return df
Returns a pivot table
247
4
17,979
def as_pandas ( self , with_metadata = False ) : if with_metadata : cols = self . _discover_meta_cols ( * * with_metadata ) if isinstance ( with_metadata , dict ) else self . meta . columns return ( self . data . set_index ( META_IDX ) . join ( self . meta [ cols ] ) . reset_index ( ) ) else : return self . data . copy ( )
Return this as a pd . DataFrame
101
9
17,980
def _new_meta_column ( self , name ) : if name is None : raise ValueError ( 'cannot add a meta column `{}`' . format ( name ) ) if name not in self . meta : self . meta [ name ] = np . nan
Add a column to meta if it doesn t exist set to value np . nan
58
16
17,981
def convert_unit ( self , conversion_mapping , inplace = False ) : ret = copy . deepcopy ( self ) if not inplace else self for current_unit , ( new_unit , factor ) in conversion_mapping . items ( ) : factor = pd . to_numeric ( factor ) where = ret . data [ 'unit' ] == current_unit ret . data . loc [ where , 'value' ] *= factor ret . data . loc [ where , 'unit' ] = new_unit if not inplace : return ret
Converts units based on provided unit conversion factors
119
9
17,982
def normalize ( self , inplace = False , * * kwargs ) : if len ( kwargs ) > 1 or self . time_col not in kwargs : raise ValueError ( 'Only time(year)-based normalization supported' ) ret = copy . deepcopy ( self ) if not inplace else self df = ret . data # change all below if supporting more in the future cols = self . time_col value = kwargs [ self . time_col ] x = df . set_index ( IAMC_IDX ) x [ 'value' ] /= x [ x [ cols ] == value ] [ 'value' ] ret . data = x . reset_index ( ) if not inplace : return ret
Normalize data to a given value . Currently only supports normalizing to a specific time .
161
18
17,983
def aggregate ( self , variable , components = None , append = False ) : # default components to all variables one level below `variable` components = components or self . _variable_components ( variable ) if not len ( components ) : msg = 'cannot aggregate variable `{}` because it has no components' logger ( ) . info ( msg . format ( variable ) ) return rows = self . _apply_filters ( variable = components ) _data = _aggregate ( self . data [ rows ] , 'variable' ) if append is True : self . append ( _data , variable = variable , inplace = True ) else : return _data
Compute the aggregate of timeseries components or sub - categories
138
12
17,984
def check_aggregate ( self , variable , components = None , exclude_on_fail = False , multiplier = 1 , * * kwargs ) : # compute aggregate from components, return None if no components df_components = self . aggregate ( variable , components ) if df_components is None : return # filter and groupby data, use `pd.Series.align` for matching index rows = self . _apply_filters ( variable = variable ) df_variable , df_components = ( _aggregate ( self . data [ rows ] , 'variable' ) . align ( df_components ) ) # use `np.isclose` for checking match diff = df_variable [ ~ np . isclose ( df_variable , multiplier * df_components , * * kwargs ) ] if len ( diff ) : msg = '`{}` - {} of {} rows are not aggregates of components' logger ( ) . info ( msg . format ( variable , len ( diff ) , len ( df_variable ) ) ) if exclude_on_fail : self . _exclude_on_fail ( diff . index . droplevel ( [ 2 , 3 , 4 ] ) ) return IamDataFrame ( diff , variable = variable ) . timeseries ( )
Check whether a timeseries matches the aggregation of its components
274
11
17,985
def aggregate_region ( self , variable , region = 'World' , subregions = None , components = None , append = False ) : # default subregions to all regions other than `region` if subregions is None : rows = self . _apply_filters ( variable = variable ) subregions = set ( self . data [ rows ] . region ) - set ( [ region ] ) if not len ( subregions ) : msg = 'cannot aggregate variable `{}` to `{}` because it does not' ' exist in any subregion' logger ( ) . info ( msg . format ( variable , region ) ) return # compute aggregate over all subregions subregion_df = self . filter ( region = subregions ) cols = [ 'region' , 'variable' ] _data = _aggregate ( subregion_df . filter ( variable = variable ) . data , cols ) # add components at the `region` level, defaults to all variables one # level below `variable` that are only present in `region` region_df = self . filter ( region = region ) components = components or ( set ( region_df . _variable_components ( variable ) ) . difference ( subregion_df . _variable_components ( variable ) ) ) if len ( components ) : rows = region_df . _apply_filters ( variable = components ) _data = _data . add ( _aggregate ( region_df . data [ rows ] , cols ) , fill_value = 0 ) if append is True : self . append ( _data , region = region , variable = variable , inplace = True ) else : return _data
Compute the aggregate of timeseries over a number of regions including variable components only defined at the region level
360
21
17,986
def check_aggregate_region ( self , variable , region = 'World' , subregions = None , components = None , exclude_on_fail = False , * * kwargs ) : # compute aggregate from subregions, return None if no subregions df_subregions = self . aggregate_region ( variable , region , subregions , components ) if df_subregions is None : return # filter and groupby data, use `pd.Series.align` for matching index rows = self . _apply_filters ( region = region , variable = variable ) df_region , df_subregions = ( _aggregate ( self . data [ rows ] , [ 'region' , 'variable' ] ) . align ( df_subregions ) ) # use `np.isclose` for checking match diff = df_region [ ~ np . isclose ( df_region , df_subregions , * * kwargs ) ] if len ( diff ) : msg = ( '`{}` - {} of {} rows are not aggregates of subregions' ) logger ( ) . info ( msg . format ( variable , len ( diff ) , len ( df_region ) ) ) if exclude_on_fail : self . _exclude_on_fail ( diff . index . droplevel ( [ 2 , 3 ] ) ) col_args = dict ( region = region , variable = variable ) return IamDataFrame ( diff , * * col_args ) . timeseries ( )
Check whether the region timeseries data match the aggregation of components
327
12
17,987
def check_internal_consistency ( self , * * kwargs ) : inconsistent_vars = { } for variable in self . variables ( ) : diff_agg = self . check_aggregate ( variable , * * kwargs ) if diff_agg is not None : inconsistent_vars [ variable + "-aggregate" ] = diff_agg diff_regional = self . check_aggregate_region ( variable , * * kwargs ) if diff_regional is not None : inconsistent_vars [ variable + "-regional" ] = diff_regional return inconsistent_vars if inconsistent_vars else None
Check whether the database is internally consistent
137
7
17,988
def _apply_filters ( self , * * filters ) : regexp = filters . pop ( 'regexp' , False ) keep = np . array ( [ True ] * len ( self . data ) ) # filter by columns and list of values for col , values in filters . items ( ) : # treat `_apply_filters(col=None)` as no filter applied if values is None : continue if col in self . meta . columns : matches = pattern_match ( self . meta [ col ] , values , regexp = regexp ) cat_idx = self . meta [ matches ] . index keep_col = ( self . data [ META_IDX ] . set_index ( META_IDX ) . index . isin ( cat_idx ) ) elif col == 'variable' : level = filters [ 'level' ] if 'level' in filters else None keep_col = pattern_match ( self . data [ col ] , values , level , regexp ) elif col == 'year' : _data = self . data [ col ] if self . time_col is not 'time' else self . data [ 'time' ] . apply ( lambda x : x . year ) keep_col = years_match ( _data , values ) elif col == 'month' and self . time_col is 'time' : keep_col = month_match ( self . data [ 'time' ] . apply ( lambda x : x . month ) , values ) elif col == 'day' and self . time_col is 'time' : if isinstance ( values , str ) : wday = True elif isinstance ( values , list ) and isinstance ( values [ 0 ] , str ) : wday = True else : wday = False if wday : days = self . data [ 'time' ] . apply ( lambda x : x . weekday ( ) ) else : # ints or list of ints days = self . data [ 'time' ] . apply ( lambda x : x . day ) keep_col = day_match ( days , values ) elif col == 'hour' and self . time_col is 'time' : keep_col = hour_match ( self . data [ 'time' ] . apply ( lambda x : x . hour ) , values ) elif col == 'time' and self . time_col is 'time' : keep_col = datetime_match ( self . data [ col ] , values ) elif col == 'level' : if 'variable' not in filters . keys ( ) : keep_col = find_depth ( self . data [ 'variable' ] , level = values ) else : continue elif col in self . data . columns : keep_col = pattern_match ( self . 
data [ col ] , values , regexp = regexp ) else : _raise_filter_error ( col ) keep &= keep_col return keep
Determine rows to keep in data for given set of filters
632
13
17,989
def col_apply ( self , col , func , * args , * * kwargs ) : if col in self . data : self . data [ col ] = self . data [ col ] . apply ( func , * args , * * kwargs ) else : self . meta [ col ] = self . meta [ col ] . apply ( func , * args , * * kwargs )
Apply a function to a column
84
6
17,990
def _to_file_format ( self , iamc_index ) : df = self . timeseries ( iamc_index = iamc_index ) . reset_index ( ) df = df . rename ( columns = { c : str ( c ) . title ( ) for c in df . columns } ) return df
Return a dataframe suitable for writing to a file
71
10
17,991
def to_csv ( self , path , iamc_index = False , * * kwargs ) : self . _to_file_format ( iamc_index ) . to_csv ( path , index = False , * * kwargs )
Write timeseries data to a csv file
56
9
17,992
def to_excel ( self , excel_writer , sheet_name = 'data' , iamc_index = False , * * kwargs ) : if not isinstance ( excel_writer , pd . ExcelWriter ) : close = True excel_writer = pd . ExcelWriter ( excel_writer ) self . _to_file_format ( iamc_index ) . to_excel ( excel_writer , sheet_name = sheet_name , index = False , * * kwargs ) if close : excel_writer . close ( )
Write timeseries data to Excel format
121
7
17,993
def export_metadata ( self , path ) : writer = pd . ExcelWriter ( path ) write_sheet ( writer , 'meta' , self . meta , index = True ) writer . save ( )
Export metadata to Excel
43
4
17,994
def load_metadata ( self , path , * args , * * kwargs ) : if not os . path . exists ( path ) : raise ValueError ( "no metadata file '" + path + "' found!" ) if path . endswith ( 'csv' ) : df = pd . read_csv ( path , * args , * * kwargs ) else : xl = pd . ExcelFile ( path ) if len ( xl . sheet_names ) > 1 and 'sheet_name' not in kwargs : kwargs [ 'sheet_name' ] = 'meta' df = pd . read_excel ( path , * args , * * kwargs ) req_cols = [ 'model' , 'scenario' , 'exclude' ] if not set ( req_cols ) . issubset ( set ( df . columns ) ) : e = 'File `{}` does not have required columns ({})!' raise ValueError ( e . format ( path , req_cols ) ) # set index, filter to relevant scenarios from imported metadata file df . set_index ( META_IDX , inplace = True ) idx = self . meta . index . intersection ( df . index ) n_invalid = len ( df ) - len ( idx ) if n_invalid > 0 : msg = 'Ignoring {} scenario{} from imported metadata' logger ( ) . info ( msg . format ( n_invalid , 's' if n_invalid > 1 else '' ) ) if idx . empty : raise ValueError ( 'No valid scenarios in imported metadata file!' ) df = df . loc [ idx ] # Merge in imported metadata msg = 'Importing metadata for {} scenario{} (for total of {})' logger ( ) . info ( msg . format ( len ( df ) , 's' if len ( df ) > 1 else '' , len ( self . meta ) ) ) for col in df . columns : self . _new_meta_column ( col ) self . meta [ col ] = df [ col ] . combine_first ( self . meta [ col ] ) # set column `exclude` to bool self . meta . exclude = self . meta . exclude . astype ( 'bool' )
Load metadata exported from pyam . IamDataFrame instance
490
12
17,995
def line_plot ( self , x = 'year' , y = 'value' , * * kwargs ) : df = self . as_pandas ( with_metadata = kwargs ) # pivot data if asked for explicit variable name variables = df [ 'variable' ] . unique ( ) if x in variables or y in variables : keep_vars = set ( [ x , y ] ) & set ( variables ) df = df [ df [ 'variable' ] . isin ( keep_vars ) ] idx = list ( set ( df . columns ) - set ( [ 'value' ] ) ) df = ( df . reset_index ( ) . set_index ( idx ) . value # df -> series . unstack ( level = 'variable' ) # keep_vars are columns . rename_axis ( None , axis = 1 ) # rm column index name . reset_index ( ) . set_index ( META_IDX ) ) if x != 'year' and y != 'year' : df = df . drop ( 'year' , axis = 1 ) # years causes NaNs ax , handles , labels = plotting . line_plot ( df . dropna ( ) , x = x , y = y , * * kwargs ) return ax
Plot timeseries lines of existing data
275
7
17,996
def stack_plot ( self , * args , * * kwargs ) : df = self . as_pandas ( with_metadata = True ) ax = plotting . stack_plot ( df , * args , * * kwargs ) return ax
Plot timeseries stacks of existing data
54
7
17,997
def scatter ( self , x , y , * * kwargs ) : variables = self . data [ 'variable' ] . unique ( ) xisvar = x in variables yisvar = y in variables if not xisvar and not yisvar : cols = [ x , y ] + self . _discover_meta_cols ( * * kwargs ) df = self . meta [ cols ] . reset_index ( ) elif xisvar and yisvar : # filter pivot both and rename dfx = ( self . filter ( variable = x ) . as_pandas ( with_metadata = kwargs ) . rename ( columns = { 'value' : x , 'unit' : 'xunit' } ) . set_index ( YEAR_IDX ) . drop ( 'variable' , axis = 1 ) ) dfy = ( self . filter ( variable = y ) . as_pandas ( with_metadata = kwargs ) . rename ( columns = { 'value' : y , 'unit' : 'yunit' } ) . set_index ( YEAR_IDX ) . drop ( 'variable' , axis = 1 ) ) df = dfx . join ( dfy , lsuffix = '_left' , rsuffix = '' ) . reset_index ( ) else : # filter, merge with meta, and rename value column to match var var = x if xisvar else y df = ( self . filter ( variable = var ) . as_pandas ( with_metadata = kwargs ) . rename ( columns = { 'value' : var } ) ) ax = plotting . scatter ( df . dropna ( ) , x , y , * * kwargs ) return ax
Plot a scatter chart using metadata columns
375
7
17,998
def update ( self , rc ) : rc = self . _load_yaml ( rc ) self . store = _recursive_update ( self . store , rc )
Add additional run control parameters
36
5
17,999
def recursive_update ( self , k , d ) : u = self . __getitem__ ( k ) self . store [ k ] = _recursive_update ( u , d )
Recursively update a top - level option in the run control
40
13