idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
6,800
def make_config_data(*, guided):
    """Build the data necessary to construct a functional config file."""
    return {
        INCLUDE_DIRS_KEY: _make_include_dirs(guided=guided),
        RUNTIME_DIRS_KEY: _make_runtime_dirs(guided=guided),
        RUNTIME_KEY: _make_runtime(),
    }
Makes the data necessary to construct a functional config file
90
11
6,801
def generate_configurations(*, guided=False, fresh_start=False, save=False):
    """Return config data, loading a valid config file or generating anew.

    With ``fresh_start`` existing configs are purged first.  When no valid
    config is found, the data is generated (and also written to disk when
    ``save`` is true).
    """
    if fresh_start:
        purge_configs()
    loaded_status, loaded_data = get_config()
    if loaded_status == CONFIG_VALID:
        return loaded_data
    if save:
        make_config_file(guided=guided)
        _, config_data = get_config()
    else:
        config_data = make_config_data(guided=guided)
    return config_data
If a config file is found in the standard locations it will be loaded and the config data will be returned . If not found then generate the data on the fly and return it
110
36
6,802
def info(self):
    """Execute an HTTP GET for this queue's details and return them."""
    response = self.client.get("queues/%s" % (self.name,))
    return response['body']['queue']
Execute an HTTP request to get details on a queue and return it .
44
15
6,803
def clear(self):
    """Execute an HTTP DELETE removing every message from this queue."""
    url = "queues/%s/messages" % self.name
    headers = {'Content-Type': 'application/json'}
    result = self.client.delete(url=url, body=json.dumps({}), headers=headers)
    return result['body']
Executes an HTTP request to clear all contents of a queue .
67
13
6,804
def delete(self, message_id, reservation_id=None, subscriber_name=None):
    """Execute an HTTP DELETE for a single message on this queue.

    Optional reservation_id / subscriber_name are included in the request
    body when given.
    """
    url = "queues/%s/messages/%s" % (self.name, message_id)
    payload = {}
    if reservation_id is not None:
        payload['reservation_id'] = reservation_id
    if subscriber_name is not None:
        payload['subscriber_name'] = subscriber_name
    result = self.client.delete(url=url, body=json.dumps(payload),
                                headers={'Content-Type': 'application/json'})
    return result['body']
Execute an HTTP request to delete a message from queue .
145
12
6,805
def delete_multiple(self, ids=None, messages=None):
    """Execute an HTTP DELETE for several messages on this queue.

    Either ``ids`` (a list of message ids) or ``messages`` (a reservation
    response dict) must be supplied; ``messages`` wins when both are given.
    """
    if ids is None and messages is None:
        raise Exception('Please, specify at least one parameter.')
    url = "queues/%s/messages" % self.name
    items = None
    if ids is not None:
        items = [{'id': item} for item in ids]
    if messages is not None:
        items = [{'id': msg['id'], 'reservation_id': msg['reservation_id']}
                 for msg in messages['messages']]
    result = self.client.delete(url=url, body=json.dumps({'ids': items}),
                                headers={'Content-Type': 'application/json'})
    return result['body']
Execute an HTTP request to delete messages from queue .
184
11
6,806
def post(self, *messages):
    """Execute an HTTP POST creating messages on the queue.

    Creates the queue if it does not exist yet.  Each message may be a
    plain string (wrapped as ``{'body': msg}``) or an already-built dict.
    """
    url = "queues/%s/messages" % self.name
    # NOTE(review): ``basestring`` is Python 2 only; under Python 3 this
    # line raises NameError -- replace with ``str`` when porting.
    msgs = [{'body': msg} if isinstance(msg, basestring) else msg
            for msg in messages]
    data = json.dumps({'messages': msgs})
    result = self.client.post(url=url, body=data,
                              headers={'Content-Type': 'application/json'})
    return result['body']
Executes an HTTP request to create message on the queue . Creates queue if not existed .
108
19
6,807
def reserve(self, max=None, timeout=None, wait=None, delete=None):
    """Execute an HTTP POST reserving messages from this queue."""
    url = "queues/%s/reservations" % self.name
    payload = {}
    if max is not None:
        payload['n'] = max
    if timeout is not None:
        payload['timeout'] = timeout
    if wait is not None:
        payload['wait'] = wait
    if delete is not None:
        payload['delete'] = delete
    response = self.client.post(url, body=json.dumps(payload),
                                headers={'Content-Type': 'application/json'})
    return response['body']
Retrieves Messages from the queue and reserves it .
150
11
6,808
def touch(self, message_id, reservation_id, timeout=None):
    """Execute an HTTP POST extending a reserved message's timeout."""
    url = "queues/%s/messages/%s/touch" % (self.name, message_id)
    payload = {'reservation_id': reservation_id}
    if timeout is not None:
        payload['timeout'] = timeout
    response = self.client.post(url, body=json.dumps(payload),
                                headers={'Content-Type': 'application/json'})
    return response['body']
Touching a reserved message extends its timeout to the duration specified when the message was created .
121
18
6,809
def release(self, message_id, reservation_id, delay=0):
    """Execute an HTTP POST releasing a reserved message back to the queue."""
    url = "queues/%s/messages/%s/release" % (self.name, message_id)
    payload = {'reservation_id': reservation_id}
    if delay > 0:
        payload['delay'] = delay
    response = self.client.post(url, body=json.dumps(payload),
                                headers={'Content-Type': 'application/json'})
    return response['body']
Release a locked message after the specified delay . If there is no message with such id on the queue , nothing is released .
117
19
6,810
def queues(self, page=None, per_page=None, previous=None, prefix=None):
    """Execute an HTTP GET listing queue names.

    Raises when the deprecated ``page`` parameter is supplied.
    """
    if page is not None:
        raise Exception('page param is deprecated!')
    options = {}
    if per_page is not None:
        options['per_page'] = per_page
    if previous is not None:
        options['previous'] = previous
    if prefix is not None:
        options['prefix'] = prefix
    url = 'queues'
    query = urlencode(options)
    if query:
        url = "%s?%s" % (url, query)
    result = self.client.get(url)
    return [queue['name'] for queue in result['body']['queues']]
Execute an HTTP request to get a list of queues and return it .
160
15
6,811
def get_timex(self, timex_id):
    """Return the timex object for the identifier, or None when absent."""
    if timex_id not in self.idx:
        return None
    return Ctime(self.idx[timex_id])
Returns the timex object for the supplied identifier
44
9
6,812
def add_timex(self, timex_obj):
    """Add a timex object to the layer, refusing duplicate identifiers."""
    timex_id = timex_obj.get_id()
    if timex_id in self.idx:
        # FIXME: what we want is that the element receives a new identifier
        # that is not present in the current element yet
        print('Error: trying to add new element with existing identifier')
        return
    timex_node = timex_obj.get_node()
    self.node.append(timex_node)
    self.idx[timex_id] = timex_node
Adds a timex object to the layer .
131
9
6,813
def ensure_storage_format(root_dir):
    """Exit with an error unless root_dir looks like a filetracker storage.

    A storage root must contain ``blobs/``, ``links/`` and ``db/``.
    """
    for subdir in ('blobs', 'links', 'db'):
        if not os.path.isdir(os.path.join(root_dir, subdir)):
            print('"%s/" directory not found' % subdir)
            sys.exit(1)
Checks if the directory looks like a filetracker storage . Exits with error if it doesn t .
140
22
6,814
def get_deepest_phrase_for_termid(self, termid):
    """Return (label, sorted subsumed term ids) for the deepest phrase.

    Falls back to (None, []) when the term has no terminal node.
    """
    label = None
    subsumed = []
    terminal_id = self.terminal_for_term.get(termid)
    if terminal_id is not None:
        deepest_phrase_id = self.paths_for_terminal[terminal_id][0][1]
        label = self.label_for_nonter.get(deepest_phrase_id)
        subsumed = self.terms_subsumed_by_nonter.get(deepest_phrase_id, [])
    return label, sorted(list(subsumed))
Returns the deepest phrase type for the term identifier and the list of subsumed by the same element
141
19
6,815
def get_least_common_subsumer(self, from_tid, to_tid):
    """Return the deepest node subsuming both terms, or None if disjoint."""
    path_from = self.paths_for_terminal[self.terminal_for_term.get(from_tid)][0]
    path_to = self.paths_for_terminal[self.terminal_for_term.get(to_tid)][0]
    shared = set(path_from) & set(path_to)
    if not shared:
        return None
    # Rank each shared node by the sum of its positions on the two paths;
    # the smallest sum is the deepest common node.
    ranked = [(node, path_from.index(node) + path_to.index(node))
              for node in shared]
    ranked.sort(key=lambda pair: pair[1])
    return ranked[0][0]
Returns the deepest common subsumer among two terms
224
9
6,816
def get_deepest_subsumer(self, list_terms):
    """Return (node_id, label) of the deepest node subsuming every term.

    A node qualifies only if it appears on the path of each term in
    ``list_terms``; among those, the one with the smallest summed depth
    wins.  Returns (None, None) when no node subsumes all terms.
    """
    # How many of the input terms each nonterminal appears with
    count_per_no_terminal = defaultdict(int)
    # Summed position of each nonterminal over all term paths
    # (smaller total = deeper, since paths go leaf -> root)
    total_deep_per_no_terminal = defaultdict(int)
    for term_id in list_terms:
        terminal_id = self.terminal_for_term.get(term_id)
        path = self.paths_for_terminal[terminal_id][0]
        # BUG FIX: removed leftover debug print(term_id, path)
        for depth, noter in enumerate(path):
            count_per_no_terminal[noter] += 1
            total_deep_per_no_terminal[noter] += depth
    deepest_and_common = None
    deepest = 10000
    for noterid, this_total in total_deep_per_no_terminal.items():
        # Only nonterminals that occur with every input term qualify
        if count_per_no_terminal.get(noterid, -1) == len(list_terms):
            if this_total < deepest:
                deepest = this_total
                deepest_and_common = noterid
    label = None
    if deepest_and_common is not None:
        label = self.label_for_nonter[deepest_and_common]
    return deepest_and_common, label
Returns the labels of the deepest node that subsumes all the terms in the list of terms id s provided
315
21
6,817
def get_chunks(self, chunk_type):
    """Yield the sorted term ids of every chunk with the given type."""
    for nonter, label in self.label_for_nonter.items():
        if label != chunk_type:
            continue
        subsumed = self.terms_subsumed_by_nonter.get(nonter)
        if subsumed is not None:
            yield sorted(list(subsumed))
Returns the chunks for a certain type
80
7
6,818
def get_all_chunks_for_term(self, termid):
    """Yield (chunk_type, sorted term ids) for every chunk holding termid."""
    terminal_id = self.terminal_for_term.get(termid)
    for path in self.paths_for_terminal[terminal_id]:
        for node in path:
            this_type = self.label_for_nonter[node]
            subsumed = self.terms_subsumed_by_nonter.get(node)
            if subsumed is not None:
                yield this_type, sorted(list(subsumed))
Returns all the chunks in which the term is contained
115
10
6,819
def _lookup_enum_in_ns ( namespace , value ) : for attribute in dir ( namespace ) : if getattr ( namespace , attribute ) == value : return attribute
Return the attribute of namespace corresponding to value .
37
9
6,820
def _is_word_type(token_type):
    """Return True if this is a word-type token."""
    word_types = (TokenType.Word,
                  TokenType.QuotedLiteral,
                  TokenType.UnquotedLiteral,
                  TokenType.Number,
                  TokenType.Deref)
    return token_type in word_types
Return true if this is a word - type token .
55
11
6,821
def _is_in_comment_type(token_type):
    """Return True if this kind of token can appear inside a comment."""
    comment_types = (TokenType.Comment,
                     TokenType.Newline,
                     TokenType.Whitespace,
                     TokenType.RST,
                     TokenType.BeginRSTComment,
                     TokenType.BeginInlineRST,
                     TokenType.EndInlineRST)
    return token_type in comment_types
Return true if this kind of token can be inside a comment .
70
13
6,822
def _get_string_type_from_token(token_type):
    """Return "Single" or "Double" depending on the quoted-literal token."""
    single_delimiters = (TokenType.BeginSingleQuotedLiteral,
                         TokenType.EndSingleQuotedLiteral)
    double_delimiters = (TokenType.BeginDoubleQuotedLiteral,
                         TokenType.EndDoubleQuotedLiteral)
    return_value = None
    if token_type in single_delimiters:
        return_value = "Single"
    elif token_type in double_delimiters:
        return_value = "Double"
    assert return_value is not None
    return return_value
Return Single or Double depending on what kind of string this is .
106
13
6,823
def _make_header_body_handler(end_body_regex, node_factory, has_footer=True):
    """Utility function to make a handler for a header-body node.

    The produced handler collects statements until a token matching
    end_body_regex (followed by a left paren) is seen, then builds a node
    via node_factory; when has_footer is true the terminating call (e.g.
    endforeach()) is also consumed and passed as the footer.
    """
    def handler(tokens, tokens_len, body_index, function_call):
        """Handler function."""
        def _end_header_body_definition(token_index, tokens):
            """Header body termination function."""
            if end_body_regex.match(tokens[token_index].content):
                try:
                    if tokens[token_index + 1].type == TokenType.LeftParen:
                        return True
                except IndexError:
                    # Terminator keyword at end of stream with no paren
                    raise RuntimeError("Syntax Error")
            return False

        token_index, body = _ast_worker(tokens, tokens_len, body_index,
                                        _end_header_body_definition)
        extra_kwargs = {}
        if has_footer:
            # Handle footer
            token_index, footer = _handle_function_call(tokens,
                                                        tokens_len,
                                                        token_index)
            extra_kwargs = {"footer": footer}
        return (token_index,
                node_factory(header=function_call,
                             body=body.statements,
                             line=tokens[body_index].line,
                             col=tokens[body_index].col,
                             index=body_index,
                             **extra_kwargs))
    return handler
Utility function to make a handler for header - body node .
282
13
6,824
def _handle_if_block(tokens, tokens_len, body_index, function_call):
    """Special handler for if-blocks.

    Collects the if statement plus any elseif/else sections up to the
    terminating endif(), and wraps everything in an IfBlock node.
    """
    # First handle the if statement and body
    next_index, if_statement = _IF_BLOCK_IF_HANDLER(tokens,
                                                    tokens_len,
                                                    body_index,
                                                    function_call)
    elseif_statements = []
    else_statement = None
    footer = None
    # Keep going until we hit endif
    while True:
        # Back up a bit until we found out what terminated the if statement
        # body
        assert _RE_END_IF_BODY.match(tokens[next_index].content)
        terminator = tokens[next_index].content.lower()
        if terminator == "endif":
            next_index, footer = _handle_function_call(tokens,
                                                       tokens_len,
                                                       next_index)
            break
        next_index, header = _handle_function_call(tokens,
                                                   tokens_len,
                                                   next_index)
        if terminator == "elseif":
            next_index, elseif_stmnt = _ELSEIF_BLOCK_HANDLER(tokens,
                                                             tokens_len,
                                                             next_index + 1,
                                                             header)
            elseif_statements.append(elseif_stmnt)
        elif terminator == "else":
            next_index, else_statement = _ELSE_BLOCK_HANDLER(tokens,
                                                             tokens_len,
                                                             next_index + 1,
                                                             header)
    # The loop can only exit via the endif branch, which sets the footer
    assert footer is not None
    return next_index, IfBlock(if_statement=if_statement,
                               elseif_statements=elseif_statements,
                               else_statement=else_statement,
                               footer=footer,
                               line=if_statement.line,
                               col=if_statement.col,
                               index=body_index)
Special handler for if - blocks .
374
7
6,825
def _handle_function_call(tokens, tokens_len, index):
    """Handle function calls, which could include a control statement.

    Parses the call's argument list and, when the called name is a control
    keyword, hands off to the matching block handler.
    """
    def _end_function_call(token_index, tokens):
        """Function call termination detector."""
        return tokens[token_index].type == TokenType.RightParen

    # First handle the "function call"
    next_index, call_body = _ast_worker(tokens, tokens_len,
                                        index + 2,
                                        _end_function_call)
    function_call = FunctionCall(name=tokens[index].content,
                                 arguments=call_body.arguments,
                                 line=tokens[index].line,
                                 col=tokens[index].col,
                                 index=index)
    # Next find a handler for the body and pass control to that
    try:
        handler = _FUNCTION_CALL_DISAMBIGUATE[tokens[index].content.lower()]
    except KeyError:
        handler = None
    if handler:
        return handler(tokens, tokens_len, next_index, function_call)
    else:
        return (next_index, function_call)
Handle function calls which could include a control statement .
220
10
6,826
def _ast_worker(tokens, tokens_len, index, term):
    """The main collector for all AST functions.

    Walks tokens from index until term(index, tokens) fires (or the tokens
    run out), gathering function-call statements and word arguments into a
    GenericBody.  Returns (index-after-region, body).
    """
    statements = []
    arguments = []
    while index < tokens_len:
        if term:
            if term(index, tokens):
                break
        # Function call
        if tokens[index].type == TokenType.Word and \
                index + 1 < tokens_len and \
                tokens[index + 1].type == TokenType.LeftParen:
            index, statement = _handle_function_call(tokens,
                                                     tokens_len,
                                                     index)
            statements.append(statement)
        # Argument
        elif _is_word_type(tokens[index].type):
            arguments.append(Word(type=_word_type(tokens[index].type),
                                  contents=tokens[index].content,
                                  line=tokens[index].line,
                                  col=tokens[index].col,
                                  index=index))
        index = index + 1
    return (index, GenericBody(statements=statements,
                               arguments=arguments))
The main collector for all AST functions .
202
8
6,827
def _replace_token_range ( tokens , start , end , replacement ) : tokens = tokens [ : start ] + replacement + tokens [ end : ] return tokens
For a range indicated from start to end replace with replacement .
34
12
6,828
def _is_really_comment(tokens, index):
    """Return True if the token at index is (or looks like) a comment."""
    if tokens[index].type == TokenType.Comment:
        return True
    # Not tokenized as a comment, but starting with "#": a comment in
    # disguise.  An empty content means it cannot be one.
    stripped = tokens[index].content.lstrip()
    if not stripped:
        return False
    if stripped[0] == "#":
        return True
Return true if the token at index is really a comment .
64
12
6,829
def _paste_tokens_line_by_line(tokens, token_type, begin, end):
    """Return lines of tokens pasted together, line by line.

    For each source line between begin and end, all of that line's tokens
    are concatenated into a single token of token_type.  Returns the index
    just past the processed region, the new token count, and the updated
    token list.
    """
    block_index = begin
    while block_index < end:
        rst_line = tokens[block_index].line
        line_traversal_index = block_index
        pasted = ""
        # Gather the content of every token on this source line
        try:
            while tokens[line_traversal_index].line == rst_line:
                pasted += tokens[line_traversal_index].content
                line_traversal_index += 1
        except IndexError:
            # Ran off the token list; only possible on the block's last line
            assert line_traversal_index == end
        last_tokens_len = len(tokens)
        tokens = _replace_token_range(tokens,
                                      block_index,
                                      line_traversal_index,
                                      [Token(type=token_type,
                                             content=pasted,
                                             line=tokens[block_index].line,
                                             col=tokens[block_index].col)])
        # The list shrank: pull the end marker back accordingly
        end -= last_tokens_len - len(tokens)
        block_index += 1
    return (block_index, len(tokens), tokens)
Return lines of tokens pasted together line by line .
229
11
6,830
def _find_recorder(recorder, tokens, index):
    """Return an active recorder, starting a new one when none is running."""
    if recorder is not None:
        return recorder
    # No recorder active: ask each factory whether it wants to start one
    for recorder_factory in _RECORDERS:
        candidate = recorder_factory.maybe_start_recording(tokens, index)
        if candidate is not None:
            return candidate
    return None
Given a current recorder and a token index try to find a recorder .
66
14
6,831
def _compress_tokens(tokens):
    """Paste multi-line strings, comments, RST etc. together.

    Runs the token stream through the registered recorders, which collapse
    multi-token constructs into single tokens, while fixing a couple of
    edge cases (stray parens, stray closing quotes).
    """
    recorder = None

    def _edge_case_stray_end_quoted(tokens, index):
        """Convert stray end_quoted_literals to unquoted_literals."""
        # In this case, "tokenize" the matched token into what it would
        # have looked like had the last quote not been there. Put the
        # last quote on the end of the final token and call it an
        # unquoted_literal
        tokens[index] = Token(type=TokenType.UnquotedLiteral,
                              content=tokens[index].content,
                              line=tokens[index].line,
                              col=tokens[index].col)

    tokens_len = len(tokens)
    index = 0
    with _EdgeCaseStrayParens() as edge_case_stray_parens:
        edge_cases = [
            (_is_paren_type, edge_case_stray_parens),
            (_is_end_quoted_type, _edge_case_stray_end_quoted),
        ]
        while index < tokens_len:
            recorder = _find_recorder(recorder, tokens, index)
            if recorder is not None:
                # Do recording
                result = recorder.consume_token(tokens, index, tokens_len)
                if result is not None:
                    # Recorder finished: it replaced a token range, so the
                    # index and length move with it
                    (index, tokens_len, tokens) = result
                    recorder = None
            else:
                # Handle edge cases
                for matcher, handler in edge_cases:
                    if matcher(tokens[index].type):
                        handler(tokens, index)
            index += 1
    return tokens
Paste multi - line strings comments RST etc together .
339
12
6,832
def tokenize(contents):
    """Parse a string of CMake source into a list of non-whitespace tokens."""
    compressed = _compress_tokens(_scan_for_tokens(contents))
    return [token for token in compressed
            if token.type != TokenType.Whitespace]
Parse a string called contents for CMake tokens .
53
11
6,833
def parse(contents, tokens=None):
    """Parse contents into an AST, reusing pre-computed tokens when given."""
    # Shortcut for users who are interested in tokens
    if tokens is None:
        tokens = [t for t in tokenize(contents)]
    token_index, body = _ast_worker(tokens, len(tokens), 0, None)
    # A toplevel parse must consume every token and collect no stray args
    assert token_index == len(tokens)
    assert body.arguments == []
    return ToplevelBody(statements=body.statements)
Parse a string called contents for an AST and return it .
88
13
6,834
def maybe_start_recording(tokens, index):
    """Return a new _CommentedLineRecorder when it is time to record."""
    if not _is_really_comment(tokens, index):
        return None
    return _CommentedLineRecorder(index, tokens[index].line)
Return a new _CommentedLineRecorder when it is time to record .
45
16
6,835
def maybe_start_recording(tokens, index):
    """Return a new _RSTCommentBlockRecorder when it is time to record."""
    if tokens[index].type != TokenType.BeginRSTComment:
        return None
    return _RSTCommentBlockRecorder(index, tokens[index].line)
Return a new _RSTCommentBlockRecorder when its time to record .
49
16
6,836
def maybe_start_recording(tokens, index):
    """Return a new _InlineRSTRecorder when it is time to record."""
    if tokens[index].type == TokenType.BeginInlineRST:
        return _InlineRSTRecorder(index)
    return None
Return a new _InlineRSTRecorder when its time to record .
41
16
6,837
def maybe_start_recording(tokens, index):
    """Return a new _MultilineStringRecorder when it is time to record."""
    if not _is_begin_quoted_type(tokens[index].type):
        return None
    string_type = _get_string_type_from_token(tokens[index].type)
    return _MultilineStringRecorder(index, string_type)
Return a new _MultilineStringRecorder when its time to record .
71
16
6,838
def stratified_kfold(df, n_folds):
    """Create stratified k-folds from a session-indexed dataframe.

    The first index level identifies a group and the second a session;
    sessions are shuffled in place (via np.random) within each group and
    dealt out so every fold receives len(sessions) // n_folds sessions per
    group.  Returns a list of n_folds dataframes.
    """
    sessions = pd.DataFrame.from_records(
        list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
    sessions.apply(lambda x: np.random.shuffle(x))
    folds = []
    for i in range(n_folds):
        # BUG FIX: use floor division -- plain "/" yields float slice
        # bounds and raises TypeError under Python 3.
        idx = sessions.apply(
            lambda x: pd.Series(x[i * (len(x) // n_folds):
                                  (i + 1) * (len(x) // n_folds)]))
        idx = pd.DataFrame(
            idx.stack().reset_index(level=1, drop=True)).set_index(
                0, append=True).index.values
        folds.append(df.loc[idx])
    return folds
Create stratified k - folds from an indexed dataframe
200
11
6,839
def keystroke_model():
    """Generate a 2-state Pohmm with lognormal emissions and freq smoothing."""
    return Pohmm(n_hidden_states=2,
                 init_spread=2,
                 emissions=['lognormal', 'lognormal'],
                 smoothing='freq',
                 init_method='obs',
                 thresh=1)
Generates a 2 - state model with lognormal emissions and frequency smoothing
67
16
6,840
def run_backdoor(address, namespace=None):
    """Start a server that runs python interpreters on connections made to it.

    :param address: (host, port) pair to bind the listening socket to.
    :param namespace: optional dict used as the interpreter namespace for
        each connection (copied per-connection by backdoor_handler).

    Runs forever; each accepted connection is handed off to
    backdoor_handler on the scheduler.
    """
    log.info("starting on %r" % (address,))
    serversock = io.Socket()
    # Allow quick restarts on the same port
    serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversock.bind(address)
    serversock.listen(socket.SOMAXCONN)
    while 1:
        clientsock, address = serversock.accept()
        log.info("connection received from %r" % (address,))
        scheduler.schedule(backdoor_handler, args=(clientsock, namespace))
start a server that runs python interpreters on connections made to it
131
13
6,841
def backdoor_handler(clientsock, namespace=None):
    """Start an interactive python interpreter on an existing connection.

    Reads lines from clientsock, feeds them to a code.InteractiveConsole
    (accumulating multi-line statements), and sends back the captured
    stdout/stderr followed by the appropriate PS1/PS2 prompt.
    """
    # Work on a copy so the caller's namespace dict is never mutated
    namespace = {} if namespace is None else namespace.copy()
    console = code.InteractiveConsole(namespace)
    multiline_statement = []
    stdout, stderr = StringIO(), StringIO()
    clientsock.sendall(PREAMBLE + "\n" + PS1)
    for input_line in _produce_lines(clientsock):
        input_line = input_line.rstrip()
        if input_line:
            input_line = '\n' + input_line
        source = '\n'.join(multiline_statement) + input_line
        response = ''
        with _wrap_stdio(stdout, stderr):
            result = console.runsource(source)
        response += stdout.getvalue()
        err = stderr.getvalue()
        if err:
            response += err
        # runsource returns True when the console wants more input
        if err or not result:
            multiline_statement = []
            response += PS1
        else:
            multiline_statement.append(input_line)
            response += PS2
        clientsock.sendall(response)
start an interactive python interpreter on an existing connection
236
9
6,842
def prepare_params(self):
    """Prepare the parameters passed to the templatetag.

    Resolves the fragment name (via the template context, or as a quoted
    literal), then computes the expire time, optional version and the
    resolved vary-on values.
    """
    if self.options.resolve_fragment:
        self.fragment_name = self.node.fragment_name.resolve(self.context)
    else:
        self.fragment_name = str(self.node.fragment_name)
        # Remove quotes that surround the name
        for char in '\'\"':
            if self.fragment_name.startswith(char) or \
                    self.fragment_name.endswith(char):
                if self.fragment_name.startswith(char) and \
                        self.fragment_name.endswith(char):
                    self.fragment_name = self.fragment_name[1:-1]
                    break
                else:
                    # One-sided quote: the name is malformed
                    raise ValueError('Number of quotes around the fragment name is incoherent')
    self.expire_time = self.get_expire_time()
    if self.options.versioning:
        self.version = force_bytes(self.get_version())
    self.vary_on = [template.Variable(var).resolve(self.context)
                    for var in self.node.vary_on]
Prepare the parameters passed to the templatetag
234
12
6,843
def get_expire_time(self):
    """Return the expire time passed to the templatetag.

    Must resolve to None or a non-negative integer; any other value turns
    into a TemplateSyntaxError.
    """
    try:
        expire_time = self.node.expire_time.resolve(self.context)
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError(
            '"%s" tag got an unknown variable: %r' % (
                self.node.nodename, self.node.expire_time.var))
    try:
        if expire_time is not None:
            expire_time = str(expire_time)
            # isdigit rejects negatives, floats and junk before int()
            if not expire_time.isdigit():
                raise TypeError
            expire_time = int(expire_time)
    except (ValueError, TypeError):
        raise template.TemplateSyntaxError(
            '"%s" tag got a non-integer (or None) timeout value: %r' % (
                self.node.nodename, expire_time))
    return expire_time
Return the expire time passed to the templatetag . Must be None or an integer .
182
20
6,844
def get_version(self):
    """Return the stringified version passed to the templatetag, or None."""
    if not self.node.version:
        return None
    try:
        version = smart_str('%s' % self.node.version.resolve(self.context))
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError(
            '"%s" tag got an unknown variable: %r' % (
                self.node.nodename, self.node.version.var))
    return '%s' % version
Return the stringified version passed to the templatetag .
100
14
6,845
def hash_args(self):
    """Return an md5 hex digest of the url-quoted vary-on arguments."""
    quoted = [urlquote(force_bytes(var)) for var in self.vary_on]
    joined = force_bytes(':'.join(quoted))
    return hashlib.md5(joined).hexdigest()
Take all the arguments passed after the fragment name and return a hashed version which will be used in the cache key
53
23
6,846
def get_cache_key_args(self):
    """Return the arguments used to format the base cache key."""
    cache_key_args = {
        'nodename': self.node.nodename,
        'name': self.fragment_name,
        'hash': self.hash_args(),
    }
    if self.options.include_pk:
        cache_key_args['pk'] = self.get_pk()
    return cache_key_args
Return the arguments to be passed to the base cache key returned by get_base_cache_key .
85
21
6,847
def cache_set(self, to_cache):
    """Store the given content in the cache under the computed key."""
    cache_backend = self.cache
    cache_backend.set(self.cache_key, to_cache, self.expire_time)
Set content into the cache
34
5
6,848
def render_node(self):
    """Render the wrapped nodelist and keep the generated content."""
    rendered = self.node.nodelist.render(self.context)
    self.content = rendered
Render the template and save the generated content
25
8
6,849
def create_content(self):
    """Render the template, apply options on it and save it to the cache.

    Applies space compression and content compression when the matching
    options are set; cache write failures are logged and only re-raised
    when template debugging is active.
    """
    self.render_node()
    if self.options.compress_spaces:
        self.content = self.RE_SPACELESS.sub(' ', self.content)
    if self.options.compress:
        to_cache = self.encode_content()
    else:
        to_cache = self.content
    to_cache = self.join_content_version(to_cache)
    try:
        self.cache_set(to_cache)
    except Exception:
        # Deliberately best-effort: a cache backend failure must not break
        # rendering unless template debugging is on
        if is_template_debug_activated():
            raise
        logger.exception('Error when saving the cached template fragment')
Render the template apply options on it and save it to the cache .
131
14
6,850
def get_templatetag_module(cls):
    """Return the templatetags module name this class's 'cache' tag lives in.

    Used to render the nocache blocks by loading the correct module; the
    lookup result is memoized in CacheTag._templatetags_modules.
    """
    if cls not in CacheTag._templatetags_modules:
        # find the library including the main templatetag of the current class
        all_tags = cls.get_all_tags_and_filters_by_function()['tags']
        CacheTag._templatetags_modules[cls] = all_tags[
            CacheTag._templatetags[cls]['cache']][0]
    return CacheTag._templatetags_modules[cls]
Return the templatetags module name for which the current class is used . It s used to render the nocache blocks by loading the correct module
134
32
6,851
def render_nocache(self):
    """Render the nocache blocks of the content and return the whole html.

    Builds a one-off template that loads this class's templatetag module
    and wraps the cached content in "raw" tags, then renders it against
    the current context.
    """
    tmpl = template.Template(''.join([
        # start by loading the cache library
        template.BLOCK_TAG_START,
        'load %s' % self.get_templatetag_module(),
        template.BLOCK_TAG_END,
        # and surround the cached template by "raw" tags
        self.RAW_TOKEN_START,
        self.content,
        self.RAW_TOKEN_END,
    ]))
    return tmpl.render(self.context)
Render the nocache blocks of the content and return the whole html
117
14
6,852
def user_active_directory_deactivate(user, attributes, created, updated):
    """Deactivate the user when AD's userAccountControl has the disable bit.

    Requires userAccountControl to be included in
    LDAP_SYNC_USER_EXTRA_ATTRIBUTES; silently does nothing when absent.
    """
    try:
        flags = int(attributes['userAccountControl'][0])
    except KeyError:
        return
    # Bit 2 (ACCOUNTDISABLE) marks a disabled account
    if flags & 2:
        user.is_active = False
Deactivate user accounts based on Active Directory s userAccountControl flags . Requires userAccountControl to be included in LDAP_SYNC_USER_EXTRA_ATTRIBUTES .
62
39
6,853
def _get_contents_between ( string , opener , closer ) : opener_location = string . index ( opener ) closer_location = string . index ( closer ) content = string [ opener_location + 1 : closer_location ] return content
Get the contents of a string between two characters
52
9
6,854
def _check_whitespace ( string ) : if string . count ( ' ' ) + string . count ( '\t' ) + string . count ( '\n' ) > 0 : raise ValueError ( INSTRUCTION_HAS_WHITESPACE )
Make sure there is no whitespace in the given string . Will raise a ValueError if whitespace is detected
59
23
6,855
def _check_parameters ( parameters , symbols ) : for param in parameters : if not param : raise ValueError ( EMPTY_PARAMETER ) elif ( param [ 0 ] in symbols ) and ( not param [ 1 : ] ) : print ( param ) raise ValueError ( EMPTY_KEYWORD_PARAMETER )
Checks that the parameters given are not empty . Ones with prefix symbols can be denoted by including the prefix in symbols
73
24
6,856
def _check_dependencies(string):
    """Validate the dependency section; return True when one is present.

    Dependencies must be the first thing defined in the instruction.
    """
    opener, closer = '(', ')'
    _check_enclosing_characters(string, opener, closer)
    if opener not in string:
        return False
    if string[0] != opener:
        raise ValueError(DEPENDENCIES_NOT_FIRST)
    return True
Checks the dependencies constructor . Looks to make sure that the dependencies are the first things defined
73
18
6,857
def _check_building_options(string):
    """Validate the building-options section; return True when present.

    Building options must be the last thing defined in the instruction.
    """
    opener, closer = '{', '}'
    _check_enclosing_characters(string, opener, closer)
    if opener not in string:
        return False
    if string[-1] != closer:
        raise ValueError(OPTIONS_NOT_LAST)
    return True
Checks the building options to make sure that they are defined last after the task name and the dependencies
74
20
6,858
def _parse_dependencies(string):
    """Split the dependency list into buildable and given dependencies.

    Dependencies prefixed with '?' are "given"; the rest are buildable.
    Returns (buildable, given, rest_of_string).
    """
    contents = _get_contents_between(string, '(', ')')
    unsorted_dependencies = contents.split(',')
    _check_parameters(unsorted_dependencies, ('?',))
    buildable_dependencies = []
    given_dependencies = []
    for dependency in unsorted_dependencies:
        if dependency[0] == '?':
            given_dependencies.append(dependency[1:])
        else:
            buildable_dependencies.append(dependency)
    remainder = string[string.index(')') + 1:]
    return buildable_dependencies, given_dependencies, remainder
This function actually parses the dependencies and sorts them into the buildable and given dependencies
145
17
6,859
def parseString(string):
    """Parse a whole build-instruction string into its component fields.

    Returns an object with attributes input_name, output_name,
    buildable_dependencies, given_dependencies, output_format,
    building_directory and output_directory.
    """
    from types import SimpleNamespace

    buildable_dependencies = []
    given_dependencies = []
    output_directory = None
    output_format = None
    building_directory = None
    output_name = None
    _check_whitespace(string)
    if _check_dependencies(string):
        (buildable_dependencies, given_dependencies,
         string) = _parse_dependencies(string)
    if _check_building_options(string):
        (output_directory, output_format,
         building_directory, string) = _parse_building_options(string)
    # Strip a leading/trailing flow operator left over from the sections
    if string[0] == '>':
        string = string[1:]
    if string[-1] == '>':
        string = string[:-1]
    if _check_flow_operator(string):
        greater_than_location = string.index('>')
        output_name = string[greater_than_location + 1:]
        string = string[:greater_than_location]
    # BUG FIX: a bare object() cannot hold attributes (the original raised
    # AttributeError here); SimpleNamespace keeps the same attribute-based
    # interface for callers.
    return SimpleNamespace(input_name=string,
                           output_name=output_name,
                           buildable_dependencies=buildable_dependencies,
                           given_dependencies=given_dependencies,
                           output_format=output_format,
                           building_directory=building_directory,
                           output_directory=output_directory)
This function takes an entire instruction in the form of a string , parses the entire string and returns an object holding the fields gathered from the parsing
316
29
6,860
def GetAlias(session=None):
    """Return the session alias, or the alias tied to stored credentials."""
    if session is not None:
        return session['alias']
    # Lazily log in to populate the cached alias
    if not clc.ALIAS:
        clc.v2.API._Login()
    return clc.ALIAS
Return specified alias or if none the alias associated with the provided credentials .
50
14
6,861
def GetLocation(session=None):
    """Return the session location, or the default one for the credentials."""
    if session is not None:
        return session['location']
    # Lazily log in to populate the cached location
    if not clc.LOCATION:
        clc.v2.API._Login()
    return clc.LOCATION
Return specified location or if none the default location associated with the provided credentials and alias .
50
17
6,862
def PrimaryDatacenter(self):
    """Return the primary datacenter object associated with the account."""
    return clc.v2.Datacenter(alias=self.alias,
                             location=self.data['primaryDataCenter'],
                             session=self.session)
Returns the primary datacenter object associated with the account .
47
12
6,863
def add_file(self, name, filename, compress_hint=True):
    """Save the file at *filename* into the store under *name*.

    Delegates to add_stream with a binary file handle.

    NOTE(review): compress_hint is accepted but never forwarded to
    add_stream -- confirm whether it should be.  The opened handle is not
    closed here; presumably add_stream consumes/closes it -- verify.
    """
    return self.add_stream(name, open(filename, 'rb'))
Saves the actual file in the store .
37
9
6,864
def get_file(self, name, filename):
    """Save the content of the stored entry *name* to *filename*.

    Returns the versioned name of the entry that was written.
    """
    stream, vname = self.get_stream(name)
    path, version = split_name(vname)
    # Create the destination directory if it does not exist yet.
    parent = os.path.dirname(filename)
    if parent:
        mkdir(parent)
    with open(filename, 'wb') as destination:
        shutil.copyfileobj(stream, destination)
    return vname
Saves the content of file named name to filename .
87
11
6,865
def remove_this_tlink(self, tlink_id):
    """Remove the tlink with the given identifier, if present."""
    for candidate in self.get_tlinks():
        if candidate.get_id() == tlink_id:
            self.node.remove(candidate.get_node())
            return
Removes the tlink for the given tlink identifier
60
11
6,866
def remove_this_predicateAnchor(self, predAnch_id):
    """Remove the predicate anchor with the given identifier, if present."""
    for anchor in self.get_predicateAnchors():
        if anchor.get_id() == predAnch_id:
            self.node.remove(anchor.get_node())
            return
Removes the predicate anchor for the given predicate anchor identifier
71
11
6,867
def wait_fds(fd_events, inmask=1, outmask=2, timeout=None):
    """wait for the first of a number of file descriptors to have activity

    ``fd_events`` is a sequence of ``(fd, eventmask)`` pairs.  Returns a
    list of ``(fd, triggered_events)`` pairs once any registered fd has
    activity, or raises ``IOError(EINTR)`` if the scheduler was
    interrupted.  NOTE(review): uses ``dict.iteritems`` -- Python 2 only.
    """
    current = compat.getcurrent()
    activated = {}       # fd -> bitmask of events that fired
    poll_regs = {}       # fd -> poller registration handle
    callback_refs = {}   # fd -> (readable_cb, writable_cb), kept for unregister

    def activate(fd, event):
        # Poller callback: record that `event` fired on `fd`.
        if not activated and timeout != 0:
            # this is the first invocation of `activated` for a blocking
            # `wait_fds` call, so re-schedule the blocked coroutine
            scheduler.schedule(current)
            # if there was a timeout then also have to pull
            # the coroutine from the timed_paused structure
            if timeout:
                scheduler._remove_timer(waketime, current)
        # in any case, set the event information
        activated.setdefault(fd, 0)
        activated[fd] |= event

    for fd, events in fd_events:
        readable = None
        writable = None
        if events & inmask:
            readable = functools.partial(activate, fd, inmask)
        if events & outmask:
            writable = functools.partial(activate, fd, outmask)
        callback_refs[fd] = (readable, writable)
        poll_regs[fd] = scheduler._register_fd(fd, readable, writable)

    if timeout:
        # real timeout value, schedule ourself `timeout` seconds in the future
        waketime = time.time() + timeout
        scheduler.pause_until(waketime)
    elif timeout == 0:
        # timeout == 0, only pause for 1 loop iteration
        scheduler.pause()
    else:
        # timeout is None, it's up to _hit_poller->activate to bring us back
        scheduler.state.mainloop.switch()

    # Clean up every registration made above.
    for fd, reg in poll_regs.iteritems():
        readable, writable = callback_refs[fd]
        scheduler._unregister_fd(fd, readable, writable, reg)
    if scheduler.state.interrupted:
        raise IOError(errno.EINTR, "interrupted system call")
    return activated.items()
wait for the first of a number of file descriptors to have activity
451
14
6,868
def hack_find_packages(include_str):
    """patches setuptools.find_packages issue

    Returns ``include_str`` itself followed by each discovered
    subpackage name prefixed with ``include_str`` and a dot.
    """
    return [include_str] + [
        include_str + '.' + pkg for pkg in find_packages(include_str)
    ]
patches setuptools . find_packages issue
52
10
6,869
def WaitUntilComplete(self, poll_freq=2, timeout=None):
    """Poll until all request objects have completed.

    Returns the number of requests that ended in error (non-zero
    indicates partial failure).  Raises clc.RequestTimeoutException if
    *timeout* seconds elapse with requests still pending.
    """
    start_time = time.time()
    while len(self.requests):
        cur_requests = []
        for request in self.requests:
            status = request.Status()
            if status in ('notStarted', 'executing', 'resumed', 'queued', 'running'):
                cur_requests.append(request)
            elif status == 'succeeded':
                self.success_requests.append(request)
            elif status in ("failed", "unknown"):
                self.error_requests.append(request)
        self.requests = cur_requests
        # BUG FIX: the original tested `self.requests > 0`, comparing a
        # list to an int -- always truthy on Python 2 and a TypeError on
        # Python 3.  Test the length instead.
        if len(self.requests) > 0 and clc.v2.time_utils.TimeoutExpired(start_time, timeout):
            raise clc.RequestTimeoutException(
                'Timeout waiting for Requests: {0}'.format(self.requests[0].id),
                self.requests[0].Status())
        time.sleep(poll_freq)  # alternately - sleep for the delta between start time and 2s
    # Is this the best approach? Non-zero indicates some error. Exception seems the wrong approach for
    # a partial failure
    return len(self.error_requests)
Poll until all request objects have completed .
282
8
6,870
def WaitUntilComplete(self, poll_freq=2, timeout=None):
    """Poll until this request reaches a terminal status.

    Raises clc.RequestTimeoutException if *timeout* seconds elapse while
    still executing, and clc.CLCException when execution fails, resumes,
    or becomes unknown.
    """
    start_time = time.time()
    while not self.time_completed:
        status = self.Status()
        if status == 'executing':
            if not self.time_executed:
                self.time_executed = time.time()
            if clc.v2.time_utils.TimeoutExpired(start_time, timeout):
                raise clc.RequestTimeoutException(
                    'Timeout waiting for Request: {0}'.format(self.id), status)
        elif status == 'succeeded':
            self.time_completed = time.time()
        # BUG FIX: the original wrote `("failed", "resumed" or "unknown")`,
        # which evaluates to ("failed", "resumed") -- 'unknown' was never
        # treated as a terminal state.
        elif status in ("failed", "resumed", "unknown"):
            # TODO - need to ID best reaction for resumed status (e.g. manual intervention)
            self.time_completed = time.time()
            raise clc.CLCException("%s %s execution %s" % (self.context_key, self.context_val, status))
        time.sleep(poll_freq)
Poll until status is completed .
242
6
6,871
def Server(self):
    """Return server associated with this request."""
    if self.context_key == 'newserver':
        # A queued build only exposes a URL; fetch it to learn the server ID.
        server_id = clc.v2.API.Call('GET', self.context_val, session=self.session)['id']
        return clc.v2.Server(id=server_id, alias=self.alias, session=self.session)
    if self.context_key == 'server':
        return clc.v2.Server(id=self.context_val, alias=self.alias, session=self.session)
    raise clc.CLCException("%s object not server" % self.context_key)
Return server associated with this request .
150
7
6,872
def login(self):
    """Log in to TheTVDB with the api key, username and account identifier.

    On success stores the auth token and marks the client authenticated;
    raises AuthenticationFailedException otherwise.
    """
    payload = {
        'apikey': self.api_key,
        'username': self.username,
        'userkey': self.account_identifier,
    }
    auth_resp = requests_util.run_request(
        'post', self.API_BASE_URL + '/login',
        data=json.dumps(payload), headers=self.__get_header())
    if auth_resp.status_code != 200:
        raise AuthenticationFailedException('Authentication failed!')
    auth_resp_data = self.parse_raw_response(auth_resp)
    self.__token = auth_resp_data['token']
    self.__auth_time = datetime.now()
    self.is_authenticated = True
This method performs the login on TheTVDB given the api key user name and account identifier .
187
19
6,873
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
    """Search TheTVDB for a series by its name, imdb_id or zap2it_id."""
    arguments = locals()
    # Map python argument names to their API query-parameter names.
    optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
    query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
    url = '%s%s?%s' % (self.API_BASE_URL, '/search/series', query_string)
    raw_response = requests_util.run_request('get', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Searches for a series in TheTVDB by either its name imdb_id or zap2it_id .
178
25
6,874
def get_series_actors(self, series_id):
    """Return the actors of the series with the given TheTVDB id."""
    url = self.API_BASE_URL + '/series/%d/actors' % series_id
    raw_response = requests_util.run_request('get', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves the information on the actors of a particular series given its TheTVDB id .
82
19
6,875
def get_series_episodes(self, series_id, page=1):
    """Return one page (max 100) of episodes for the given series id."""
    url = self.API_BASE_URL + '/series/%d/episodes?page=%d' % (series_id, page)
    raw_response = requests_util.run_request('get', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves all episodes for a particular series given its TheTVDB id . It retrieves a maximum of 100 results per page .
95
27
6,876
def get_series_episodes(self, series_id, episode_number=None, aired_season=None,
                        aired_episode=None, dvd_season=None, dvd_episode=None,
                        imdb_id=None, page=1):
    """Return episodes of a series filtered by the given optional criteria."""
    arguments = locals()
    # Map python argument names to their API query-parameter names.
    optional_parameters = {
        'episode_number': 'absoluteNumber',
        'aired_season': 'airedSeason',
        'aired_episode': 'airedEpisode',
        'dvd_season': 'dvdSeason',
        'dvd_episode': 'dvdEpisode',
        'imdb_id': 'imdbId',
        'page': 'page',
    }
    query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
    url = self.API_BASE_URL + '/series/%d/episodes/query?%s' % (series_id, query_string)
    raw_response = requests_util.run_request('get', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details .
253
21
6,877
def get_updated(self, from_time, to_time=None):
    """Return series changed on TheTVDB since *from_time*, optionally up to *to_time*."""
    arguments = locals()
    optional_parameters = {'to_time': 'toTime'}
    query_string = 'fromTime=%s&%s' % (
        from_time, utils.query_param_string_from_option_args(optional_parameters, arguments))
    # BUG FIX: the endpoint was misspelled '/uodated/query' in the
    # original; TheTVDB's route is '/updated/query'.
    raw_response = requests_util.run_request(
        'get', self.API_BASE_URL + '/updated/query?%s' % query_string,
        headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a specified to time .
151
28
6,878
def get_user(self):
    """Return information about the currently authenticated user."""
    raw_response = requests_util.run_request(
        'get', self.API_BASE_URL + '/user', headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves information about the user currently using the api .
58
12
6,879
def get_user_favorites(self):
    """Return the tv series the current user has flagged as favorites."""
    raw_response = requests_util.run_request(
        'get', self.API_BASE_URL + '/user/favorites', headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves the list of tv series the current user has flagged as favorite .
66
16
6,880
def delete_user_favorite(self, series_id):
    """Remove the series with *series_id* from the user's favorites."""
    url = self.API_BASE_URL + '/user/favorites/%d' % series_id
    raw_response = requests_util.run_request('delete', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Deletes the series of the provided id from the favorites list of the current user .
75
17
6,881
def __get_user_ratings(self):
    """Return all ratings submitted by the current user."""
    raw_response = requests_util.run_request(
        'get', self.API_BASE_URL + '/user/ratings', headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Returns a list of the ratings provided by the current user .
65
12
6,882
def get_user_ratings(self, item_type=None):
    """Return the current user's ratings, optionally filtered by *item_type*."""
    if not item_type:
        return self.__get_user_ratings()
    query_string = 'itemType=%s' % item_type
    # BUG FIX: the endpoint was misspelled '/user/ratings/qeury' in the
    # original; TheTVDB's route is '/user/ratings/query'.
    url = self.API_BASE_URL + '/user/ratings/query?%s' % query_string
    return self.parse_raw_response(
        requests_util.run_request('get', url, headers=self.__get_header_with_auth()))
Returns a list of the ratings for the type of item provided for the current user .
115
17
6,883
def add_user_rating(self, item_type, item_id, item_rating):
    """Record *item_rating* for the given item on behalf of the current user."""
    url = self.API_BASE_URL + '/user/ratings/%s/%d/%d' % (item_type, item_id, item_rating)
    raw_response = requests_util.run_request('put', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Adds the rating for the item indicated for the current user .
105
12
6,884
def delete_user_rating(self, item_type, item_id):
    """Delete the current user's rating for the given item."""
    url = self.API_BASE_URL + '/user/ratings/%s/%d' % (item_type, item_id)
    raw_response = requests_util.run_request('delete', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Deletes from the list of rating of the current user the rating provided for the specified element type .
94
20
6,885
def get_episode(self, episode_id):
    """Return the full information of the episode with the given id."""
    url = self.API_BASE_URL + '/episodes/%d' % episode_id
    raw_response = requests_util.run_request('get', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Returns the full information of the episode belonging to the Id provided .
77
13
6,886
def get_languages(self):
    """Return all language options available in TheTVDB."""
    raw_response = requests_util.run_request(
        'get', self.API_BASE_URL + '/languages', headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Returns a list of all language options available in TheTVDB .
67
13
6,887
def get_language(self, language_id):
    """Return information about the language with the given id."""
    url = self.API_BASE_URL + '/languages/%d' % language_id
    raw_response = requests_util.run_request('get', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves information about the language of the given id .
77
12
6,888
def SetCredentials(api_key, api_passwd):
    """Establish API key and password associated with APIv1 commands."""
    global V1_API_KEY, V1_API_PASSWD, _V1_ENABLED
    # Flag v1 as usable and stash the supplied credentials.
    _V1_ENABLED = True
    V1_API_KEY = api_key
    V1_API_PASSWD = api_passwd
Establish API key and password associated with APIv1 commands .
70
13
6,889
def normalize(A, axis=None, inplace=False):
    """Normalize *A* so it sums to 1 along *axis* (whole array if None).

    A tiny epsilon is added to every entry first so that all-zero
    slices do not produce NaNs.  Returns the normalized array; when
    *inplace* is true, *A* itself is modified and returned.
    """
    out = A if inplace else A.copy()
    out += np.finfo(float).eps
    total = out.sum(axis)
    if axis and out.ndim > 1:
        # Make sure we don't divide by zero.
        total[total == 0] = 1
        new_shape = list(out.shape)
        new_shape[axis] = 1
        total.shape = new_shape
    out /= total
    return out
Normalize the input array so that it sums to 1 .
106
12
6,890
def ph2full(ptrans, htrans):
    """Combine a p-state transition matrix and per-p h-state matrices
    into the full (n_p * n_h) x (n_p * n_h) transition matrix."""
    n_pstates = len(ptrans)
    n_hstates = len(htrans[0, 0])
    size = n_pstates * n_hstates
    full = np.zeros((size, size))
    for pidx in range(n_pstates):
        # Column vector of p-transitions out of this p-state.
        p_col = ptrans[pidx, :, np.newaxis]
        for hidx in range(n_hstates):
            full[pidx * n_hstates + hidx] = (p_col * htrans[pidx, :, hidx]).flatten()
    return full
Convert a p - state transition matrix and h - state matrices to the full transition matrix
133
20
6,891
def full2ph(trans, n_pstates):
    """Split a full transition matrix into p-state and h-state transmats.

    Returns (ptrans, htrans) where ptrans is (n_p, n_p) row-stochastic
    and htrans is (n_p, n_p, n_h, n_h) normalized over its last axis.
    """
    # BUG FIX: `/` yields a float on Python 3 and breaks range()/array
    # shapes; the sub-block size must be an integer, so floor-divide.
    n_hstates = len(trans) // n_pstates
    htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
    for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
        idx1 = pidx1 * n_hstates
        idx2 = pidx2 * n_hstates
        htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
    # p-state matrix: marginalize out the h-states, then renormalize rows.
    ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
    htrans = normalize(htrans, axis=3)
    return ptrans, htrans
Convert a full transmat to the respective p - state and h - state transmats
197
18
6,892
def gen_stochastic_matrix(size, random_state=None):
    """Generate a uniformly-random stochastic array or matrix.

    size: an int (1-D array of that length) or a (rows, cols) tuple.
    random_state: numpy RandomState-like RNG.  BUG FIX: the original
    dereferenced None when no random_state was supplied; fall back to
    the global numpy RNG instead.
    """
    if random_state is None:
        random_state = np.random
    if not type(size) is tuple:
        size = (1, size)
    assert len(size) == 2
    # Sorted uniform draws with 0 and 1 appended; successive differences
    # are a uniform sample from the probability simplex per row.
    n = random_state.uniform(size=(size[0], size[1] - 1))
    n = np.concatenate([np.zeros((size[0], 1)), n, np.ones((size[0], 1))], axis=1)
    A = np.diff(np.sort(n))
    return A.squeeze()
Generate a uniformly - random stochastic array or matrix
130
14
6,893
def steadystate(A, max_iter=100):
    """Empirically estimate the steady-state distribution of a stochastic
    matrix by raising it to a high power and summing its distinct rows."""
    P = np.linalg.matrix_power(A, max_iter)
    # Determine the unique rows in A
    unique_rows = []
    for row in P:
        if not any(np.allclose(row, seen) for seen in unique_rows):
            unique_rows.append(row)
    return normalize(np.sum(unique_rows, axis=0))
Empirically determine the steady state probabilities from a stochastic matrix
105
15
6,894
def pipe():
    """create an inter-process communication pipe

    Returns (read_end, write_end) wrapped as File objects.
    """
    read_fd, write_fd = os.pipe()
    return File.fromfd(read_fd, 'rb'), File.fromfd(write_fd, 'wb')
create an inter - process communication pipe
38
7
6,895
def get_id_head(self):
    """Return the id of the target that is set as head, or None."""
    for target_node in self:
        if target_node.is_head():
            return target_node.get_id()
    return None
Returns the id of the target that is set as head
51
11
6,896
def add_target_id(self, this_id):
    """Append a new target with the given id to this span."""
    target = Ctarget()
    target.set_id(this_id)
    self.node.append(target.get_node())
Adds a new target to the span with the specified id
50
11
6,897
def create_from_ids(self, list_ids):
    """Append a new target to the span for every id in *list_ids*."""
    for target_id in list_ids:
        target = Ctarget()
        target.set_id(target_id)
        self.node.append(target.get_node())
Adds new targets to the span with the specified ids
59
11
6,898
def create_from_targets(self, list_targs):
    """Append the node of every target object in *list_targs* to this span."""
    for target in list_targs:
        self.node.append(target.get_node())
Adds new targets to the span that are defined in a list
43
12
6,899
def get_statement(self, statement_id):
    """Return the Cstatement for *statement_id*, or None if unknown."""
    if statement_id not in self.idx:
        return None
    return Cstatement(self.idx[statement_id], self.type)
Returns the statement object for the supplied identifier
44
8