idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
7,000
def find_field(browser, field_type, value):
    """Locate an input field by id, name or label text.

    The three lookup strategies are combined; ElementSelector supports
    ``+`` to union the matches.
    """
    by_id = find_field_by_id(browser, field_type, value)
    by_name = find_field_by_name(browser, field_type, value)
    by_label = find_field_by_label(browser, field_type, value)
    return by_id + by_name + by_label
Locate an input field .
65
6
7,001
def find_field_by_id(browser, field_type, id):
    """Locate the control input with the given id."""
    xpath = field_xpath(field_type, 'id') % string_literal(id)
    return ElementSelector(
        browser,
        xpath=xpath,
        filter_displayed=True,
    )
Locate the control input with the given id .
58
10
7,002
def find_field_by_name(browser, field_type, name):
    """Locate the control input with the given name.

    Consistency fix: the XPath is passed via the ``xpath`` keyword
    argument, matching the sibling find_field_by_id and
    find_field_by_label helpers (same ElementSelector call shape).
    """
    return ElementSelector(
        browser,
        xpath=field_xpath(field_type, 'name') % string_literal(name),
        filter_displayed=True,
    )
Locate the control input with the given name .
55
10
7,003
def find_field_by_value(browser, field_type, name):
    """Locate the control input with the given value; useful for buttons."""
    xpath = field_xpath(field_type, 'value') % string_literal(name)
    elems = ElementSelector(
        browser,
        xpath=str(xpath),
        filter_displayed=True,
        filter_enabled=True,
    )

    # Prefer the shortest text/value: the most closely matching element.
    if field_type in ('button-element', 'button-role'):
        sort_key = lambda elem: len(elem.text)
    else:
        sort_key = lambda elem: len(elem.get_attribute('value'))
    ordered = sorted(elems, key=sort_key)

    # Keep only the best match (empty list stays empty).
    return ElementSelector(browser, elements=ordered[:1])
Locate the control input with the given value . Useful for buttons .
182
14
7,004
def find_field_by_label(browser, field_type, label):
    """Locate the control input that has a label pointing to it."""
    label_xpath = '//label[contains(., {0})]/@for'.format(string_literal(label))
    return ElementSelector(
        browser,
        xpath=field_xpath(field_type, 'id') % label_xpath,
        filter_displayed=True,
    )
Locate the control input that has a label pointing to it .
78
13
7,005
def wait_for(func):
    """A decorator retrying *func* on AssertionError for a time interval.

    The wrapped function accepts an extra ``timeout`` keyword argument
    (defaulting to the module-level TIMEOUT); retries happen every
    CHECK_EVERY seconds until the timeout elapses, then the last
    AssertionError propagates.
    """
    @wraps(func)
    def wrapped(*args, **kwargs):
        timeout = kwargs.pop('timeout', TIMEOUT)
        start = None
        while True:
            try:
                return func(*args, **kwargs)
            except AssertionError:
                # The function took some time to test the assertion,
                # however, the result might correspond to the state of
                # the world at any point in time, perhaps earlier than
                # the timeout.  Therefore, start counting time from the
                # first assertion fail, not from before the function was
                # called.
                if start is None:
                    # Fix: compare against None rather than truthiness --
                    # time() could in principle be 0 (falsy) at the epoch.
                    start = time()
                if time() - start < timeout:
                    sleep(CHECK_EVERY)
                    continue
                else:
                    raise
    return wrapped
A decorator to invoke a function retrying on assertion errors for a specified time interval .
158
18
7,006
def filter(self, displayed=False, enabled=False):
    """Filter elements by visibility and enabled status.

    Evaluated selectors are filtered element-by-element; lazy ones just
    record the flags for later evaluation.
    """
    if not self.evaluated:
        # Not evaluated yet: record the requested filters on a copy.
        result = copy(self)
        if displayed:
            result.displayed = True
        if enabled:
            result.enabled = True
        return result

    # Already evaluated: filter the concrete elements one by one.
    result = self
    if displayed:
        visible = [e for e in result if e.is_displayed()]
        result = ElementSelector(result.browser, elements=visible)
    if enabled:
        usable = [e for e in result if e.is_enabled()]
        result = ElementSelector(result.browser, elements=usable)
    return result
Filter elements by visibility and enabled status .
117
8
7,007
def _select ( self ) : for element in self . browser . find_elements_by_xpath ( self . xpath ) : if self . filter_displayed : if not element . is_displayed ( ) : continue if self . filter_enabled : if not element . is_enabled ( ) : continue yield element
Fetch the elements from the browser .
70
8
7,008
def authenticate(self) -> bool:
    """Authenticate and (re-)enable the registered notifications."""
    with IHCController._mutex:
        ok = self.client.authenticate(self._username, self._password)
        if not ok:
            return False
        if self._ihcevents:
            # Re-enable notifications for all registered resource ids.
            self.client.enable_runtime_notifications(self._ihcevents.keys())
        return True
Authenticate and enable the registered notifications
76
7
7,009
def get_runtime_value(self, ihcid: int):
    """Get a runtime value, re-authenticating if needed.

    Bug fix: the original returned the literal ``True`` instead of the
    actual value when the first read succeeded -- only the retry path
    returned the real value.
    """
    value = self.client.get_runtime_value(ihcid)
    if value:
        return value
    # Falsy result: assume the session expired, log in again and retry.
    # NOTE(review): a genuinely falsy runtime value (False/0) also takes
    # this path -- this mirrors the original's re-auth condition.
    self.re_authenticate()
    return self.client.get_runtime_value(ihcid)
Get runtime value with re - authenticate if needed
60
10
7,010
def set_runtime_value_bool(self, ihcid: int, value: bool) -> bool:
    """Set a bool runtime value, re-authenticating on failure."""
    success = self.client.set_runtime_value_bool(ihcid, value)
    if not success:
        # The session may have expired; log in again and retry once.
        self.re_authenticate()
        success = self.client.set_runtime_value_bool(ihcid, value)
    return success
Set bool runtime value with re - authenticate if needed
76
11
7,011
def set_runtime_value_int(self, ihcid: int, value: int) -> bool:
    """Set an integer runtime value, re-authenticating on failure."""
    success = self.client.set_runtime_value_int(ihcid, value)
    if not success:
        # The session may have expired; log in again and retry once.
        self.re_authenticate()
        success = self.client.set_runtime_value_int(ihcid, value)
    return success
Set integer runtime value with re - authenticate if needed
76
11
7,012
def set_runtime_value_float(self, ihcid: int, value: float) -> bool:
    """Set a float runtime value, re-authenticating on failure."""
    success = self.client.set_runtime_value_float(ihcid, value)
    if not success:
        # The session may have expired; log in again and retry once.
        self.re_authenticate()
        success = self.client.set_runtime_value_float(ihcid, value)
    return success
Set float runtime value with re - authenticate if needed
76
11
7,013
def get_project(self) -> str:
    """Get the IHC project, making sure the controller is ready first."""
    with IHCController._mutex:
        if self._project is None:
            # Wait up to 10 seconds for the controller to become ready.
            if self.client.get_state() != IHCSTATE_READY:
                ready = self.client.wait_for_state_change(IHCSTATE_READY, 10)
                if ready != IHCSTATE_READY:
                    return None
            self._project = self.client.get_project()
        return self._project
Get the ihc project and make sure controller is ready before
101
13
7,014
def add_notify_event(self, resourceid: int, callback, delayed=False):
    """Add a notify callback for a specified resource id.

    If *delayed* is true the enable request will be sent later from the
    notification thread instead of immediately.
    """
    with IHCController._mutex:
        # Register the callback, creating the list on first use.
        self._ihcevents.setdefault(resourceid, []).append(callback)
        if delayed:
            self._newnotifyids.append(resourceid)
        elif not self.client.enable_runtime_notification(resourceid):
            return False
        if not self._notifyrunning:
            self._notifythread.start()
        return True
Add a notify callback for a specified resource id If delayed is set to true the enable request will be sent from the notification thread
136
27
7,015
def _notify_fn(self):
    """The notification thread main loop."""
    self._notifyrunning = True
    while self._notifyrunning:
        try:
            with IHCController._mutex:
                # Are there any new ids to be added?
                if self._newnotifyids:
                    self.client.enable_runtime_notifications(self._newnotifyids)
                    self._newnotifyids = []
            changes = self.client.wait_for_resource_value_changes()
            if changes is False:
                self.re_authenticate(True)
                continue
            for ihcid, value in changes.items():
                for callback in self._ihcevents.get(ihcid, []):
                    callback(ihcid, value)
        except Exception:
            # Connection dropped or similar: try to log in again.
            self.re_authenticate(True)
The notify thread function .
192
5
7,016
def re_authenticate(self, notify: bool = False) -> bool:
    """Authenticate again after a failure, retrying until success.

    When called from the notify thread (*notify* true) there is no
    timeout, but the loop ends if that thread has been cancelled.
    Returns True if authentication succeeded.
    """
    deadline = datetime.now() + timedelta(seconds=self.reauthenticatetimeout)
    while not self.authenticate():
        if notify:
            if not self._notifyrunning:
                return False
        elif datetime.now() > deadline:
            return False
        # Wait a bit before trying again.
        time.sleep(self.retryinterval)
    return True
Authenticate again after failure . Keep trying with 10 sec interval . If called from the notify thread we will not have a timeout but will end if the notify thread has been cancelled . Will return True if authentication was successful .
103
45
7,017
def get_texts(self, metadata=None):
    """Iterate over the lines of the corpus, yielding token lists.

    Yields ``(tokens, (lineno,))`` when metadata is requested, otherwise
    just ``tokens``.  Only lines within a volume's [start, stop) range
    are yielded; lines past the last volume end the iteration.
    """
    if metadata is None:
        metadata = self.metadata
    self.input_file = gzip.GzipFile(self.input_file_path)
    volume_num = 0
    with self.input_file as lines:
        for lineno, line in enumerate(lines):
            if volume_num >= len(self.book_meta['volumes']):
                # Bug fix (PEP 479): raising StopIteration inside a
                # generator is a RuntimeError on Python 3.7+; use a
                # plain return to finish the generator instead.
                return
            if lineno < self.book_meta['volumes'][volume_num]['start']:
                continue
            if lineno < self.book_meta['volumes'][volume_num]['stop']:
                # act_num, scene_num = 0, 0
                # FIXME: use self.book_meta['volumes'][volume_num]['sections']
                # FIXME: use self.lemmatize
                toks = self.tokenize(line, lowercase=self.lowercase)
                if metadata:
                    yield (toks, (lineno,))
                else:
                    yield toks
            else:
                volume_num += 1
Iterate over the lines of The Complete Works of William Shakespeare .
256
13
7,018
def encode(df, encoding='utf8', verbosity=1):
    """Encode the string columns of *df* as UTF-8 bytes, in place.

    NOTE(review): despite the *encoding* parameter, UTF-8 is hard-coded
    below -- the parameter is effectively unused; confirm intent.
    Relies on module-level ``progressbar``, ``pd``, ``np``,
    ``basestring`` and ``transcode_unicode`` (Python 2/3 compat shims).
    Returns the mutated DataFrame.
    """
    if verbosity > 0:
        # pbar_i = 0
        pbar = progressbar.ProgressBar(maxval=df.shape[1])
        pbar.start()
    # encode strings as UTF-8 so they'll work in python2 and python3
    for colnum, col in enumerate(df.columns):
        if isinstance(df[col], pd.Series):
            if verbosity:
                pbar.update(colnum)
            # Only touch object/str-dtype columns that actually hold strings.
            if df[col].dtype in (np.dtype('object'), np.dtype('U'), np.dtype('S')) and any(isinstance(obj, basestring) for obj in df[col]):
                strmask = np.array([isinstance(obj, basestring) for obj in df[col]])
                series = df[col].copy()
                try:
                    # Bulk-encode the string positions in one vectorized pass.
                    series[strmask] = np.char.encode(series[strmask].values.astype('U'))
                except TypeError:
                    print("Unable to convert {} elements starting at position {} in column {}".format(
                        sum(strmask), [i for i, b in enumerate(strmask) if b][:1], col))
                    raise
                except (UnicodeDecodeError, UnicodeEncodeError):
                    try:
                        # Fall back: the cells may be repr()ed literals; re-evaluate them.
                        series[strmask] = np.array([eval(s, {}, {}) for s in series[strmask]])
                    # FIXME: do something different for unicode and decode errors
                    except (SyntaxError, UnicodeDecodeError, UnicodeEncodeError):
                        newseries = []
                        for s in series[strmask]:
                            try:
                                newseries += [s.encode('utf8')]
                            except:
                                print(u'Had trouble encoding {} so used repr to turn it into {}'.format(
                                    s, repr(transcode_unicode(s))))
                                # strip all unicode chars are convert to ASCII str
                                newseries += [transcode_unicode(s)]
                        # for dtype('U'): UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 207: ordinal not in r
                        series[strmask] = np.array(newseries).astype('O')
                df[col] = series
                # df[col] = np.array([x.encode('utf8') if isinstance(x, unicode) else x for x in df[col]])
    # WARNING: this takes DAYS for only 100k tweets!
    # series = df[col].copy()
    # for i, value in series.iteritems():
    #     if isinstance(value, basestring):
    #         series[i] = str(value.encode(encoding))
    # df[col] = series
    if verbosity:
        pbar.finish()
    return df
If you try to encode each element individually with python this would take days!
641
15
7,019
def run(verbosity=1):
    """Load all_tweets.csv, run normalize/dropna/encode/clean_labels,
    then dump cleaned_tweets.csv.gz and reload it."""
    filepath = os.path.join(DATA_PATH, 'all_tweets.csv')
    # this should load 100k tweets in about a minute
    # check the file size and estimate load time from that (see scritps/cat_tweets.py)
    print('Loading tweets from {} (could take a minute or so)...'.format(filepath))
    df = pd.read_csv(filepath, encoding='utf-8', engine='python')
    if 'id' in df.columns:
        df = df.set_index('id')

    df = normalize(df)
    df = dropna(df)
    df = encode(df, verbosity=verbosity)
    df = clean_labels(df)

    cleaned_path = os.path.join(DATA_PATH, 'cleaned_tweets.csv.gz')
    df.to_csv(cleaned_path, compression='gzip',
              quotechar='"', quoting=pd.io.common.csv.QUOTE_NONNUMERIC)
    # the round-trip to disk cleans up encoding issues so encoding no
    # longer needs to be specified on load
    df = pd.read_csv(cleaned_path, index_col='id', compression='gzip',
                     quotechar='"', quoting=pd.io.common.csv.QUOTE_NONNUMERIC,
                     low_memory=False)
    df.to_csv(cleaned_path, compression='gzip',
              quotechar='"', quoting=pd.io.common.csv.QUOTE_NONNUMERIC)
    return df
Load all_tweets . csv and run normalize dropna encode before dumping to cleaned_tweets . csv . gz
405
30
7,020
def data_worker(**kwargs):
    """Consume data keys from the input queue and push resulting
    dataframes to the output map.

    Required kwargs: ``function``, ``input`` (queue), ``output`` (map).
    Depending on the data source also ``token`` (Quandl) or
    ``source``/``begin``/``end`` (pandas.io.data).

    Bug fixes vs. the original: exception objects were instantiated but
    never raised (no-ops), and ``("source" and "begin" and "end") in
    kwargs`` only tested ``"end"`` because of ``and`` short-circuiting.
    """
    if "function" in kwargs:
        function = kwargs["function"]
    else:
        raise Exception("Invalid arguments, no function specified")
    if "input" in kwargs:
        input_queue = kwargs["input"]
    else:
        raise Exception("Invalid Arguments, no input queue")
    if "output" in kwargs:
        output_map = kwargs["output"]
    else:
        raise Exception("Invalid Arguments, no output map")

    argsdict = None
    if "token" in kwargs:
        argsdict = {"quandl_token": kwargs["token"]}
    elif "Quandl" in function.__module__:
        raise Exception("Invalid Arguments, no Quandl token")

    # A full source/begin/end triple takes precedence; a bare source is
    # also accepted.  Missing source for a pandas.io.data function is fatal.
    if all(key in kwargs for key in ("source", "begin", "end")):
        argsdict = {"data_source": kwargs["source"],
                    "begin": kwargs["begin"],
                    "end": kwargs["end"]}
    elif "source" in kwargs:
        argsdict = {"data_source": kwargs["source"]}
    elif "pandas.io.data" in function.__module__:
        raise Exception("Invalid Arguments, no pandas data source specified")

    if argsdict is None:
        # Originally this fell through with argsdict unbound (NameError
        # at the get_data call); fail explicitly instead.
        raise Exception("Invalid Arguments")

    retries = 5
    while not input_queue.empty():
        data_key = input_queue.get()
        get_data(function, data_key, output_map, retries, argsdict)
Function to be spawned concurrently consume data keys from input queue and push the resulting dataframes to output map
415
20
7,021
def consume_keys(self):
    """Work through the keys to look up, sequentially."""
    count = self.input_queue.qsize()
    print("\nLooking up " + str(count) + " keys from " + self.source_name + "\n")
    self.data_worker(**self.worker_args)
Work through the keys to look up sequentially
62
9
7,022
def consume_keys_asynchronous_processes(self):
    """Work through the keys asynchronously using multiple processes."""
    print("\nLooking up " + str(self.input_queue.qsize()) + " keys from " + self.source_name + "\n")
    # One worker per key, capped at 4x the CPU count.
    jobs = min(multiprocessing.cpu_count() * 4, self.input_queue.qsize())
    pool = multiprocessing.Pool(processes=jobs, maxtasksperchild=10)
    for _ in range(jobs):
        pool.apply(self.data_worker, [], self.worker_args)
    pool.close()
    pool.join()
Work through the keys to look up asynchronously using multiple processes
163
13
7,023
def consume_keys_asynchronous_threads(self):
    """Work through the keys asynchronously using multiple threads."""
    print("\nLooking up " + str(self.input_queue.qsize()) + " keys from " + self.source_name + "\n")
    # One worker per key, capped at 4x the CPU count.
    jobs = min(multiprocessing.cpu_count() * 4, self.input_queue.qsize())
    pool = ThreadPool(jobs)
    for _ in range(jobs):
        pool.apply(self.data_worker, [], self.worker_args)
    pool.close()
    pool.join()
Work through the keys to look up asynchronously using multiple threads
149
13
7,024
def unpack(self, to_unpack):
    """Recursively unpack strings, and containers of strings, into the
    input queue.

    Anything inheriting from Container (but not a string) is recursed
    into; strings are enqueued; anything else raises.
    """
    # Python 3 lacks basestring; fall back to str.  The assignment makes
    # basestring local, so the probe raises UnboundLocalError (a
    # NameError subclass) on Python 3 and is caught here.
    try:
        isinstance(to_unpack, basestring)
    except NameError:
        basestring = str

    # Base case: a single string key.
    if isinstance(to_unpack, basestring):
        self.input_queue.put(to_unpack)
        return

    if sys.version_info >= (3, 0):
        container_type = collections.abc.Container
    else:
        container_type = collections.Container

    for possible_key in to_unpack:
        if isinstance(possible_key, basestring):
            self.input_queue.put(possible_key)
        elif isinstance(possible_key, container_type):
            self.unpack(possible_key)
        else:
            raise Exception("A type that is neither a string or a container was passed to unpack. "
                            "Aborting!")
Unpack is a recursive function that will unpack anything that inherits from abstract base class Container provided it is not also inheriting from Python basestring .
258
32
7,025
def set_source_quandl(self, quandl_token):
    """Set the data source to Quandl."""
    self.data_worker = data_worker
    self.worker_args = {
        "function": Quandl.get,
        "input": self.input_queue,
        "output": self.output_map,
        "token": quandl_token,
    }
    self.source_name = "Quandl"
Set data source to Quandl
85
7
7,026
def set_source_google_finance(self):
    """Set the data source to Google Finance."""
    self.data_worker = data_worker
    self.worker_args = {
        "function": pandas.io.data.DataReader,
        "input": self.input_queue,
        "output": self.output_map,
        "source": 'google',
    }
    self.source_name = "Google Finance"
Set data source to Google Finance
81
6
7,027
def set_source_yahoo_options(self):
    """Set the data source to Yahoo Finance, specifically for
    downloading financial options data."""
    self.data_worker = data_worker
    self.worker_args = {
        "function": Options,
        "input": self.input_queue,
        "output": self.output_map,
        "source": 'yahoo',
    }
    self.source_name = "Yahoo Finance Options"
Set data source to yahoo finance specifically to download financial options data
74
13
7,028
def load_jquery(func):
    """A decorator ensuring a function runs with jQuery available.

    On a "jQuery not defined" error, jQuery is injected, waited for,
    and the function is called again.
    """
    @wraps(func)
    def wrapped(browser, *args, **kwargs):
        """Run the function, loading jQuery if needed."""
        try:
            return func(browser, *args, **kwargs)
        except WebDriverException as ex:
            if not is_jquery_not_defined_error(ex.msg):
                raise

            load_script(browser, JQUERY)

            @wait_for
            def jquery_available():
                """Assert that jQuery has loaded."""
                try:
                    return browser.execute_script('return $')
                except WebDriverException:
                    raise AssertionError("jQuery is not loaded")

            jquery_available()
            return func(browser, *args, **kwargs)
    return wrapped
A decorator to ensure a function is run with jQuery available .
168
13
7,029
def check_element_by_selector(self, selector):
    """Assert an element exists matching the given selector."""
    matches = find_elements_by_jquery(world.browser, selector)
    if not matches:
        raise AssertionError("Expected matching elements, none found.")
Assert an element exists matching the given selector .
55
10
7,030
def check_no_element_by_selector(self, selector):
    """Assert no element exists matching the given selector."""
    matches = find_elements_by_jquery(world.browser, selector)
    if matches:
        raise AssertionError(
            "Expected no matching elements, found {}.".format(len(matches)))
Assert an element does not exist matching the given selector .
66
12
7,031
def wait_for_element_by_selector(self, selector, seconds):
    """Assert a matching element appears within the given time period."""
    def assert_element_present():
        """Assert an element matching the given selector exists."""
        if not find_elements_by_jquery(world.browser, selector):
            raise AssertionError("Expected a matching element.")

    wait_for(assert_element_present)(timeout=int(seconds))
Assert an element exists matching the given selector within the given time period .
91
15
7,032
def count_elements_exactly_by_selector(self, number, selector):
    """Assert exactly *number* elements match the given selector."""
    expected = int(number)
    matches = find_elements_by_jquery(world.browser, selector)
    if len(matches) != expected:
        raise AssertionError(
            "Expected {} elements, found {}".format(expected, len(matches)))
Assert n elements exist matching the given selector .
82
10
7,033
def fill_in_by_selector(self, selector, value):
    """Fill in the form element matching the CSS selector."""
    field = find_element_by_jquery(world.browser, selector)
    field.clear()
    field.send_keys(value)
Fill in the form element matching the CSS selector .
50
10
7,034
def submit_by_selector(self, selector):
    """Submit the form matching the CSS selector."""
    find_element_by_jquery(world.browser, selector).submit()
Submit the form matching the CSS selector .
37
8
7,035
def check_by_selector(self, selector):
    """Check the checkbox matching the CSS selector."""
    checkbox = find_element_by_jquery(world.browser, selector)
    if not checkbox.is_selected():
        checkbox.click()
Check the checkbox matching the CSS selector .
48
9
7,036
def click_by_selector(self, selector):
    """Click the element matching the CSS selector."""
    # No need for separate button press step with selector style.
    find_element_by_jquery(world.browser, selector).click()
Click the element matching the CSS selector .
49
8
7,037
def follow_link_by_selector(self, selector):
    """Navigate to the href of the element matching the CSS selector."""
    link = find_element_by_jquery(world.browser, selector)
    world.browser.get(link.get_attribute('href'))
Navigate to the href of the element matching the CSS selector .
54
13
7,038
def is_selected_by_selector(self, selector):
    """Assert the option matching the CSS selector is selected."""
    option = find_element_by_jquery(world.browser, selector)
    if not option.is_selected():
        raise AssertionError("Element expected to be selected.")
Assert the option matching the CSS selector is selected .
58
11
7,039
def select_by_selector(self, selector):
    """Select the option matching the CSS selector."""
    option = find_element_by_jquery(world.browser, selector)
    parents = find_parents_by_jquery(world.browser, selector)
    if not parents:
        raise AssertionError("No parent element found for the option.")
    dropdown = parents[0]
    # Open the dropdown, give the UI a moment, then pick the option.
    dropdown.click()
    sleep(0.3)
    option.click()
    if not option.is_selected():
        raise AssertionError("Option should have become selected after clicking it.")
Select the option matching the CSS selector .
119
8
7,040
def run_filter_calculation(self):
    """Run the CifFilterCalculation on the CifData input node."""
    inputs = {
        'cif': self.inputs.cif,
        'code': self.inputs.cif_filter,
        'parameters': self.inputs.cif_filter_parameters,
        'metadata': {'options': self.inputs.options.get_dict()},
    }
    calculation = self.submit(CifFilterCalculation, **inputs)
    self.report('submitted {}<{}>'.format(CifFilterCalculation.__name__, calculation.uuid))
    return ToContext(cif_filter=calculation)
Run the CifFilterCalculation on the CifData input node .
138
15
7,041
def inspect_filter_calculation(self):
    """Verify that the CifFilterCalculation produced a CifData output."""
    try:
        node = self.ctx.cif_filter
        self.ctx.cif = node.outputs.cif
    except exceptions.NotExistent:
        self.report('aborting: CifFilterCalculation<{}> did not return the required cif output'.format(node.uuid))
        return self.exit_codes.ERROR_CIF_FILTER_FAILED
Inspect the result of the CifFilterCalculation verifying that it produced a CifData output node .
98
22
7,042
def run_select_calculation(self):
    """Run the CifSelectCalculation on the filtered CifData node."""
    inputs = {
        'cif': self.ctx.cif,
        'code': self.inputs.cif_select,
        'parameters': self.inputs.cif_select_parameters,
        'metadata': {'options': self.inputs.options.get_dict()},
    }
    calculation = self.submit(CifSelectCalculation, **inputs)
    self.report('submitted {}<{}>'.format(CifSelectCalculation.__name__, calculation.uuid))
    return ToContext(cif_select=calculation)
Run the CifSelectCalculation on the CifData output node of the CifFilterCalculation .
139
22
7,043
def inspect_select_calculation(self):
    """Verify that the CifSelectCalculation produced a CifData output."""
    try:
        node = self.ctx.cif_select
        self.ctx.cif = node.outputs.cif
    except exceptions.NotExistent:
        self.report('aborting: CifSelectCalculation<{}> did not return the required cif output'.format(node.uuid))
        return self.exit_codes.ERROR_CIF_SELECT_FAILED
Inspect the result of the CifSelectCalculation verifying that it produced a CifData output node .
97
22
7,044
def parse_cif_structure(self):
    """Parse a StructureData from the cleaned CifData."""
    from aiida_codtools.workflows.functions.primitive_structure_from_cif import primitive_structure_from_cif

    # Abort early for cif contents that cannot be parsed into a structure.
    checks = (
        ('has_unknown_species', 'ERROR_CIF_HAS_UNKNOWN_SPECIES'),
        ('has_undefined_atomic_sites', 'ERROR_CIF_HAS_UNDEFINED_ATOMIC_SITES'),
        ('has_attached_hydrogens', 'ERROR_CIF_HAS_ATTACHED_HYDROGENS'),
    )
    for attribute, error in checks:
        if getattr(self.ctx.cif, attribute):
            self.ctx.exit_code = getattr(self.exit_codes, error)
            self.report(self.ctx.exit_code.message)
            return

    parse_inputs = {
        'cif': self.ctx.cif,
        'parse_engine': self.inputs.parse_engine,
        'site_tolerance': self.inputs.site_tolerance,
        'symprec': self.inputs.symprec,
    }

    try:
        structure, node = primitive_structure_from_cif.run_get_node(**parse_inputs)
    except Exception:  # pylint: disable=broad-except
        self.ctx.exit_code = self.exit_codes.ERROR_CIF_STRUCTURE_PARSING_FAILED
        self.report(self.ctx.exit_code.message)
        return

    if node.is_failed:
        self.ctx.exit_code = self.exit_codes(node.exit_status)  # pylint: disable=too-many-function-args
        self.report(self.ctx.exit_code.message)
    else:
        self.ctx.structure = structure
Parse a StructureData from the cleaned CifData returned by the CifSelectCalculation .
454
20
7,045
def results(self):
    """Add the cleaned CifData (and StructureData, if parsed) as outputs."""
    self.out('cif', self.ctx.cif)

    if 'group_cif' in self.inputs:
        self.inputs.group_cif.add_nodes([self.ctx.cif])

    if 'group_structure' in self.inputs:
        try:
            structure = self.ctx.structure
        except AttributeError:
            # No structure was parsed; propagate the recorded exit code.
            return self.ctx.exit_code
        self.inputs.group_structure.add_nodes([structure])
        self.out('structure', structure)

    self.report('workchain finished successfully')
If successfully created add the cleaned CifData and StructureData as output nodes to the workchain .
137
20
7,046
def get_input_node(cls, value):
    """Return a stored Node of the given orm class and value, reusing an
    existing matching node when one is found."""
    from aiida import orm

    if cls in (orm.Bool, orm.Float, orm.Int, orm.Str):
        existing = orm.QueryBuilder().append(cls, filters={'attributes.value': value}).first()
        node = cls(value).store() if existing is None else existing[0]
    elif cls is orm.Dict:
        existing = orm.QueryBuilder().append(cls, filters={'attributes': {'==': value}}).first()
        node = cls(dict=value).store() if existing is None else existing[0]
    else:
        raise NotImplementedError

    return node
Return a Node of a given class and given value .
186
11
7,047
def bind(self, form):
    """Bind this filter's field to the filters form."""
    field = self.field(default=self.default, **self.field_kwargs)
    bound = field.bind(form, self.name, prefix=form._prefix)
    form._fields[self.name] = bound
Bind to filters form .
56
5
7,048
def get_config_path():
    """Return the default configuration path for the current OS.

    Windows uses %APPDATA%; everything else uses the home directory.
    """
    if os.name == "nt":
        base = os.getenv('APPDATA')
    else:
        base = os.path.expanduser('~')
    return os.path.join(base, '.vtjp')
Put together the default configuration path based on OS .
62
10
7,049
def print_table(document, *columns):
    """Print a JSON document (a list of dicts) as a table.

    Each column is an (item_key, header) pair; missing keys show as None.
    """
    headers = [header for _, header in columns]
    table = []
    for element in document:
        row = [element[item] if item in element else None
               for item, _ in columns]
        table.append(row)
    print(tabulate.tabulate(table, headers))
Print json document as table
89
5
7,050
def print_trip_table(document):
    """Print a trip table."""
    headers = ['Alt.', 'Name', 'Time', 'Track', 'Direction', 'Dest.', 'Track', 'Arrival']
    table = []
    for altnr, alternative in enumerate(document, start=1):
        first_trip_in_alt = True
        # A single leg arrives as a dict; normalize to a list in place.
        if not isinstance(alternative['Leg'], list):
            alternative['Leg'] = [alternative['Leg']]
        for part in alternative['Leg']:
            orig = part['Origin']
            dest = part['Destination']
            table.append([
                altnr if first_trip_in_alt else None,
                part['name'],
                orig['rtTime'] if 'rtTime' in orig else orig['time'],
                orig['track'],
                part['direction'] if 'direction' in part else None,
                dest['name'],
                dest['track'],
                dest['rtTime'] if 'rtTime' in dest else dest['time'],
            ])
            first_trip_in_alt = False
    print(tabulate.tabulate(table, headers))
Print trip table
262
3
7,051
def makeproperty(ns, cls=None, name=None, docstring='', descendant=True):
    """Make a property on an instance of an SLDNode.

    If *cls* is omitted the property is assumed to be a text node with
    no corresponding class object.  If *name* is omitted the property is
    assumed to be a complex node with a corresponding class wrapper.
    """
    def find_nodes(self):
        """Locate the matching child nodes of this SLDNode."""
        if cls is None:
            xpath = '%s:%s' % (ns, name)
        else:
            xpath = '%s:%s' % (ns, cls.__name__)
        return self._node.xpath(xpath, namespaces=SLDNode._nsmap)

    def get_property(self):
        """A generic property getter."""
        matches = find_nodes(self)
        if len(matches) != 1:
            return None
        if cls is None:
            return matches[0].text
        elem = cls.__new__(cls)
        cls.__init__(elem, self, descendant=descendant)
        return elem

    def set_property(self, value):
        """A generic property setter."""
        matches = find_nodes(self)
        if len(matches) == 1:
            if cls is None:
                matches[0].text = value
            else:
                # NOTE(review): assigns into the xpath result list, as in
                # the original -- confirm this mutates the DOM as intended.
                matches[0] = value._node
        else:
            if cls is None:
                elem = self._node.makeelement(
                    '{%s}%s' % (SLDNode._nsmap[ns], name), nsmap=SLDNode._nsmap)
                elem.text = value
                self._node.append(elem)
            else:
                self._node.append(value._node)

    def del_property(self):
        """A generic property deleter."""
        matches = find_nodes(self)
        if len(matches) == 1:
            self._node.remove(matches[0])

    return property(get_property, set_property, del_property, docstring)
Make a property on an instance of an SLDNode . If cls is omitted the property is assumed to be a text node with no corresponding class object . If name is omitted the property is assumed to be a complex node with a corresponding class wrapper .
512
51
7,052
def get_or_create_element(self, ns, name):
    """Get the only child element, creating it first if it is missing."""
    existing = self._node.xpath('%s:%s' % (ns, name), namespaces=SLDNode._nsmap)
    if len(existing) == 1:
        return getattr(self, name)
    return self.create_element(ns, name)
Attempt to get the only child element from this SLDNode . If the node does not exist create the element attach it to the DOM and return the class object that wraps the node .
76
37
7,053
def create_element(self, ns, name):
    """Create an element as a child of this SLDNode."""
    tag = '{%s}%s' % (SLDNode._nsmap[ns], name)
    child = self._node.makeelement(tag, nsmap=SLDNode._nsmap)
    self._node.append(child)
    return getattr(self, name)
Create an element as a child of this SLDNode .
79
12
7,054
def normalize(self):
    """Normalize this node and all rules contained within, in place."""
    for i, rnode in enumerate(self._nodes):
        # NOTE(review): the index passed is i - 1 (so -1 for the first
        # node), as in the original -- presumably matching Rule's
        # indexing convention; confirm against the Rule constructor.
        Rule(self, i - 1, descendant=False).normalize()
Normalize this node and all rules contained within . The SLD model is modified in place .
43
19
7,055
def validate(self):
    """Normalize, then validate this document against the SLD schema.

    Any schema validation error messages are logged at INFO level.
    """
    self.normalize()

    if self._node is None:
        logging.debug('The node is empty, and cannot be validated.')
        return False

    # Compile the schema lazily, on first use.
    if self._schema is None:
        self._schema = XMLSchema(self._schemadoc)

    is_valid = self._schema.validate(self._node)
    for msg in self._schema.error_log:
        logging.info('Line:%d, Column:%d -- %s', msg.line, msg.column, msg.message)
    return is_valid
Validate the current file against the SLD schema . This first normalizes the SLD document then validates it . Any schema validation error messages are logged at the INFO level .
130
36
7,056
def helper(path):
    """Hard-link the OS-appropriate helper scripts into *path*."""
    if sys.platform.startswith("win"):
        # link batch files
        src_path = os.path.join(PHLB_BASE_DIR, "helper_cmd")
    elif sys.platform.startswith("linux"):
        # link shell scripts
        src_path = os.path.join(PHLB_BASE_DIR, "helper_sh")
    else:
        print("TODO: %s" % sys.platform)
        return

    if not os.path.isdir(src_path):
        raise RuntimeError("Helper script path not found here: '%s'" % src_path)

    for entry in scandir(src_path):
        print("_" * 79)
        print("Link file: '%s'" % entry.name)
        src = entry.path
        dst = os.path.join(path, entry.name)

        if os.path.exists(dst):
            print("Remove old file '%s'" % dst)
            try:
                os.remove(dst)
            except OSError as err:
                print("\nERROR:\n%s\n" % err)
                continue

        print("source.....: '%s'" % src)
        print("destination: '%s'" % dst)
        try:
            os.link(src, dst)
        except OSError as err:
            print("\nERROR:\n%s\n" % err)
            continue
link helper files to given path
316
6
7,057
def backup(path, name=None):
    """Start a backup run."""
    # Import aliased to avoid shadowing this CLI entry point's name.
    from PyHardLinkBackup.phlb.phlb_main import backup as phlb_backup
    phlb_backup(path, name)
Start a Backup run
32
4
7,058
def verify(backup_path, fast):
    """Verify an existing backup."""
    from PyHardLinkBackup.phlb.verify import verify_backup
    verify_backup(backup_path, fast)
Verify an existing backup
37
5
7,059
def setup_package():
    """Setup procedure: read setup.json and README.md, then call setup()."""
    import json
    from setuptools import setup, find_packages

    with open('setup.json', 'r') as handle:
        setup_json = json.load(handle)

    with open('README.md', 'r') as handle:
        description = handle.read()

    setup(
        include_package_data=True,
        packages=find_packages(),
        setup_requires=['reentry'],
        reentry_register=True,
        long_description=description,
        long_description_content_type='text/markdown',
        **setup_json
    )
Setup procedure .
150
3
7,060
def literal_to_dict ( value ) : if isinstance ( value , Literal ) : if value . language is not None : return { "@value" : str ( value ) , "@language" : value . language } return value . toPython ( ) elif isinstance ( value , URIRef ) : return { "@id" : str ( value ) } elif value is None : return None return str ( value )
Transform an object value into a dict readable value
90
9
7,061
def dict_to_literal ( dict_container : dict ) : if isinstance ( dict_container [ "@value" ] , int ) : return dict_container [ "@value" ] , else : return dict_container [ "@value" ] , dict_container . get ( "@language" , None )
Transforms a JSON + LD PyLD dictionary into an RDFLib object
65
15
7,062
def set_src_filepath ( self , src_dir_path ) : log . debug ( "set_src_filepath() with: '%s'" , src_dir_path ) self . abs_src_filepath = src_dir_path . resolved_path log . debug ( " * abs_src_filepath: %s" % self . abs_src_filepath ) if self . abs_src_filepath is None : log . info ( "Can't resolve source path: %s" , src_dir_path ) return self . sub_filepath = self . abs_src_filepath . relative_to ( self . abs_src_root ) log . debug ( " * sub_filepath: %s" % self . sub_filepath ) self . sub_path = self . sub_filepath . parent log . debug ( " * sub_path: %s" % self . sub_path ) self . filename = self . sub_filepath . name log . debug ( " * filename: %s" % self . filename ) self . abs_dst_path = Path2 ( self . abs_dst_root , self . sub_path ) log . debug ( " * abs_dst_path: %s" % self . abs_dst_path ) self . abs_dst_filepath = Path2 ( self . abs_dst_root , self . sub_filepath ) log . debug ( " * abs_dst_filepath: %s" % self . abs_dst_filepath ) self . abs_dst_hash_filepath = Path2 ( "%s%s%s" % ( self . abs_dst_filepath , os . extsep , phlb_config . hash_name ) ) log . debug ( " * abs_dst_hash_filepath: %s" % self . abs_dst_hash_filepath )
Set one filepath to backup this file . Called for every file in the source directory .
424
18
7,063
def _cryptodome_encrypt ( cipher_factory , plaintext , key , iv ) : encryptor = cipher_factory ( key , iv ) return encryptor . encrypt ( plaintext )
Use a Pycryptodome cipher factory to encrypt data .
43
12
7,064
def _cryptodome_decrypt ( cipher_factory , ciphertext , key , iv ) : decryptor = cipher_factory ( key , iv ) return decryptor . decrypt ( ciphertext )
Use a Pycryptodome cipher factory to decrypt data .
43
12
7,065
def _cryptography_encrypt ( cipher_factory , plaintext , key , iv ) : encryptor = cipher_factory ( key , iv ) . encryptor ( ) return encryptor . update ( plaintext ) + encryptor . finalize ( )
Use a cryptography cipher factory to encrypt data .
55
9
7,066
def _cryptography_decrypt ( cipher_factory , ciphertext , key , iv ) : decryptor = cipher_factory ( key , iv ) . decryptor ( ) return decryptor . update ( ciphertext ) + decryptor . finalize ( )
Use a cryptography cipher factory to decrypt data .
55
9
7,067
def generic_encrypt ( cipher_factory_map , plaintext , key , iv ) : if backend is None : raise PysnmpCryptoError ( 'Crypto backend not available' ) return _ENCRYPT_MAP [ backend ] ( cipher_factory_map [ backend ] , plaintext , key , iv )
Encrypt data using the available backend .
72
8
7,068
def generic_decrypt ( cipher_factory_map , ciphertext , key , iv ) : if backend is None : raise PysnmpCryptoError ( 'Crypto backend not available' ) return _DECRYPT_MAP [ backend ] ( cipher_factory_map [ backend ] , ciphertext , key , iv )
Decrypt data using the available backend .
71
8
7,069
def _prepare_disks ( self , disks_name ) : fstab = '/etc/fstab' for disk in tqdm ( disks_name . split ( ',' ) ) : sudo ( 'umount /dev/{0}' . format ( disk ) , warn_only = True ) if sudo ( 'mkfs.xfs -f /dev/{0}' . format ( disk ) , warn_only = True ) . failed : sudo ( 'apt-get update' ) sudo ( 'apt-get -y install xfsprogs' ) sudo ( 'mkfs.xfs -f /dev/{0}' . format ( disk ) ) sudo ( 'mkdir -p /srv/node/{0}' . format ( disk ) ) files . append ( fstab , '/dev/{0} /srv/node/{1} xfs noatime,nodiratime,nobarrier,logbufs=8 0 2' . format ( disk , disk ) , use_sudo = True ) sudo ( 'mount /srv/node/{0}' . format ( disk ) )
format disks to xfs and mount it
249
8
7,070
def load_file ( cls , file_path ) : with open ( os . path . abspath ( file_path ) , 'rt' ) as f : s = Sudoku ( f . read ( ) . strip ( ) ) return s
Load a Sudoku from file .
52
7
7,071
def _parse_from_string ( string_input ) : # Check if comment line is present. read_lines = list ( filter ( None , string_input . split ( '\n' ) ) ) if read_lines [ 0 ] . startswith ( '#' ) : comment = read_lines . pop ( 0 ) else : comment = '' if len ( read_lines ) > 1 : # Assume that Sudoku is defined over several rows. order = int ( math . sqrt ( len ( read_lines ) ) ) else : # Sudoku is defined on one line. order = int ( math . sqrt ( math . sqrt ( len ( read_lines [ 0 ] ) ) ) ) read_lines = filter ( lambda x : len ( x ) == ( order ** 2 ) , [ read_lines [ 0 ] [ i : ( i + order ** 2 ) ] for i in utils . range_ ( len ( read_lines [ 0 ] ) ) if i % ( order ** 2 ) == 0 ] ) matrix = utils . get_list_of_lists ( order ** 2 , order ** 2 , fill_with = 0 ) for i , line in enumerate ( read_lines ) : line = line . strip ( ) for j , value in enumerate ( line ) : if value . isdigit ( ) and int ( value ) : matrix [ i ] [ j ] = int ( value ) else : matrix [ i ] [ j ] = 0 return order , comment , matrix
Parses a Sudoku instance from string input .
321
11
7,072
def row_iter ( self ) : for k in utils . range_ ( self . side ) : yield self . row ( k )
Get an iterator over all rows in the Sudoku
29
10
7,073
def col_iter ( self ) : for k in utils . range_ ( self . side ) : yield self . col ( k )
Get an iterator over all columns in the Sudoku
29
10
7,074
def box ( self , row , col ) : box = [ ] box_i = ( row // self . order ) * self . order box_j = ( col // self . order ) * self . order for i in utils . range_ ( box_i , box_i + self . order ) : for j in utils . range_ ( box_j , box_j + self . order ) : box . append ( self [ i ] [ j ] ) return box
Get the values of the box pertaining to the specified row and column of the Sudoku
102
17
7,075
def box_iter ( self ) : for i in utils . range_ ( self . order ) : for j in utils . range_ ( self . order ) : yield self . box ( i * 3 , j * 3 )
Get an iterator over all boxes in the Sudoku
49
10
7,076
def set_cell ( self , i , j , value ) : bool_tests = [ value in self . _possibles [ i ] [ j ] , value in self . _poss_rows [ i ] , value in self . _poss_cols [ j ] , value in self . _poss_box [ ( i // self . order ) * self . order + ( j // self . order ) ] , value not in self . row ( i ) , value not in self . col ( j ) , value not in self . box ( i , j ) ] if all ( bool_tests ) : self [ i ] [ j ] = value else : raise SudokuHasNoSolutionError ( "This value cannot be set here!" )
Set a cell s value with a series of safety checks
160
11
7,077
def solve ( self , verbose = False , allow_brute_force = True ) : while not self . is_solved : # Update possibles arrays. self . _update ( ) # See if any position can be singled out. singles_found = False or self . _fill_naked_singles ( ) or self . _fill_hidden_singles ( ) # If singles_found is False, then no new uniquely defined cells were found # and this solver cannot solve the Sudoku. We either use brute force or throw an error. # Else, if singles_found is True, run another iteration to see if new singles have shown up. if not singles_found : if allow_brute_force : solution = None try : dlxs = DancingLinksSolver ( copy . deepcopy ( self . _matrix ) ) solutions = dlxs . solve ( ) solution = next ( solutions ) more_solutions = next ( solutions ) except StopIteration as e : if solution is not None : self . _matrix = solution else : raise SudokuHasNoSolutionError ( "Dancing Links solver could not find any solution." ) except Exception as e : raise SudokuHasNoSolutionError ( "Brute Force method failed." ) else : # We end up here if the second `next(solutions)` works, # i.e. if multiple solutions exist. raise SudokuHasMultipleSolutionsError ( "This Sudoku has multiple solutions!" ) self . solution_steps . append ( "BRUTE FORCE - Dancing Links" ) break else : print ( self ) raise SudokuTooDifficultError ( "This Sudoku requires more advanced methods!" ) if verbose : print ( "Sudoku solved in {0} iterations!\n{1}" . format ( len ( self . solution_steps ) , self ) ) for step in self . solution_steps : print ( step )
Solve the Sudoku .
408
6
7,078
def _update ( self ) : # Update possible values in each row, column and box. for i , ( row , col , box ) in enumerate ( zip ( self . row_iter ( ) , self . col_iter ( ) , self . box_iter ( ) ) ) : self . _poss_rows [ i ] = set ( self . _values ) . difference ( set ( row ) ) self . _poss_cols [ i ] = set ( self . _values ) . difference ( set ( col ) ) self . _poss_box [ i ] = set ( self . _values ) . difference ( set ( box ) ) # Iterate over the entire Sudoku and combine information about possible values # from rows, columns and boxes to get a set of possible values for each cell. for i in utils . range_ ( self . side ) : self . _possibles [ i ] = { } for j in utils . range_ ( self . side ) : self . _possibles [ i ] [ j ] = set ( ) if self [ i ] [ j ] > 0 : continue this_box_index = ( ( i // self . order ) * self . order ) + ( j // self . order ) self . _possibles [ i ] [ j ] = self . _poss_rows [ i ] . intersection ( self . _poss_cols [ j ] ) . intersection ( self . _poss_box [ this_box_index ] )
Calculate remaining values for each row column box and finally cell .
323
14
7,079
def _fill_naked_singles ( self ) : simple_found = False for i in utils . range_ ( self . side ) : for j in utils . range_ ( self . side ) : if self [ i ] [ j ] > 0 : continue p = self . _possibles [ i ] [ j ] if len ( p ) == 1 : self . set_cell ( i , j , list ( p ) [ 0 ] ) self . solution_steps . append ( self . _format_step ( "NAKED" , ( i , j ) , self [ i ] [ j ] ) ) simple_found = True elif len ( p ) == 0 : raise SudokuHasNoSolutionError ( "Error made! No possible value for ({0},{1})!" . format ( i + 1 , j + 1 ) ) return simple_found
Look for naked singles i . e . cells with ony one possible value .
187
16
7,080
def _fill_hidden_singles ( self ) : for i in utils . range_ ( self . side ) : box_i = ( i // self . order ) * self . order for j in utils . range_ ( self . side ) : box_j = ( j // self . order ) * self . order # Skip if this cell is determined already. if self [ i ] [ j ] > 0 : continue # Look for hidden single in rows. p = self . _possibles [ i ] [ j ] for k in utils . range_ ( self . side ) : if k == j : continue p = p . difference ( self . _possibles [ i ] [ k ] ) if len ( p ) == 1 : # Found a hidden single in a row! self . set_cell ( i , j , p . pop ( ) ) self . solution_steps . append ( self . _format_step ( "HIDDEN-ROW" , ( i , j ) , self [ i ] [ j ] ) ) return True # Look for hidden single in columns p = self . _possibles [ i ] [ j ] for k in utils . range_ ( self . side ) : if k == i : continue p = p . difference ( self . _possibles [ k ] [ j ] ) if len ( p ) == 1 : # Found a hidden single in a column! self . set_cell ( i , j , p . pop ( ) ) self . solution_steps . append ( self . _format_step ( "HIDDEN-COL" , ( i , j ) , self [ i ] [ j ] ) ) return True # Look for hidden single in box p = self . _possibles [ i ] [ j ] for k in utils . range_ ( box_i , box_i + self . order ) : for kk in utils . range_ ( box_j , box_j + self . order ) : if k == i and kk == j : continue p = p . difference ( self . _possibles [ k ] [ kk ] ) if len ( p ) == 1 : # Found a hidden single in a box! self . set_cell ( i , j , p . pop ( ) ) self . solution_steps . append ( self . _format_step ( "HIDDEN-BOX" , ( i , j ) , self [ i ] [ j ] ) ) return True return False
Look for hidden singles i . e . cells with only one unique possible value in row column or box .
531
21
7,081
def parse ( cls , resource , direction = "children" , * * additional_parameters ) -> "DtsCollection" : data = jsonld . expand ( resource ) if len ( data ) == 0 : raise JsonLdCollectionMissing ( "Missing collection in JSON" ) data = data [ 0 ] obj = cls ( identifier = resource [ "@id" ] , * * additional_parameters ) obj . _parse_metadata ( data ) obj . _parse_members ( data , direction = direction , * * additional_parameters ) return obj
Given a dict representation of a json object generate a DTS Collection
118
13
7,082
def _filldown ( self , lineno ) : if self . line > lineno : # XXX decorated functions make us jump backwards. # understand this more return self . lines . extend ( self . current_context for _ in range ( self . line , lineno ) ) self . line = lineno
Copy current_context into lines down up until lineno
63
11
7,083
def _add_section ( self , node ) : self . _filldown ( node . lineno ) # push a new context onto stack self . context . append ( node . name ) self . _update_current_context ( ) for _ in map ( self . visit , iter_child_nodes ( node ) ) : pass # restore current context self . context . pop ( ) self . _update_current_context ( )
Register the current node as a new context block
91
9
7,084
def _module_name ( filename ) : absfile = os . path . abspath ( filename ) match = filename for base in [ '' ] + sys . path : base = os . path . abspath ( base ) if absfile . startswith ( base ) : match = absfile [ len ( base ) : ] break return SUFFIX_RE . sub ( '' , match ) . lstrip ( '/' ) . replace ( '/' , '.' )
Try to find a module name for a file path by stripping off a prefix found in sys . modules .
98
21
7,085
def from_modulename ( cls , module_name ) : # XXX make this more robust (pyc files? zip archives? etc) slug = module_name . replace ( '.' , '/' ) paths = [ slug + '.py' , slug + '/__init__.py' ] # always search from current directory for base in [ '' ] + sys . path : for path in paths : fullpath = os . path . join ( base , path ) if os . path . exists ( fullpath ) : return cls ( fullpath , prefix = module_name ) else : raise ValueError ( "Module not found: %s" % module_name )
Build a PythonFile given a dotted module name like a . b . c
142
15
7,086
def context_range ( self , context ) : if not context . startswith ( self . prefix ) : context = self . prefix + '.' + context lo = hi = None for idx , line_context in enumerate ( self . lines , 1 ) : # context is hierarchical -- context spans itself # and any suffix. if line_context . startswith ( context ) : lo = lo or idx hi = idx if lo is None : raise ValueError ( "Context %s does not exist in file %s" % ( context , self . filename ) ) return lo , hi + 1
Return the 1 - offset right - open range of lines spanned by a particular context name .
127
19
7,087
def context ( self , line ) : # XXX due to a limitation in Visitor, # non-python code after the last python code # in a file is not added to self.lines, so we # have to guard against IndexErrors. idx = line - 1 if idx >= len ( self . lines ) : return self . prefix return self . lines [ idx ]
Return the context for a given 1 - offset line number .
80
12
7,088
def write ( label , plist , scope = USER ) : fname = compute_filename ( label , scope ) with open ( fname , "wb" ) as f : plistlib . writePlist ( plist , f ) return fname
Writes the given property list to the appropriate file on disk and returns the absolute filename .
54
18
7,089
def alphakt_pth ( v , temp , v0 , alpha0 , k0 , n , z , t_ref = 300. , three_r = 3. * constants . R ) : return alpha0 * k0 * ( temp - t_ref )
calculate thermal pressure from thermal expansion and bulk modulus
59
12
7,090
def _get_output_nodes ( self , output_path , error_path ) : status = cod_deposition_states . UNKNOWN messages = [ ] if output_path is not None : content = None with open ( output_path ) as f : content = f . read ( ) status , message = CifCodDepositParser . _deposit_result ( content ) messages . extend ( message . split ( '\n' ) ) if error_path is not None : with open ( error_path ) as f : content = f . readlines ( ) lines = [ x . strip ( '\n' ) for x in content ] messages . extend ( lines ) parameters = { 'output_messages' : messages , 'status' : status } output_nodes = [ ] output_nodes . append ( ( 'messages' , Dict ( dict = parameters ) ) ) if status == cod_deposition_states . SUCCESS : return True , output_nodes return False , output_nodes
Extracts output nodes from the standard output and standard error files
220
13
7,091
def filter_ ( self , columns , value ) : for column in columns : if column not in self . data . columns : raise ValueError ( "Column %s not in DataFrame columns: %s" % ( column , list ( self . data ) ) ) for column in columns : # Filtering on empty data series doesn't make sense at all and also would raise an error column_len = len ( self . data [ column ] ) if column_len > 0 and column_len != self . data [ column ] . isnull ( ) . sum ( ) : self . data = self . data [ self . data [ column ] != value ] return self . data
This method filter some of the rows where the value is found in each of the columns .
140
18
7,092
def _check_directory ( directory ) : if directory is not None : if not exists ( directory ) : raise CommandError ( "Cannot run command - directory {0} does not exist" . format ( directory ) ) if not isdir ( directory ) : raise CommandError ( "Cannot run command - specified directory {0} is not a directory." . format ( directory ) )
Raise exception if directory does not exist .
80
9
7,093
def load_tweets ( filename = 'tweets.zip' ) : basename , ext = os . path . splitext ( filename ) json_file = basename + '.json' json_path = os . path . join ( DATA_PATH , json_file ) zip_path = os . path . join ( DATA_PATH , basename + '.zip' ) if not os . path . isfile ( json_path ) : zf = ZipFile ( zip_path , 'r' ) zf . extract ( json_file , DATA_PATH ) with open ( json_path , 'rUb' ) as f : return json . load ( f )
r Extract the cached tweets database if necessary and load + parse the json .
145
15
7,094
def main ( args ) : global logging , log args = parse_args ( args ) logging . basicConfig ( format = LOG_FORMAT , level = logging . DEBUG if args . verbose else logging . INFO , stream = sys . stdout ) df = cat_tweets ( path = args . path , verbosity = args . verbose + 1 , numtweets = args . numtweets , ignore_suspicious = False ) log . info ( 'Combined {} tweets' . format ( len ( df ) ) ) df = drop_nan_columns ( df ) save_tweets ( df , path = args . path , filename = args . tweetfile ) geo = get_geo ( df , path = args . path , filename = args . geofile ) log . info ( "Combined {} tweets into a single file {} and set asside {} geo tweets in {}" . format ( len ( df ) , args . tweetfile , len ( geo ) , args . geofile ) ) return df , geo
API with args object containing configuration parameters
222
7
7,095
def drop_nan_columns ( df , thresh = 325 ) : if thresh < 1 : thresh = int ( thresh * df ) return df . dropna ( axis = 1 , thresh = thresh , inplace = False )
Drop columns that are mostly NaNs
53
7
7,096
def fast_deduplication_backup ( self , old_backup_entry , process_bar ) : # TODO: merge code with parts from deduplication_backup() src_path = self . dir_path . resolved_path log . debug ( "*** fast deduplication backup: '%s'" , src_path ) old_file_path = old_backup_entry . get_backup_path ( ) if not self . path_helper . abs_dst_path . is_dir ( ) : try : self . path_helper . abs_dst_path . makedirs ( mode = phlb_config . default_new_path_mode ) except OSError as err : raise BackupFileError ( "Error creating out path: %s" % err ) else : assert not self . path_helper . abs_dst_filepath . is_file ( ) , ( "Out file already exists: %r" % self . path_helper . abs_src_filepath ) with self . path_helper . abs_dst_hash_filepath . open ( "w" ) as hash_file : try : old_file_path . link ( self . path_helper . abs_dst_filepath ) # call os.link() except OSError as err : log . error ( "Can't link '%s' to '%s': %s" % ( old_file_path , self . path_helper . abs_dst_filepath , err ) ) log . info ( "Mark %r with 'no link source'." , old_backup_entry ) old_backup_entry . no_link_source = True old_backup_entry . save ( ) # do a normal copy backup self . deduplication_backup ( process_bar ) return hash_hexdigest = old_backup_entry . content_info . hash_hexdigest hash_file . write ( hash_hexdigest ) file_size = self . dir_path . stat . st_size if file_size > 0 : # tqdm will not accept 0 bytes files ;) process_bar . update ( file_size ) BackupEntry . objects . create ( backup_run = self . backup_run , backup_entry_path = self . path_helper . abs_dst_filepath , hash_hexdigest = hash_hexdigest , ) if self . _SIMULATE_SLOW_SPEED : log . error ( "Slow down speed for tests!" ) time . sleep ( self . _SIMULATE_SLOW_SPEED ) self . fast_backup = True # Was a fast backup used? self . file_linked = True
We can just link a old backup entry
604
8
7,097
def deduplication_backup ( self , process_bar ) : self . fast_backup = False # Was a fast backup used? src_path = self . dir_path . resolved_path log . debug ( "*** deduplication backup: '%s'" , src_path ) log . debug ( "abs_src_filepath: '%s'" , self . path_helper . abs_src_filepath ) log . debug ( "abs_dst_filepath: '%s'" , self . path_helper . abs_dst_filepath ) log . debug ( "abs_dst_hash_filepath: '%s'" , self . path_helper . abs_dst_hash_filepath ) log . debug ( "abs_dst_dir: '%s'" , self . path_helper . abs_dst_path ) if not self . path_helper . abs_dst_path . is_dir ( ) : try : self . path_helper . abs_dst_path . makedirs ( mode = phlb_config . default_new_path_mode ) except OSError as err : raise BackupFileError ( "Error creating out path: %s" % err ) else : assert not self . path_helper . abs_dst_filepath . is_file ( ) , ( "Out file already exists: %r" % self . path_helper . abs_src_filepath ) try : try : with self . path_helper . abs_src_filepath . open ( "rb" ) as in_file : with self . path_helper . abs_dst_hash_filepath . open ( "w" ) as hash_file : with self . path_helper . abs_dst_filepath . open ( "wb" ) as out_file : hash = self . _deduplication_backup ( self . dir_path , in_file , out_file , process_bar ) hash_hexdigest = hash . hexdigest ( ) hash_file . write ( hash_hexdigest ) except OSError as err : # FIXME: Better error message raise BackupFileError ( "Skip file %s error: %s" % ( self . path_helper . abs_src_filepath , err ) ) except KeyboardInterrupt : # Try to remove created files try : self . path_helper . abs_dst_filepath . unlink ( ) except OSError : pass try : self . path_helper . abs_dst_hash_filepath . unlink ( ) except OSError : pass raise KeyboardInterrupt old_backup_entry = deduplicate ( self . path_helper . abs_dst_filepath , hash_hexdigest ) if old_backup_entry is None : log . debug ( "File is unique." ) self . file_linked = False # Was a hardlink used? else : log . 
debug ( "File was deduplicated via hardlink to: %s" % old_backup_entry ) self . file_linked = True # Was a hardlink used? # set origin access/modified times to the new created backup file atime_ns = self . dir_path . stat . st_atime_ns mtime_ns = self . dir_path . stat . st_mtime_ns self . path_helper . abs_dst_filepath . utime ( ns = ( atime_ns , mtime_ns ) ) # call os.utime() log . debug ( "Set mtime to: %s" % mtime_ns ) BackupEntry . objects . create ( backup_run = self . backup_run , backup_entry_path = self . path_helper . abs_dst_filepath , hash_hexdigest = hash_hexdigest , ) self . fast_backup = False
Backup the current file and compare the content .
871
10
7,098
def _backup_dir_item ( self , dir_path , process_bar ) : self . path_helper . set_src_filepath ( dir_path ) if self . path_helper . abs_src_filepath is None : self . total_errored_items += 1 log . info ( "Can't backup %r" , dir_path ) # self.summary(no, dir_path.stat.st_mtime, end=" ") if dir_path . is_symlink : self . summary ( "TODO Symlink: %s" % dir_path ) return if dir_path . resolve_error is not None : self . summary ( "TODO resolve error: %s" % dir_path . resolve_error ) pprint_path ( dir_path ) return if dir_path . different_path : self . summary ( "TODO different path:" ) pprint_path ( dir_path ) return if dir_path . is_dir : self . summary ( "TODO dir: %s" % dir_path ) elif dir_path . is_file : # self.summary("Normal file: %s", dir_path) file_backup = FileBackup ( dir_path , self . path_helper , self . backup_run ) old_backup_entry = self . fast_compare ( dir_path ) if old_backup_entry is not None : # We can just link the file from a old backup file_backup . fast_deduplication_backup ( old_backup_entry , process_bar ) else : file_backup . deduplication_backup ( process_bar ) assert file_backup . fast_backup is not None , dir_path . path assert file_backup . file_linked is not None , dir_path . path file_size = dir_path . stat . st_size if file_backup . file_linked : # os.link() was used self . total_file_link_count += 1 self . total_stined_bytes += file_size else : self . total_new_file_count += 1 self . total_new_bytes += file_size if file_backup . fast_backup : self . total_fast_backup += 1 else : self . summary ( "TODO:" % dir_path ) pprint_path ( dir_path )
Backup one dir item
534
5
7,099
def print_update ( self ) : print ( "\r\n" ) now = datetime . datetime . now ( ) print ( "Update info: (from: %s)" % now . strftime ( "%c" ) ) current_total_size = self . total_stined_bytes + self . total_new_bytes if self . total_errored_items : print ( " * WARNING: %i omitted files!" % self . total_errored_items ) print ( " * fast backup: %i files" % self . total_fast_backup ) print ( " * new content saved: %i files (%s %.1f%%)" % ( self . total_new_file_count , human_filesize ( self . total_new_bytes ) , to_percent ( self . total_new_bytes , current_total_size ) , ) ) print ( " * stint space via hardlinks: %i files (%s %.1f%%)" % ( self . total_file_link_count , human_filesize ( self . total_stined_bytes ) , to_percent ( self . total_stined_bytes , current_total_size ) , ) ) duration = default_timer ( ) - self . start_time performance = current_total_size / duration / 1024.0 / 1024.0 print ( " * present performance: %.1fMB/s\n" % performance )
print some status information in between .
312
7