idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
47,900
def get_submodules(module):
    """Import all submodules of ``module`` and return them in a dict.

    Keys are module names, values the imported module objects.  Raises
    RuntimeError if ``module`` is not actually a module object.
    """
    if not inspect.ismodule(module):
        raise RuntimeError('Can only extract submodules from a module object, '
                           'for example imported via importlib.import_module')

    submodules = get_members(module, inspect.ismodule)

    module_path = list(getattr(module, '__path__', [None]))[0]
    if module_path is None:
        return submodules

    for entry in listdir(module_path):
        name = extract_module_name(osp.join(module_path, entry))
        if name is None:
            continue
        try:
            submodules[name] = importlib.import_module(
                '.{}'.format(name), package=module.__name__)
        except ImportError:
            # Entries that look like modules but fail to import are skipped.
            pass

    return submodules
This function imports all sub - modules of the supplied module and returns a dictionary with module names as keys and the sub - module objects as values . If the supplied parameter is not a module object a RuntimeError is raised .
47,901
def extract_module_name(absolute_path):
    """Derive a module name from the last component of ``absolute_path``.

    Directories yield their own name, ``*.py`` files yield the file name
    without extension.  Names starting with '.' or '_' and files with any
    other extension yield None.
    """
    base_name = osp.basename(osp.normpath(absolute_path))

    # Hidden and private entries are never considered modules.
    if base_name[0] in ('.', '_'):
        return None

    if osp.isdir(absolute_path):
        return base_name

    module_name, extension = osp.splitext(base_name)
    return module_name if extension == '.py' else None
This function tries to extract a valid module name from the basename of the supplied path. If it's a directory, the directory name is returned; if it's a file, the file name without extension is returned. If the basename starts with _ or . , or if it's a file with an extension other than .py, the function returns None.
47,902
def dict_strict_update(base_dict, update_dict):
    """Update ``base_dict`` in place, rejecting keys it does not contain.

    Raises RuntimeError if ``update_dict`` introduces any new key; the
    offending keys are attached to the exception as a second argument.
    """
    unknown = set(update_dict.keys()) - set(base_dict.keys())
    if unknown:
        raise RuntimeError(
            'The update dictionary contains keys that are not part of '
            'the base dictionary: {}'.format(str(unknown)), unknown)

    base_dict.update(update_dict)
This function updates base_dict with update_dict if and only if update_dict does not contain keys that are not already in base_dict . It is essentially a more strict interpretation of the term updating the dict .
47,903
def format_doc_text(text):
    """Wrap documentation text consistently for command line display.

    The text is cleaned via inspect.cleandoc and every line is wrapped
    independently (to 99 characters) so manual line breaks survive.
    """
    cleaned_lines = inspect.cleandoc(text).splitlines()
    return '\n'.join(
        textwrap.fill(line, width=99, initial_indent=' ', subsequent_indent=' ')
        for line in cleaned_lines)
A very thin wrapper around textwrap . fill to consistently wrap documentation text for display in a command line environment . The text is wrapped to 99 characters with an indentation depth of 4 spaces . Each line is wrapped independently in order to preserve manually added line breaks .
47,904
def do_import(self, *names):
    """Import ``names`` from the module configured at construction time.

    On ImportError every requested name is replaced by a stub class whose
    constructor raises the stored exception.  A single name is returned
    directly instead of a one-element tuple.
    """
    try:
        imported = importlib.import_module(self._module)
        objects = tuple(getattr(imported, name) for name in names)
    except ImportError:
        def failing_init(obj, *args, **kwargs):
            raise self._exception

        objects = tuple(type(name, (object,), {'__init__': failing_init})
                        for name in names)

    return objects[0] if len(objects) == 1 else objects
Tries to import names from the module specified on initialization of the FromOptionalDependency - object . In case an ImportError occurs the requested names are replaced with stub objects .
47,905
def get_api(self):
    """Return the exposed API: wrapped object's class name and method list."""
    exposed_methods = list(self._function_map.keys())
    return {
        'class': type(self._object).__name__,
        'methods': exposed_methods,
    }
This method returns the class name and a list of exposed methods . It is exposed to RPC - clients by an instance of ExposedObjectCollection .
47,906
def remove_object(self, name):
    """Remove the object exposed under ``name``.

    All functions registered for it ('name.fn' and 'name:fn' forms) are
    removed too.  Raises RuntimeError if no such object is registered.
    """
    if name not in self._object_map:
        raise RuntimeError('No object with name {} is registered.'.format(name))

    prefixes = (name + '.', name + ':')
    stale = [fn for fn in list(self._function_map.keys()) if fn.startswith(prefixes)]
    for fn_name in stale:
        self._remove_function(fn_name)

    del self._object_map[name]
Remove the object exposed under that name . If no object is registered under the supplied name a RuntimeError is raised .
47,907
def set_set_point(self, param):
    """Set the target temperature if it lies within the device limits.

    Out-of-range values are silently ignored.  Always returns ''.
    """
    in_range = self.temperature_low_limit <= param <= self.temperature_high_limit
    if in_range:
        self.set_point_temperature = param
    return ""
Sets the target temperature .
47,908
def set_circulating(self, param):
    """Switch circulation (heater) on (1) or off (0); other values ignored.

    Always returns ''.
    """
    if param in (0, 1):
        self.is_circulating = param
        self.circulate_commanded = bool(param)
    return ""
Sets whether to circulate - in effect whether the heater is on .
47,909
def get_protocols(self, device):
    """Return the list of protocols available for ``device``."""
    builder = self._reg.device_builder(device, self._rv)
    return builder.protocols
Returns a list of available protocols for the specified device .
47,910
def linear(current, target, rate, dt):
    """Move ``current`` towards ``target`` at constant ``rate`` for time ``dt``.

    Returns the new value, clamped so it never overshoots ``target``.
    """
    if target == current:
        return current

    direction = 1 if target > current else -1
    candidate = current + direction * rate * dt

    # Clamp once the target has been reached or passed.
    if direction * candidate > direction * target:
        return target
    return candidate
This function returns the new value after moving towards target at the given speed constantly for the time dt .
47,911
def meta(self):
    """Meta data read from the bound target, or {} when nothing is bound."""
    if self._pv.meta_data_property and self._meta_target:
        return getattr(self._meta_target, self._pv.meta_data_property)
    return {}
Value of the bound meta - property on the target .
47,912
def doc(self):
    """Docstring override from the PV object, or the target property's doc."""
    override = self._pv.doc
    if override:
        return override

    prop = getattr(type(self._target), self._pv.property, None)
    return inspect.getdoc(prop) or ''
Docstring of property on target or override specified on PV - object .
47,913
def bind(self, *targets):
    """Bind this PV to the supplied targets and return a BoundPV.

    Targets are inspected in the order supplied; the value and the meta
    data properties are resolved independently via _get_target.
    """
    # NOTE(review): the property names are unconditionally reset here,
    # overwriting anything configured earlier -- presumably intentional,
    # but confirm against the PV class before changing.
    self.property = 'value'
    self.meta_data_property = 'meta'
    return BoundPV(
        self,
        self._get_target(self.property, *targets),
        self._get_target(self.meta_data_property, *targets))
Tries to bind the PV to one of the supplied targets . Targets are inspected according to the order in which they are supplied .
47,914
def _get_callable ( self , func , * targets ) : if not callable ( func ) : func_name = func func = next ( ( getattr ( obj , func , None ) for obj in targets if func in dir ( obj ) ) , None ) if not func : raise AttributeError ( 'No method with the name \'{}\' could be found on any of the target objects ' '(device, interface). Please check the spelling.' . format ( func_name ) ) return func
If func is already a callable, it is returned directly. If it's a string, it is assumed to be the name of a method on one of the objects supplied in targets, and that method is returned. If no method with the specified name is found, an AttributeError is raised.
47,915
def _function_has_n_args ( self , func , n ) : if inspect . ismethod ( func ) : n += 1 argspec = inspect . getargspec ( func ) defaults = argspec . defaults or ( ) return len ( argspec . args ) - len ( defaults ) == n
Returns true if func has n arguments . Arguments with default and self for methods are not considered .
47,916
def start_server(self):
    """Create and start the pcaspy server exposing the interface's PVs.

    Does nothing beyond logging if a server instance already exists.
    """
    if self._server is None:
        self._server = SimpleServer()
        # One PV record per bound PV, all under the configured prefix.
        self._server.createPV(
            prefix=self._options.prefix,
            pvdb={k: v.config for k, v in self.interface.bound_pvs.items()})
        self._driver = PropertyExposingDriver(
            interface=self.interface, device_lock=self.device_lock)
        # Push initial values so clients see a consistent state right away.
        self._driver.process_pv_updates(force=True)
        self.log.info(
            'Started serving PVs: %s',
            ', '.join((self._options.prefix + pv for pv in self.interface.bound_pvs.keys())))
Creates a pcaspy - server .
47,917
def handle(self, cycle_delay=0.1):
    """Process pcaspy requests for roughly ``cycle_delay`` seconds.

    No-op while the server has not been started.
    """
    if self._server is None:
        return
    self._server.process(cycle_delay)
    self._driver.process_pv_updates()
Call this method to spend about cycle_delay seconds processing requests in the pcaspy server . Under load for example when running caget at a high frequency the actual time spent in the method may be much shorter . This effect is not corrected for .
47,918
def _make_request(self, method, *args):
    """Perform a JSON-RPC request over the connection and return the result.

    Raises ProtocolException for malformed responses (missing or
    mismatched request ID, or an error without data).  Typed server-side
    exceptions are re-raised locally as the same type when it exists in
    the ``exceptions`` module, otherwise wrapped in a RemoteException.
    """
    response, request_id = self._connection.json_rpc(self._prefix + method, *args)

    if 'id' not in response:
        raise ProtocolException('JSON-RPC response does not contain ID field.')

    if response['id'] != request_id:
        raise ProtocolException(
            'ID of JSON-RPC request ({}) did not match response ({}).'.format(
                request_id, response['id']))

    if 'result' in response:
        return response['result']

    if 'error' in response:
        if 'data' in response['error']:
            # The server transmitted a typed exception; try to rebuild it.
            exception_type = response['error']['data']['type']
            exception_message = response['error']['data']['message']
            if not hasattr(exceptions, exception_type):
                raise RemoteException(exception_type, exception_message)
            else:
                exception = getattr(exceptions, exception_type)
                raise exception(exception_message)
        else:
            raise ProtocolException(response['error']['message'])
    # NOTE(review): a response with neither 'result' nor 'error' falls
    # through and returns None -- confirm this is intended.
This method performs a JSON-RPC request via the object's ZMQ socket. If successful, the result is returned; otherwise exceptions are raised. Server-side exceptions are raised using the same type as on the server if they are part of the exceptions module. Otherwise a RemoteException is raised.
47,919
def get_status(self):
    """Model the device's 'T' status command.

    Returns a 10-character status record: byte 0 holds a state code
    (stopped/heat/cool/hold, or 0x50 when a hold was commanded), byte 1
    fault flags (bit 0 = pump overspeed), byte 2 is 0x80 + pump speed,
    and bytes 6-9 carry the temperature in tenths of a degree as four
    hex digits.
    """
    self.device.serial_command_mode = True

    Tarray = [0x80] * 10

    # Map the state machine's current state to the protocol's state code.
    Tarray[0] = {
        'stopped': 0x01,
        'heat': 0x10,
        'cool': 0x20,
        'hold': 0x30,
    }.get(self.device._csm.state, 0x01)
    # A commanded hold is reported with its own code.
    if Tarray[0] == 0x30 and self.device.hold_commanded:
        Tarray[0] = 0x50

    if self.device.pump_overspeed:
        Tarray[1] |= 0x01
    Tarray[2] = 0x80 + self.device.pump_speed

    # Temperature in tenths of a degree, masked to 16 bits, rendered as
    # four lowercase hex characters.
    Tarray[6:10] = [ord(x) for x in "%04x" % (int(self.device.temperature * 10) & 0xFFFF)]

    return ''.join(chr(c) for c in Tarray)
Models T Command functionality of device .
47,920
def set_rate(self, param):
    """Model the device's 'Rate' command.

    Accepts a rate in hundredths of a degree (1..15000) and stores it in
    degrees; out-of-range values are ignored.  Always returns ''.
    """
    candidate = int(param)
    if 1 <= candidate <= 15000:
        self.device.temperature_rate = candidate / 100.0
    return ""
Models Rate Command functionality of device .
47,921
def set_limit(self, param):
    """Model the device's 'Limit' command.

    Accepts a limit in tenths of a degree (-2000..6000) and stores it in
    degrees; out-of-range values are ignored.  Always returns ''.
    """
    candidate = int(param)
    if -2000 <= candidate <= 6000:
        self.device.temperature_limit = candidate / 10.0
    return ""
Models Limit Command functionality of device .
47,922
def pump_command(self, param):
    """Model the LNP pump commands.

    'a0' selects automatic mode, 'm0' manual mode; a single character
    from the lookup table sets the manual target speed (0..30).
    Always returns ''.
    """
    # Kept as a list so only single characters match (a string would
    # also match multi-character substrings).
    speed_lookup = list("0123456789:;<=>?@ABCDEFGHIJKLMN")

    if param == "a0":
        self.device.pump_manual_mode = False
    elif param == "m0":
        self.device.pump_manual_mode = True
    elif param in speed_lookup:
        self.device.manual_target_speed = speed_lookup.index(param)

    return ""
Models LNP Pump Commands functionality of device .
47,923
def set_context(self, new_context):
    """Store ``new_context`` in ``_context`` and update the logging context.

    Objects without a ``_set_logging_context`` hook only get the member
    assignment.
    """
    self._context = new_context

    # Only objects equipped with logging support expose this hook.
    if hasattr(self, '_set_logging_context'):
        self._set_logging_context(self._context)
Assigns the new context to the member variable _context .
47,924
def _initialize_data ( self ) : self . serial_command_mode = False self . pump_overspeed = False self . start_commanded = False self . stop_commanded = False self . hold_commanded = False self . temperature_rate = 5.0 self . temperature_limit = 0.0 self . pump_speed = 0 self . temperature = 24.0 self . pump_manual_mode = False self . manual_target_speed = 0
This method is called once on construction . After that it may be manually called again to reset the device to its default state .
47,925
def has_log(target):
    """Class/function decorator that attaches logging support.

    Adds a ``log`` attribute (a logger named after the target, below the
    package root logger) and a ``_set_logging_context`` method that
    renames the logger to include a context (a string is used verbatim,
    any other object contributes its class name).
    """
    logger_name = target.__name__

    def get_logger_name(context=None):
        log_names = [root_logger_name, logger_name]

        if context is not None:
            # Insert the context between root and target name.
            log_names.insert(
                1,
                context if isinstance(context, string_types) else context.__class__.__name__)

        return '.'.join(log_names)

    def _set_logging_context(obj, context):
        # Renames the existing logger rather than creating a new one.
        obj.log.name = get_logger_name(context)

    target.log = logging.getLogger(get_logger_name())
    target._set_logging_context = _set_logging_context

    return target
This is a decorator to add logging functionality to a class or function .
47,926
def get_usage_text(parser, indent=None):
    """Return the help text of an ArgumentParser, optionally indented.

    When ``indent`` is given every line is prefixed with that many spaces.
    """
    buffer = StringIO()
    parser.print_help(buffer)
    usage_string = buffer.getvalue()

    if indent is None:
        return usage_string

    prefix = ' ' * indent
    return '\n'.join(prefix + line for line in usage_string.split('\n'))
This small helper function extracts the help information from an ArgumentParser instance and indents the text by the number of spaces supplied in the indent - argument .
47,927
def stop(self):
    """Stop the motor by targeting the current position.

    Returns the (now equal) target and position.
    """
    here = self.position
    self._target = here
    self.log.info('Stopping movement after user request.')
    return self.target, self.position
Stops the motor and returns the new target and position, which are equal.
47,928
def run_simulation(argument_list=None):
    """Main entry point of a typical simulation run.

    Parses ``argument_list`` (or sys.argv), constructs the simulation via
    SimulationFactory and runs it until interrupted.  Several
    informational flags (version, device/protocol/option listings) print
    their output and return without starting a simulation.
    """
    try:
        arguments = parser.parse_args(argument_list or sys.argv[1:])

        if arguments.version:
            print(__version__)
            return

        # Renamed option: point users at the current spelling.
        if arguments.relaxed_versions:
            print('Unknown option --relaxed-versions. Did you mean --ignore-versions?')
            return

        # --verify implies maximum verbosity regardless of --output-level.
        loglevel = 'debug' if arguments.verify else arguments.output_level
        if loglevel != 'none':
            logging.basicConfig(
                level=getattr(logging, loglevel.upper()), format=default_log_format)

        if arguments.add_path is not None:
            additional_path = os.path.abspath(arguments.add_path)
            logging.getLogger().debug('Extending path with: %s', additional_path)
            sys.path.append(additional_path)

        strict_versions = use_strict_versions(
            arguments.strict_versions, arguments.ignore_versions)
        simulation_factory = SimulationFactory(arguments.device_package, strict_versions)

        if not arguments.device:
            devices = ['Please specify a device to simulate. The following devices are available:']
            for dev in simulation_factory.devices:
                devices.append(' ' + dev)
            print('\n'.join(devices))
            return

        if arguments.list_protocols:
            print('\n'.join(simulation_factory.get_protocols(arguments.device)))
            return

        # --no-interface suppresses all communication adapters.
        protocols = parse_adapter_options(arguments.adapter_options) if not arguments.no_interface else {}

        simulation = simulation_factory.create(
            arguments.device, arguments.setup, protocols, arguments.rpc_host)

        if arguments.show_interface:
            print(simulation._adapters.documentation())
            return

        if arguments.list_adapter_options:
            configurations = simulation._adapters.configuration()
            for protocol, options in configurations.items():
                print('{}:'.format(protocol))
                for opt, val in options.items():
                    print(' {} = {}'.format(opt, val))
            return

        simulation.cycle_delay = arguments.cycle_delay
        simulation.speed = arguments.speed

        # --verify stops after construction; otherwise run until Ctrl-C.
        if not arguments.verify:
            try:
                simulation.start()
            except KeyboardInterrupt:
                print('\nInterrupt received; shutting down. Goodbye, cruel world!')
                simulation.log.critical('Simulation aborted by user interaction')
            finally:
                simulation.stop()
    except LewisException as e:
        print('\n'.join(('An error occurred:', str(e))))
This is effectively the main function of a typical simulation run . Arguments passed in are parsed and used to construct and run the simulation .
47,929
def map_arguments(self, arguments):
    """Apply the configured mapping functions to ``arguments``.

    Without mappings the arguments are passed through unchanged.
    """
    if self.argument_mappings is None:
        return arguments

    return [mapper(arg) for mapper, arg in zip(self.argument_mappings, arguments)]
Returns the mapped function arguments . If no mapping functions are defined the arguments are returned as they were supplied .
47,930
def map_return_value(self, return_value):
    """Map ``return_value`` through the configured return mapping.

    A callable mapping is applied to the value; a non-callable, non-None
    mapping is a static replacement; otherwise the value passes through.
    """
    mapping = self.return_mapping

    if callable(mapping):
        return mapping(return_value)

    if mapping is not None:
        return mapping

    return return_value
Returns the mapped return_value of a processed request . If no return_mapping has been defined the value is returned as is . If return_mapping is a static value that value is returned ignoring return_value completely .
47,931
def start_server(self):
    """Start the TCP stream server on the configured host and port.

    Host and port come from the command line options.  Does nothing if
    the server already exists.
    """
    if self._server is None:
        if self._options.telnet_mode:
            # Telnet clients expect CRLF line endings.
            self.interface.in_terminator = '\r\n'
            self.interface.out_terminator = '\r\n'
        self._server = StreamServer(
            self._options.bind_address, self._options.port,
            self.interface, self.device_lock)
Starts the TCP stream server binding to the configured host and port . Host and port are configured via the command line arguments .
47,932
def handle(self, cycle_delay=0.1):
    """Process server requests for approximately ``cycle_delay`` seconds.

    Runs one asyncore loop iteration with the given timeout, then lets
    the server perform its periodic processing.
    """
    # NOTE(review): asyncore is deprecated and removed in Python 3.12; a
    # port to asyncio/selectors will eventually be required.
    asyncore.loop(cycle_delay, count=1)
    # process() appears to expect milliseconds here -- confirm against
    # the StreamServer implementation.
    self._server.process(int(cycle_delay * 1000))
Spend approximately cycle_delay seconds to process requests to the server .
47,933
def _override_data ( self , overrides ) : if overrides is not None : for name , val in overrides . items ( ) : self . log . debug ( 'Trying to override initial data (%s=%s)' , name , val ) if name not in dir ( self ) : raise AttributeError ( 'Can not override non-existing attribute' '\'{}\' of class \'{}\'.' . format ( name , type ( self ) . __name__ ) ) setattr ( self , name , val )
This method overrides data members of the class but does not allow for adding new members .
47,934
def get(self, addr, count):
    """Return ``count`` values starting at ``addr`` from the data bank.

    Raises IndexError when the requested range is not fully contained.
    """
    offset = addr - self._start_addr
    data = self._data[offset:offset + count]

    if len(data) != count:
        raise IndexError(
            "Invalid address range [{:#06x} - {:#06x}]".format(addr, addr + count))

    return data
Read list of count values at addr memory location in DataBank .
47,935
def set(self, addr, values):
    """Write ``values`` to the data bank starting at ``addr``.

    Raises IndexError when the target range is not fully contained.
    """
    offset = addr - self._start_addr
    end = offset + len(values)

    if not 0 <= offset <= end <= len(self._data):
        raise IndexError(
            "Invalid address range [{:#06x} - {:#06x}]".format(addr, addr + len(values)))

    self._data[offset:end] = values
Write list values to addr memory location in DataBank .
47,936
def from_bytearray(self, stream):
    """Parse one frame from the front of ``stream``, consuming its bytes.

    Raises EOFError when the stream does not yet contain a complete
    frame (neither the header nor the data is touched in that case
    beyond the header fields already unpacked).
    """
    fmt = '>HHHBB'
    header_size = struct.calcsize(fmt)

    if len(stream) < header_size:
        raise EOFError

    (self.transaction_id,
     self.protocol_id,
     self.length,
     self.unit_id,
     self.fcode) = struct.unpack(fmt, bytes(stream[:header_size]))

    # 'length' counts unit id + function code + data, hence the -2.
    total_size = header_size + self.length - 2
    if len(stream) < total_size:
        raise EOFError

    self.data = stream[header_size:total_size]
    del stream[:total_size]
Constructs this frame from input data stream consuming as many bytes as necessary from the beginning of the stream .
47,937
def to_bytearray(self):
    """Serialize this frame: packed header followed by the payload."""
    packed_header = struct.pack(
        '>HHHBB',
        self.transaction_id, self.protocol_id, self.length,
        self.unit_id, self.fcode)
    return bytearray(packed_header) + self.data
Convert this frame into its bytearray representation .
47,938
def is_valid(self):
    """Check integrity: protocol id, length field and payload size agree."""
    return (
        self.protocol_id == 0
        and 2 <= self.length <= 260
        and len(self.data) == self.length - 2
    )
Check integrity and validity of this frame .
47,939
def create_exception(self, code):
    """Create an exception frame based on this frame.

    The function code is marked with the error bit (0x80) and the
    payload carries the single-byte exception ``code``.  The original
    frame is left untouched.
    """
    frame = deepcopy(self)
    frame.length = 3
    frame.fcode += 0x80
    # Bug fix: bytearray(chr(code)) raises TypeError on Python 3 because
    # a str needs an encoding; build the one-byte payload from the int.
    frame.data = bytearray((code,))
    return frame
Create an exception frame based on this frame .
47,940
def create_response(self, data=None):
    """Create a response frame based on this frame.

    An optional payload replaces the copied one; the length field is
    recomputed either way.
    """
    frame = deepcopy(self)
    frame.data = frame.data if data is None else data
    frame.length = len(frame.data) + 2
    return frame
Create a response frame based on this frame .
47,941
def _all_expander ( fringe , iteration , viewer ) : expanded_neighbors = [ node . expand ( local_search = True ) for node in fringe ] if viewer : viewer . event ( 'expanded' , list ( fringe ) , expanded_neighbors ) list ( map ( fringe . extend , expanded_neighbors ) )
Expander that expands all nodes on the fringe .
47,942
def beam(problem, beam_size=100, iterations_limit=0, viewer=None):
    """Beam search: keep the ``beam_size`` best nodes, expanding all of them.

    With ``iterations_limit`` == 0 the search stops as soon as no better
    node is produced; otherwise it runs for that many iterations.
    """
    stop_on_plateau = iterations_limit == 0
    return _local_search(
        problem,
        _all_expander,
        iterations_limit=iterations_limit,
        fringe_size=beam_size,
        random_initial_states=True,
        stop_when_no_better=stop_on_plateau,
        viewer=viewer)
Beam search .
47,943
def _first_expander ( fringe , iteration , viewer ) : current = fringe [ 0 ] neighbors = current . expand ( local_search = True ) if viewer : viewer . event ( 'expanded' , [ current ] , [ neighbors ] ) fringe . extend ( neighbors )
Expander that expands only the first node on the fringe .
47,944
def beam_best_first(problem, beam_size=100, iterations_limit=0, viewer=None):
    """Beam search expanding only the best node of each generation."""
    stop_on_plateau = iterations_limit == 0
    return _local_search(
        problem,
        _first_expander,
        iterations_limit=iterations_limit,
        fringe_size=beam_size,
        random_initial_states=True,
        stop_when_no_better=stop_on_plateau,
        viewer=viewer)
Beam search best first .
47,945
def hill_climbing(problem, iterations_limit=0, viewer=None):
    """Classic hill climbing: single-node fringe, stop when stuck."""
    return _local_search(
        problem,
        _first_expander,
        iterations_limit=iterations_limit,
        fringe_size=1,
        stop_when_no_better=True,
        viewer=viewer)
Hill climbing search .
47,946
def hill_climbing_stochastic(problem, iterations_limit=0, viewer=None):
    """Stochastic hill climbing, delegating to _random_best_expander."""
    stop_on_plateau = iterations_limit == 0
    return _local_search(
        problem,
        _random_best_expander,
        iterations_limit=iterations_limit,
        fringe_size=1,
        stop_when_no_better=stop_on_plateau,
        viewer=viewer)
Stochastic hill climbing .
47,947
def hill_climbing_random_restarts(problem, restarts_limit, iterations_limit=0, viewer=None):
    """Run hill climbing ``restarts_limit`` times from random states.

    Returns the best node found across all runs.
    """
    best = None

    for _ in range(restarts_limit):
        candidate = _local_search(
            problem,
            _first_expander,
            iterations_limit=iterations_limit,
            fringe_size=1,
            random_initial_states=True,
            stop_when_no_better=True,
            viewer=viewer)

        if not best or best.value < candidate.value:
            best = candidate

    if viewer:
        viewer.event('no_more_runs', best, 'returned after %i runs' % restarts_limit)

    return best
Hill climbing with random restarts .
47,948
def _exp_schedule ( iteration , k = 20 , lam = 0.005 , limit = 100 ) : return k * math . exp ( - lam * iteration )
Possible scheduler for simulated_annealing based on the aima example .
47,949
def simulated_annealing(problem, schedule=_exp_schedule, iterations_limit=0, viewer=None):
    """Simulated annealing driven by the given cooling ``schedule``."""
    stop_on_plateau = iterations_limit == 0
    return _local_search(
        problem,
        _create_simulated_annealing_expander(schedule),
        iterations_limit=iterations_limit,
        fringe_size=1,
        stop_when_no_better=stop_on_plateau,
        viewer=viewer)
Simulated annealing .
47,950
def _create_genetic_expander(problem, mutation_chance):
    """Build an expander implementing one genetic-algorithm generation.

    The returned expander samples parents proportionally to their value,
    crosses them over, possibly mutates each child, and replaces the
    whole fringe with the new generation.
    """
    def _expander(fringe, iteration, viewer):
        # Fitness-proportionate (roulette wheel) parent selection.
        fitness = [x.value for x in fringe]
        sampler = InverseTransformSampler(fitness, fringe)

        new_generation = []
        expanded_nodes = []
        expanded_neighbors = []

        # One child per current member keeps the population size constant.
        for _ in fringe:
            node1 = sampler.sample()
            node2 = sampler.sample()
            child = problem.crossover(node1.state, node2.state)
            action = 'crossover'
            if random.random() < mutation_chance:
                # Mutation is decided independently for every child.
                child = problem.mutate(child)
                action += '+mutation'

            child_node = SearchNodeValueOrdered(state=child, problem=problem, action=action)
            new_generation.append(child_node)

            # Both parents are reported as 'expanded' into the same child.
            expanded_nodes.append(node1)
            expanded_neighbors.append([child_node])
            expanded_nodes.append(node2)
            expanded_neighbors.append([child_node])

        if viewer:
            viewer.event('expanded', expanded_nodes, expanded_neighbors)

        # The old generation is discarded entirely.
        fringe.clear()
        for node in new_generation:
            fringe.append(node)

    return _expander
Creates an expander that expands the best nodes of the population, crossing them over.
47,951
def genetic(problem, population_size=100, mutation_chance=0.1, iterations_limit=0, viewer=None):
    """Genetic search over a population of ``population_size`` states."""
    stop_on_plateau = iterations_limit == 0
    return _local_search(
        problem,
        _create_genetic_expander(problem, mutation_chance),
        iterations_limit=iterations_limit,
        fringe_size=population_size,
        random_initial_states=True,
        stop_when_no_better=stop_on_plateau,
        viewer=viewer)
Genetic search .
47,952
def _local_search(problem, fringe_expander, iterations_limit=0, fringe_size=1,
                  random_initial_states=False, stop_when_no_better=True, viewer=None):
    """Generic driver shared by all local search algorithms.

    Maintains a bounded, value-ordered fringe; on every iteration the
    supplied ``fringe_expander`` refills it.  The loop stops on reaching
    ``iterations_limit`` or, when ``stop_when_no_better`` is set, as soon
    as the best value stops improving.  Returns the best node found.
    """
    if viewer:
        viewer.event('started')

    fringe = BoundedPriorityQueue(fringe_size)
    if random_initial_states:
        for _ in range(fringe_size):
            s = problem.generate_random_state()
            fringe.append(SearchNodeValueOrdered(state=s, problem=problem))
    else:
        fringe.append(SearchNodeValueOrdered(state=problem.initial_state, problem=problem))

    finish_reason = ''
    iteration = 0
    run = True
    best = None

    while run:
        if viewer:
            viewer.event('new_iteration', list(fringe))

        # Compare the best node before and after expanding.
        old_best = fringe[0]
        fringe_expander(fringe, iteration, viewer)
        best = fringe[0]

        iteration += 1

        if iterations_limit and iteration >= iterations_limit:
            run = False
            finish_reason = 'reaching iteration limit'
        elif old_best.value >= best.value and stop_when_no_better:
            # No improvement over the previous generation's best.
            run = False
            finish_reason = 'not being able to improve solution'

    if viewer:
        viewer.event('finished', fringe, best, 'returned after %s' % finish_reason)

    return best
Basic algorithm for all local search algorithms .
47,953
def actions(self, state):
    """Return the pieces adjacent to the empty slot (the possible moves)."""
    rows = string_to_list(state)
    row_e, col_e = find_location(rows, 'e')

    # Neighbors in the 3x3 grid: up, down, left, right of the empty cell.
    deltas = ((-1, 0), (1, 0), (0, -1), (0, 1))
    return [rows[row_e + dr][col_e + dc]
            for dr, dc in deltas
            if 0 <= row_e + dr <= 2 and 0 <= col_e + dc <= 2]
Returns a list of the pieces we can move to the empty space .
47,954
def is_attribute(method, name=None):
    """Mark ``method`` as an attribute, optionally under a different name.

    Sets ``is_attribute`` and ``name`` on the function and returns it, so
    it can be used directly as a decorator.
    """
    method.is_attribute = True
    method.name = method.__name__ if name is None else name
    return method
Decorator for methods that are attributes .
47,955
def load(cls, filepath):
    """Load a pickled classifier from ``filepath``.

    Raises ValueError when the unpickled object is not a Classifier.
    """
    with open(filepath, "rb") as filehandler:
        classifier = pickle.load(filehandler)

    if not isinstance(classifier, Classifier):
        raise ValueError("Pickled object is not a Classifier")
    return classifier
Loads a pickled version of the classifier saved in filepath
47,956
def tree_to_str(root):
    """Render a decision tree rooted at ``root`` as an indented string."""
    lines = []
    for value, node, depth in iter_tree(root):
        template = "{indent}"
        if node is not root:
            template += "case={value}\t"
        if node.attribute is None:
            # Leaf: final classification plus its probability.
            template += "result={result} -- P={prob:.2}"
        else:
            template += "split by {split}:\t" + "(partial result={result} -- P={prob:.2})"
        lines.append(template.format(
            indent=" " * depth,
            value=value,
            result=node.result[0],
            prob=node.result[1],
            split=str(node.attribute)))
    return "\n".join(lines)
Returns a string representation of a decision tree with root node root .
47,957
def save(self, filepath):
    """Pickle this classifier to ``filepath``.

    Raises ValueError for an empty or non-string filepath.  Note the
    whole object (including its dataset) must be picklable.
    """
    if not isinstance(filepath, str) or not filepath:
        raise ValueError("Invalid filepath")

    with open(filepath, "wb") as filehandler:
        pickle.dump(self, filehandler)
Saves the classifier to filepath . Because this classifier needs to save the dataset it must be something that can be pickled and not something like an iterator .
47,958
def _max_gain_split(self, examples):
    """Return the gain counter of the attribute with the highest gain.

    Every example is fed to every counter first.  Raises ValueError when
    ``examples`` turned out to be empty.
    """
    gains = self._new_set_of_gain_counters()

    for example in examples:
        for gain in gains:
            gain.add(example)

    winner = max(gains, key=lambda g: g.get_gain())

    if not winner.get_target_class_counts():
        raise ValueError("Dataset is empty")
    return winner
Returns an OnlineInformationGain of the attribute with max gain based on examples .
47,959
def backtrack(problem, variable_heuristic='', value_heuristic='', inference=True):
    """Backtracking CSP search with selectable heuristics.

    ``variable_heuristic`` picks the next variable (most constrained,
    highest degree, or declaration order); ``value_heuristic`` orders the
    values (least constraining, or domain order).
    """
    assignment = {}
    domains = deepcopy(problem.domains)

    variable_chooser = {
        MOST_CONSTRAINED_VARIABLE: _most_constrained_variable_chooser,
        HIGHEST_DEGREE_VARIABLE: _highest_degree_variable_chooser,
    }.get(variable_heuristic, _basic_variable_chooser)

    values_sorter = {
        LEAST_CONSTRAINING_VALUE: _least_constraining_values_sorter,
    }.get(value_heuristic, _basic_values_sorter)

    return _backtracking(problem, assignment, domains, variable_chooser,
                         values_sorter, inference=inference)
Backtracking search .
47,960
def _most_constrained_variable_chooser ( problem , variables , domains ) : return sorted ( variables , key = lambda v : len ( domains [ v ] ) ) [ 0 ]
Choose the variable that has less available values .
47,961
def _highest_degree_variable_chooser ( problem , variables , domains ) : return sorted ( variables , key = lambda v : problem . var_degrees [ v ] , reverse = True ) [ 0 ]
Choose the variable that is involved on more constraints .
47,962
def _count_conflicts(problem, assignment, variable=None, value=None):
    """Number of constraints violated by ``assignment`` (optionally extended)."""
    conflicts = _find_conflicts(problem, assignment, variable, value)
    return len(conflicts)
Count the number of violated constraints on a given assignment .
47,963
def _find_conflicts(problem, assignment, variable=None, value=None):
    """Return the constraints violated by ``assignment``.

    When both ``variable`` and ``value`` are given they are added to a
    copy of the assignment before checking, leaving the original intact.
    Only constraints whose variables are all assigned are evaluated.
    """
    if variable is not None and value is not None:
        assignment = deepcopy(assignment)
        assignment[variable] = value

    violated = []
    for neighbors, constraint in problem.constraints:
        fully_assigned = all(n in assignment for n in neighbors)
        if fully_assigned and not _call_constraint(assignment, neighbors, constraint):
            violated.append((neighbors, constraint))

    return violated
Find violated constraints on a given assignment with the possibility of specifying a new variable and value to add to the assignment before checking .
47,964
def _least_constraining_values_sorter(problem, assignment, variable, domains):
    """Sort the candidate values of ``variable`` by the conflicts they cause.

    Values generating fewer conflicts come first.  The original version
    defined an inner ``update_assignment`` helper that was never called;
    that dead code has been removed.
    """
    return sorted(domains[variable][:],
                  key=lambda v: _count_conflicts(problem, assignment, variable, v))
Sort values based on how many conflicts they generate if assigned .
47,965
def _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=True):
    """Recursive backtracking core of the CSP search.

    Tries values for one unassigned variable at a time, skipping values
    that immediately conflict; with ``inference`` enabled, AC-3 prunes
    the domains before recursing.  Returns a complete assignment, or
    None when no value leads to a solution.
    """
    from simpleai.search.arc import arc_consistency_3
    # Base case: every variable has been assigned.
    if len(assignment) == len(problem.variables):
        return assignment

    pending = [v for v in problem.variables if v not in assignment]
    variable = variable_chooser(problem, pending, domains)

    values = values_sorter(problem, assignment, variable, domains)

    for value in values:
        # Work on copies so sibling branches stay independent.
        new_assignment = deepcopy(assignment)
        new_assignment[variable] = value

        if not _count_conflicts(problem, new_assignment):
            new_domains = deepcopy(domains)
            new_domains[variable] = [value]

            if not inference or arc_consistency_3(new_domains, problem.constraints):
                result = _backtracking(problem,
                                       new_assignment,
                                       new_domains,
                                       variable_chooser,
                                       values_sorter,
                                       inference=inference)
                if result:
                    return result

    return None
Internal recursive backtracking algorithm .
47,966
def _min_conflicts_value(problem, assignment, variable):
    """Value from the variable's domain causing the fewest conflicts.

    Ties are broken randomly by the underlying argmin helper.
    """
    def conflicts_for(value):
        return _count_conflicts(problem, assignment, variable, value)

    return argmin(problem.domains[variable], conflicts_for)
Return the value that generates the fewest conflicts. In case of a tie, a random value is selected from this subset of values.
47,967
def min_conflicts(problem, initial_assignment=None, iterations_limit=0):
    """Min-conflicts CSP search.

    Starts from ``initial_assignment`` or from a greedy assignment, then
    repeatedly reassigns a randomly chosen conflicting variable to its
    least-conflicting value until no conflicts remain or the iteration
    limit is reached.  Returns the (possibly still conflicting)
    assignment.
    """
    assignment = {}
    if initial_assignment:
        assignment.update(initial_assignment)
    else:
        # Greedy construction: give each variable its currently best value.
        for variable in problem.variables:
            value = _min_conflicts_value(problem, assignment, variable)
            assignment[variable] = value

    iteration = 0
    run = True
    while run:
        conflicts = _find_conflicts(problem, assignment)
        # Variables participating in at least one violated constraint.
        conflict_variables = [v for v in problem.variables
                              if any(v in conflict[0] for conflict in conflicts)]

        if conflict_variables:
            variable = random.choice(conflict_variables)
            value = _min_conflicts_value(problem, assignment, variable)
            assignment[variable] = value

        iteration += 1

        if iterations_limit and iteration >= iterations_limit:
            run = False
        elif not _count_conflicts(problem, assignment):
            run = False

    return assignment
Min conflicts search .
47,968
# Convert a CSP with n-ary constraints into an equivalent one with only
# binary constraints, by introducing hidden variables.
#
# Binary constraints are kept as-is.  For each constraint over more than two
# variables, a fresh variable 'hidden<k>' is created whose domain is the set
# of value tuples of the original variables that satisfy the constraint;
# then one binary constraint links the hidden variable to each original
# variable, enforcing (via the ``wdiff``/``diff`` closure) that the original
# variable's value equals the corresponding component of the hidden tuple.
# ``diff.no_wrap = True`` flags the constraint so the framework does not wrap
# its argument handling again.
#
# Returns (new_variables, new_domains, new_constraints).
def convert_to_binary ( variables , domains , constraints ) : def wdiff ( vars_ ) : def diff ( variables , values ) : hidden , other = variables if hidden . startswith ( 'hidden' ) : idx = vars_ . index ( other ) return values [ 1 ] == values [ 0 ] [ idx ] else : idx = vars_ . index ( hidden ) return values [ 0 ] == values [ 1 ] [ idx ] diff . no_wrap = True return diff new_constraints = [ ] new_domains = copy ( domains ) new_variables = list ( variables ) last = 0 for vars_ , const in constraints : if len ( vars_ ) == 2 : new_constraints . append ( ( vars_ , const ) ) continue hidden = 'hidden%d' % last new_variables . append ( hidden ) last += 1 new_domains [ hidden ] = [ t for t in product ( * map ( domains . get , vars_ ) ) if const ( vars_ , t ) ] for var in vars_ : new_constraints . append ( ( ( hidden , var ) , wdiff ( vars_ ) ) ) return new_variables , new_domains , new_constraints
Returns new constraint list all binary using hidden variables .
47,969
def boltzmann_exploration(actions, utilities, temperature, action_counter):
    """Pick an action with probability given by a Boltzmann (softmax)
    distribution over the actions' utilities.

    ``utilities`` maps each action to its utility; ``temperature`` controls
    exploration (clamped to at least 0.01, higher means more random).
    ``action_counter`` is unused here but kept for interface compatibility
    with other exploration strategies.

    Returns one of ``actions``.
    """
    values = [utilities[a] for a in actions]
    temperature = max(temperature, 0.01)

    best, worst = max(values), min(values)
    if best == worst:
        # All utilities equal: the distribution is uniform.
        return random.choice(actions)

    # Min-max normalize utilities to [0, 1] before exponentiating.
    weights = [math.exp(((v - worst) / (best - worst)) / temperature)
               for v in values]
    # Hoist the normalization constant out of the loop
    # (the original recomputed sum() per element: O(n^2)).
    total = sum(weights)
    probs = [w / total for w in weights]

    # Roulette-wheel selection over the cumulative distribution.
    r = random.random()
    cumulative = 0.0
    for action, p in zip(actions, probs):
        cumulative += p
        if r < cumulative:
            return action
    # Floating-point round-off can leave cumulative slightly below 1.0;
    # the original would raise IndexError here — fall back to the last action.
    return actions[-1]
returns an action with a probability depending on utilities and temperature
47,970
def mkconstraints():
    """Build the binary constraint list for a 9x9 sudoku-style CSP.

    Variables are named like "A1".."I9"; every pair of variables sharing
    a row, a column or a 3x3 box must take different values.
    """
    def all_different(variables):
        # One binary 'different' constraint per unordered pair.
        return [(pair, const_different) for pair in combinations(variables, 2)]

    constraints = []
    # Rows.
    for j in range(1, 10):
        constraints.extend(all_different(["%s%d" % (i, j)
                                          for i in uppercase[:9]]))
    # Columns.
    for i in uppercase[:9]:
        constraints.extend(all_different(["%s%d" % (i, j)
                                          for j in range(1, 10)]))
    # 3x3 boxes.
    for letters in ['ABC', 'DEF', 'GHI']:
        for digits in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]:
            constraints.extend(all_different(["%s%d" % (i, j)
                                              for i in letters
                                              for j in digits]))
    return constraints
Make constraint list for binary constraint problem .
47,971
def precision(classifier, testset):
    """Run ``classifier`` on every example of ``testset`` and return the
    fraction of examples whose predicted label matches the target.

    Raises ValueError if ``testset`` is empty.
    """
    total = 0
    hits = 0
    for example in testset:
        total += 1
        # classify() returns a tuple whose first item is the predicted label.
        if classifier.classify(example)[0] == classifier.target(example):
            hits += 1
    if not total:
        raise ValueError("Empty testset!")
    return hits / float(total)
Runs the classifier for each example in testset and verifies that the classification is correct using the target .
47,972
def kfold(dataset, problem, method, k=10):
    """k-fold cross validation.

    Randomly partitions ``dataset`` into ``k`` folds; ``k`` times, trains
    ``method`` on k-1 folds and evaluates on the held-out fold.  Returns
    the overall success ratio.  Raises ValueError when k < 2.
    """
    if k <= 1:
        raise ValueError("k argument must be at least 2")

    dataset = list(dataset)
    random.shuffle(dataset)

    trials = 0
    positive = 0
    for fold in range(k):
        # Every k-th element (offset ``fold``) is the test fold.
        test = dataset[fold::k]
        train = [x for j, x in enumerate(dataset) if j % k != fold]
        classifier = method(train, problem)
        for data in test:
            trials += 1
            result = classifier.classify(data)
            if result is not None and result[0] == problem.target(data):
                positive += 1
    return float(positive) / float(trials)
Does a k-fold cross-validation on dataset with method. That is, it randomly creates k partitions of the dataset, and k times trains the method with k-1 parts and evaluates it on the partition left out. After all runs it returns the overall success ratio.
47,973
def actions(self, state):
    """Return the indexes of ``state`` holding '_' (free cells to play)."""
    return [index for index, char in enumerate(state) if char == '_']
actions are index where we can make a move
47,974
def actions(self, s):
    """Possible actions from state ``s``: those of ``self._actions``
    whose resulting state is valid."""
    allowed = []
    for candidate in self._actions:
        if self._is_valid(self.result(s, candidate)):
            allowed.append(candidate)
    return allowed
Possible actions from a state .
47,975
def _is_valid ( self , s ) : return ( ( s [ 0 ] >= s [ 1 ] or s [ 0 ] == 0 ) ) and ( ( 3 - s [ 0 ] ) >= ( 3 - s [ 1 ] ) or s [ 0 ] == 3 ) and ( 0 <= s [ 0 ] <= 3 ) and ( 0 <= s [ 1 ] <= 3 )
Check if a state is valid .
47,976
def result(self, s, a):
    """Result of applying action ``a`` to state ``s``.

    ``s`` is (count_a, count_b, boat_flag); ``a[1]`` holds the two
    quantities moved.  When the boat flag is 0 the quantities are
    subtracted and the flag flips to 1; otherwise they are added and
    the flag flips back to 0.
    """
    delta_a, delta_b = a[1]
    if s[2] == 0:
        return (s[0] - delta_a, s[1] - delta_b, 1)
    return (s[0] + delta_a, s[1] + delta_b, 0)
Result of applying an action to a state .
47,977
def arc_consistency_3(domains, constraints):
    """Make the CSP arc consistent (AC-3).

    Prunes ``domains`` in place.  Returns False as soon as some domain
    is wiped out (no solution possible), True otherwise.
    """
    arcs = list(all_arcs(constraints))
    pending = set(arcs)
    while pending:
        x, y = pending.pop()
        if revise(domains, (x, y), constraints):
            if not domains[x]:
                # Empty domain: inconsistent problem.
                return False
            # x's domain shrank: re-check every arc pointing at x.
            pending.update((x2, y2) for x2, y2 in arcs if y2 == x)
    return True
Makes a CSP problem arc consistent .
47,978
def expand(self, local_search=False):
    """Create this node's successor nodes.

    When ``local_search`` is True the successors don't keep a parent
    reference (no path reconstruction needed).
    """
    node_class = self.__class__
    successors = []
    for action in self.problem.actions(self.state):
        new_state = self.problem.result(self.state, action)
        step_cost = self.problem.cost(self.state, action, new_state)
        successors.append(node_class(state=new_state,
                                     parent=None if local_search else self,
                                     problem=self.problem,
                                     action=action,
                                     cost=self.cost + step_cost,
                                     depth=self.depth + 1))
    return successors
Create successors .
47,979
def step(self, viewer=None):
    """Evolve the environment one step in time, letting every agent act.

    Stops early (mid-round) as soon as the state is completed.
    """
    if self.is_completed(self.state):
        return
    for agent in self.agents:
        action = agent.program(self.percept(agent, self.state))
        next_state = self.do_action(self.state, action, agent)
        if viewer:
            viewer.event(self.state, action, next_state, agent)
        self.state = next_state
        if self.is_completed(self.state):
            return
This method evolves one step in time
47,980
def breadth_first(problem, graph_search=False, viewer=None):
    """Breadth-first search: expand the shallowest node first,
    using a FIFO fringe."""
    fringe = FifoList()
    return _search(problem, fringe, graph_search=graph_search, viewer=viewer)
Breadth first search .
47,981
def depth_first(problem, graph_search=False, viewer=None):
    """Depth-first search: expand the deepest node first,
    using a LIFO fringe."""
    fringe = LifoList()
    return _search(problem, fringe, graph_search=graph_search, viewer=viewer)
Depth first search .
47,982
def limited_depth_first(problem, depth_limit, graph_search=False, viewer=None):
    """Depth-first search bounded by ``depth_limit``: nodes at the limit
    are not expanded."""
    fringe = LifoList()
    return _search(problem, fringe,
                   graph_search=graph_search,
                   depth_limit=depth_limit,
                   viewer=viewer)
Limited depth first search .
47,983
def iterative_limited_depth_first(problem, graph_search=False, viewer=None):
    """Iterative deepening: run limited depth-first search with an
    increasing depth limit (starting at 0) until a solution is found."""
    limit = 0
    solution = None
    while solution is None:
        solution = limited_depth_first(problem, depth_limit=limit,
                                       graph_search=graph_search,
                                       viewer=viewer)
        limit += 1
    if viewer:
        viewer.event('no_more_runs', solution, 'returned after %i runs' % limit)
    return solution
Iterative limited depth first search .
47,984
def uniform_cost(problem, graph_search=False, viewer=None):
    """Uniform-cost search: expand the cheapest node first, using a
    priority fringe ordered by accumulated cost."""
    fringe = BoundedPriorityQueue()
    return _search(problem,
                   fringe,
                   graph_search=graph_search,
                   node_factory=SearchNodeCostOrdered,
                   graph_replace_when_better=True,
                   viewer=viewer)
Uniform cost search .
47,985
def greedy(problem, graph_search=False, viewer=None):
    """Greedy best-first search: expand the node with the best heuristic
    value first, using a priority fringe ordered by heuristic."""
    fringe = BoundedPriorityQueue()
    return _search(problem,
                   fringe,
                   graph_search=graph_search,
                   node_factory=SearchNodeHeuristicOrdered,
                   graph_replace_when_better=True,
                   viewer=viewer)
Greedy search .
47,986
# Basic search algorithm, base of all the other search algorithms.
#
# Pops nodes from ``fringe`` until a goal is found or the fringe empties.
# Non-goal nodes are added to ``memory`` (the closed set) and expanded unless
# they sit at ``depth_limit``.  For each successor:
#   * tree search (graph_search=False): always appended to the fringe;
#   * graph search: appended only when its state is neither in memory nor
#     already on the fringe; if ``graph_replace_when_better`` and a worse
#     node with the same state is on the fringe, it is replaced.
# ``viewer`` (optional) receives events at every stage for visualization.
# Returns the goal node, or None (implicitly) when the fringe empties.
def _search ( problem , fringe , graph_search = False , depth_limit = None , node_factory = SearchNode , graph_replace_when_better = False , viewer = None ) : if viewer : viewer . event ( 'started' ) memory = set ( ) initial_node = node_factory ( state = problem . initial_state , problem = problem ) fringe . append ( initial_node ) while fringe : if viewer : viewer . event ( 'new_iteration' , fringe . sorted ( ) ) node = fringe . pop ( ) if problem . is_goal ( node . state ) : if viewer : viewer . event ( 'chosen_node' , node , True ) viewer . event ( 'finished' , fringe . sorted ( ) , node , 'goal found' ) return node else : if viewer : viewer . event ( 'chosen_node' , node , False ) memory . add ( node . state ) if depth_limit is None or node . depth < depth_limit : expanded = node . expand ( ) if viewer : viewer . event ( 'expanded' , [ node ] , [ expanded ] ) for n in expanded : if graph_search : others = [ x for x in fringe if x . state == n . state ] assert len ( others ) in ( 0 , 1 ) if n . state not in memory and len ( others ) == 0 : fringe . append ( n ) elif graph_replace_when_better and len ( others ) > 0 and n < others [ 0 ] : fringe . remove ( others [ 0 ] ) fringe . append ( n ) else : fringe . append ( n ) if viewer : viewer . event ( 'finished' , fringe . sorted ( ) , None , 'goal not found' )
Basic search algorithm base of all the other search algorithms .
47,987
def _get_tree_paths(tree, node_id, depth=0):
    """Return all paths through ``tree`` below ``node_id`` as a list of
    node-id lists (each path is ordered leaf to root).

    Raises ValueError when called with the leaf sentinel as node_id.
    """
    if node_id == _tree.TREE_LEAF:
        raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)

    left = tree.children_left[node_id]
    right = tree.children_right[node_id]
    if left == _tree.TREE_LEAF:
        # Leaf: the only path is the node itself.
        return [[node_id]]

    # Internal node: collect the paths of both subtrees, then append
    # this node to each of them.
    paths = (_get_tree_paths(tree, left, depth=depth + 1) +
             _get_tree_paths(tree, right, depth=depth + 1))
    for path in paths:
        path.append(node_id)
    return paths
Returns all paths through the tree as list of node_ids
47,988
def extract_keywords_from_text(self, text):
    """Extract keywords from a raw text string (splits it into
    sentences first, then delegates)."""
    self.extract_keywords_from_sentences(nltk.tokenize.sent_tokenize(text))
Method to extract keywords from the text provided .
47,989
def extract_keywords_from_sentences(self, sentences):
    """Extract keywords from a list of sentences: generate the contender
    phrases, then build the frequency distribution, the word
    co-occurrence degrees, and finally the ranked phrase list."""
    phrases = self._generate_phrases(sentences)
    self._build_frequency_dist(phrases)
    self._build_word_co_occurance_graph(phrases)
    self._build_ranklist(phrases)
Method to extract keywords from the list of sentences provided .
47,990
def _build_word_co_occurance_graph ( self , phrase_list ) : co_occurance_graph = defaultdict ( lambda : defaultdict ( lambda : 0 ) ) for phrase in phrase_list : for ( word , coword ) in product ( phrase , phrase ) : co_occurance_graph [ word ] [ coword ] += 1 self . degree = defaultdict ( lambda : 0 ) for key in co_occurance_graph : self . degree [ key ] = sum ( co_occurance_graph [ key ] . values ( ) )
Builds the co-occurrence graph of words in the given body of text to compute the degree of each word.
47,991
def _build_ranklist(self, phrase_list):
    """Rank every contender phrase.

    A phrase's score is the sum of its words' scores, where each word
    contributes according to ``self.metric``: degree/frequency ratio,
    raw degree, or raw frequency.  Stores the sorted (score, phrase)
    pairs in ``self.rank_list`` and the phrases alone, best first, in
    ``self.ranked_phrases``.
    """
    def word_score(word):
        if self.metric == Metric.DEGREE_TO_FREQUENCY_RATIO:
            return 1.0 * self.degree[word] / self.frequency_dist[word]
        if self.metric == Metric.WORD_DEGREE:
            return 1.0 * self.degree[word]
        return 1.0 * self.frequency_dist[word]

    self.rank_list = []
    for phrase in phrase_list:
        score = 0.0
        for word in phrase:
            score += word_score(word)
        self.rank_list.append((score, " ".join(phrase)))
    self.rank_list.sort(reverse=True)
    self.ranked_phrases = [phrase for _, phrase in self.rank_list]
Method to rank each contender phrase using the formula
47,992
def _generate_phrases(self, sentences):
    """Generate the set of contender phrases from the document's
    sentences: each sentence is word-tokenized, lowercased, and turned
    into phrases by ``_get_phrase_list_from_words``."""
    phrases = set()
    for sentence in sentences:
        words = [word.lower() for word in wordpunct_tokenize(sentence)]
        phrases.update(self._get_phrase_list_from_words(words))
    return phrases
Method to generate contender phrases given the sentences of the text document .
47,993
def _retain_centroids ( numbers , thres ) : numbers . sort ( ) prev = - 1 ret = [ ] for n in numbers : if prev < 0 or n - prev > thres : ret . append ( n ) prev = n return ret
Only keep one number for each cluster within thres of each other
47,994
def _split_vlines_hlines ( lines ) : vlines , hlines = [ ] , [ ] for line in lines : ( vlines if line . x1 - line . x0 < 0.1 else hlines ) . append ( line ) return vlines , hlines
Separates lines into horizontal and vertical ones
47,995
def _npiter ( arr ) : for a in np . nditer ( arr , flags = [ "refs_ok" ] ) : c = a . item ( ) if c is not None : yield c
Wrapper for iterating numpy array
47,996
# Analyze the subcell structure of the grid.
#
# For each row of cells in ``self._grid``: sorts every cell's texts into
# natural reading order, then groups the texts across the whole row by their
# ``yc_grid`` (grid y-center) into a defaultdict(list).  Returns the list of
# these per-row {yc_grid: [texts]} mappings.  The log.debug calls only emit
# diagnostics and have no effect on the result.
def get_normalized_grid ( self ) : log = logging . getLogger ( __name__ ) mega_rows = [ ] for row_id , row in enumerate ( self . _grid ) : subrow_across_cell = defaultdict ( list ) for col_id , cell in enumerate ( row ) : cell . texts . sort ( key = cmp_to_key ( reading_order ) ) log . debug ( "=" * 50 ) for m in cell . texts : subrow_across_cell [ m . yc_grid ] . append ( m ) log . debug ( pformat ( dict ( subrow_across_cell ) ) ) mega_rows . append ( subrow_across_cell ) return mega_rows
Analyzes subcell structure
47,997
def _mark_grid_bounds(self, plane, region_bbox):
    """Assume all lines define a complete grid over ``region_bbox``;
    detect which separator lines are actually present so that merged
    cells can be recovered.

    Returns (vbars, hbars): boolean masks of the vertical bars between
    columns (num_rows x num_cols+1) and the horizontal bars between
    rows (num_rows+1 x num_cols).
    """
    # np.bool was removed in NumPy 1.24; the builtin ``bool`` is the
    # supported spelling of the boolean dtype.
    vbars = np.zeros([self.num_rows, self.num_cols + 1], dtype=bool)
    hbars = np.zeros([self.num_rows + 1, self.num_cols], dtype=bool)

    def closest_idx(arr, elem):
        # Index of the value in the (sorted) arr closest to elem.
        left = bisect.bisect_left(arr, elem) - 1
        right = bisect.bisect_right(arr, elem) - 1
        return left if abs(arr[left] - elem) < abs(arr[right] - elem) else right

    # Vertical bars: for each row, scan along its vertical center line
    # and snap every found line to the nearest column boundary.
    for row, (y0, y1) in enumerate(self.yranges):
        yc = (y0 + y1) // 2
        for line in plane.find((region_bbox.x0, yc, region_bbox.x1, yc)):
            vbars[row, closest_idx(self.xs, line.xc)] = True

    # Horizontal bars: same idea along each column's horizontal center line.
    for col, (x0, x1) in enumerate(self.xranges):
        xc = (x0 + x1) // 2
        for line in plane.find((xc, region_bbox.y0, xc, region_bbox.y1)):
            hbars[closest_idx(self.ys, line.yc), col] = True

    return vbars, hbars
Assume all lines define a complete grid over the region_bbox . Detect which lines are missing so that we can recover merged cells .
47,998
def vectorize(e, tolerance=0.1):
    """Vectorize the PDF object's bounding box into a direction vector.

    Returns (width, 0.0) for a horizontal line, (0.0, height) for a
    vertical one, and None (implicitly) when the box is neither — i.e.
    a big rectangle, or smaller than the tolerance in both directions.
    """
    # Never use a tolerance thinner than the stroke width itself.
    tolerance = max(tolerance, e.linewidth)
    tall = e.height > tolerance
    wide = e.width > tolerance
    if wide and not tall:
        return (e.width, 0.0)
    if tall and not wide:
        return (0.0, e.height)
Vectorizes the PDF object's bounding box. min_width is the width under which we consider it a line instead of a big rectangle.
47,999
def aligned(e1, e2):
    """Whether two boxes are aligned: they share (within tolerance) one
    bbox coordinate, or are center-aligned on either axis — which can
    mean parallel lines, a corner, etc."""
    if any(close(c1, c2) for c1, c2 in zip(e1.bbox, e2.bbox)):
        return True
    return x_center_aligned(e1, e2) or y_center_aligned(e1, e2)
Alignment is determined by two boxes having exactly one bounding-box attribute in common, which could mean being parallel, perpendicularly forming a corner, etc.