repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
DMSC-Instrument-Data/lewis
src/lewis/adapters/modbus.py
ModbusDataBank.set
def set(self, addr, values):
    """
    Write list ``values`` to ``addr`` memory location in DataBank.

    :param addr: Address to write to
    :param values: list of values to write
    :except IndexError: Raised if address range falls outside valid range
    """
    # Translate the absolute address into an offset within our backing list.
    offset = addr - self._start_addr
    stop = offset + len(values)

    if not 0 <= offset <= stop <= len(self._data):
        # Report the range in absolute addresses, as the caller sees them.
        raise IndexError(
            "Invalid address range [{:#06x} - {:#06x}]".format(
                addr, addr + len(values)))

    self._data[offset:stop] = values
python
def set(self, addr, values): """ Write list ``values`` to ``addr`` memory location in DataBank. :param addr: Address to write to :param values: list of values to write :except IndexError: Raised if address range falls outside valid range """ addr -= self._start_addr end = addr + len(values) if not 0 <= addr <= end <= len(self._data): addr += self._start_addr raise IndexError("Invalid address range [{:#06x} - {:#06x}]" .format(addr, addr + len(values))) self._data[addr:end] = values
[ "def", "set", "(", "self", ",", "addr", ",", "values", ")", ":", "addr", "-=", "self", ".", "_start_addr", "end", "=", "addr", "+", "len", "(", "values", ")", "if", "not", "0", "<=", "addr", "<=", "end", "<=", "len", "(", "self", ".", "_data", ")", ":", "addr", "+=", "self", ".", "_start_addr", "raise", "IndexError", "(", "\"Invalid address range [{:#06x} - {:#06x}]\"", ".", "format", "(", "addr", ",", "addr", "+", "len", "(", "values", ")", ")", ")", "self", ".", "_data", "[", "addr", ":", "end", "]", "=", "values" ]
Write list ``values`` to ``addr`` memory location in DataBank. :param addr: Address to write to :param values: list of values to write :except IndexError: Raised if address range falls outside valid range
[ "Write", "list", "values", "to", "addr", "memory", "location", "in", "DataBank", "." ]
931d96b8c761550a6a58f6e61e202690db04233a
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/modbus.py#L81-L95
train
236,800
DMSC-Instrument-Data/lewis
src/lewis/adapters/modbus.py
ModbusTCPFrame.from_bytearray
def from_bytearray(self, stream):
    """
    Constructs this frame from input data stream, consuming as many bytes
    as necessary from the beginning of the stream.

    If stream does not contain enough data to construct a complete modbus
    frame, an EOFError is raised and no data is consumed.

    :param stream: bytearray to consume data from to construct this frame.
    :except EOFError: Not enough data for complete frame; no data consumed.
    """
    header_fmt = '>HHHBB'
    header_size = struct.calcsize(header_fmt)

    # Refuse to consume anything until at least a full MBAP header arrived.
    if len(stream) < header_size:
        raise EOFError

    fields = struct.unpack(header_fmt, bytes(stream[:header_size]))
    (self.transaction_id,
     self.protocol_id,
     self.length,
     self.unit_id,
     self.fcode) = fields

    # The length field counts unit_id + fcode (2 bytes) plus the data
    # section, so the full frame spans header_size + length - 2 bytes.
    frame_size = header_size + self.length - 2
    if len(stream) < frame_size:
        raise EOFError

    self.data = stream[header_size:frame_size]
    del stream[:frame_size]
python
def from_bytearray(self, stream): """ Constructs this frame from input data stream, consuming as many bytes as necessary from the beginning of the stream. If stream does not contain enough data to construct a complete modbus frame, an EOFError is raised and no data is consumed. :param stream: bytearray to consume data from to construct this frame. :except EOFError: Not enough data for complete frame; no data consumed. """ fmt = '>HHHBB' size_header = struct.calcsize(fmt) if len(stream) < size_header: raise EOFError ( self.transaction_id, self.protocol_id, self.length, self.unit_id, self.fcode ) = struct.unpack(fmt, bytes(stream[:size_header])) size_total = size_header + self.length - 2 if len(stream) < size_total: raise EOFError self.data = stream[size_header:size_total] del stream[:size_total]
[ "def", "from_bytearray", "(", "self", ",", "stream", ")", ":", "fmt", "=", "'>HHHBB'", "size_header", "=", "struct", ".", "calcsize", "(", "fmt", ")", "if", "len", "(", "stream", ")", "<", "size_header", ":", "raise", "EOFError", "(", "self", ".", "transaction_id", ",", "self", ".", "protocol_id", ",", "self", ".", "length", ",", "self", ".", "unit_id", ",", "self", ".", "fcode", ")", "=", "struct", ".", "unpack", "(", "fmt", ",", "bytes", "(", "stream", "[", ":", "size_header", "]", ")", ")", "size_total", "=", "size_header", "+", "self", ".", "length", "-", "2", "if", "len", "(", "stream", ")", "<", "size_total", ":", "raise", "EOFError", "self", ".", "data", "=", "stream", "[", "size_header", ":", "size_total", "]", "del", "stream", "[", ":", "size_total", "]" ]
Constructs this frame from input data stream, consuming as many bytes as necessary from the beginning of the stream. If stream does not contain enough data to construct a complete modbus frame, an EOFError is raised and no data is consumed. :param stream: bytearray to consume data from to construct this frame. :except EOFError: Not enough data for complete frame; no data consumed.
[ "Constructs", "this", "frame", "from", "input", "data", "stream", "consuming", "as", "many", "bytes", "as", "necessary", "from", "the", "beginning", "of", "the", "stream", "." ]
931d96b8c761550a6a58f6e61e202690db04233a
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/modbus.py#L171-L200
train
236,801
DMSC-Instrument-Data/lewis
src/lewis/adapters/modbus.py
ModbusTCPFrame.to_bytearray
def to_bytearray(self):
    """
    Convert this frame into its bytearray representation.

    :return: bytearray representation of this frame.
    """
    packed_header = struct.pack(
        '>HHHBB',
        self.transaction_id,
        self.protocol_id,
        self.length,
        self.unit_id,
        self.fcode,
    )
    # Header followed by the data section; concatenation yields a bytearray.
    return bytearray(packed_header) + self.data
python
def to_bytearray(self): """ Convert this frame into its bytearray representation. :return: bytearray representation of this frame. """ header = bytearray(struct.pack( '>HHHBB', self.transaction_id, self.protocol_id, self.length, self.unit_id, self.fcode )) return header + self.data
[ "def", "to_bytearray", "(", "self", ")", ":", "header", "=", "bytearray", "(", "struct", ".", "pack", "(", "'>HHHBB'", ",", "self", ".", "transaction_id", ",", "self", ".", "protocol_id", ",", "self", ".", "length", ",", "self", ".", "unit_id", ",", "self", ".", "fcode", ")", ")", "return", "header", "+", "self", ".", "data" ]
Convert this frame into its bytearray representation. :return: bytearray representation of this frame.
[ "Convert", "this", "frame", "into", "its", "bytearray", "representation", "." ]
931d96b8c761550a6a58f6e61e202690db04233a
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/modbus.py#L202-L216
train
236,802
DMSC-Instrument-Data/lewis
src/lewis/adapters/modbus.py
ModbusTCPFrame.is_valid
def is_valid(self):
    """
    Check integrity and validity of this frame.

    :return: bool True if this frame is structurally valid.
    """
    # All three checks are evaluated unconditionally, mirroring the
    # protocol's structural requirements.
    protocol_ok = self.protocol_id == 0   # Modbus always uses protocol 0
    length_ok = 2 <= self.length <= 260   # Absolute length limits
    payload_ok = len(self.data) == self.length - 2  # Declared vs actual size
    return protocol_ok and length_ok and payload_ok
python
def is_valid(self): """ Check integrity and validity of this frame. :return: bool True if this frame is structurally valid. """ conditions = [ self.protocol_id == 0, # Modbus always uses protocol 0 2 <= self.length <= 260, # Absolute length limits len(self.data) == self.length - 2, # Total length matches data length ] return all(conditions)
[ "def", "is_valid", "(", "self", ")", ":", "conditions", "=", "[", "self", ".", "protocol_id", "==", "0", ",", "# Modbus always uses protocol 0", "2", "<=", "self", ".", "length", "<=", "260", ",", "# Absolute length limits", "len", "(", "self", ".", "data", ")", "==", "self", ".", "length", "-", "2", ",", "# Total length matches data length", "]", "return", "all", "(", "conditions", ")" ]
Check integrity and validity of this frame. :return: bool True if this frame is structurally valid.
[ "Check", "integrity", "and", "validity", "of", "this", "frame", "." ]
931d96b8c761550a6a58f6e61e202690db04233a
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/modbus.py#L218-L229
train
236,803
DMSC-Instrument-Data/lewis
src/lewis/adapters/modbus.py
ModbusTCPFrame.create_exception
def create_exception(self, code):
    """
    Create an exception frame based on this frame.

    :param code: Modbus exception code to use for this exception
    :return: ModbusTCPFrame instance that represents an exception
    """
    frame = deepcopy(self)
    # Exception responses carry exactly one data byte (the exception code),
    # so length = unit_id + fcode + 1 byte of data = 3.
    frame.length = 3
    # Exceptions are signalled by setting the high bit of the function code.
    frame.fcode += 0x80
    # BUG FIX: bytearray(chr(code)) is a Python 2 idiom; on Python 3 a str
    # argument without an encoding raises TypeError. Build the single-byte
    # payload from the integer code instead.
    frame.data = bytearray((code,))
    return frame
python
def create_exception(self, code): """ Create an exception frame based on this frame. :param code: Modbus exception code to use for this exception :return: ModbusTCPFrame instance that represents an exception """ frame = deepcopy(self) frame.length = 3 frame.fcode += 0x80 frame.data = bytearray(chr(code)) return frame
[ "def", "create_exception", "(", "self", ",", "code", ")", ":", "frame", "=", "deepcopy", "(", "self", ")", "frame", ".", "length", "=", "3", "frame", ".", "fcode", "+=", "0x80", "frame", ".", "data", "=", "bytearray", "(", "chr", "(", "code", ")", ")", "return", "frame" ]
Create an exception frame based on this frame. :param code: Modbus exception code to use for this exception :return: ModbusTCPFrame instance that represents an exception
[ "Create", "an", "exception", "frame", "based", "on", "this", "frame", "." ]
931d96b8c761550a6a58f6e61e202690db04233a
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/modbus.py#L231-L242
train
236,804
DMSC-Instrument-Data/lewis
src/lewis/adapters/modbus.py
ModbusTCPFrame.create_response
def create_response(self, data=None):
    """
    Create a response frame based on this frame.

    :param data: Data section of response as bytearray. If None, request
                 data section is kept.
    :return: ModbusTCPFrame instance that represents a response
    """
    frame = deepcopy(self)

    if data is not None:
        frame.data = data

    # unit_id and fcode account for 2 bytes on top of the data section.
    frame.length = len(frame.data) + 2
    return frame
python
def create_response(self, data=None): """ Create a response frame based on this frame. :param data: Data section of response as bytearray. If None, request data section is kept. :return: ModbusTCPFrame instance that represents a response """ frame = deepcopy(self) if data is not None: frame.data = data frame.length = 2 + len(frame.data) return frame
[ "def", "create_response", "(", "self", ",", "data", "=", "None", ")", ":", "frame", "=", "deepcopy", "(", "self", ")", "if", "data", "is", "not", "None", ":", "frame", ".", "data", "=", "data", "frame", ".", "length", "=", "2", "+", "len", "(", "frame", ".", "data", ")", "return", "frame" ]
Create a response frame based on this frame. :param data: Data section of response as bytearray. If None, request data section is kept. :return: ModbusTCPFrame instance that represents a response
[ "Create", "a", "response", "frame", "based", "on", "this", "frame", "." ]
931d96b8c761550a6a58f6e61e202690db04233a
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/modbus.py#L244-L255
train
236,805
simpleai-team/simpleai
simpleai/search/local.py
_all_expander
def _all_expander(fringe, iteration, viewer): ''' Expander that expands all nodes on the fringe. ''' expanded_neighbors = [node.expand(local_search=True) for node in fringe] if viewer: viewer.event('expanded', list(fringe), expanded_neighbors) list(map(fringe.extend, expanded_neighbors))
python
def _all_expander(fringe, iteration, viewer): ''' Expander that expands all nodes on the fringe. ''' expanded_neighbors = [node.expand(local_search=True) for node in fringe] if viewer: viewer.event('expanded', list(fringe), expanded_neighbors) list(map(fringe.extend, expanded_neighbors))
[ "def", "_all_expander", "(", "fringe", ",", "iteration", ",", "viewer", ")", ":", "expanded_neighbors", "=", "[", "node", ".", "expand", "(", "local_search", "=", "True", ")", "for", "node", "in", "fringe", "]", "if", "viewer", ":", "viewer", ".", "event", "(", "'expanded'", ",", "list", "(", "fringe", ")", ",", "expanded_neighbors", ")", "list", "(", "map", "(", "fringe", ".", "extend", ",", "expanded_neighbors", ")", ")" ]
Expander that expands all nodes on the fringe.
[ "Expander", "that", "expands", "all", "nodes", "on", "the", "fringe", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L8-L18
train
236,806
simpleai-team/simpleai
simpleai/search/local.py
beam
def beam(problem, beam_size=100, iterations_limit=0, viewer=None):
    '''
    Beam search.

    beam_size is the size of the beam.
    If iterations_limit is specified, the algorithm will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.
    Requires: SearchProblem.actions, SearchProblem.result,
    SearchProblem.value, and SearchProblem.generate_random_state.
    '''
    # Only stop on stagnation when no explicit iteration budget was given.
    halt_on_plateau = iterations_limit == 0
    search_kwargs = dict(iterations_limit=iterations_limit,
                         fringe_size=beam_size,
                         random_initial_states=True,
                         stop_when_no_better=halt_on_plateau,
                         viewer=viewer)
    return _local_search(problem, _all_expander, **search_kwargs)
python
def beam(problem, beam_size=100, iterations_limit=0, viewer=None): ''' Beam search. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value, and SearchProblem.generate_random_state. ''' return _local_search(problem, _all_expander, iterations_limit=iterations_limit, fringe_size=beam_size, random_initial_states=True, stop_when_no_better=iterations_limit==0, viewer=viewer)
[ "def", "beam", "(", "problem", ",", "beam_size", "=", "100", ",", "iterations_limit", "=", "0", ",", "viewer", "=", "None", ")", ":", "return", "_local_search", "(", "problem", ",", "_all_expander", ",", "iterations_limit", "=", "iterations_limit", ",", "fringe_size", "=", "beam_size", ",", "random_initial_states", "=", "True", ",", "stop_when_no_better", "=", "iterations_limit", "==", "0", ",", "viewer", "=", "viewer", ")" ]
Beam search. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value, and SearchProblem.generate_random_state.
[ "Beam", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L21-L38
train
236,807
simpleai-team/simpleai
simpleai/search/local.py
_first_expander
def _first_expander(fringe, iteration, viewer): ''' Expander that expands only the first node on the fringe. ''' current = fringe[0] neighbors = current.expand(local_search=True) if viewer: viewer.event('expanded', [current], [neighbors]) fringe.extend(neighbors)
python
def _first_expander(fringe, iteration, viewer): ''' Expander that expands only the first node on the fringe. ''' current = fringe[0] neighbors = current.expand(local_search=True) if viewer: viewer.event('expanded', [current], [neighbors]) fringe.extend(neighbors)
[ "def", "_first_expander", "(", "fringe", ",", "iteration", ",", "viewer", ")", ":", "current", "=", "fringe", "[", "0", "]", "neighbors", "=", "current", ".", "expand", "(", "local_search", "=", "True", ")", "if", "viewer", ":", "viewer", ".", "event", "(", "'expanded'", ",", "[", "current", "]", ",", "[", "neighbors", "]", ")", "fringe", ".", "extend", "(", "neighbors", ")" ]
Expander that expands only the first node on the fringe.
[ "Expander", "that", "expands", "only", "the", "first", "node", "on", "the", "fringe", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L41-L51
train
236,808
simpleai-team/simpleai
simpleai/search/local.py
beam_best_first
def beam_best_first(problem, beam_size=100, iterations_limit=0, viewer=None):
    '''
    Beam search best first.

    beam_size is the size of the beam.
    If iterations_limit is specified, the algorithm will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.value.
    '''
    # Only stop on stagnation when no explicit iteration budget was given.
    halt_on_plateau = iterations_limit == 0
    search_kwargs = dict(iterations_limit=iterations_limit,
                         fringe_size=beam_size,
                         random_initial_states=True,
                         stop_when_no_better=halt_on_plateau,
                         viewer=viewer)
    return _local_search(problem, _first_expander, **search_kwargs)
python
def beam_best_first(problem, beam_size=100, iterations_limit=0, viewer=None): ''' Beam search best first. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ''' return _local_search(problem, _first_expander, iterations_limit=iterations_limit, fringe_size=beam_size, random_initial_states=True, stop_when_no_better=iterations_limit==0, viewer=viewer)
[ "def", "beam_best_first", "(", "problem", ",", "beam_size", "=", "100", ",", "iterations_limit", "=", "0", ",", "viewer", "=", "None", ")", ":", "return", "_local_search", "(", "problem", ",", "_first_expander", ",", "iterations_limit", "=", "iterations_limit", ",", "fringe_size", "=", "beam_size", ",", "random_initial_states", "=", "True", ",", "stop_when_no_better", "=", "iterations_limit", "==", "0", ",", "viewer", "=", "viewer", ")" ]
Beam search best first. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value.
[ "Beam", "search", "best", "first", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L55-L72
train
236,809
simpleai-team/simpleai
simpleai/search/local.py
hill_climbing
def hill_climbing(problem, iterations_limit=0, viewer=None):
    '''
    Hill climbing search.

    If iterations_limit is specified, the algorithm will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.value.
    '''
    # Classic hill climbing: single-node fringe, always stop on plateau.
    search_kwargs = dict(iterations_limit=iterations_limit,
                         fringe_size=1,
                         stop_when_no_better=True,
                         viewer=viewer)
    return _local_search(problem, _first_expander, **search_kwargs)
python
def hill_climbing(problem, iterations_limit=0, viewer=None): ''' Hill climbing search. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ''' return _local_search(problem, _first_expander, iterations_limit=iterations_limit, fringe_size=1, stop_when_no_better=True, viewer=viewer)
[ "def", "hill_climbing", "(", "problem", ",", "iterations_limit", "=", "0", ",", "viewer", "=", "None", ")", ":", "return", "_local_search", "(", "problem", ",", "_first_expander", ",", "iterations_limit", "=", "iterations_limit", ",", "fringe_size", "=", "1", ",", "stop_when_no_better", "=", "True", ",", "viewer", "=", "viewer", ")" ]
Hill climbing search. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value.
[ "Hill", "climbing", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L75-L90
train
236,810
simpleai-team/simpleai
simpleai/search/local.py
hill_climbing_stochastic
def hill_climbing_stochastic(problem, iterations_limit=0, viewer=None):
    '''
    Stochastic hill climbing.

    If iterations_limit is specified, the algorithm will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.value.
    '''
    # Only stop on stagnation when no explicit iteration budget was given.
    halt_on_plateau = iterations_limit == 0
    search_kwargs = dict(iterations_limit=iterations_limit,
                         fringe_size=1,
                         stop_when_no_better=halt_on_plateau,
                         viewer=viewer)
    return _local_search(problem, _random_best_expander, **search_kwargs)
python
def hill_climbing_stochastic(problem, iterations_limit=0, viewer=None): ''' Stochastic hill climbing. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ''' return _local_search(problem, _random_best_expander, iterations_limit=iterations_limit, fringe_size=1, stop_when_no_better=iterations_limit==0, viewer=viewer)
[ "def", "hill_climbing_stochastic", "(", "problem", ",", "iterations_limit", "=", "0", ",", "viewer", "=", "None", ")", ":", "return", "_local_search", "(", "problem", ",", "_random_best_expander", ",", "iterations_limit", "=", "iterations_limit", ",", "fringe_size", "=", "1", ",", "stop_when_no_better", "=", "iterations_limit", "==", "0", ",", "viewer", "=", "viewer", ")" ]
Stochastic hill climbing. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value.
[ "Stochastic", "hill", "climbing", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L112-L127
train
236,811
simpleai-team/simpleai
simpleai/search/local.py
hill_climbing_random_restarts
def hill_climbing_random_restarts(problem, restarts_limit, iterations_limit=0, viewer=None):
    '''
    Hill climbing with random restarts.

    restarts_limit specifies the number of times hill_climbing will be runned.
    If iterations_limit is specified, each hill_climbing will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.
    Requires: SearchProblem.actions, SearchProblem.result,
    SearchProblem.value, and SearchProblem.generate_random_state.
    '''
    best = None

    for _ in range(restarts_limit):
        candidate = _local_search(problem,
                                  _first_expander,
                                  iterations_limit=iterations_limit,
                                  fringe_size=1,
                                  random_initial_states=True,
                                  stop_when_no_better=True,
                                  viewer=viewer)

        # Keep the run that reached the highest value so far.
        if not best or best.value < candidate.value:
            best = candidate

    if viewer:
        viewer.event('no_more_runs', best,
                     'returned after %i runs' % restarts_limit)

    return best
python
def hill_climbing_random_restarts(problem, restarts_limit, iterations_limit=0, viewer=None): ''' Hill climbing with random restarts. restarts_limit specifies the number of times hill_climbing will be runned. If iterations_limit is specified, each hill_climbing will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value, and SearchProblem.generate_random_state. ''' restarts = 0 best = None while restarts < restarts_limit: new = _local_search(problem, _first_expander, iterations_limit=iterations_limit, fringe_size=1, random_initial_states=True, stop_when_no_better=True, viewer=viewer) if not best or best.value < new.value: best = new restarts += 1 if viewer: viewer.event('no_more_runs', best, 'returned after %i runs' % restarts_limit) return best
[ "def", "hill_climbing_random_restarts", "(", "problem", ",", "restarts_limit", ",", "iterations_limit", "=", "0", ",", "viewer", "=", "None", ")", ":", "restarts", "=", "0", "best", "=", "None", "while", "restarts", "<", "restarts_limit", ":", "new", "=", "_local_search", "(", "problem", ",", "_first_expander", ",", "iterations_limit", "=", "iterations_limit", ",", "fringe_size", "=", "1", ",", "random_initial_states", "=", "True", ",", "stop_when_no_better", "=", "True", ",", "viewer", "=", "viewer", ")", "if", "not", "best", "or", "best", ".", "value", "<", "new", ".", "value", ":", "best", "=", "new", "restarts", "+=", "1", "if", "viewer", ":", "viewer", ".", "event", "(", "'no_more_runs'", ",", "best", ",", "'returned after %i runs'", "%", "restarts_limit", ")", "return", "best" ]
Hill climbing with random restarts. restarts_limit specifies the number of times hill_climbing will be runned. If iterations_limit is specified, each hill_climbing will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value, and SearchProblem.generate_random_state.
[ "Hill", "climbing", "with", "random", "restarts", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L130-L161
train
236,812
simpleai-team/simpleai
simpleai/search/local.py
_exp_schedule
def _exp_schedule(iteration, k=20, lam=0.005, limit=100): ''' Possible scheduler for simulated_annealing, based on the aima example. ''' return k * math.exp(-lam * iteration)
python
def _exp_schedule(iteration, k=20, lam=0.005, limit=100): ''' Possible scheduler for simulated_annealing, based on the aima example. ''' return k * math.exp(-lam * iteration)
[ "def", "_exp_schedule", "(", "iteration", ",", "k", "=", "20", ",", "lam", "=", "0.005", ",", "limit", "=", "100", ")", ":", "return", "k", "*", "math", ".", "exp", "(", "-", "lam", "*", "iteration", ")" ]
Possible scheduler for simulated_annealing, based on the aima example.
[ "Possible", "scheduler", "for", "simulated_annealing", "based", "on", "the", "aima", "example", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L165-L169
train
236,813
simpleai-team/simpleai
simpleai/search/local.py
simulated_annealing
def simulated_annealing(problem, schedule=_exp_schedule, iterations_limit=0, viewer=None):
    '''
    Simulated annealing.

    schedule is the scheduling function that decides the chance to choose
    worst nodes depending on the time.
    If iterations_limit is specified, the algorithm will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.value.
    '''
    expander = _create_simulated_annealing_expander(schedule)
    # Only stop on stagnation when no explicit iteration budget was given.
    halt_on_plateau = iterations_limit == 0
    return _local_search(problem,
                         expander,
                         iterations_limit=iterations_limit,
                         fringe_size=1,
                         stop_when_no_better=halt_on_plateau,
                         viewer=viewer)
python
def simulated_annealing(problem, schedule=_exp_schedule, iterations_limit=0, viewer=None): ''' Simulated annealing. schedule is the scheduling function that decides the chance to choose worst nodes depending on the time. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ''' return _local_search(problem, _create_simulated_annealing_expander(schedule), iterations_limit=iterations_limit, fringe_size=1, stop_when_no_better=iterations_limit==0, viewer=viewer)
[ "def", "simulated_annealing", "(", "problem", ",", "schedule", "=", "_exp_schedule", ",", "iterations_limit", "=", "0", ",", "viewer", "=", "None", ")", ":", "return", "_local_search", "(", "problem", ",", "_create_simulated_annealing_expander", "(", "schedule", ")", ",", "iterations_limit", "=", "iterations_limit", ",", "fringe_size", "=", "1", ",", "stop_when_no_better", "=", "iterations_limit", "==", "0", ",", "viewer", "=", "viewer", ")" ]
Simulated annealing. schedule is the scheduling function that decides the chance to choose worst nodes depending on the time. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value.
[ "Simulated", "annealing", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L198-L215
train
236,814
simpleai-team/simpleai
simpleai/search/local.py
_create_genetic_expander
def _create_genetic_expander(problem, mutation_chance):
    '''
    Creates an expander that expands the bests nodes of the population,
    crossing over them.
    '''
    def _expander(fringe, iteration, viewer):
        # Fitness-proportionate parent selection over the current population.
        fitness = [node.value for node in fringe]
        sampler = InverseTransformSampler(fitness, fringe)

        offspring = []
        expanded_nodes = []
        expanded_neighbors = []

        # Produce one child per member of the current population.
        for _ in fringe:
            parent_a = sampler.sample()
            parent_b = sampler.sample()
            child_state = problem.crossover(parent_a.state, parent_b.state)
            action = 'crossover'
            if random.random() < mutation_chance:
                # Noooouuu! she is... he is... *IT* is a mutant!
                child_state = problem.mutate(child_state)
                action += '+mutation'

            child_node = SearchNodeValueOrdered(state=child_state,
                                                problem=problem,
                                                action=action)
            offspring.append(child_node)

            # Record both parents as producers of this child, for viewers.
            for parent in (parent_a, parent_b):
                expanded_nodes.append(parent)
                expanded_neighbors.append([child_node])

        if viewer:
            viewer.event('expanded', expanded_nodes, expanded_neighbors)

        # Replace the whole population with the new generation.
        fringe.clear()
        for child_node in offspring:
            fringe.append(child_node)

    return _expander
Creates an expander that expands the bests nodes of the population, crossing over them.
[ "Creates", "an", "expander", "that", "expands", "the", "bests", "nodes", "of", "the", "population", "crossing", "over", "them", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L218-L256
train
236,815
simpleai-team/simpleai
simpleai/search/local.py
genetic
def genetic(problem, population_size=100, mutation_chance=0.1, iterations_limit=0, viewer=None): ''' Genetic search. population_size specifies the size of the population (ORLY). mutation_chance specifies the probability of a mutation on a child, varying from 0 to 1. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.generate_random_state, SearchProblem.crossover, SearchProblem.mutate and SearchProblem.value. ''' return _local_search(problem, _create_genetic_expander(problem, mutation_chance), iterations_limit=iterations_limit, fringe_size=population_size, random_initial_states=True, stop_when_no_better=iterations_limit==0, viewer=viewer)
python
def genetic(problem, population_size=100, mutation_chance=0.1, iterations_limit=0, viewer=None): ''' Genetic search. population_size specifies the size of the population (ORLY). mutation_chance specifies the probability of a mutation on a child, varying from 0 to 1. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.generate_random_state, SearchProblem.crossover, SearchProblem.mutate and SearchProblem.value. ''' return _local_search(problem, _create_genetic_expander(problem, mutation_chance), iterations_limit=iterations_limit, fringe_size=population_size, random_initial_states=True, stop_when_no_better=iterations_limit==0, viewer=viewer)
[ "def", "genetic", "(", "problem", ",", "population_size", "=", "100", ",", "mutation_chance", "=", "0.1", ",", "iterations_limit", "=", "0", ",", "viewer", "=", "None", ")", ":", "return", "_local_search", "(", "problem", ",", "_create_genetic_expander", "(", "problem", ",", "mutation_chance", ")", ",", "iterations_limit", "=", "iterations_limit", ",", "fringe_size", "=", "population_size", ",", "random_initial_states", "=", "True", ",", "stop_when_no_better", "=", "iterations_limit", "==", "0", ",", "viewer", "=", "viewer", ")" ]
Genetic search. population_size specifies the size of the population (ORLY). mutation_chance specifies the probability of a mutation on a child, varying from 0 to 1. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.generate_random_state, SearchProblem.crossover, SearchProblem.mutate and SearchProblem.value.
[ "Genetic", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L259-L279
train
236,816
simpleai-team/simpleai
simpleai/search/local.py
_local_search
def _local_search(problem, fringe_expander, iterations_limit=0, fringe_size=1, random_initial_states=False, stop_when_no_better=True, viewer=None): ''' Basic algorithm for all local search algorithms. ''' if viewer: viewer.event('started') fringe = BoundedPriorityQueue(fringe_size) if random_initial_states: for _ in range(fringe_size): s = problem.generate_random_state() fringe.append(SearchNodeValueOrdered(state=s, problem=problem)) else: fringe.append(SearchNodeValueOrdered(state=problem.initial_state, problem=problem)) finish_reason = '' iteration = 0 run = True best = None while run: if viewer: viewer.event('new_iteration', list(fringe)) old_best = fringe[0] fringe_expander(fringe, iteration, viewer) best = fringe[0] iteration += 1 if iterations_limit and iteration >= iterations_limit: run = False finish_reason = 'reaching iteration limit' elif old_best.value >= best.value and stop_when_no_better: run = False finish_reason = 'not being able to improve solution' if viewer: viewer.event('finished', fringe, best, 'returned after %s' % finish_reason) return best
python
def _local_search(problem, fringe_expander, iterations_limit=0, fringe_size=1, random_initial_states=False, stop_when_no_better=True, viewer=None): ''' Basic algorithm for all local search algorithms. ''' if viewer: viewer.event('started') fringe = BoundedPriorityQueue(fringe_size) if random_initial_states: for _ in range(fringe_size): s = problem.generate_random_state() fringe.append(SearchNodeValueOrdered(state=s, problem=problem)) else: fringe.append(SearchNodeValueOrdered(state=problem.initial_state, problem=problem)) finish_reason = '' iteration = 0 run = True best = None while run: if viewer: viewer.event('new_iteration', list(fringe)) old_best = fringe[0] fringe_expander(fringe, iteration, viewer) best = fringe[0] iteration += 1 if iterations_limit and iteration >= iterations_limit: run = False finish_reason = 'reaching iteration limit' elif old_best.value >= best.value and stop_when_no_better: run = False finish_reason = 'not being able to improve solution' if viewer: viewer.event('finished', fringe, best, 'returned after %s' % finish_reason) return best
[ "def", "_local_search", "(", "problem", ",", "fringe_expander", ",", "iterations_limit", "=", "0", ",", "fringe_size", "=", "1", ",", "random_initial_states", "=", "False", ",", "stop_when_no_better", "=", "True", ",", "viewer", "=", "None", ")", ":", "if", "viewer", ":", "viewer", ".", "event", "(", "'started'", ")", "fringe", "=", "BoundedPriorityQueue", "(", "fringe_size", ")", "if", "random_initial_states", ":", "for", "_", "in", "range", "(", "fringe_size", ")", ":", "s", "=", "problem", ".", "generate_random_state", "(", ")", "fringe", ".", "append", "(", "SearchNodeValueOrdered", "(", "state", "=", "s", ",", "problem", "=", "problem", ")", ")", "else", ":", "fringe", ".", "append", "(", "SearchNodeValueOrdered", "(", "state", "=", "problem", ".", "initial_state", ",", "problem", "=", "problem", ")", ")", "finish_reason", "=", "''", "iteration", "=", "0", "run", "=", "True", "best", "=", "None", "while", "run", ":", "if", "viewer", ":", "viewer", ".", "event", "(", "'new_iteration'", ",", "list", "(", "fringe", ")", ")", "old_best", "=", "fringe", "[", "0", "]", "fringe_expander", "(", "fringe", ",", "iteration", ",", "viewer", ")", "best", "=", "fringe", "[", "0", "]", "iteration", "+=", "1", "if", "iterations_limit", "and", "iteration", ">=", "iterations_limit", ":", "run", "=", "False", "finish_reason", "=", "'reaching iteration limit'", "elif", "old_best", ".", "value", ">=", "best", ".", "value", "and", "stop_when_no_better", ":", "run", "=", "False", "finish_reason", "=", "'not being able to improve solution'", "if", "viewer", ":", "viewer", ".", "event", "(", "'finished'", ",", "fringe", ",", "best", ",", "'returned after %s'", "%", "finish_reason", ")", "return", "best" ]
Basic algorithm for all local search algorithms.
[ "Basic", "algorithm", "for", "all", "local", "search", "algorithms", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L282-L325
train
236,817
simpleai-team/simpleai
samples/search/eight_puzzle.py
EigthPuzzleProblem.actions
def actions(self, state): '''Returns a list of the pieces we can move to the empty space.''' rows = string_to_list(state) row_e, col_e = find_location(rows, 'e') actions = [] if row_e > 0: actions.append(rows[row_e - 1][col_e]) if row_e < 2: actions.append(rows[row_e + 1][col_e]) if col_e > 0: actions.append(rows[row_e][col_e - 1]) if col_e < 2: actions.append(rows[row_e][col_e + 1]) return actions
python
def actions(self, state): '''Returns a list of the pieces we can move to the empty space.''' rows = string_to_list(state) row_e, col_e = find_location(rows, 'e') actions = [] if row_e > 0: actions.append(rows[row_e - 1][col_e]) if row_e < 2: actions.append(rows[row_e + 1][col_e]) if col_e > 0: actions.append(rows[row_e][col_e - 1]) if col_e < 2: actions.append(rows[row_e][col_e + 1]) return actions
[ "def", "actions", "(", "self", ",", "state", ")", ":", "rows", "=", "string_to_list", "(", "state", ")", "row_e", ",", "col_e", "=", "find_location", "(", "rows", ",", "'e'", ")", "actions", "=", "[", "]", "if", "row_e", ">", "0", ":", "actions", ".", "append", "(", "rows", "[", "row_e", "-", "1", "]", "[", "col_e", "]", ")", "if", "row_e", "<", "2", ":", "actions", ".", "append", "(", "rows", "[", "row_e", "+", "1", "]", "[", "col_e", "]", ")", "if", "col_e", ">", "0", ":", "actions", ".", "append", "(", "rows", "[", "row_e", "]", "[", "col_e", "-", "1", "]", ")", "if", "col_e", "<", "2", ":", "actions", ".", "append", "(", "rows", "[", "row_e", "]", "[", "col_e", "+", "1", "]", ")", "return", "actions" ]
Returns a list of the pieces we can move to the empty space.
[ "Returns", "a", "list", "of", "the", "pieces", "we", "can", "move", "to", "the", "empty", "space", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/eight_puzzle.py#L64-L79
train
236,818
simpleai-team/simpleai
simpleai/machine_learning/models.py
is_attribute
def is_attribute(method, name=None): """ Decorator for methods that are attributes. """ if name is None: name = method.__name__ method.is_attribute = True method.name = name return method
python
def is_attribute(method, name=None): """ Decorator for methods that are attributes. """ if name is None: name = method.__name__ method.is_attribute = True method.name = name return method
[ "def", "is_attribute", "(", "method", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "method", ".", "__name__", "method", ".", "is_attribute", "=", "True", "method", ".", "name", "=", "name", "return", "method" ]
Decorator for methods that are attributes.
[ "Decorator", "for", "methods", "that", "are", "attributes", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/models.py#L230-L238
train
236,819
simpleai-team/simpleai
simpleai/machine_learning/models.py
Classifier.load
def load(cls, filepath): """ Loads a pickled version of the classifier saved in `filepath` """ with open(filepath, "rb") as filehandler: classifier = pickle.load(filehandler) if not isinstance(classifier, Classifier): raise ValueError("Pickled object is not a Classifier") return classifier
python
def load(cls, filepath): """ Loads a pickled version of the classifier saved in `filepath` """ with open(filepath, "rb") as filehandler: classifier = pickle.load(filehandler) if not isinstance(classifier, Classifier): raise ValueError("Pickled object is not a Classifier") return classifier
[ "def", "load", "(", "cls", ",", "filepath", ")", ":", "with", "open", "(", "filepath", ",", "\"rb\"", ")", "as", "filehandler", ":", "classifier", "=", "pickle", ".", "load", "(", "filehandler", ")", "if", "not", "isinstance", "(", "classifier", ",", "Classifier", ")", ":", "raise", "ValueError", "(", "\"Pickled object is not a Classifier\"", ")", "return", "classifier" ]
Loads a pickled version of the classifier saved in `filepath`
[ "Loads", "a", "pickled", "version", "of", "the", "classifier", "saved", "in", "filepath" ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/models.py#L76-L86
train
236,820
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
tree_to_str
def tree_to_str(root): """ Returns a string representation of a decision tree with root node `root`. """ xs = [] for value, node, depth in iter_tree(root): template = "{indent}" if node is not root: template += "case={value}\t" if node.attribute is None: template += "result={result} -- P={prob:.2}" else: template += "split by {split}:\t" +\ "(partial result={result} -- P={prob:.2})" line = template.format(indent=" " * depth, value=value, result=node.result[0], prob=node.result[1], split=str(node.attribute)) xs.append(line) return "\n".join(xs)
python
def tree_to_str(root): """ Returns a string representation of a decision tree with root node `root`. """ xs = [] for value, node, depth in iter_tree(root): template = "{indent}" if node is not root: template += "case={value}\t" if node.attribute is None: template += "result={result} -- P={prob:.2}" else: template += "split by {split}:\t" +\ "(partial result={result} -- P={prob:.2})" line = template.format(indent=" " * depth, value=value, result=node.result[0], prob=node.result[1], split=str(node.attribute)) xs.append(line) return "\n".join(xs)
[ "def", "tree_to_str", "(", "root", ")", ":", "xs", "=", "[", "]", "for", "value", ",", "node", ",", "depth", "in", "iter_tree", "(", "root", ")", ":", "template", "=", "\"{indent}\"", "if", "node", "is", "not", "root", ":", "template", "+=", "\"case={value}\\t\"", "if", "node", ".", "attribute", "is", "None", ":", "template", "+=", "\"result={result} -- P={prob:.2}\"", "else", ":", "template", "+=", "\"split by {split}:\\t\"", "+", "\"(partial result={result} -- P={prob:.2})\"", "line", "=", "template", ".", "format", "(", "indent", "=", "\" \"", "*", "depth", ",", "value", "=", "value", ",", "result", "=", "node", ".", "result", "[", "0", "]", ",", "prob", "=", "node", ".", "result", "[", "1", "]", ",", "split", "=", "str", "(", "node", ".", "attribute", ")", ")", "xs", ".", "append", "(", "line", ")", "return", "\"\\n\"", ".", "join", "(", "xs", ")" ]
Returns a string representation of a decision tree with root node `root`.
[ "Returns", "a", "string", "representation", "of", "a", "decision", "tree", "with", "root", "node", "root", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/classifiers.py#L216-L238
train
236,821
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
KNearestNeighbors.save
def save(self, filepath): """ Saves the classifier to `filepath`. Because this classifier needs to save the dataset, it must be something that can be pickled and not something like an iterator. """ if not filepath or not isinstance(filepath, str): raise ValueError("Invalid filepath") with open(filepath, "wb") as filehandler: pickle.dump(self, filehandler)
python
def save(self, filepath): """ Saves the classifier to `filepath`. Because this classifier needs to save the dataset, it must be something that can be pickled and not something like an iterator. """ if not filepath or not isinstance(filepath, str): raise ValueError("Invalid filepath") with open(filepath, "wb") as filehandler: pickle.dump(self, filehandler)
[ "def", "save", "(", "self", ",", "filepath", ")", ":", "if", "not", "filepath", "or", "not", "isinstance", "(", "filepath", ",", "str", ")", ":", "raise", "ValueError", "(", "\"Invalid filepath\"", ")", "with", "open", "(", "filepath", ",", "\"wb\"", ")", "as", "filehandler", ":", "pickle", ".", "dump", "(", "self", ",", "filehandler", ")" ]
Saves the classifier to `filepath`. Because this classifier needs to save the dataset, it must be something that can be pickled and not something like an iterator.
[ "Saves", "the", "classifier", "to", "filepath", ".", "Because", "this", "classifier", "needs", "to", "save", "the", "dataset", "it", "must", "be", "something", "that", "can", "be", "pickled", "and", "not", "something", "like", "an", "iterator", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/classifiers.py#L180-L192
train
236,822
simpleai-team/simpleai
simpleai/machine_learning/classifiers.py
DecisionTreeLearner_Queued._max_gain_split
def _max_gain_split(self, examples): """ Returns an OnlineInformationGain of the attribute with max gain based on `examples`. """ gains = self._new_set_of_gain_counters() for example in examples: for gain in gains: gain.add(example) winner = max(gains, key=lambda gain: gain.get_gain()) if not winner.get_target_class_counts(): raise ValueError("Dataset is empty") return winner
python
def _max_gain_split(self, examples): """ Returns an OnlineInformationGain of the attribute with max gain based on `examples`. """ gains = self._new_set_of_gain_counters() for example in examples: for gain in gains: gain.add(example) winner = max(gains, key=lambda gain: gain.get_gain()) if not winner.get_target_class_counts(): raise ValueError("Dataset is empty") return winner
[ "def", "_max_gain_split", "(", "self", ",", "examples", ")", ":", "gains", "=", "self", ".", "_new_set_of_gain_counters", "(", ")", "for", "example", "in", "examples", ":", "for", "gain", "in", "gains", ":", "gain", ".", "add", "(", "example", ")", "winner", "=", "max", "(", "gains", ",", "key", "=", "lambda", "gain", ":", "gain", ".", "get_gain", "(", ")", ")", "if", "not", "winner", ".", "get_target_class_counts", "(", ")", ":", "raise", "ValueError", "(", "\"Dataset is empty\"", ")", "return", "winner" ]
Returns an OnlineInformationGain of the attribute with max gain based on `examples`.
[ "Returns", "an", "OnlineInformationGain", "of", "the", "attribute", "with", "max", "gain", "based", "on", "examples", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/classifiers.py#L322-L334
train
236,823
simpleai-team/simpleai
simpleai/search/csp.py
backtrack
def backtrack(problem, variable_heuristic='', value_heuristic='', inference=True): ''' Backtracking search. variable_heuristic is the heuristic for variable choosing, can be MOST_CONSTRAINED_VARIABLE, HIGHEST_DEGREE_VARIABLE, or blank for simple ordered choosing. value_heuristic is the heuristic for value choosing, can be LEAST_CONSTRAINING_VALUE or blank for simple ordered choosing. ''' assignment = {} domains = deepcopy(problem.domains) if variable_heuristic == MOST_CONSTRAINED_VARIABLE: variable_chooser = _most_constrained_variable_chooser elif variable_heuristic == HIGHEST_DEGREE_VARIABLE: variable_chooser = _highest_degree_variable_chooser else: variable_chooser = _basic_variable_chooser if value_heuristic == LEAST_CONSTRAINING_VALUE: values_sorter = _least_constraining_values_sorter else: values_sorter = _basic_values_sorter return _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=inference)
python
def backtrack(problem, variable_heuristic='', value_heuristic='', inference=True): ''' Backtracking search. variable_heuristic is the heuristic for variable choosing, can be MOST_CONSTRAINED_VARIABLE, HIGHEST_DEGREE_VARIABLE, or blank for simple ordered choosing. value_heuristic is the heuristic for value choosing, can be LEAST_CONSTRAINING_VALUE or blank for simple ordered choosing. ''' assignment = {} domains = deepcopy(problem.domains) if variable_heuristic == MOST_CONSTRAINED_VARIABLE: variable_chooser = _most_constrained_variable_chooser elif variable_heuristic == HIGHEST_DEGREE_VARIABLE: variable_chooser = _highest_degree_variable_chooser else: variable_chooser = _basic_variable_chooser if value_heuristic == LEAST_CONSTRAINING_VALUE: values_sorter = _least_constraining_values_sorter else: values_sorter = _basic_values_sorter return _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=inference)
[ "def", "backtrack", "(", "problem", ",", "variable_heuristic", "=", "''", ",", "value_heuristic", "=", "''", ",", "inference", "=", "True", ")", ":", "assignment", "=", "{", "}", "domains", "=", "deepcopy", "(", "problem", ".", "domains", ")", "if", "variable_heuristic", "==", "MOST_CONSTRAINED_VARIABLE", ":", "variable_chooser", "=", "_most_constrained_variable_chooser", "elif", "variable_heuristic", "==", "HIGHEST_DEGREE_VARIABLE", ":", "variable_chooser", "=", "_highest_degree_variable_chooser", "else", ":", "variable_chooser", "=", "_basic_variable_chooser", "if", "value_heuristic", "==", "LEAST_CONSTRAINING_VALUE", ":", "values_sorter", "=", "_least_constraining_values_sorter", "else", ":", "values_sorter", "=", "_basic_values_sorter", "return", "_backtracking", "(", "problem", ",", "assignment", ",", "domains", ",", "variable_chooser", ",", "values_sorter", ",", "inference", "=", "inference", ")" ]
Backtracking search. variable_heuristic is the heuristic for variable choosing, can be MOST_CONSTRAINED_VARIABLE, HIGHEST_DEGREE_VARIABLE, or blank for simple ordered choosing. value_heuristic is the heuristic for value choosing, can be LEAST_CONSTRAINING_VALUE or blank for simple ordered choosing.
[ "Backtracking", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L12-L41
train
236,824
simpleai-team/simpleai
simpleai/search/csp.py
_most_constrained_variable_chooser
def _most_constrained_variable_chooser(problem, variables, domains): ''' Choose the variable that has less available values. ''' # the variable with fewer values available return sorted(variables, key=lambda v: len(domains[v]))[0]
python
def _most_constrained_variable_chooser(problem, variables, domains): ''' Choose the variable that has less available values. ''' # the variable with fewer values available return sorted(variables, key=lambda v: len(domains[v]))[0]
[ "def", "_most_constrained_variable_chooser", "(", "problem", ",", "variables", ",", "domains", ")", ":", "# the variable with fewer values available", "return", "sorted", "(", "variables", ",", "key", "=", "lambda", "v", ":", "len", "(", "domains", "[", "v", "]", ")", ")", "[", "0", "]" ]
Choose the variable that has less available values.
[ "Choose", "the", "variable", "that", "has", "less", "available", "values", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L51-L56
train
236,825
simpleai-team/simpleai
simpleai/search/csp.py
_highest_degree_variable_chooser
def _highest_degree_variable_chooser(problem, variables, domains): ''' Choose the variable that is involved on more constraints. ''' # the variable involved in more constraints return sorted(variables, key=lambda v: problem.var_degrees[v], reverse=True)[0]
python
def _highest_degree_variable_chooser(problem, variables, domains): ''' Choose the variable that is involved on more constraints. ''' # the variable involved in more constraints return sorted(variables, key=lambda v: problem.var_degrees[v], reverse=True)[0]
[ "def", "_highest_degree_variable_chooser", "(", "problem", ",", "variables", ",", "domains", ")", ":", "# the variable involved in more constraints", "return", "sorted", "(", "variables", ",", "key", "=", "lambda", "v", ":", "problem", ".", "var_degrees", "[", "v", "]", ",", "reverse", "=", "True", ")", "[", "0", "]" ]
Choose the variable that is involved on more constraints.
[ "Choose", "the", "variable", "that", "is", "involved", "on", "more", "constraints", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L59-L64
train
236,826
simpleai-team/simpleai
simpleai/search/csp.py
_count_conflicts
def _count_conflicts(problem, assignment, variable=None, value=None): ''' Count the number of violated constraints on a given assignment. ''' return len(_find_conflicts(problem, assignment, variable, value))
python
def _count_conflicts(problem, assignment, variable=None, value=None): ''' Count the number of violated constraints on a given assignment. ''' return len(_find_conflicts(problem, assignment, variable, value))
[ "def", "_count_conflicts", "(", "problem", ",", "assignment", ",", "variable", "=", "None", ",", "value", "=", "None", ")", ":", "return", "len", "(", "_find_conflicts", "(", "problem", ",", "assignment", ",", "variable", ",", "value", ")", ")" ]
Count the number of violated constraints on a given assignment.
[ "Count", "the", "number", "of", "violated", "constraints", "on", "a", "given", "assignment", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L67-L71
train
236,827
simpleai-team/simpleai
simpleai/search/csp.py
_find_conflicts
def _find_conflicts(problem, assignment, variable=None, value=None): ''' Find violated constraints on a given assignment, with the possibility of specifying a new variable and value to add to the assignment before checking. ''' if variable is not None and value is not None: assignment = deepcopy(assignment) assignment[variable] = value conflicts = [] for neighbors, constraint in problem.constraints: # if all the neighbors on the constraint have values, check if conflict if all(n in assignment for n in neighbors): if not _call_constraint(assignment, neighbors, constraint): conflicts.append((neighbors, constraint)) return conflicts
python
def _find_conflicts(problem, assignment, variable=None, value=None): ''' Find violated constraints on a given assignment, with the possibility of specifying a new variable and value to add to the assignment before checking. ''' if variable is not None and value is not None: assignment = deepcopy(assignment) assignment[variable] = value conflicts = [] for neighbors, constraint in problem.constraints: # if all the neighbors on the constraint have values, check if conflict if all(n in assignment for n in neighbors): if not _call_constraint(assignment, neighbors, constraint): conflicts.append((neighbors, constraint)) return conflicts
[ "def", "_find_conflicts", "(", "problem", ",", "assignment", ",", "variable", "=", "None", ",", "value", "=", "None", ")", ":", "if", "variable", "is", "not", "None", "and", "value", "is", "not", "None", ":", "assignment", "=", "deepcopy", "(", "assignment", ")", "assignment", "[", "variable", "]", "=", "value", "conflicts", "=", "[", "]", "for", "neighbors", ",", "constraint", "in", "problem", ".", "constraints", ":", "# if all the neighbors on the constraint have values, check if conflict", "if", "all", "(", "n", "in", "assignment", "for", "n", "in", "neighbors", ")", ":", "if", "not", "_call_constraint", "(", "assignment", ",", "neighbors", ",", "constraint", ")", ":", "conflicts", ".", "append", "(", "(", "neighbors", ",", "constraint", ")", ")", "return", "conflicts" ]
Find violated constraints on a given assignment, with the possibility of specifying a new variable and value to add to the assignment before checking.
[ "Find", "violated", "constraints", "on", "a", "given", "assignment", "with", "the", "possibility", "of", "specifying", "a", "new", "variable", "and", "value", "to", "add", "to", "the", "assignment", "before", "checking", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L80-L97
train
236,828
simpleai-team/simpleai
simpleai/search/csp.py
_least_constraining_values_sorter
def _least_constraining_values_sorter(problem, assignment, variable, domains): ''' Sort values based on how many conflicts they generate if assigned. ''' # the value that generates less conflicts def update_assignment(value): new_assignment = deepcopy(assignment) new_assignment[variable] = value return new_assignment values = sorted(domains[variable][:], key=lambda v: _count_conflicts(problem, assignment, variable, v)) return values
python
def _least_constraining_values_sorter(problem, assignment, variable, domains): ''' Sort values based on how many conflicts they generate if assigned. ''' # the value that generates less conflicts def update_assignment(value): new_assignment = deepcopy(assignment) new_assignment[variable] = value return new_assignment values = sorted(domains[variable][:], key=lambda v: _count_conflicts(problem, assignment, variable, v)) return values
[ "def", "_least_constraining_values_sorter", "(", "problem", ",", "assignment", ",", "variable", ",", "domains", ")", ":", "# the value that generates less conflicts", "def", "update_assignment", "(", "value", ")", ":", "new_assignment", "=", "deepcopy", "(", "assignment", ")", "new_assignment", "[", "variable", "]", "=", "value", "return", "new_assignment", "values", "=", "sorted", "(", "domains", "[", "variable", "]", "[", ":", "]", ",", "key", "=", "lambda", "v", ":", "_count_conflicts", "(", "problem", ",", "assignment", ",", "variable", ",", "v", ")", ")", "return", "values" ]
Sort values based on how many conflicts they generate if assigned.
[ "Sort", "values", "based", "on", "how", "many", "conflicts", "they", "generate", "if", "assigned", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L107-L120
train
236,829
simpleai-team/simpleai
simpleai/search/csp.py
_backtracking
def _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=True): ''' Internal recursive backtracking algorithm. ''' from simpleai.search.arc import arc_consistency_3 if len(assignment) == len(problem.variables): return assignment pending = [v for v in problem.variables if v not in assignment] variable = variable_chooser(problem, pending, domains) values = values_sorter(problem, assignment, variable, domains) for value in values: new_assignment = deepcopy(assignment) new_assignment[variable] = value if not _count_conflicts(problem, new_assignment): # TODO on aima also checks if using fc new_domains = deepcopy(domains) new_domains[variable] = [value] if not inference or arc_consistency_3(new_domains, problem.constraints): result = _backtracking(problem, new_assignment, new_domains, variable_chooser, values_sorter, inference=inference) if result: return result return None
python
def _backtracking(problem, assignment, domains, variable_chooser, values_sorter, inference=True): ''' Internal recursive backtracking algorithm. ''' from simpleai.search.arc import arc_consistency_3 if len(assignment) == len(problem.variables): return assignment pending = [v for v in problem.variables if v not in assignment] variable = variable_chooser(problem, pending, domains) values = values_sorter(problem, assignment, variable, domains) for value in values: new_assignment = deepcopy(assignment) new_assignment[variable] = value if not _count_conflicts(problem, new_assignment): # TODO on aima also checks if using fc new_domains = deepcopy(domains) new_domains[variable] = [value] if not inference or arc_consistency_3(new_domains, problem.constraints): result = _backtracking(problem, new_assignment, new_domains, variable_chooser, values_sorter, inference=inference) if result: return result return None
[ "def", "_backtracking", "(", "problem", ",", "assignment", ",", "domains", ",", "variable_chooser", ",", "values_sorter", ",", "inference", "=", "True", ")", ":", "from", "simpleai", ".", "search", ".", "arc", "import", "arc_consistency_3", "if", "len", "(", "assignment", ")", "==", "len", "(", "problem", ".", "variables", ")", ":", "return", "assignment", "pending", "=", "[", "v", "for", "v", "in", "problem", ".", "variables", "if", "v", "not", "in", "assignment", "]", "variable", "=", "variable_chooser", "(", "problem", ",", "pending", ",", "domains", ")", "values", "=", "values_sorter", "(", "problem", ",", "assignment", ",", "variable", ",", "domains", ")", "for", "value", "in", "values", ":", "new_assignment", "=", "deepcopy", "(", "assignment", ")", "new_assignment", "[", "variable", "]", "=", "value", "if", "not", "_count_conflicts", "(", "problem", ",", "new_assignment", ")", ":", "# TODO on aima also checks if using fc", "new_domains", "=", "deepcopy", "(", "domains", ")", "new_domains", "[", "variable", "]", "=", "[", "value", "]", "if", "not", "inference", "or", "arc_consistency_3", "(", "new_domains", ",", "problem", ".", "constraints", ")", ":", "result", "=", "_backtracking", "(", "problem", ",", "new_assignment", ",", "new_domains", ",", "variable_chooser", ",", "values_sorter", ",", "inference", "=", "inference", ")", "if", "result", ":", "return", "result", "return", "None" ]
Internal recursive backtracking algorithm.
[ "Internal", "recursive", "backtracking", "algorithm", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L123-L155
train
236,830
simpleai-team/simpleai
simpleai/search/csp.py
_min_conflicts_value
def _min_conflicts_value(problem, assignment, variable): ''' Return the value generate the less number of conflicts. In case of tie, a random value is selected among this values subset. ''' return argmin(problem.domains[variable], lambda x: _count_conflicts(problem, assignment, variable, x))
python
def _min_conflicts_value(problem, assignment, variable): ''' Return the value generate the less number of conflicts. In case of tie, a random value is selected among this values subset. ''' return argmin(problem.domains[variable], lambda x: _count_conflicts(problem, assignment, variable, x))
[ "def", "_min_conflicts_value", "(", "problem", ",", "assignment", ",", "variable", ")", ":", "return", "argmin", "(", "problem", ".", "domains", "[", "variable", "]", ",", "lambda", "x", ":", "_count_conflicts", "(", "problem", ",", "assignment", ",", "variable", ",", "x", ")", ")" ]
Return the value generate the less number of conflicts. In case of tie, a random value is selected among this values subset.
[ "Return", "the", "value", "generate", "the", "less", "number", "of", "conflicts", ".", "In", "case", "of", "tie", "a", "random", "value", "is", "selected", "among", "this", "values", "subset", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L158-L163
train
236,831
simpleai-team/simpleai
simpleai/search/csp.py
min_conflicts
def min_conflicts(problem, initial_assignment=None, iterations_limit=0): """ Min conflicts search. initial_assignment the initial assignment, or None to generate a random one. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until if finds an assignment that doesn't generate conflicts (a solution). """ assignment = {} if initial_assignment: assignment.update(initial_assignment) else: for variable in problem.variables: value = _min_conflicts_value(problem, assignment, variable) assignment[variable] = value iteration = 0 run = True while run: conflicts = _find_conflicts(problem, assignment) conflict_variables = [v for v in problem.variables if any(v in conflict[0] for conflict in conflicts)] if conflict_variables: variable = random.choice(conflict_variables) value = _min_conflicts_value(problem, assignment, variable) assignment[variable] = value iteration += 1 if iterations_limit and iteration >= iterations_limit: run = False elif not _count_conflicts(problem, assignment): run = False return assignment
python
def min_conflicts(problem, initial_assignment=None, iterations_limit=0): """ Min conflicts search. initial_assignment the initial assignment, or None to generate a random one. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until if finds an assignment that doesn't generate conflicts (a solution). """ assignment = {} if initial_assignment: assignment.update(initial_assignment) else: for variable in problem.variables: value = _min_conflicts_value(problem, assignment, variable) assignment[variable] = value iteration = 0 run = True while run: conflicts = _find_conflicts(problem, assignment) conflict_variables = [v for v in problem.variables if any(v in conflict[0] for conflict in conflicts)] if conflict_variables: variable = random.choice(conflict_variables) value = _min_conflicts_value(problem, assignment, variable) assignment[variable] = value iteration += 1 if iterations_limit and iteration >= iterations_limit: run = False elif not _count_conflicts(problem, assignment): run = False return assignment
[ "def", "min_conflicts", "(", "problem", ",", "initial_assignment", "=", "None", ",", "iterations_limit", "=", "0", ")", ":", "assignment", "=", "{", "}", "if", "initial_assignment", ":", "assignment", ".", "update", "(", "initial_assignment", ")", "else", ":", "for", "variable", "in", "problem", ".", "variables", ":", "value", "=", "_min_conflicts_value", "(", "problem", ",", "assignment", ",", "variable", ")", "assignment", "[", "variable", "]", "=", "value", "iteration", "=", "0", "run", "=", "True", "while", "run", ":", "conflicts", "=", "_find_conflicts", "(", "problem", ",", "assignment", ")", "conflict_variables", "=", "[", "v", "for", "v", "in", "problem", ".", "variables", "if", "any", "(", "v", "in", "conflict", "[", "0", "]", "for", "conflict", "in", "conflicts", ")", "]", "if", "conflict_variables", ":", "variable", "=", "random", ".", "choice", "(", "conflict_variables", ")", "value", "=", "_min_conflicts_value", "(", "problem", ",", "assignment", ",", "variable", ")", "assignment", "[", "variable", "]", "=", "value", "iteration", "+=", "1", "if", "iterations_limit", "and", "iteration", ">=", "iterations_limit", ":", "run", "=", "False", "elif", "not", "_count_conflicts", "(", "problem", ",", "assignment", ")", ":", "run", "=", "False", "return", "assignment" ]
Min conflicts search. initial_assignment the initial assignment, or None to generate a random one. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until if finds an assignment that doesn't generate conflicts (a solution).
[ "Min", "conflicts", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L166-L204
train
236,832
simpleai-team/simpleai
simpleai/search/csp.py
convert_to_binary
def convert_to_binary(variables, domains, constraints): """ Returns new constraint list, all binary, using hidden variables. You can use it as previous step when creating a problem. """ def wdiff(vars_): def diff(variables, values): hidden, other = variables if hidden.startswith('hidden'): idx = vars_.index(other) return values[1] == values[0][idx] else: idx = vars_.index(hidden) return values[0] == values[1][idx] diff.no_wrap = True # so it's not wrapped to swap values return diff new_constraints = [] new_domains = copy(domains) new_variables = list(variables) last = 0 for vars_, const in constraints: if len(vars_) == 2: new_constraints.append((vars_, const)) continue hidden = 'hidden%d' % last new_variables.append(hidden) last += 1 new_domains[hidden] = [t for t in product(*map(domains.get, vars_)) if const(vars_, t)] for var in vars_: new_constraints.append(((hidden, var), wdiff(vars_))) return new_variables, new_domains, new_constraints
python
def convert_to_binary(variables, domains, constraints): """ Returns new constraint list, all binary, using hidden variables. You can use it as previous step when creating a problem. """ def wdiff(vars_): def diff(variables, values): hidden, other = variables if hidden.startswith('hidden'): idx = vars_.index(other) return values[1] == values[0][idx] else: idx = vars_.index(hidden) return values[0] == values[1][idx] diff.no_wrap = True # so it's not wrapped to swap values return diff new_constraints = [] new_domains = copy(domains) new_variables = list(variables) last = 0 for vars_, const in constraints: if len(vars_) == 2: new_constraints.append((vars_, const)) continue hidden = 'hidden%d' % last new_variables.append(hidden) last += 1 new_domains[hidden] = [t for t in product(*map(domains.get, vars_)) if const(vars_, t)] for var in vars_: new_constraints.append(((hidden, var), wdiff(vars_))) return new_variables, new_domains, new_constraints
[ "def", "convert_to_binary", "(", "variables", ",", "domains", ",", "constraints", ")", ":", "def", "wdiff", "(", "vars_", ")", ":", "def", "diff", "(", "variables", ",", "values", ")", ":", "hidden", ",", "other", "=", "variables", "if", "hidden", ".", "startswith", "(", "'hidden'", ")", ":", "idx", "=", "vars_", ".", "index", "(", "other", ")", "return", "values", "[", "1", "]", "==", "values", "[", "0", "]", "[", "idx", "]", "else", ":", "idx", "=", "vars_", ".", "index", "(", "hidden", ")", "return", "values", "[", "0", "]", "==", "values", "[", "1", "]", "[", "idx", "]", "diff", ".", "no_wrap", "=", "True", "# so it's not wrapped to swap values", "return", "diff", "new_constraints", "=", "[", "]", "new_domains", "=", "copy", "(", "domains", ")", "new_variables", "=", "list", "(", "variables", ")", "last", "=", "0", "for", "vars_", ",", "const", "in", "constraints", ":", "if", "len", "(", "vars_", ")", "==", "2", ":", "new_constraints", ".", "append", "(", "(", "vars_", ",", "const", ")", ")", "continue", "hidden", "=", "'hidden%d'", "%", "last", "new_variables", ".", "append", "(", "hidden", ")", "last", "+=", "1", "new_domains", "[", "hidden", "]", "=", "[", "t", "for", "t", "in", "product", "(", "*", "map", "(", "domains", ".", "get", ",", "vars_", ")", ")", "if", "const", "(", "vars_", ",", "t", ")", "]", "for", "var", "in", "vars_", ":", "new_constraints", ".", "append", "(", "(", "(", "hidden", ",", "var", ")", ",", "wdiff", "(", "vars_", ")", ")", ")", "return", "new_variables", ",", "new_domains", ",", "new_constraints" ]
Returns new constraint list, all binary, using hidden variables. You can use it as previous step when creating a problem.
[ "Returns", "new", "constraint", "list", "all", "binary", "using", "hidden", "variables", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/csp.py#L207-L242
train
236,833
simpleai-team/simpleai
simpleai/machine_learning/reinforcement_learning.py
boltzmann_exploration
def boltzmann_exploration(actions, utilities, temperature, action_counter): '''returns an action with a probability depending on utilities and temperature''' utilities = [utilities[x] for x in actions] temperature = max(temperature, 0.01) _max = max(utilities) _min = min(utilities) if _max == _min: return random.choice(actions) utilities = [math.exp(((u - _min) / (_max - _min)) / temperature) for u in utilities] probs = [u / sum(utilities) for u in utilities] i = 0 tot = probs[i] r = random.random() while i < len(actions) and r >= tot: i += 1 tot += probs[i] return actions[i]
python
def boltzmann_exploration(actions, utilities, temperature, action_counter): '''returns an action with a probability depending on utilities and temperature''' utilities = [utilities[x] for x in actions] temperature = max(temperature, 0.01) _max = max(utilities) _min = min(utilities) if _max == _min: return random.choice(actions) utilities = [math.exp(((u - _min) / (_max - _min)) / temperature) for u in utilities] probs = [u / sum(utilities) for u in utilities] i = 0 tot = probs[i] r = random.random() while i < len(actions) and r >= tot: i += 1 tot += probs[i] return actions[i]
[ "def", "boltzmann_exploration", "(", "actions", ",", "utilities", ",", "temperature", ",", "action_counter", ")", ":", "utilities", "=", "[", "utilities", "[", "x", "]", "for", "x", "in", "actions", "]", "temperature", "=", "max", "(", "temperature", ",", "0.01", ")", "_max", "=", "max", "(", "utilities", ")", "_min", "=", "min", "(", "utilities", ")", "if", "_max", "==", "_min", ":", "return", "random", ".", "choice", "(", "actions", ")", "utilities", "=", "[", "math", ".", "exp", "(", "(", "(", "u", "-", "_min", ")", "/", "(", "_max", "-", "_min", ")", ")", "/", "temperature", ")", "for", "u", "in", "utilities", "]", "probs", "=", "[", "u", "/", "sum", "(", "utilities", ")", "for", "u", "in", "utilities", "]", "i", "=", "0", "tot", "=", "probs", "[", "i", "]", "r", "=", "random", ".", "random", "(", ")", "while", "i", "<", "len", "(", "actions", ")", "and", "r", ">=", "tot", ":", "i", "+=", "1", "tot", "+=", "probs", "[", "i", "]", "return", "actions", "[", "i", "]" ]
returns an action with a probability depending on utilities and temperature
[ "returns", "an", "action", "with", "a", "probability", "depending", "on", "utilities", "and", "temperature" ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/reinforcement_learning.py#L28-L45
train
236,834
simpleai-team/simpleai
samples/search/sudoku.py
mkconstraints
def mkconstraints(): """ Make constraint list for binary constraint problem. """ constraints = [] for j in range(1, 10): vars = ["%s%d" % (i, j) for i in uppercase[:9]] constraints.extend((c, const_different) for c in combinations(vars, 2)) for i in uppercase[:9]: vars = ["%s%d" % (i, j) for j in range(1, 10)] constraints.extend((c, const_different) for c in combinations(vars, 2)) for b0 in ['ABC', 'DEF', 'GHI']: for b1 in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]: vars = ["%s%d" % (i, j) for i in b0 for j in b1] l = list((c, const_different) for c in combinations(vars, 2)) constraints.extend(l) return constraints
python
def mkconstraints(): """ Make constraint list for binary constraint problem. """ constraints = [] for j in range(1, 10): vars = ["%s%d" % (i, j) for i in uppercase[:9]] constraints.extend((c, const_different) for c in combinations(vars, 2)) for i in uppercase[:9]: vars = ["%s%d" % (i, j) for j in range(1, 10)] constraints.extend((c, const_different) for c in combinations(vars, 2)) for b0 in ['ABC', 'DEF', 'GHI']: for b1 in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]: vars = ["%s%d" % (i, j) for i in b0 for j in b1] l = list((c, const_different) for c in combinations(vars, 2)) constraints.extend(l) return constraints
[ "def", "mkconstraints", "(", ")", ":", "constraints", "=", "[", "]", "for", "j", "in", "range", "(", "1", ",", "10", ")", ":", "vars", "=", "[", "\"%s%d\"", "%", "(", "i", ",", "j", ")", "for", "i", "in", "uppercase", "[", ":", "9", "]", "]", "constraints", ".", "extend", "(", "(", "c", ",", "const_different", ")", "for", "c", "in", "combinations", "(", "vars", ",", "2", ")", ")", "for", "i", "in", "uppercase", "[", ":", "9", "]", ":", "vars", "=", "[", "\"%s%d\"", "%", "(", "i", ",", "j", ")", "for", "j", "in", "range", "(", "1", ",", "10", ")", "]", "constraints", ".", "extend", "(", "(", "c", ",", "const_different", ")", "for", "c", "in", "combinations", "(", "vars", ",", "2", ")", ")", "for", "b0", "in", "[", "'ABC'", ",", "'DEF'", ",", "'GHI'", "]", ":", "for", "b1", "in", "[", "[", "1", ",", "2", ",", "3", "]", ",", "[", "4", ",", "5", ",", "6", "]", ",", "[", "7", ",", "8", ",", "9", "]", "]", ":", "vars", "=", "[", "\"%s%d\"", "%", "(", "i", ",", "j", ")", "for", "i", "in", "b0", "for", "j", "in", "b1", "]", "l", "=", "list", "(", "(", "c", ",", "const_different", ")", "for", "c", "in", "combinations", "(", "vars", ",", "2", ")", ")", "constraints", ".", "extend", "(", "l", ")", "return", "constraints" ]
Make constraint list for binary constraint problem.
[ "Make", "constraint", "list", "for", "binary", "constraint", "problem", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/sudoku.py#L57-L77
train
236,835
simpleai-team/simpleai
simpleai/machine_learning/evaluation.py
precision
def precision(classifier, testset): """ Runs the classifier for each example in `testset` and verifies that the classification is correct using the `target`. Returns a number between 0.0 and 1.0 with the precision of classification for this test set. """ hit = 0 total = 0 for example in testset: if classifier.classify(example)[0] == classifier.target(example): hit += 1 total += 1 if total == 0: raise ValueError("Empty testset!") return hit / float(total)
python
def precision(classifier, testset): """ Runs the classifier for each example in `testset` and verifies that the classification is correct using the `target`. Returns a number between 0.0 and 1.0 with the precision of classification for this test set. """ hit = 0 total = 0 for example in testset: if classifier.classify(example)[0] == classifier.target(example): hit += 1 total += 1 if total == 0: raise ValueError("Empty testset!") return hit / float(total)
[ "def", "precision", "(", "classifier", ",", "testset", ")", ":", "hit", "=", "0", "total", "=", "0", "for", "example", "in", "testset", ":", "if", "classifier", ".", "classify", "(", "example", ")", "[", "0", "]", "==", "classifier", ".", "target", "(", "example", ")", ":", "hit", "+=", "1", "total", "+=", "1", "if", "total", "==", "0", ":", "raise", "ValueError", "(", "\"Empty testset!\"", ")", "return", "hit", "/", "float", "(", "total", ")" ]
Runs the classifier for each example in `testset` and verifies that the classification is correct using the `target`. Returns a number between 0.0 and 1.0 with the precision of classification for this test set.
[ "Runs", "the", "classifier", "for", "each", "example", "in", "testset", "and", "verifies", "that", "the", "classification", "is", "correct", "using", "the", "target", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/evaluation.py#L12-L30
train
236,836
simpleai-team/simpleai
simpleai/machine_learning/evaluation.py
kfold
def kfold(dataset, problem, method, k=10): """ Does a k-fold on `dataset` with `method`. This is, it randomly creates k-partitions of the dataset, and k-times trains the method with k-1 parts and runs it with the partition left. After all this, returns the overall success ratio. """ if k <= 1: raise ValueError("k argument must be at least 2") dataset = list(dataset) random.shuffle(dataset) trials = 0 positive = 0 for i in range(k): train = [x for j, x in enumerate(dataset) if j % k != i] test = [x for j, x in enumerate(dataset) if j % k == i] classifier = method(train, problem) for data in test: trials += 1 result = classifier.classify(data) if result is not None and result[0] == problem.target(data): positive += 1 return float(positive) / float(trials)
python
def kfold(dataset, problem, method, k=10): """ Does a k-fold on `dataset` with `method`. This is, it randomly creates k-partitions of the dataset, and k-times trains the method with k-1 parts and runs it with the partition left. After all this, returns the overall success ratio. """ if k <= 1: raise ValueError("k argument must be at least 2") dataset = list(dataset) random.shuffle(dataset) trials = 0 positive = 0 for i in range(k): train = [x for j, x in enumerate(dataset) if j % k != i] test = [x for j, x in enumerate(dataset) if j % k == i] classifier = method(train, problem) for data in test: trials += 1 result = classifier.classify(data) if result is not None and result[0] == problem.target(data): positive += 1 return float(positive) / float(trials)
[ "def", "kfold", "(", "dataset", ",", "problem", ",", "method", ",", "k", "=", "10", ")", ":", "if", "k", "<=", "1", ":", "raise", "ValueError", "(", "\"k argument must be at least 2\"", ")", "dataset", "=", "list", "(", "dataset", ")", "random", ".", "shuffle", "(", "dataset", ")", "trials", "=", "0", "positive", "=", "0", "for", "i", "in", "range", "(", "k", ")", ":", "train", "=", "[", "x", "for", "j", ",", "x", "in", "enumerate", "(", "dataset", ")", "if", "j", "%", "k", "!=", "i", "]", "test", "=", "[", "x", "for", "j", ",", "x", "in", "enumerate", "(", "dataset", ")", "if", "j", "%", "k", "==", "i", "]", "classifier", "=", "method", "(", "train", ",", "problem", ")", "for", "data", "in", "test", ":", "trials", "+=", "1", "result", "=", "classifier", ".", "classify", "(", "data", ")", "if", "result", "is", "not", "None", "and", "result", "[", "0", "]", "==", "problem", ".", "target", "(", "data", ")", ":", "positive", "+=", "1", "return", "float", "(", "positive", ")", "/", "float", "(", "trials", ")" ]
Does a k-fold on `dataset` with `method`. This is, it randomly creates k-partitions of the dataset, and k-times trains the method with k-1 parts and runs it with the partition left. After all this, returns the overall success ratio.
[ "Does", "a", "k", "-", "fold", "on", "dataset", "with", "method", ".", "This", "is", "it", "randomly", "creates", "k", "-", "partitions", "of", "the", "dataset", "and", "k", "-", "times", "trains", "the", "method", "with", "k", "-", "1", "parts", "and", "runs", "it", "with", "the", "partition", "left", ".", "After", "all", "this", "returns", "the", "overall", "success", "ratio", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/evaluation.py#L33-L59
train
236,837
simpleai-team/simpleai
samples/machine_learning/tic_tac_toe.py
TicTacToeProblem.actions
def actions(self, state): 'actions are index where we can make a move' actions = [] for index, char in enumerate(state): if char == '_': actions.append(index) return actions
python
def actions(self, state): 'actions are index where we can make a move' actions = [] for index, char in enumerate(state): if char == '_': actions.append(index) return actions
[ "def", "actions", "(", "self", ",", "state", ")", ":", "actions", "=", "[", "]", "for", "index", ",", "char", "in", "enumerate", "(", "state", ")", ":", "if", "char", "==", "'_'", ":", "actions", ".", "append", "(", "index", ")", "return", "actions" ]
actions are index where we can make a move
[ "actions", "are", "index", "where", "we", "can", "make", "a", "move" ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/machine_learning/tic_tac_toe.py#L14-L20
train
236,838
simpleai-team/simpleai
samples/search/missioners.py
MissionersProblem.actions
def actions(self, s): '''Possible actions from a state.''' # we try to generate every possible state and then filter those # states that are valid return [a for a in self._actions if self._is_valid(self.result(s, a))]
python
def actions(self, s): '''Possible actions from a state.''' # we try to generate every possible state and then filter those # states that are valid return [a for a in self._actions if self._is_valid(self.result(s, a))]
[ "def", "actions", "(", "self", ",", "s", ")", ":", "# we try to generate every possible state and then filter those", "# states that are valid", "return", "[", "a", "for", "a", "in", "self", ".", "_actions", "if", "self", ".", "_is_valid", "(", "self", ".", "result", "(", "s", ",", "a", ")", ")", "]" ]
Possible actions from a state.
[ "Possible", "actions", "from", "a", "state", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/missioners.py#L21-L25
train
236,839
simpleai-team/simpleai
samples/search/missioners.py
MissionersProblem._is_valid
def _is_valid(self, s): '''Check if a state is valid.''' # valid states: no more cannibals than missioners on each side, # and numbers between 0 and 3 return ((s[0] >= s[1] or s[0] == 0)) and \ ((3 - s[0]) >= (3 - s[1]) or s[0] == 3) and \ (0 <= s[0] <= 3) and \ (0 <= s[1] <= 3)
python
def _is_valid(self, s): '''Check if a state is valid.''' # valid states: no more cannibals than missioners on each side, # and numbers between 0 and 3 return ((s[0] >= s[1] or s[0] == 0)) and \ ((3 - s[0]) >= (3 - s[1]) or s[0] == 3) and \ (0 <= s[0] <= 3) and \ (0 <= s[1] <= 3)
[ "def", "_is_valid", "(", "self", ",", "s", ")", ":", "# valid states: no more cannibals than missioners on each side,", "# and numbers between 0 and 3", "return", "(", "(", "s", "[", "0", "]", ">=", "s", "[", "1", "]", "or", "s", "[", "0", "]", "==", "0", ")", ")", "and", "(", "(", "3", "-", "s", "[", "0", "]", ")", ">=", "(", "3", "-", "s", "[", "1", "]", ")", "or", "s", "[", "0", "]", "==", "3", ")", "and", "(", "0", "<=", "s", "[", "0", "]", "<=", "3", ")", "and", "(", "0", "<=", "s", "[", "1", "]", "<=", "3", ")" ]
Check if a state is valid.
[ "Check", "if", "a", "state", "is", "valid", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/missioners.py#L27-L34
train
236,840
simpleai-team/simpleai
samples/search/missioners.py
MissionersProblem.result
def result(self, s, a): '''Result of applying an action to a state.''' # result: boat on opposite side, and numbers of missioners and # cannibals updated according to the move if s[2] == 0: return (s[0] - a[1][0], s[1] - a[1][1], 1) else: return (s[0] + a[1][0], s[1] + a[1][1], 0)
python
def result(self, s, a): '''Result of applying an action to a state.''' # result: boat on opposite side, and numbers of missioners and # cannibals updated according to the move if s[2] == 0: return (s[0] - a[1][0], s[1] - a[1][1], 1) else: return (s[0] + a[1][0], s[1] + a[1][1], 0)
[ "def", "result", "(", "self", ",", "s", ",", "a", ")", ":", "# result: boat on opposite side, and numbers of missioners and", "# cannibals updated according to the move", "if", "s", "[", "2", "]", "==", "0", ":", "return", "(", "s", "[", "0", "]", "-", "a", "[", "1", "]", "[", "0", "]", ",", "s", "[", "1", "]", "-", "a", "[", "1", "]", "[", "1", "]", ",", "1", ")", "else", ":", "return", "(", "s", "[", "0", "]", "+", "a", "[", "1", "]", "[", "0", "]", ",", "s", "[", "1", "]", "+", "a", "[", "1", "]", "[", "1", "]", ",", "0", ")" ]
Result of applying an action to a state.
[ "Result", "of", "applying", "an", "action", "to", "a", "state", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/missioners.py#L36-L43
train
236,841
simpleai-team/simpleai
simpleai/search/arc.py
arc_consistency_3
def arc_consistency_3(domains, constraints): """ Makes a CSP problem arc consistent. Ignores any constraint that is not binary. """ arcs = list(all_arcs(constraints)) pending_arcs = set(arcs) while pending_arcs: x, y = pending_arcs.pop() if revise(domains, (x, y), constraints): if len(domains[x]) == 0: return False pending_arcs = pending_arcs.union((x2, y2) for x2, y2 in arcs if y2 == x) return True
python
def arc_consistency_3(domains, constraints): """ Makes a CSP problem arc consistent. Ignores any constraint that is not binary. """ arcs = list(all_arcs(constraints)) pending_arcs = set(arcs) while pending_arcs: x, y = pending_arcs.pop() if revise(domains, (x, y), constraints): if len(domains[x]) == 0: return False pending_arcs = pending_arcs.union((x2, y2) for x2, y2 in arcs if y2 == x) return True
[ "def", "arc_consistency_3", "(", "domains", ",", "constraints", ")", ":", "arcs", "=", "list", "(", "all_arcs", "(", "constraints", ")", ")", "pending_arcs", "=", "set", "(", "arcs", ")", "while", "pending_arcs", ":", "x", ",", "y", "=", "pending_arcs", ".", "pop", "(", ")", "if", "revise", "(", "domains", ",", "(", "x", ",", "y", ")", ",", "constraints", ")", ":", "if", "len", "(", "domains", "[", "x", "]", ")", "==", "0", ":", "return", "False", "pending_arcs", "=", "pending_arcs", ".", "union", "(", "(", "x2", ",", "y2", ")", "for", "x2", ",", "y2", "in", "arcs", "if", "y2", "==", "x", ")", "return", "True" ]
Makes a CSP problem arc consistent. Ignores any constraint that is not binary.
[ "Makes", "a", "CSP", "problem", "arc", "consistent", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/arc.py#L58-L74
train
236,842
simpleai-team/simpleai
simpleai/search/models.py
SearchNode.expand
def expand(self, local_search=False): '''Create successors.''' new_nodes = [] for action in self.problem.actions(self.state): new_state = self.problem.result(self.state, action) cost = self.problem.cost(self.state, action, new_state) nodefactory = self.__class__ new_nodes.append(nodefactory(state=new_state, parent=None if local_search else self, problem=self.problem, action=action, cost=self.cost + cost, depth=self.depth + 1)) return new_nodes
python
def expand(self, local_search=False): '''Create successors.''' new_nodes = [] for action in self.problem.actions(self.state): new_state = self.problem.result(self.state, action) cost = self.problem.cost(self.state, action, new_state) nodefactory = self.__class__ new_nodes.append(nodefactory(state=new_state, parent=None if local_search else self, problem=self.problem, action=action, cost=self.cost + cost, depth=self.depth + 1)) return new_nodes
[ "def", "expand", "(", "self", ",", "local_search", "=", "False", ")", ":", "new_nodes", "=", "[", "]", "for", "action", "in", "self", ".", "problem", ".", "actions", "(", "self", ".", "state", ")", ":", "new_state", "=", "self", ".", "problem", ".", "result", "(", "self", ".", "state", ",", "action", ")", "cost", "=", "self", ".", "problem", ".", "cost", "(", "self", ".", "state", ",", "action", ",", "new_state", ")", "nodefactory", "=", "self", ".", "__class__", "new_nodes", ".", "append", "(", "nodefactory", "(", "state", "=", "new_state", ",", "parent", "=", "None", "if", "local_search", "else", "self", ",", "problem", "=", "self", ".", "problem", ",", "action", "=", "action", ",", "cost", "=", "self", ".", "cost", "+", "cost", ",", "depth", "=", "self", ".", "depth", "+", "1", ")", ")", "return", "new_nodes" ]
Create successors.
[ "Create", "successors", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/models.py#L102-L117
train
236,843
simpleai-team/simpleai
simpleai/environments.py
Environment.step
def step(self, viewer=None): "This method evolves one step in time" if not self.is_completed(self.state): for agent in self.agents: action = agent.program(self.percept(agent, self.state)) next_state = self.do_action(self.state, action, agent) if viewer: viewer.event(self.state, action, next_state, agent) self.state = next_state if self.is_completed(self.state): return
python
def step(self, viewer=None): "This method evolves one step in time" if not self.is_completed(self.state): for agent in self.agents: action = agent.program(self.percept(agent, self.state)) next_state = self.do_action(self.state, action, agent) if viewer: viewer.event(self.state, action, next_state, agent) self.state = next_state if self.is_completed(self.state): return
[ "def", "step", "(", "self", ",", "viewer", "=", "None", ")", ":", "if", "not", "self", ".", "is_completed", "(", "self", ".", "state", ")", ":", "for", "agent", "in", "self", ".", "agents", ":", "action", "=", "agent", ".", "program", "(", "self", ".", "percept", "(", "agent", ",", "self", ".", "state", ")", ")", "next_state", "=", "self", ".", "do_action", "(", "self", ".", "state", ",", "action", ",", "agent", ")", "if", "viewer", ":", "viewer", ".", "event", "(", "self", ".", "state", ",", "action", ",", "next_state", ",", "agent", ")", "self", ".", "state", "=", "next_state", "if", "self", ".", "is_completed", "(", "self", ".", "state", ")", ":", "return" ]
This method evolves one step in time
[ "This", "method", "evolves", "one", "step", "in", "time" ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/environments.py#L18-L28
train
236,844
simpleai-team/simpleai
simpleai/search/traditional.py
breadth_first
def breadth_first(problem, graph_search=False, viewer=None): ''' Breadth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' return _search(problem, FifoList(), graph_search=graph_search, viewer=viewer)
python
def breadth_first(problem, graph_search=False, viewer=None): ''' Breadth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' return _search(problem, FifoList(), graph_search=graph_search, viewer=viewer)
[ "def", "breadth_first", "(", "problem", ",", "graph_search", "=", "False", ",", "viewer", "=", "None", ")", ":", "return", "_search", "(", "problem", ",", "FifoList", "(", ")", ",", "graph_search", "=", "graph_search", ",", "viewer", "=", "viewer", ")" ]
Breadth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal.
[ "Breadth", "first", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L8-L19
train
236,845
simpleai-team/simpleai
simpleai/search/traditional.py
depth_first
def depth_first(problem, graph_search=False, viewer=None): ''' Depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' return _search(problem, LifoList(), graph_search=graph_search, viewer=viewer)
python
def depth_first(problem, graph_search=False, viewer=None): ''' Depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' return _search(problem, LifoList(), graph_search=graph_search, viewer=viewer)
[ "def", "depth_first", "(", "problem", ",", "graph_search", "=", "False", ",", "viewer", "=", "None", ")", ":", "return", "_search", "(", "problem", ",", "LifoList", "(", ")", ",", "graph_search", "=", "graph_search", ",", "viewer", "=", "viewer", ")" ]
Depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal.
[ "Depth", "first", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L22-L33
train
236,846
simpleai-team/simpleai
simpleai/search/traditional.py
limited_depth_first
def limited_depth_first(problem, depth_limit, graph_search=False, viewer=None): ''' Limited depth first search. Depth_limit is the maximum depth allowed, being depth 0 the initial state. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' return _search(problem, LifoList(), graph_search=graph_search, depth_limit=depth_limit, viewer=viewer)
python
def limited_depth_first(problem, depth_limit, graph_search=False, viewer=None): ''' Limited depth first search. Depth_limit is the maximum depth allowed, being depth 0 the initial state. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' return _search(problem, LifoList(), graph_search=graph_search, depth_limit=depth_limit, viewer=viewer)
[ "def", "limited_depth_first", "(", "problem", ",", "depth_limit", ",", "graph_search", "=", "False", ",", "viewer", "=", "None", ")", ":", "return", "_search", "(", "problem", ",", "LifoList", "(", ")", ",", "graph_search", "=", "graph_search", ",", "depth_limit", "=", "depth_limit", ",", "viewer", "=", "viewer", ")" ]
Limited depth first search. Depth_limit is the maximum depth allowed, being depth 0 the initial state. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal.
[ "Limited", "depth", "first", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L36-L49
train
236,847
simpleai-team/simpleai
simpleai/search/traditional.py
iterative_limited_depth_first
def iterative_limited_depth_first(problem, graph_search=False, viewer=None): ''' Iterative limited depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' solution = None limit = 0 while not solution: solution = limited_depth_first(problem, depth_limit=limit, graph_search=graph_search, viewer=viewer) limit += 1 if viewer: viewer.event('no_more_runs', solution, 'returned after %i runs' % limit) return solution
python
def iterative_limited_depth_first(problem, graph_search=False, viewer=None): ''' Iterative limited depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal. ''' solution = None limit = 0 while not solution: solution = limited_depth_first(problem, depth_limit=limit, graph_search=graph_search, viewer=viewer) limit += 1 if viewer: viewer.event('no_more_runs', solution, 'returned after %i runs' % limit) return solution
[ "def", "iterative_limited_depth_first", "(", "problem", ",", "graph_search", "=", "False", ",", "viewer", "=", "None", ")", ":", "solution", "=", "None", "limit", "=", "0", "while", "not", "solution", ":", "solution", "=", "limited_depth_first", "(", "problem", ",", "depth_limit", "=", "limit", ",", "graph_search", "=", "graph_search", ",", "viewer", "=", "viewer", ")", "limit", "+=", "1", "if", "viewer", ":", "viewer", ".", "event", "(", "'no_more_runs'", ",", "solution", ",", "'returned after %i runs'", "%", "limit", ")", "return", "solution" ]
Iterative limited depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal.
[ "Iterative", "limited", "depth", "first", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L52-L73
train
236,848
simpleai-team/simpleai
simpleai/search/traditional.py
uniform_cost
def uniform_cost(problem, graph_search=False, viewer=None): ''' Uniform cost search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, and SearchProblem.cost. ''' return _search(problem, BoundedPriorityQueue(), graph_search=graph_search, node_factory=SearchNodeCostOrdered, graph_replace_when_better=True, viewer=viewer)
python
def uniform_cost(problem, graph_search=False, viewer=None): ''' Uniform cost search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, and SearchProblem.cost. ''' return _search(problem, BoundedPriorityQueue(), graph_search=graph_search, node_factory=SearchNodeCostOrdered, graph_replace_when_better=True, viewer=viewer)
[ "def", "uniform_cost", "(", "problem", ",", "graph_search", "=", "False", ",", "viewer", "=", "None", ")", ":", "return", "_search", "(", "problem", ",", "BoundedPriorityQueue", "(", ")", ",", "graph_search", "=", "graph_search", ",", "node_factory", "=", "SearchNodeCostOrdered", ",", "graph_replace_when_better", "=", "True", ",", "viewer", "=", "viewer", ")" ]
Uniform cost search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, and SearchProblem.cost.
[ "Uniform", "cost", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L76-L89
train
236,849
simpleai-team/simpleai
simpleai/search/traditional.py
greedy
def greedy(problem, graph_search=False, viewer=None): ''' Greedy search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic. ''' return _search(problem, BoundedPriorityQueue(), graph_search=graph_search, node_factory=SearchNodeHeuristicOrdered, graph_replace_when_better=True, viewer=viewer)
python
def greedy(problem, graph_search=False, viewer=None): ''' Greedy search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic. ''' return _search(problem, BoundedPriorityQueue(), graph_search=graph_search, node_factory=SearchNodeHeuristicOrdered, graph_replace_when_better=True, viewer=viewer)
[ "def", "greedy", "(", "problem", ",", "graph_search", "=", "False", ",", "viewer", "=", "None", ")", ":", "return", "_search", "(", "problem", ",", "BoundedPriorityQueue", "(", ")", ",", "graph_search", "=", "graph_search", ",", "node_factory", "=", "SearchNodeHeuristicOrdered", ",", "graph_replace_when_better", "=", "True", ",", "viewer", "=", "viewer", ")" ]
Greedy search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic.
[ "Greedy", "search", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L92-L105
train
236,850
simpleai-team/simpleai
simpleai/search/traditional.py
_search
def _search(problem, fringe, graph_search=False, depth_limit=None, node_factory=SearchNode, graph_replace_when_better=False, viewer=None): ''' Basic search algorithm, base of all the other search algorithms. ''' if viewer: viewer.event('started') memory = set() initial_node = node_factory(state=problem.initial_state, problem=problem) fringe.append(initial_node) while fringe: if viewer: viewer.event('new_iteration', fringe.sorted()) node = fringe.pop() if problem.is_goal(node.state): if viewer: viewer.event('chosen_node', node, True) viewer.event('finished', fringe.sorted(), node, 'goal found') return node else: if viewer: viewer.event('chosen_node', node, False) memory.add(node.state) if depth_limit is None or node.depth < depth_limit: expanded = node.expand() if viewer: viewer.event('expanded', [node], [expanded]) for n in expanded: if graph_search: others = [x for x in fringe if x.state == n.state] assert len(others) in (0, 1) if n.state not in memory and len(others) == 0: fringe.append(n) elif graph_replace_when_better and len(others) > 0 and n < others[0]: fringe.remove(others[0]) fringe.append(n) else: fringe.append(n) if viewer: viewer.event('finished', fringe.sorted(), None, 'goal not found')
python
def _search(problem, fringe, graph_search=False, depth_limit=None, node_factory=SearchNode, graph_replace_when_better=False, viewer=None): ''' Basic search algorithm, base of all the other search algorithms. ''' if viewer: viewer.event('started') memory = set() initial_node = node_factory(state=problem.initial_state, problem=problem) fringe.append(initial_node) while fringe: if viewer: viewer.event('new_iteration', fringe.sorted()) node = fringe.pop() if problem.is_goal(node.state): if viewer: viewer.event('chosen_node', node, True) viewer.event('finished', fringe.sorted(), node, 'goal found') return node else: if viewer: viewer.event('chosen_node', node, False) memory.add(node.state) if depth_limit is None or node.depth < depth_limit: expanded = node.expand() if viewer: viewer.event('expanded', [node], [expanded]) for n in expanded: if graph_search: others = [x for x in fringe if x.state == n.state] assert len(others) in (0, 1) if n.state not in memory and len(others) == 0: fringe.append(n) elif graph_replace_when_better and len(others) > 0 and n < others[0]: fringe.remove(others[0]) fringe.append(n) else: fringe.append(n) if viewer: viewer.event('finished', fringe.sorted(), None, 'goal not found')
[ "def", "_search", "(", "problem", ",", "fringe", ",", "graph_search", "=", "False", ",", "depth_limit", "=", "None", ",", "node_factory", "=", "SearchNode", ",", "graph_replace_when_better", "=", "False", ",", "viewer", "=", "None", ")", ":", "if", "viewer", ":", "viewer", ".", "event", "(", "'started'", ")", "memory", "=", "set", "(", ")", "initial_node", "=", "node_factory", "(", "state", "=", "problem", ".", "initial_state", ",", "problem", "=", "problem", ")", "fringe", ".", "append", "(", "initial_node", ")", "while", "fringe", ":", "if", "viewer", ":", "viewer", ".", "event", "(", "'new_iteration'", ",", "fringe", ".", "sorted", "(", ")", ")", "node", "=", "fringe", ".", "pop", "(", ")", "if", "problem", ".", "is_goal", "(", "node", ".", "state", ")", ":", "if", "viewer", ":", "viewer", ".", "event", "(", "'chosen_node'", ",", "node", ",", "True", ")", "viewer", ".", "event", "(", "'finished'", ",", "fringe", ".", "sorted", "(", ")", ",", "node", ",", "'goal found'", ")", "return", "node", "else", ":", "if", "viewer", ":", "viewer", ".", "event", "(", "'chosen_node'", ",", "node", ",", "False", ")", "memory", ".", "add", "(", "node", ".", "state", ")", "if", "depth_limit", "is", "None", "or", "node", ".", "depth", "<", "depth_limit", ":", "expanded", "=", "node", ".", "expand", "(", ")", "if", "viewer", ":", "viewer", ".", "event", "(", "'expanded'", ",", "[", "node", "]", ",", "[", "expanded", "]", ")", "for", "n", "in", "expanded", ":", "if", "graph_search", ":", "others", "=", "[", "x", "for", "x", "in", "fringe", "if", "x", ".", "state", "==", "n", ".", "state", "]", "assert", "len", "(", "others", ")", "in", "(", "0", ",", "1", ")", "if", "n", ".", "state", "not", "in", "memory", "and", "len", "(", "others", ")", "==", "0", ":", "fringe", ".", "append", "(", "n", ")", "elif", "graph_replace_when_better", "and", "len", "(", "others", ")", ">", "0", "and", "n", "<", "others", "[", "0", "]", ":", "fringe", ".", "remove", "(", "others", "[", "0", "]", ")", 
"fringe", ".", "append", "(", "n", ")", "else", ":", "fringe", ".", "append", "(", "n", ")", "if", "viewer", ":", "viewer", ".", "event", "(", "'finished'", ",", "fringe", ".", "sorted", "(", ")", ",", "None", ",", "'goal not found'", ")" ]
Basic search algorithm, base of all the other search algorithms.
[ "Basic", "search", "algorithm", "base", "of", "all", "the", "other", "search", "algorithms", "." ]
2836befa7e970013f62e0ee75562652aacac6f65
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L124-L173
train
236,851
andosa/treeinterpreter
treeinterpreter/treeinterpreter.py
_get_tree_paths
def _get_tree_paths(tree, node_id, depth=0): """ Returns all paths through the tree as list of node_ids """ if node_id == _tree.TREE_LEAF: raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) left_child = tree.children_left[node_id] right_child = tree.children_right[node_id] if left_child != _tree.TREE_LEAF: left_paths = _get_tree_paths(tree, left_child, depth=depth + 1) right_paths = _get_tree_paths(tree, right_child, depth=depth + 1) for path in left_paths: path.append(node_id) for path in right_paths: path.append(node_id) paths = left_paths + right_paths else: paths = [[node_id]] return paths
python
def _get_tree_paths(tree, node_id, depth=0): """ Returns all paths through the tree as list of node_ids """ if node_id == _tree.TREE_LEAF: raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) left_child = tree.children_left[node_id] right_child = tree.children_right[node_id] if left_child != _tree.TREE_LEAF: left_paths = _get_tree_paths(tree, left_child, depth=depth + 1) right_paths = _get_tree_paths(tree, right_child, depth=depth + 1) for path in left_paths: path.append(node_id) for path in right_paths: path.append(node_id) paths = left_paths + right_paths else: paths = [[node_id]] return paths
[ "def", "_get_tree_paths", "(", "tree", ",", "node_id", ",", "depth", "=", "0", ")", ":", "if", "node_id", "==", "_tree", ".", "TREE_LEAF", ":", "raise", "ValueError", "(", "\"Invalid node_id %s\"", "%", "_tree", ".", "TREE_LEAF", ")", "left_child", "=", "tree", ".", "children_left", "[", "node_id", "]", "right_child", "=", "tree", ".", "children_right", "[", "node_id", "]", "if", "left_child", "!=", "_tree", ".", "TREE_LEAF", ":", "left_paths", "=", "_get_tree_paths", "(", "tree", ",", "left_child", ",", "depth", "=", "depth", "+", "1", ")", "right_paths", "=", "_get_tree_paths", "(", "tree", ",", "right_child", ",", "depth", "=", "depth", "+", "1", ")", "for", "path", "in", "left_paths", ":", "path", ".", "append", "(", "node_id", ")", "for", "path", "in", "right_paths", ":", "path", ".", "append", "(", "node_id", ")", "paths", "=", "left_paths", "+", "right_paths", "else", ":", "paths", "=", "[", "[", "node_id", "]", "]", "return", "paths" ]
Returns all paths through the tree as list of node_ids
[ "Returns", "all", "paths", "through", "the", "tree", "as", "list", "of", "node_ids" ]
c4294ad6ad74ea574ca41aa81b6f7f1545da0186
https://github.com/andosa/treeinterpreter/blob/c4294ad6ad74ea574ca41aa81b6f7f1545da0186/treeinterpreter/treeinterpreter.py#L12-L33
train
236,852
csurfer/rake-nltk
rake_nltk/rake.py
Rake.extract_keywords_from_text
def extract_keywords_from_text(self, text): """Method to extract keywords from the text provided. :param text: Text to extract keywords from, provided as a string. """ sentences = nltk.tokenize.sent_tokenize(text) self.extract_keywords_from_sentences(sentences)
python
def extract_keywords_from_text(self, text): """Method to extract keywords from the text provided. :param text: Text to extract keywords from, provided as a string. """ sentences = nltk.tokenize.sent_tokenize(text) self.extract_keywords_from_sentences(sentences)
[ "def", "extract_keywords_from_text", "(", "self", ",", "text", ")", ":", "sentences", "=", "nltk", ".", "tokenize", ".", "sent_tokenize", "(", "text", ")", "self", ".", "extract_keywords_from_sentences", "(", "sentences", ")" ]
Method to extract keywords from the text provided. :param text: Text to extract keywords from, provided as a string.
[ "Method", "to", "extract", "keywords", "from", "the", "text", "provided", "." ]
e36116d6074c5ddfbc69bce4440f0342355ceb2e
https://github.com/csurfer/rake-nltk/blob/e36116d6074c5ddfbc69bce4440f0342355ceb2e/rake_nltk/rake.py#L76-L82
train
236,853
csurfer/rake-nltk
rake_nltk/rake.py
Rake.extract_keywords_from_sentences
def extract_keywords_from_sentences(self, sentences): """Method to extract keywords from the list of sentences provided. :param sentences: Text to extraxt keywords from, provided as a list of strings, where each string is a sentence. """ phrase_list = self._generate_phrases(sentences) self._build_frequency_dist(phrase_list) self._build_word_co_occurance_graph(phrase_list) self._build_ranklist(phrase_list)
python
def extract_keywords_from_sentences(self, sentences): """Method to extract keywords from the list of sentences provided. :param sentences: Text to extraxt keywords from, provided as a list of strings, where each string is a sentence. """ phrase_list = self._generate_phrases(sentences) self._build_frequency_dist(phrase_list) self._build_word_co_occurance_graph(phrase_list) self._build_ranklist(phrase_list)
[ "def", "extract_keywords_from_sentences", "(", "self", ",", "sentences", ")", ":", "phrase_list", "=", "self", ".", "_generate_phrases", "(", "sentences", ")", "self", ".", "_build_frequency_dist", "(", "phrase_list", ")", "self", ".", "_build_word_co_occurance_graph", "(", "phrase_list", ")", "self", ".", "_build_ranklist", "(", "phrase_list", ")" ]
Method to extract keywords from the list of sentences provided. :param sentences: Text to extraxt keywords from, provided as a list of strings, where each string is a sentence.
[ "Method", "to", "extract", "keywords", "from", "the", "list", "of", "sentences", "provided", "." ]
e36116d6074c5ddfbc69bce4440f0342355ceb2e
https://github.com/csurfer/rake-nltk/blob/e36116d6074c5ddfbc69bce4440f0342355ceb2e/rake_nltk/rake.py#L84-L93
train
236,854
csurfer/rake-nltk
rake_nltk/rake.py
Rake._build_word_co_occurance_graph
def _build_word_co_occurance_graph(self, phrase_list): """Builds the co-occurance graph of words in the given body of text to compute degree of each word. :param phrase_list: List of List of strings where each sublist is a collection of words which form a contender phrase. """ co_occurance_graph = defaultdict(lambda: defaultdict(lambda: 0)) for phrase in phrase_list: # For each phrase in the phrase list, count co-occurances of the # word with other words in the phrase. # # Note: Keep the co-occurances graph as is, to help facilitate its # use in other creative ways if required later. for (word, coword) in product(phrase, phrase): co_occurance_graph[word][coword] += 1 self.degree = defaultdict(lambda: 0) for key in co_occurance_graph: self.degree[key] = sum(co_occurance_graph[key].values())
python
def _build_word_co_occurance_graph(self, phrase_list): """Builds the co-occurance graph of words in the given body of text to compute degree of each word. :param phrase_list: List of List of strings where each sublist is a collection of words which form a contender phrase. """ co_occurance_graph = defaultdict(lambda: defaultdict(lambda: 0)) for phrase in phrase_list: # For each phrase in the phrase list, count co-occurances of the # word with other words in the phrase. # # Note: Keep the co-occurances graph as is, to help facilitate its # use in other creative ways if required later. for (word, coword) in product(phrase, phrase): co_occurance_graph[word][coword] += 1 self.degree = defaultdict(lambda: 0) for key in co_occurance_graph: self.degree[key] = sum(co_occurance_graph[key].values())
[ "def", "_build_word_co_occurance_graph", "(", "self", ",", "phrase_list", ")", ":", "co_occurance_graph", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "lambda", ":", "0", ")", ")", "for", "phrase", "in", "phrase_list", ":", "# For each phrase in the phrase list, count co-occurances of the", "# word with other words in the phrase.", "#", "# Note: Keep the co-occurances graph as is, to help facilitate its", "# use in other creative ways if required later.", "for", "(", "word", ",", "coword", ")", "in", "product", "(", "phrase", ",", "phrase", ")", ":", "co_occurance_graph", "[", "word", "]", "[", "coword", "]", "+=", "1", "self", ".", "degree", "=", "defaultdict", "(", "lambda", ":", "0", ")", "for", "key", "in", "co_occurance_graph", ":", "self", ".", "degree", "[", "key", "]", "=", "sum", "(", "co_occurance_graph", "[", "key", "]", ".", "values", "(", ")", ")" ]
Builds the co-occurance graph of words in the given body of text to compute degree of each word. :param phrase_list: List of List of strings where each sublist is a collection of words which form a contender phrase.
[ "Builds", "the", "co", "-", "occurance", "graph", "of", "words", "in", "the", "given", "body", "of", "text", "to", "compute", "degree", "of", "each", "word", "." ]
e36116d6074c5ddfbc69bce4440f0342355ceb2e
https://github.com/csurfer/rake-nltk/blob/e36116d6074c5ddfbc69bce4440f0342355ceb2e/rake_nltk/rake.py#L135-L153
train
236,855
csurfer/rake-nltk
rake_nltk/rake.py
Rake._build_ranklist
def _build_ranklist(self, phrase_list): """Method to rank each contender phrase using the formula phrase_score = sum of scores of words in the phrase. word_score = d(w)/f(w) where d is degree and f is frequency. :param phrase_list: List of List of strings where each sublist is a collection of words which form a contender phrase. """ self.rank_list = [] for phrase in phrase_list: rank = 0.0 for word in phrase: if self.metric == Metric.DEGREE_TO_FREQUENCY_RATIO: rank += 1.0 * self.degree[word] / self.frequency_dist[word] elif self.metric == Metric.WORD_DEGREE: rank += 1.0 * self.degree[word] else: rank += 1.0 * self.frequency_dist[word] self.rank_list.append((rank, " ".join(phrase))) self.rank_list.sort(reverse=True) self.ranked_phrases = [ph[1] for ph in self.rank_list]
python
def _build_ranklist(self, phrase_list): """Method to rank each contender phrase using the formula phrase_score = sum of scores of words in the phrase. word_score = d(w)/f(w) where d is degree and f is frequency. :param phrase_list: List of List of strings where each sublist is a collection of words which form a contender phrase. """ self.rank_list = [] for phrase in phrase_list: rank = 0.0 for word in phrase: if self.metric == Metric.DEGREE_TO_FREQUENCY_RATIO: rank += 1.0 * self.degree[word] / self.frequency_dist[word] elif self.metric == Metric.WORD_DEGREE: rank += 1.0 * self.degree[word] else: rank += 1.0 * self.frequency_dist[word] self.rank_list.append((rank, " ".join(phrase))) self.rank_list.sort(reverse=True) self.ranked_phrases = [ph[1] for ph in self.rank_list]
[ "def", "_build_ranklist", "(", "self", ",", "phrase_list", ")", ":", "self", ".", "rank_list", "=", "[", "]", "for", "phrase", "in", "phrase_list", ":", "rank", "=", "0.0", "for", "word", "in", "phrase", ":", "if", "self", ".", "metric", "==", "Metric", ".", "DEGREE_TO_FREQUENCY_RATIO", ":", "rank", "+=", "1.0", "*", "self", ".", "degree", "[", "word", "]", "/", "self", ".", "frequency_dist", "[", "word", "]", "elif", "self", ".", "metric", "==", "Metric", ".", "WORD_DEGREE", ":", "rank", "+=", "1.0", "*", "self", ".", "degree", "[", "word", "]", "else", ":", "rank", "+=", "1.0", "*", "self", ".", "frequency_dist", "[", "word", "]", "self", ".", "rank_list", ".", "append", "(", "(", "rank", ",", "\" \"", ".", "join", "(", "phrase", ")", ")", ")", "self", ".", "rank_list", ".", "sort", "(", "reverse", "=", "True", ")", "self", ".", "ranked_phrases", "=", "[", "ph", "[", "1", "]", "for", "ph", "in", "self", ".", "rank_list", "]" ]
Method to rank each contender phrase using the formula phrase_score = sum of scores of words in the phrase. word_score = d(w)/f(w) where d is degree and f is frequency. :param phrase_list: List of List of strings where each sublist is a collection of words which form a contender phrase.
[ "Method", "to", "rank", "each", "contender", "phrase", "using", "the", "formula" ]
e36116d6074c5ddfbc69bce4440f0342355ceb2e
https://github.com/csurfer/rake-nltk/blob/e36116d6074c5ddfbc69bce4440f0342355ceb2e/rake_nltk/rake.py#L155-L176
train
236,856
csurfer/rake-nltk
rake_nltk/rake.py
Rake._generate_phrases
def _generate_phrases(self, sentences): """Method to generate contender phrases given the sentences of the text document. :param sentences: List of strings where each string represents a sentence which forms the text. :return: Set of string tuples where each tuple is a collection of words forming a contender phrase. """ phrase_list = set() # Create contender phrases from sentences. for sentence in sentences: word_list = [word.lower() for word in wordpunct_tokenize(sentence)] phrase_list.update(self._get_phrase_list_from_words(word_list)) return phrase_list
python
def _generate_phrases(self, sentences): """Method to generate contender phrases given the sentences of the text document. :param sentences: List of strings where each string represents a sentence which forms the text. :return: Set of string tuples where each tuple is a collection of words forming a contender phrase. """ phrase_list = set() # Create contender phrases from sentences. for sentence in sentences: word_list = [word.lower() for word in wordpunct_tokenize(sentence)] phrase_list.update(self._get_phrase_list_from_words(word_list)) return phrase_list
[ "def", "_generate_phrases", "(", "self", ",", "sentences", ")", ":", "phrase_list", "=", "set", "(", ")", "# Create contender phrases from sentences.", "for", "sentence", "in", "sentences", ":", "word_list", "=", "[", "word", ".", "lower", "(", ")", "for", "word", "in", "wordpunct_tokenize", "(", "sentence", ")", "]", "phrase_list", ".", "update", "(", "self", ".", "_get_phrase_list_from_words", "(", "word_list", ")", ")", "return", "phrase_list" ]
Method to generate contender phrases given the sentences of the text document. :param sentences: List of strings where each string represents a sentence which forms the text. :return: Set of string tuples where each tuple is a collection of words forming a contender phrase.
[ "Method", "to", "generate", "contender", "phrases", "given", "the", "sentences", "of", "the", "text", "document", "." ]
e36116d6074c5ddfbc69bce4440f0342355ceb2e
https://github.com/csurfer/rake-nltk/blob/e36116d6074c5ddfbc69bce4440f0342355ceb2e/rake_nltk/rake.py#L178-L192
train
236,857
HazyResearch/pdftotree
pdftotree/utils/pdf/grid.py
_retain_centroids
def _retain_centroids(numbers, thres): """Only keep one number for each cluster within thres of each other""" numbers.sort() prev = -1 ret = [] for n in numbers: if prev < 0 or n - prev > thres: ret.append(n) prev = n return ret
python
def _retain_centroids(numbers, thres): """Only keep one number for each cluster within thres of each other""" numbers.sort() prev = -1 ret = [] for n in numbers: if prev < 0 or n - prev > thres: ret.append(n) prev = n return ret
[ "def", "_retain_centroids", "(", "numbers", ",", "thres", ")", ":", "numbers", ".", "sort", "(", ")", "prev", "=", "-", "1", "ret", "=", "[", "]", "for", "n", "in", "numbers", ":", "if", "prev", "<", "0", "or", "n", "-", "prev", ">", "thres", ":", "ret", ".", "append", "(", "n", ")", "prev", "=", "n", "return", "ret" ]
Only keep one number for each cluster within thres of each other
[ "Only", "keep", "one", "number", "for", "each", "cluster", "within", "thres", "of", "each", "other" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L178-L187
train
236,858
HazyResearch/pdftotree
pdftotree/utils/pdf/grid.py
_split_vlines_hlines
def _split_vlines_hlines(lines): """Separates lines into horizontal and vertical ones""" vlines, hlines = [], [] for line in lines: (vlines if line.x1 - line.x0 < 0.1 else hlines).append(line) return vlines, hlines
python
def _split_vlines_hlines(lines): """Separates lines into horizontal and vertical ones""" vlines, hlines = [], [] for line in lines: (vlines if line.x1 - line.x0 < 0.1 else hlines).append(line) return vlines, hlines
[ "def", "_split_vlines_hlines", "(", "lines", ")", ":", "vlines", ",", "hlines", "=", "[", "]", ",", "[", "]", "for", "line", "in", "lines", ":", "(", "vlines", "if", "line", ".", "x1", "-", "line", ".", "x0", "<", "0.1", "else", "hlines", ")", ".", "append", "(", "line", ")", "return", "vlines", ",", "hlines" ]
Separates lines into horizontal and vertical ones
[ "Separates", "lines", "into", "horizontal", "and", "vertical", "ones" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L190-L195
train
236,859
HazyResearch/pdftotree
pdftotree/utils/pdf/grid.py
_npiter
def _npiter(arr): """Wrapper for iterating numpy array""" for a in np.nditer(arr, flags=["refs_ok"]): c = a.item() if c is not None: yield c
python
def _npiter(arr): """Wrapper for iterating numpy array""" for a in np.nditer(arr, flags=["refs_ok"]): c = a.item() if c is not None: yield c
[ "def", "_npiter", "(", "arr", ")", ":", "for", "a", "in", "np", ".", "nditer", "(", "arr", ",", "flags", "=", "[", "\"refs_ok\"", "]", ")", ":", "c", "=", "a", ".", "item", "(", ")", "if", "c", "is", "not", "None", ":", "yield", "c" ]
Wrapper for iterating numpy array
[ "Wrapper", "for", "iterating", "numpy", "array" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L198-L203
train
236,860
HazyResearch/pdftotree
pdftotree/utils/pdf/grid.py
Grid.get_normalized_grid
def get_normalized_grid(self): """ Analyzes subcell structure """ log = logging.getLogger(__name__) # Resolve multirow mentions, TODO: validate against all PDFs # subcol_count = 0 mega_rows = [] for row_id, row in enumerate(self._grid): # maps yc_grid -> [mentions] subrow_across_cell = defaultdict(list) for col_id, cell in enumerate(row): # Keep cell text in reading order cell.texts.sort(key=cmp_to_key(reading_order)) log.debug("=" * 50) for m in cell.texts: subrow_across_cell[m.yc_grid].append(m) # prev = m log.debug(pformat(dict(subrow_across_cell))) mega_rows.append(subrow_across_cell) # Multiline paragraph check # Subrow/Subcolumn return mega_rows
python
def get_normalized_grid(self): """ Analyzes subcell structure """ log = logging.getLogger(__name__) # Resolve multirow mentions, TODO: validate against all PDFs # subcol_count = 0 mega_rows = [] for row_id, row in enumerate(self._grid): # maps yc_grid -> [mentions] subrow_across_cell = defaultdict(list) for col_id, cell in enumerate(row): # Keep cell text in reading order cell.texts.sort(key=cmp_to_key(reading_order)) log.debug("=" * 50) for m in cell.texts: subrow_across_cell[m.yc_grid].append(m) # prev = m log.debug(pformat(dict(subrow_across_cell))) mega_rows.append(subrow_across_cell) # Multiline paragraph check # Subrow/Subcolumn return mega_rows
[ "def", "get_normalized_grid", "(", "self", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# Resolve multirow mentions, TODO: validate against all PDFs", "# subcol_count = 0", "mega_rows", "=", "[", "]", "for", "row_id", ",", "row", "in", "enumerate", "(", "self", ".", "_grid", ")", ":", "# maps yc_grid -> [mentions]", "subrow_across_cell", "=", "defaultdict", "(", "list", ")", "for", "col_id", ",", "cell", "in", "enumerate", "(", "row", ")", ":", "# Keep cell text in reading order", "cell", ".", "texts", ".", "sort", "(", "key", "=", "cmp_to_key", "(", "reading_order", ")", ")", "log", ".", "debug", "(", "\"=\"", "*", "50", ")", "for", "m", "in", "cell", ".", "texts", ":", "subrow_across_cell", "[", "m", ".", "yc_grid", "]", ".", "append", "(", "m", ")", "# prev = m", "log", ".", "debug", "(", "pformat", "(", "dict", "(", "subrow_across_cell", ")", ")", ")", "mega_rows", ".", "append", "(", "subrow_across_cell", ")", "# Multiline paragraph check", "# Subrow/Subcolumn", "return", "mega_rows" ]
Analyzes subcell structure
[ "Analyzes", "subcell", "structure" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L118-L144
train
236,861
HazyResearch/pdftotree
pdftotree/utils/pdf/grid.py
Grid._mark_grid_bounds
def _mark_grid_bounds(self, plane, region_bbox): """ Assume all lines define a complete grid over the region_bbox. Detect which lines are missing so that we can recover merged cells. """ # Grid boundaries vbars = np.zeros([self.num_rows, self.num_cols + 1], dtype=np.bool) hbars = np.zeros([self.num_rows + 1, self.num_cols], dtype=np.bool) def closest_idx(arr, elem): left = bisect.bisect_left(arr, elem) - 1 right = bisect.bisect_right(arr, elem) - 1 return left if abs(arr[left] - elem) < abs(arr[right] - elem) else right # Figure out which separating segments are missing, i.e. merge cells for row, (y0, y1) in enumerate(self.yranges): yc = (y0 + y1) // 2 for l in plane.find((region_bbox.x0, yc, region_bbox.x1, yc)): vbars[row, closest_idx(self.xs, l.xc)] = True for col, (x0, x1) in enumerate(self.xranges): xc = (x0 + x1) // 2 for l in plane.find((xc, region_bbox.y0, xc, region_bbox.y1)): hbars[closest_idx(self.ys, l.yc), col] = True return vbars, hbars
python
def _mark_grid_bounds(self, plane, region_bbox): """ Assume all lines define a complete grid over the region_bbox. Detect which lines are missing so that we can recover merged cells. """ # Grid boundaries vbars = np.zeros([self.num_rows, self.num_cols + 1], dtype=np.bool) hbars = np.zeros([self.num_rows + 1, self.num_cols], dtype=np.bool) def closest_idx(arr, elem): left = bisect.bisect_left(arr, elem) - 1 right = bisect.bisect_right(arr, elem) - 1 return left if abs(arr[left] - elem) < abs(arr[right] - elem) else right # Figure out which separating segments are missing, i.e. merge cells for row, (y0, y1) in enumerate(self.yranges): yc = (y0 + y1) // 2 for l in plane.find((region_bbox.x0, yc, region_bbox.x1, yc)): vbars[row, closest_idx(self.xs, l.xc)] = True for col, (x0, x1) in enumerate(self.xranges): xc = (x0 + x1) // 2 for l in plane.find((xc, region_bbox.y0, xc, region_bbox.y1)): hbars[closest_idx(self.ys, l.yc), col] = True return vbars, hbars
[ "def", "_mark_grid_bounds", "(", "self", ",", "plane", ",", "region_bbox", ")", ":", "# Grid boundaries", "vbars", "=", "np", ".", "zeros", "(", "[", "self", ".", "num_rows", ",", "self", ".", "num_cols", "+", "1", "]", ",", "dtype", "=", "np", ".", "bool", ")", "hbars", "=", "np", ".", "zeros", "(", "[", "self", ".", "num_rows", "+", "1", ",", "self", ".", "num_cols", "]", ",", "dtype", "=", "np", ".", "bool", ")", "def", "closest_idx", "(", "arr", ",", "elem", ")", ":", "left", "=", "bisect", ".", "bisect_left", "(", "arr", ",", "elem", ")", "-", "1", "right", "=", "bisect", ".", "bisect_right", "(", "arr", ",", "elem", ")", "-", "1", "return", "left", "if", "abs", "(", "arr", "[", "left", "]", "-", "elem", ")", "<", "abs", "(", "arr", "[", "right", "]", "-", "elem", ")", "else", "right", "# Figure out which separating segments are missing, i.e. merge cells", "for", "row", ",", "(", "y0", ",", "y1", ")", "in", "enumerate", "(", "self", ".", "yranges", ")", ":", "yc", "=", "(", "y0", "+", "y1", ")", "//", "2", "for", "l", "in", "plane", ".", "find", "(", "(", "region_bbox", ".", "x0", ",", "yc", ",", "region_bbox", ".", "x1", ",", "yc", ")", ")", ":", "vbars", "[", "row", ",", "closest_idx", "(", "self", ".", "xs", ",", "l", ".", "xc", ")", "]", "=", "True", "for", "col", ",", "(", "x0", ",", "x1", ")", "in", "enumerate", "(", "self", ".", "xranges", ")", ":", "xc", "=", "(", "x0", "+", "x1", ")", "//", "2", "for", "l", "in", "plane", ".", "find", "(", "(", "xc", ",", "region_bbox", ".", "y0", ",", "xc", ",", "region_bbox", ".", "y1", ")", ")", ":", "hbars", "[", "closest_idx", "(", "self", ".", "ys", ",", "l", ".", "yc", ")", ",", "col", "]", "=", "True", "return", "vbars", ",", "hbars" ]
Assume all lines define a complete grid over the region_bbox. Detect which lines are missing so that we can recover merged cells.
[ "Assume", "all", "lines", "define", "a", "complete", "grid", "over", "the", "region_bbox", ".", "Detect", "which", "lines", "are", "missing", "so", "that", "we", "can", "recover", "merged", "cells", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L146-L170
train
236,862
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
vectorize
def vectorize(e, tolerance=0.1): """ vectorizes the pdf object's bounding box min_width is the width under which we consider it a line instead of a big rectangle """ tolerance = max(tolerance, e.linewidth) is_high = e.height > tolerance is_wide = e.width > tolerance # if skewed towards a line if is_wide and not is_high: return (e.width, 0.0) if is_high and not is_wide: return (0.0, e.height)
python
def vectorize(e, tolerance=0.1): """ vectorizes the pdf object's bounding box min_width is the width under which we consider it a line instead of a big rectangle """ tolerance = max(tolerance, e.linewidth) is_high = e.height > tolerance is_wide = e.width > tolerance # if skewed towards a line if is_wide and not is_high: return (e.width, 0.0) if is_high and not is_wide: return (0.0, e.height)
[ "def", "vectorize", "(", "e", ",", "tolerance", "=", "0.1", ")", ":", "tolerance", "=", "max", "(", "tolerance", ",", "e", ".", "linewidth", ")", "is_high", "=", "e", ".", "height", ">", "tolerance", "is_wide", "=", "e", ".", "width", ">", "tolerance", "# if skewed towards a line", "if", "is_wide", "and", "not", "is_high", ":", "return", "(", "e", ".", "width", ",", "0.0", ")", "if", "is_high", "and", "not", "is_wide", ":", "return", "(", "0.0", ",", "e", ".", "height", ")" ]
vectorizes the pdf object's bounding box min_width is the width under which we consider it a line instead of a big rectangle
[ "vectorizes", "the", "pdf", "object", "s", "bounding", "box", "min_width", "is", "the", "width", "under", "which", "we", "consider", "it", "a", "line", "instead", "of", "a", "big", "rectangle" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L36-L49
train
236,863
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
aligned
def aligned(e1, e2): """ alignment is determined by two boxes having one exactly the same attribute, which could mean parallel, perpendicularly forming a corner etc. """ return ( any(close(c1, c2) for c1, c2 in zip(e1.bbox, e2.bbox)) or x_center_aligned(e1, e2) or y_center_aligned(e1, e2) )
python
def aligned(e1, e2): """ alignment is determined by two boxes having one exactly the same attribute, which could mean parallel, perpendicularly forming a corner etc. """ return ( any(close(c1, c2) for c1, c2 in zip(e1.bbox, e2.bbox)) or x_center_aligned(e1, e2) or y_center_aligned(e1, e2) )
[ "def", "aligned", "(", "e1", ",", "e2", ")", ":", "return", "(", "any", "(", "close", "(", "c1", ",", "c2", ")", "for", "c1", ",", "c2", "in", "zip", "(", "e1", ".", "bbox", ",", "e2", ".", "bbox", ")", ")", "or", "x_center_aligned", "(", "e1", ",", "e2", ")", "or", "y_center_aligned", "(", "e1", ",", "e2", ")", ")" ]
alignment is determined by two boxes having one exactly the same attribute, which could mean parallel, perpendicularly forming a corner etc.
[ "alignment", "is", "determined", "by", "two", "boxes", "having", "one", "exactly", "the", "same", "attribute", "which", "could", "mean", "parallel", "perpendicularly", "forming", "a", "corner", "etc", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L52-L62
train
236,864
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
bound_bboxes
def bound_bboxes(bboxes): """ Finds the minimal bbox that contains all given bboxes """ group_x0 = min(map(lambda l: l[x0], bboxes)) group_y0 = min(map(lambda l: l[y0], bboxes)) group_x1 = max(map(lambda l: l[x1], bboxes)) group_y1 = max(map(lambda l: l[y1], bboxes)) return (group_x0, group_y0, group_x1, group_y1)
python
def bound_bboxes(bboxes): """ Finds the minimal bbox that contains all given bboxes """ group_x0 = min(map(lambda l: l[x0], bboxes)) group_y0 = min(map(lambda l: l[y0], bboxes)) group_x1 = max(map(lambda l: l[x1], bboxes)) group_y1 = max(map(lambda l: l[y1], bboxes)) return (group_x0, group_y0, group_x1, group_y1)
[ "def", "bound_bboxes", "(", "bboxes", ")", ":", "group_x0", "=", "min", "(", "map", "(", "lambda", "l", ":", "l", "[", "x0", "]", ",", "bboxes", ")", ")", "group_y0", "=", "min", "(", "map", "(", "lambda", "l", ":", "l", "[", "y0", "]", ",", "bboxes", ")", ")", "group_x1", "=", "max", "(", "map", "(", "lambda", "l", ":", "l", "[", "x1", "]", ",", "bboxes", ")", ")", "group_y1", "=", "max", "(", "map", "(", "lambda", "l", ":", "l", "[", "y1", "]", ",", "bboxes", ")", ")", "return", "(", "group_x0", ",", "group_y0", ",", "group_x1", ",", "group_y1", ")" ]
Finds the minimal bbox that contains all given bboxes
[ "Finds", "the", "minimal", "bbox", "that", "contains", "all", "given", "bboxes" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L106-L114
train
236,865
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
bound_elems
def bound_elems(elems): """ Finds the minimal bbox that contains all given elems """ group_x0 = min(map(lambda l: l.x0, elems)) group_y0 = min(map(lambda l: l.y0, elems)) group_x1 = max(map(lambda l: l.x1, elems)) group_y1 = max(map(lambda l: l.y1, elems)) return (group_x0, group_y0, group_x1, group_y1)
python
def bound_elems(elems): """ Finds the minimal bbox that contains all given elems """ group_x0 = min(map(lambda l: l.x0, elems)) group_y0 = min(map(lambda l: l.y0, elems)) group_x1 = max(map(lambda l: l.x1, elems)) group_y1 = max(map(lambda l: l.y1, elems)) return (group_x0, group_y0, group_x1, group_y1)
[ "def", "bound_elems", "(", "elems", ")", ":", "group_x0", "=", "min", "(", "map", "(", "lambda", "l", ":", "l", ".", "x0", ",", "elems", ")", ")", "group_y0", "=", "min", "(", "map", "(", "lambda", "l", ":", "l", ".", "y0", ",", "elems", ")", ")", "group_x1", "=", "max", "(", "map", "(", "lambda", "l", ":", "l", ".", "x1", ",", "elems", ")", ")", "group_y1", "=", "max", "(", "map", "(", "lambda", "l", ":", "l", ".", "y1", ",", "elems", ")", ")", "return", "(", "group_x0", ",", "group_y0", ",", "group_x1", ",", "group_y1", ")" ]
Finds the minimal bbox that contains all given elems
[ "Finds", "the", "minimal", "bbox", "that", "contains", "all", "given", "elems" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L117-L125
train
236,866
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
intersect
def intersect(a, b): """ Check if two rectangles intersect """ if a[x0] == a[x1] or a[y0] == a[y1]: return False if b[x0] == b[x1] or b[y0] == b[y1]: return False return a[x0] <= b[x1] and b[x0] <= a[x1] and a[y0] <= b[y1] and b[y0] <= a[y1]
python
def intersect(a, b): """ Check if two rectangles intersect """ if a[x0] == a[x1] or a[y0] == a[y1]: return False if b[x0] == b[x1] or b[y0] == b[y1]: return False return a[x0] <= b[x1] and b[x0] <= a[x1] and a[y0] <= b[y1] and b[y0] <= a[y1]
[ "def", "intersect", "(", "a", ",", "b", ")", ":", "if", "a", "[", "x0", "]", "==", "a", "[", "x1", "]", "or", "a", "[", "y0", "]", "==", "a", "[", "y1", "]", ":", "return", "False", "if", "b", "[", "x0", "]", "==", "b", "[", "x1", "]", "or", "b", "[", "y0", "]", "==", "b", "[", "y1", "]", ":", "return", "False", "return", "a", "[", "x0", "]", "<=", "b", "[", "x1", "]", "and", "b", "[", "x0", "]", "<=", "a", "[", "x1", "]", "and", "a", "[", "y0", "]", "<=", "b", "[", "y1", "]", "and", "b", "[", "y0", "]", "<=", "a", "[", "y1", "]" ]
Check if two rectangles intersect
[ "Check", "if", "two", "rectangles", "intersect" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L128-L136
train
236,867
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
reading_order
def reading_order(e1, e2): """ A comparator to sort bboxes from top to bottom, left to right """ b1 = e1.bbox b2 = e2.bbox if round(b1[y0]) == round(b2[y0]) or round(b1[y1]) == round(b2[y1]): return float_cmp(b1[x0], b2[x0]) return float_cmp(b1[y0], b2[y0])
python
def reading_order(e1, e2): """ A comparator to sort bboxes from top to bottom, left to right """ b1 = e1.bbox b2 = e2.bbox if round(b1[y0]) == round(b2[y0]) or round(b1[y1]) == round(b2[y1]): return float_cmp(b1[x0], b2[x0]) return float_cmp(b1[y0], b2[y0])
[ "def", "reading_order", "(", "e1", ",", "e2", ")", ":", "b1", "=", "e1", ".", "bbox", "b2", "=", "e2", ".", "bbox", "if", "round", "(", "b1", "[", "y0", "]", ")", "==", "round", "(", "b2", "[", "y0", "]", ")", "or", "round", "(", "b1", "[", "y1", "]", ")", "==", "round", "(", "b2", "[", "y1", "]", ")", ":", "return", "float_cmp", "(", "b1", "[", "x0", "]", ",", "b2", "[", "x0", "]", ")", "return", "float_cmp", "(", "b1", "[", "y0", "]", ",", "b2", "[", "y0", "]", ")" ]
A comparator to sort bboxes from top to bottom, left to right
[ "A", "comparator", "to", "sort", "bboxes", "from", "top", "to", "bottom", "left", "to", "right" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L155-L163
train
236,868
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
xy_reading_order
def xy_reading_order(e1, e2): """ A comparator to sort bboxes from left to right, top to bottom """ b1 = e1.bbox b2 = e2.bbox if round(b1[x0]) == round(b2[x0]): return float_cmp(b1[y0], b2[y0]) return float_cmp(b1[x0], b2[x0])
python
def xy_reading_order(e1, e2): """ A comparator to sort bboxes from left to right, top to bottom """ b1 = e1.bbox b2 = e2.bbox if round(b1[x0]) == round(b2[x0]): return float_cmp(b1[y0], b2[y0]) return float_cmp(b1[x0], b2[x0])
[ "def", "xy_reading_order", "(", "e1", ",", "e2", ")", ":", "b1", "=", "e1", ".", "bbox", "b2", "=", "e2", ".", "bbox", "if", "round", "(", "b1", "[", "x0", "]", ")", "==", "round", "(", "b2", "[", "x0", "]", ")", ":", "return", "float_cmp", "(", "b1", "[", "y0", "]", ",", "b2", "[", "y0", "]", ")", "return", "float_cmp", "(", "b1", "[", "x0", "]", ",", "b2", "[", "x0", "]", ")" ]
A comparator to sort bboxes from left to right, top to bottom
[ "A", "comparator", "to", "sort", "bboxes", "from", "left", "to", "right", "top", "to", "bottom" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L166-L174
train
236,869
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
column_order
def column_order(b1, b2): """ A comparator that sorts bboxes first by "columns", where a column is made up of all bboxes that overlap, then by vertical position in each column. b1 = [b1.type, b1.top, b1.left, b1.bottom, b1.right] b2 = [b2.type, b2.top, b2.left, b2.bottom, b2.right] """ (top, left, bottom) = (1, 2, 3) # TODO(senwu): Reimplement the functionality of this comparator to # detect the number of columns, and sort those in reading order. # TODO: This is just a simple top to bottom, left to right comparator # for now. if round(b1[top]) == round(b2[top]) or round(b1[bottom]) == round(b2[bottom]): return float_cmp(b1[left], b2[left]) return float_cmp(b1[top], b2[top])
python
def column_order(b1, b2): """ A comparator that sorts bboxes first by "columns", where a column is made up of all bboxes that overlap, then by vertical position in each column. b1 = [b1.type, b1.top, b1.left, b1.bottom, b1.right] b2 = [b2.type, b2.top, b2.left, b2.bottom, b2.right] """ (top, left, bottom) = (1, 2, 3) # TODO(senwu): Reimplement the functionality of this comparator to # detect the number of columns, and sort those in reading order. # TODO: This is just a simple top to bottom, left to right comparator # for now. if round(b1[top]) == round(b2[top]) or round(b1[bottom]) == round(b2[bottom]): return float_cmp(b1[left], b2[left]) return float_cmp(b1[top], b2[top])
[ "def", "column_order", "(", "b1", ",", "b2", ")", ":", "(", "top", ",", "left", ",", "bottom", ")", "=", "(", "1", ",", "2", ",", "3", ")", "# TODO(senwu): Reimplement the functionality of this comparator to", "# detect the number of columns, and sort those in reading order.", "# TODO: This is just a simple top to bottom, left to right comparator", "# for now.", "if", "round", "(", "b1", "[", "top", "]", ")", "==", "round", "(", "b2", "[", "top", "]", ")", "or", "round", "(", "b1", "[", "bottom", "]", ")", "==", "round", "(", "b2", "[", "bottom", "]", ")", ":", "return", "float_cmp", "(", "b1", "[", "left", "]", ",", "b2", "[", "left", "]", ")", "return", "float_cmp", "(", "b1", "[", "top", "]", ",", "b2", "[", "top", "]", ")" ]
A comparator that sorts bboxes first by "columns", where a column is made up of all bboxes that overlap, then by vertical position in each column. b1 = [b1.type, b1.top, b1.left, b1.bottom, b1.right] b2 = [b2.type, b2.top, b2.left, b2.bottom, b2.right]
[ "A", "comparator", "that", "sorts", "bboxes", "first", "by", "columns", "where", "a", "column", "is", "made", "up", "of", "all", "bboxes", "that", "overlap", "then", "by", "vertical", "position", "in", "each", "column", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L177-L193
train
236,870
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
merge_intervals
def merge_intervals(elems, overlap_thres=2.0): """ Project in x axis Sort by start Go through segments and keep max x1 Return a list of non-overlapping intervals """ overlap_thres = max(0.0, overlap_thres) ordered = sorted(elems, key=lambda e: e.x0) intervals = [] cur = [-overlap_thres, -overlap_thres] for e in ordered: if e.x0 - cur[1] > overlap_thres: # Check interval validity if cur[1] > 0.0: intervals.append(cur) cur = [e.x0, e.x1] continue cur[1] = max(cur[1], e.x1) intervals.append(cur) # Freeze the interval to tuples return map(tuple, intervals)
python
def merge_intervals(elems, overlap_thres=2.0): """ Project in x axis Sort by start Go through segments and keep max x1 Return a list of non-overlapping intervals """ overlap_thres = max(0.0, overlap_thres) ordered = sorted(elems, key=lambda e: e.x0) intervals = [] cur = [-overlap_thres, -overlap_thres] for e in ordered: if e.x0 - cur[1] > overlap_thres: # Check interval validity if cur[1] > 0.0: intervals.append(cur) cur = [e.x0, e.x1] continue cur[1] = max(cur[1], e.x1) intervals.append(cur) # Freeze the interval to tuples return map(tuple, intervals)
[ "def", "merge_intervals", "(", "elems", ",", "overlap_thres", "=", "2.0", ")", ":", "overlap_thres", "=", "max", "(", "0.0", ",", "overlap_thres", ")", "ordered", "=", "sorted", "(", "elems", ",", "key", "=", "lambda", "e", ":", "e", ".", "x0", ")", "intervals", "=", "[", "]", "cur", "=", "[", "-", "overlap_thres", ",", "-", "overlap_thres", "]", "for", "e", "in", "ordered", ":", "if", "e", ".", "x0", "-", "cur", "[", "1", "]", ">", "overlap_thres", ":", "# Check interval validity", "if", "cur", "[", "1", "]", ">", "0.0", ":", "intervals", ".", "append", "(", "cur", ")", "cur", "=", "[", "e", ".", "x0", ",", "e", ".", "x1", "]", "continue", "cur", "[", "1", "]", "=", "max", "(", "cur", "[", "1", "]", ",", "e", ".", "x1", ")", "intervals", ".", "append", "(", "cur", ")", "# Freeze the interval to tuples", "return", "map", "(", "tuple", ",", "intervals", ")" ]
Project in x axis Sort by start Go through segments and keep max x1 Return a list of non-overlapping intervals
[ "Project", "in", "x", "axis", "Sort", "by", "start", "Go", "through", "segments", "and", "keep", "max", "x1" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L212-L235
train
236,871
HazyResearch/pdftotree
pdftotree/visual/visual_utils.py
predict_heatmap
def predict_heatmap(pdf_path, page_num, model, img_dim=448, img_dir="tmp/img"): """ Return an image corresponding to the page of the pdf documents saved at pdf_path. If the image is not found in img_dir this function creates it and saves it in img_dir. :param pdf_path: path to the pdf document. :param page_num: page number to create image from in the pdf file. :return: """ if not os.path.isdir(img_dir): print("\nCreating image folder at {}".format(img_dir)) os.makedirs(img_dir) pdf_name = os.path.splitext(os.path.basename(pdf_path))[0] # TODO: add hashing function to make sure name is unique # TODO: add parallelization img_path = os.path.join(img_dir, pdf_name + "-{}.png".format(page_num)) if not os.path.isfile(img_path): # create image for a page in the pdf document and save it in img_dir save_image(pdf_path, img_path, page_num) image = load_img(img_path, grayscale=True, target_size=(img_dim, img_dim)) image = img_to_array(image, data_format=K.image_data_format()) image = ( image.reshape((img_dim, img_dim, 1)) .repeat(3, axis=2) .reshape((1, img_dim, img_dim, 3)) ) return ( image.astype(np.uint8).reshape((img_dim, img_dim, 3)), model.predict(image).reshape((img_dim, img_dim)), )
python
def predict_heatmap(pdf_path, page_num, model, img_dim=448, img_dir="tmp/img"): """ Return an image corresponding to the page of the pdf documents saved at pdf_path. If the image is not found in img_dir this function creates it and saves it in img_dir. :param pdf_path: path to the pdf document. :param page_num: page number to create image from in the pdf file. :return: """ if not os.path.isdir(img_dir): print("\nCreating image folder at {}".format(img_dir)) os.makedirs(img_dir) pdf_name = os.path.splitext(os.path.basename(pdf_path))[0] # TODO: add hashing function to make sure name is unique # TODO: add parallelization img_path = os.path.join(img_dir, pdf_name + "-{}.png".format(page_num)) if not os.path.isfile(img_path): # create image for a page in the pdf document and save it in img_dir save_image(pdf_path, img_path, page_num) image = load_img(img_path, grayscale=True, target_size=(img_dim, img_dim)) image = img_to_array(image, data_format=K.image_data_format()) image = ( image.reshape((img_dim, img_dim, 1)) .repeat(3, axis=2) .reshape((1, img_dim, img_dim, 3)) ) return ( image.astype(np.uint8).reshape((img_dim, img_dim, 3)), model.predict(image).reshape((img_dim, img_dim)), )
[ "def", "predict_heatmap", "(", "pdf_path", ",", "page_num", ",", "model", ",", "img_dim", "=", "448", ",", "img_dir", "=", "\"tmp/img\"", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "img_dir", ")", ":", "print", "(", "\"\\nCreating image folder at {}\"", ".", "format", "(", "img_dir", ")", ")", "os", ".", "makedirs", "(", "img_dir", ")", "pdf_name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "pdf_path", ")", ")", "[", "0", "]", "# TODO: add hashing function to make sure name is unique", "# TODO: add parallelization", "img_path", "=", "os", ".", "path", ".", "join", "(", "img_dir", ",", "pdf_name", "+", "\"-{}.png\"", ".", "format", "(", "page_num", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "img_path", ")", ":", "# create image for a page in the pdf document and save it in img_dir", "save_image", "(", "pdf_path", ",", "img_path", ",", "page_num", ")", "image", "=", "load_img", "(", "img_path", ",", "grayscale", "=", "True", ",", "target_size", "=", "(", "img_dim", ",", "img_dim", ")", ")", "image", "=", "img_to_array", "(", "image", ",", "data_format", "=", "K", ".", "image_data_format", "(", ")", ")", "image", "=", "(", "image", ".", "reshape", "(", "(", "img_dim", ",", "img_dim", ",", "1", ")", ")", ".", "repeat", "(", "3", ",", "axis", "=", "2", ")", ".", "reshape", "(", "(", "1", ",", "img_dim", ",", "img_dim", ",", "3", ")", ")", ")", "return", "(", "image", ".", "astype", "(", "np", ".", "uint8", ")", ".", "reshape", "(", "(", "img_dim", ",", "img_dim", ",", "3", ")", ")", ",", "model", ".", "predict", "(", "image", ")", ".", "reshape", "(", "(", "img_dim", ",", "img_dim", ")", ")", ",", ")" ]
Return an image corresponding to the page of the pdf documents saved at pdf_path. If the image is not found in img_dir this function creates it and saves it in img_dir. :param pdf_path: path to the pdf document. :param page_num: page number to create image from in the pdf file. :return:
[ "Return", "an", "image", "corresponding", "to", "the", "page", "of", "the", "pdf", "documents", "saved", "at", "pdf_path", ".", "If", "the", "image", "is", "not", "found", "in", "img_dir", "this", "function", "creates", "it", "and", "saves", "it", "in", "img_dir", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/visual/visual_utils.py#L11-L41
train
236,872
HazyResearch/pdftotree
pdftotree/visual/visual_utils.py
do_intersect
def do_intersect(bb1, bb2): """ Helper function that returns True if two bounding boxes overlap. """ if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]: return False if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]: return False return True
python
def do_intersect(bb1, bb2): """ Helper function that returns True if two bounding boxes overlap. """ if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]: return False if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]: return False return True
[ "def", "do_intersect", "(", "bb1", ",", "bb2", ")", ":", "if", "bb1", "[", "0", "]", "+", "bb1", "[", "2", "]", "<", "bb2", "[", "0", "]", "or", "bb2", "[", "0", "]", "+", "bb2", "[", "2", "]", "<", "bb1", "[", "0", "]", ":", "return", "False", "if", "bb1", "[", "1", "]", "+", "bb1", "[", "3", "]", "<", "bb2", "[", "1", "]", "or", "bb2", "[", "1", "]", "+", "bb2", "[", "3", "]", "<", "bb1", "[", "1", "]", ":", "return", "False", "return", "True" ]
Helper function that returns True if two bounding boxes overlap.
[ "Helper", "function", "that", "returns", "True", "if", "two", "bounding", "boxes", "overlap", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/visual/visual_utils.py#L63-L71
train
236,873
HazyResearch/pdftotree
pdftotree/visual/visual_utils.py
get_bboxes
def get_bboxes( img, mask, nb_boxes=100, score_thresh=0.5, iou_thresh=0.2, prop_size=0.09, prop_scale=1.2, ): """ Uses selective search to generate candidate bounding boxes and keeps the ones that have the largest iou with the predicted mask. :param img: original image :param mask: predicted mask :param nb_boxes: max number of candidate bounding boxes :param score_thresh: scre threshold to consider prediction is True :param iou_thresh: iou threshold to consider a candidate is a correct region :param prop_size: selective search parameter :param prop_scale: selective search parameter, larger prop_scale favorizes large boudning boxes :return: list of bounding boxes and ious, boudning boxes are tuples (left, top, width, height) """ min_size = int(img.shape[0] * prop_size * img.shape[1] * prop_size) scale = int(img.shape[0] * prop_scale) # TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes img_lbl, regions = selectivesearch.selective_search( img, scale=scale, sigma=0.8, min_size=min_size ) rect = [None] * nb_boxes max_iou = -1 * np.ones(nb_boxes) mask = 1.0 * (mask > score_thresh) # compute iou for each candidate bounding box and save top nb_bboxes for region in regions: left, top, width, height = region["rect"] intersection = mask[top : top + height, left : left + width].sum() union = height * width + mask.sum() - intersection iou = intersection / union idx = np.argmin(max_iou) if iou > max_iou[idx]: max_iou[idx] = iou rect[idx] = region["rect"] # Exclusive maximum remove_indexes = max_iou == -1 bboxes = [] filtered_ious = [] for idx in np.argsort([-x for x in max_iou]): if remove_indexes[idx]: # no more tables bounding boxes break if len(bboxes) == 0: # first candidate table bounding box if max_iou[idx] > iou_thresh: bboxes += [rect[idx]] filtered_ious += [max_iou[idx]] else: # No tables in this document break else: # If it doensn't intersect with any other bounding box if not any( [do_intersect(rect[idx], bboxes[k]) for k in 
range(len(bboxes))] ): if max_iou[idx] > iou_thresh: bboxes += [rect[idx]] filtered_ious += [max_iou[idx]] return bboxes, filtered_ious
python
def get_bboxes( img, mask, nb_boxes=100, score_thresh=0.5, iou_thresh=0.2, prop_size=0.09, prop_scale=1.2, ): """ Uses selective search to generate candidate bounding boxes and keeps the ones that have the largest iou with the predicted mask. :param img: original image :param mask: predicted mask :param nb_boxes: max number of candidate bounding boxes :param score_thresh: scre threshold to consider prediction is True :param iou_thresh: iou threshold to consider a candidate is a correct region :param prop_size: selective search parameter :param prop_scale: selective search parameter, larger prop_scale favorizes large boudning boxes :return: list of bounding boxes and ious, boudning boxes are tuples (left, top, width, height) """ min_size = int(img.shape[0] * prop_size * img.shape[1] * prop_size) scale = int(img.shape[0] * prop_scale) # TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes img_lbl, regions = selectivesearch.selective_search( img, scale=scale, sigma=0.8, min_size=min_size ) rect = [None] * nb_boxes max_iou = -1 * np.ones(nb_boxes) mask = 1.0 * (mask > score_thresh) # compute iou for each candidate bounding box and save top nb_bboxes for region in regions: left, top, width, height = region["rect"] intersection = mask[top : top + height, left : left + width].sum() union = height * width + mask.sum() - intersection iou = intersection / union idx = np.argmin(max_iou) if iou > max_iou[idx]: max_iou[idx] = iou rect[idx] = region["rect"] # Exclusive maximum remove_indexes = max_iou == -1 bboxes = [] filtered_ious = [] for idx in np.argsort([-x for x in max_iou]): if remove_indexes[idx]: # no more tables bounding boxes break if len(bboxes) == 0: # first candidate table bounding box if max_iou[idx] > iou_thresh: bboxes += [rect[idx]] filtered_ious += [max_iou[idx]] else: # No tables in this document break else: # If it doensn't intersect with any other bounding box if not any( [do_intersect(rect[idx], bboxes[k]) for k in 
range(len(bboxes))] ): if max_iou[idx] > iou_thresh: bboxes += [rect[idx]] filtered_ious += [max_iou[idx]] return bboxes, filtered_ious
[ "def", "get_bboxes", "(", "img", ",", "mask", ",", "nb_boxes", "=", "100", ",", "score_thresh", "=", "0.5", ",", "iou_thresh", "=", "0.2", ",", "prop_size", "=", "0.09", ",", "prop_scale", "=", "1.2", ",", ")", ":", "min_size", "=", "int", "(", "img", ".", "shape", "[", "0", "]", "*", "prop_size", "*", "img", ".", "shape", "[", "1", "]", "*", "prop_size", ")", "scale", "=", "int", "(", "img", ".", "shape", "[", "0", "]", "*", "prop_scale", ")", "# TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes", "img_lbl", ",", "regions", "=", "selectivesearch", ".", "selective_search", "(", "img", ",", "scale", "=", "scale", ",", "sigma", "=", "0.8", ",", "min_size", "=", "min_size", ")", "rect", "=", "[", "None", "]", "*", "nb_boxes", "max_iou", "=", "-", "1", "*", "np", ".", "ones", "(", "nb_boxes", ")", "mask", "=", "1.0", "*", "(", "mask", ">", "score_thresh", ")", "# compute iou for each candidate bounding box and save top nb_bboxes", "for", "region", "in", "regions", ":", "left", ",", "top", ",", "width", ",", "height", "=", "region", "[", "\"rect\"", "]", "intersection", "=", "mask", "[", "top", ":", "top", "+", "height", ",", "left", ":", "left", "+", "width", "]", ".", "sum", "(", ")", "union", "=", "height", "*", "width", "+", "mask", ".", "sum", "(", ")", "-", "intersection", "iou", "=", "intersection", "/", "union", "idx", "=", "np", ".", "argmin", "(", "max_iou", ")", "if", "iou", ">", "max_iou", "[", "idx", "]", ":", "max_iou", "[", "idx", "]", "=", "iou", "rect", "[", "idx", "]", "=", "region", "[", "\"rect\"", "]", "# Exclusive maximum", "remove_indexes", "=", "max_iou", "==", "-", "1", "bboxes", "=", "[", "]", "filtered_ious", "=", "[", "]", "for", "idx", "in", "np", ".", "argsort", "(", "[", "-", "x", "for", "x", "in", "max_iou", "]", ")", ":", "if", "remove_indexes", "[", "idx", "]", ":", "# no more tables bounding boxes", "break", "if", "len", "(", "bboxes", ")", "==", "0", ":", "# first candidate table bounding box", "if", 
"max_iou", "[", "idx", "]", ">", "iou_thresh", ":", "bboxes", "+=", "[", "rect", "[", "idx", "]", "]", "filtered_ious", "+=", "[", "max_iou", "[", "idx", "]", "]", "else", ":", "# No tables in this document", "break", "else", ":", "# If it doensn't intersect with any other bounding box", "if", "not", "any", "(", "[", "do_intersect", "(", "rect", "[", "idx", "]", ",", "bboxes", "[", "k", "]", ")", "for", "k", "in", "range", "(", "len", "(", "bboxes", ")", ")", "]", ")", ":", "if", "max_iou", "[", "idx", "]", ">", "iou_thresh", ":", "bboxes", "+=", "[", "rect", "[", "idx", "]", "]", "filtered_ious", "+=", "[", "max_iou", "[", "idx", "]", "]", "return", "bboxes", ",", "filtered_ious" ]
Uses selective search to generate candidate bounding boxes and keeps the ones that have the largest iou with the predicted mask. :param img: original image :param mask: predicted mask :param nb_boxes: max number of candidate bounding boxes :param score_thresh: scre threshold to consider prediction is True :param iou_thresh: iou threshold to consider a candidate is a correct region :param prop_size: selective search parameter :param prop_scale: selective search parameter, larger prop_scale favorizes large boudning boxes :return: list of bounding boxes and ious, boudning boxes are tuples (left, top, width, height)
[ "Uses", "selective", "search", "to", "generate", "candidate", "bounding", "boxes", "and", "keeps", "the", "ones", "that", "have", "the", "largest", "iou", "with", "the", "predicted", "mask", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/visual/visual_utils.py#L74-L141
train
236,874
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
_print_dict
def _print_dict(elem_dict): """ Print a dict in a readable way """ for key, value in sorted(elem_dict.iteritems()): if isinstance(value, collections.Iterable): print(key, len(value)) else: print(key, value)
python
def _print_dict(elem_dict): """ Print a dict in a readable way """ for key, value in sorted(elem_dict.iteritems()): if isinstance(value, collections.Iterable): print(key, len(value)) else: print(key, value)
[ "def", "_print_dict", "(", "elem_dict", ")", ":", "for", "key", ",", "value", "in", "sorted", "(", "elem_dict", ".", "iteritems", "(", ")", ")", ":", "if", "isinstance", "(", "value", ",", "collections", ".", "Iterable", ")", ":", "print", "(", "key", ",", "len", "(", "value", ")", ")", "else", ":", "print", "(", "key", ",", "value", ")" ]
Print a dict in a readable way
[ "Print", "a", "dict", "in", "a", "readable", "way" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L221-L229
train
236,875
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
_font_of_mention
def _font_of_mention(m): """ Returns the font type and size of the first alphanumeric char in the text or None if there isn't any. """ for ch in m: if isinstance(ch, LTChar) and ch.get_text().isalnum(): return (ch.fontname, _font_size_of(ch)) return (None, 0)
python
def _font_of_mention(m): """ Returns the font type and size of the first alphanumeric char in the text or None if there isn't any. """ for ch in m: if isinstance(ch, LTChar) and ch.get_text().isalnum(): return (ch.fontname, _font_size_of(ch)) return (None, 0)
[ "def", "_font_of_mention", "(", "m", ")", ":", "for", "ch", "in", "m", ":", "if", "isinstance", "(", "ch", ",", "LTChar", ")", "and", "ch", ".", "get_text", "(", ")", ".", "isalnum", "(", ")", ":", "return", "(", "ch", ".", "fontname", ",", "_font_size_of", "(", "ch", ")", ")", "return", "(", "None", ",", "0", ")" ]
Returns the font type and size of the first alphanumeric char in the text or None if there isn't any.
[ "Returns", "the", "font", "type", "and", "size", "of", "the", "first", "alphanumeric", "char", "in", "the", "text", "or", "None", "if", "there", "isn", "t", "any", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L238-L246
train
236,876
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
_allowed_char
def _allowed_char(c): """ Returns whether the given unicode char is allowed in output """ c = ord(c) if c < 0: return False if c < 128: return _ascii_allowed[c] # Genereally allow unicodes, TODO: check for unicode control characters # characters return True
python
def _allowed_char(c): """ Returns whether the given unicode char is allowed in output """ c = ord(c) if c < 0: return False if c < 128: return _ascii_allowed[c] # Genereally allow unicodes, TODO: check for unicode control characters # characters return True
[ "def", "_allowed_char", "(", "c", ")", ":", "c", "=", "ord", "(", "c", ")", "if", "c", "<", "0", ":", "return", "False", "if", "c", "<", "128", ":", "return", "_ascii_allowed", "[", "c", "]", "# Genereally allow unicodes, TODO: check for unicode control characters", "# characters", "return", "True" ]
Returns whether the given unicode char is allowed in output
[ "Returns", "whether", "the", "given", "unicode", "char", "is", "allowed", "in", "output" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L258-L269
train
236,877
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
keep_allowed_chars
def keep_allowed_chars(text): """ Cleans the text for output """ # print ','.join(str(ord(c)) for c in text) return "".join(" " if c == "\n" else c for c in text.strip() if _allowed_char(c))
python
def keep_allowed_chars(text): """ Cleans the text for output """ # print ','.join(str(ord(c)) for c in text) return "".join(" " if c == "\n" else c for c in text.strip() if _allowed_char(c))
[ "def", "keep_allowed_chars", "(", "text", ")", ":", "# print ','.join(str(ord(c)) for c in text)", "return", "\"\"", ".", "join", "(", "\" \"", "if", "c", "==", "\"\\n\"", "else", "c", "for", "c", "in", "text", ".", "strip", "(", ")", "if", "_allowed_char", "(", "c", ")", ")" ]
Cleans the text for output
[ "Cleans", "the", "text", "for", "output" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L272-L277
train
236,878
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
CustomPDFPageAggregator.paint_path
def paint_path(self, gstate, stroke, fill, evenodd, path): """ Converting long paths to small segments each time we m=Move or h=ClosePath for polygon """ shape = "".join(x[0] for x in path) prev_split = 0 for i in range(len(shape)): if shape[i] == "m" and prev_split != i: self.paint_single_path( gstate, stroke, fill, evenodd, path[prev_split:i] ) prev_split = i if shape[i] == "h": self.paint_single_path( gstate, stroke, fill, evenodd, path[prev_split : i + 1] ) prev_split = i + 1 # clean up remaining segments if prev_split < len(shape): self.paint_single_path(gstate, stroke, fill, evenodd, path[prev_split:])
python
def paint_path(self, gstate, stroke, fill, evenodd, path): """ Converting long paths to small segments each time we m=Move or h=ClosePath for polygon """ shape = "".join(x[0] for x in path) prev_split = 0 for i in range(len(shape)): if shape[i] == "m" and prev_split != i: self.paint_single_path( gstate, stroke, fill, evenodd, path[prev_split:i] ) prev_split = i if shape[i] == "h": self.paint_single_path( gstate, stroke, fill, evenodd, path[prev_split : i + 1] ) prev_split = i + 1 # clean up remaining segments if prev_split < len(shape): self.paint_single_path(gstate, stroke, fill, evenodd, path[prev_split:])
[ "def", "paint_path", "(", "self", ",", "gstate", ",", "stroke", ",", "fill", ",", "evenodd", ",", "path", ")", ":", "shape", "=", "\"\"", ".", "join", "(", "x", "[", "0", "]", "for", "x", "in", "path", ")", "prev_split", "=", "0", "for", "i", "in", "range", "(", "len", "(", "shape", ")", ")", ":", "if", "shape", "[", "i", "]", "==", "\"m\"", "and", "prev_split", "!=", "i", ":", "self", ".", "paint_single_path", "(", "gstate", ",", "stroke", ",", "fill", ",", "evenodd", ",", "path", "[", "prev_split", ":", "i", "]", ")", "prev_split", "=", "i", "if", "shape", "[", "i", "]", "==", "\"h\"", ":", "self", ".", "paint_single_path", "(", "gstate", ",", "stroke", ",", "fill", ",", "evenodd", ",", "path", "[", "prev_split", ":", "i", "+", "1", "]", ")", "prev_split", "=", "i", "+", "1", "# clean up remaining segments", "if", "prev_split", "<", "len", "(", "shape", ")", ":", "self", ".", "paint_single_path", "(", "gstate", ",", "stroke", ",", "fill", ",", "evenodd", ",", "path", "[", "prev_split", ":", "]", ")" ]
Converting long paths to small segments each time we m=Move or h=ClosePath for polygon
[ "Converting", "long", "paths", "to", "small", "segments", "each", "time", "we", "m", "=", "Move", "or", "h", "=", "ClosePath", "for", "polygon" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L53-L74
train
236,879
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
CustomPDFPageAggregator.paint_single_path
def paint_single_path(self, gstate, stroke, fill, evenodd, path): """ Converting a single path draw command into lines and curves objects """ if len(path) < 2: return shape = "".join(x[0] for x in path) pts = [] for p in path: for i in range(1, len(p), 2): pts.append(apply_matrix_pt(self.ctm, (p[i], p[i + 1]))) # Line mode if self.line_only_shape.match(shape): # check for sloped lines first has_slope = False for i in range(len(pts) - 1): if pts[i][0] != pts[i + 1][0] and pts[i][1] != pts[i + 1][1]: has_slope = True break if not has_slope: for i in range(len(pts) - 1): self.cur_item.add(LTLine(gstate.linewidth, pts[i], pts[i + 1])) # Adding the closing line for a polygon, especially rectangles if shape.endswith("h"): self.cur_item.add(LTLine(gstate.linewidth, pts[0], pts[-1])) return # Add the curve as an arbitrary polyline (belzier curve info is lost here) self.cur_item.add(LTCurve(gstate.linewidth, pts))
python
def paint_single_path(self, gstate, stroke, fill, evenodd, path): """ Converting a single path draw command into lines and curves objects """ if len(path) < 2: return shape = "".join(x[0] for x in path) pts = [] for p in path: for i in range(1, len(p), 2): pts.append(apply_matrix_pt(self.ctm, (p[i], p[i + 1]))) # Line mode if self.line_only_shape.match(shape): # check for sloped lines first has_slope = False for i in range(len(pts) - 1): if pts[i][0] != pts[i + 1][0] and pts[i][1] != pts[i + 1][1]: has_slope = True break if not has_slope: for i in range(len(pts) - 1): self.cur_item.add(LTLine(gstate.linewidth, pts[i], pts[i + 1])) # Adding the closing line for a polygon, especially rectangles if shape.endswith("h"): self.cur_item.add(LTLine(gstate.linewidth, pts[0], pts[-1])) return # Add the curve as an arbitrary polyline (belzier curve info is lost here) self.cur_item.add(LTCurve(gstate.linewidth, pts))
[ "def", "paint_single_path", "(", "self", ",", "gstate", ",", "stroke", ",", "fill", ",", "evenodd", ",", "path", ")", ":", "if", "len", "(", "path", ")", "<", "2", ":", "return", "shape", "=", "\"\"", ".", "join", "(", "x", "[", "0", "]", "for", "x", "in", "path", ")", "pts", "=", "[", "]", "for", "p", "in", "path", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "p", ")", ",", "2", ")", ":", "pts", ".", "append", "(", "apply_matrix_pt", "(", "self", ".", "ctm", ",", "(", "p", "[", "i", "]", ",", "p", "[", "i", "+", "1", "]", ")", ")", ")", "# Line mode", "if", "self", ".", "line_only_shape", ".", "match", "(", "shape", ")", ":", "# check for sloped lines first", "has_slope", "=", "False", "for", "i", "in", "range", "(", "len", "(", "pts", ")", "-", "1", ")", ":", "if", "pts", "[", "i", "]", "[", "0", "]", "!=", "pts", "[", "i", "+", "1", "]", "[", "0", "]", "and", "pts", "[", "i", "]", "[", "1", "]", "!=", "pts", "[", "i", "+", "1", "]", "[", "1", "]", ":", "has_slope", "=", "True", "break", "if", "not", "has_slope", ":", "for", "i", "in", "range", "(", "len", "(", "pts", ")", "-", "1", ")", ":", "self", ".", "cur_item", ".", "add", "(", "LTLine", "(", "gstate", ".", "linewidth", ",", "pts", "[", "i", "]", ",", "pts", "[", "i", "+", "1", "]", ")", ")", "# Adding the closing line for a polygon, especially rectangles", "if", "shape", ".", "endswith", "(", "\"h\"", ")", ":", "self", ".", "cur_item", ".", "add", "(", "LTLine", "(", "gstate", ".", "linewidth", ",", "pts", "[", "0", "]", ",", "pts", "[", "-", "1", "]", ")", ")", "return", "# Add the curve as an arbitrary polyline (belzier curve info is lost here)", "self", ".", "cur_item", ".", "add", "(", "LTCurve", "(", "gstate", ".", "linewidth", ",", "pts", ")", ")" ]
Converting a single path draw command into lines and curves objects
[ "Converting", "a", "single", "path", "draw", "command", "into", "lines", "and", "curves", "objects" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L76-L107
train
236,880
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
traverse_layout
def traverse_layout(root, callback): """ Tree walker and invokes the callback as it traverse pdf object tree """ callback(root) if isinstance(root, collections.Iterable): for child in root: traverse_layout(child, callback)
python
def traverse_layout(root, callback): """ Tree walker and invokes the callback as it traverse pdf object tree """ callback(root) if isinstance(root, collections.Iterable): for child in root: traverse_layout(child, callback)
[ "def", "traverse_layout", "(", "root", ",", "callback", ")", ":", "callback", "(", "root", ")", "if", "isinstance", "(", "root", ",", "collections", ".", "Iterable", ")", ":", "for", "child", "in", "root", ":", "traverse_layout", "(", "child", ",", "callback", ")" ]
Tree walker and invokes the callback as it traverse pdf object tree
[ "Tree", "walker", "and", "invokes", "the", "callback", "as", "it", "traverse", "pdf", "object", "tree" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L17-L25
train
236,881
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
get_near_items
def get_near_items(tree, tree_key): """ Check both possible neighbors for key in a binary tree """ try: yield tree.floor_item(tree_key) except KeyError: pass try: yield tree.ceiling_item(tree_key) except KeyError: pass
python
def get_near_items(tree, tree_key): """ Check both possible neighbors for key in a binary tree """ try: yield tree.floor_item(tree_key) except KeyError: pass try: yield tree.ceiling_item(tree_key) except KeyError: pass
[ "def", "get_near_items", "(", "tree", ",", "tree_key", ")", ":", "try", ":", "yield", "tree", ".", "floor_item", "(", "tree_key", ")", "except", "KeyError", ":", "pass", "try", ":", "yield", "tree", ".", "ceiling_item", "(", "tree_key", ")", "except", "KeyError", ":", "pass" ]
Check both possible neighbors for key in a binary tree
[ "Check", "both", "possible", "neighbors", "for", "key", "in", "a", "binary", "tree" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L28-L40
train
236,882
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
align_add
def align_add(tree, key, item, align_thres=2.0): """ Adding the item object to a binary tree with the given key while allow for small key differences close_enough_func that checks if two keys are within threshold """ for near_key, near_list in get_near_items(tree, key): if abs(key - near_key) < align_thres: near_list.append(item) return # Create a new group if no items are close tree[key] = [item]
python
def align_add(tree, key, item, align_thres=2.0): """ Adding the item object to a binary tree with the given key while allow for small key differences close_enough_func that checks if two keys are within threshold """ for near_key, near_list in get_near_items(tree, key): if abs(key - near_key) < align_thres: near_list.append(item) return # Create a new group if no items are close tree[key] = [item]
[ "def", "align_add", "(", "tree", ",", "key", ",", "item", ",", "align_thres", "=", "2.0", ")", ":", "for", "near_key", ",", "near_list", "in", "get_near_items", "(", "tree", ",", "key", ")", ":", "if", "abs", "(", "key", "-", "near_key", ")", "<", "align_thres", ":", "near_list", ".", "append", "(", "item", ")", "return", "# Create a new group if no items are close", "tree", "[", "key", "]", "=", "[", "item", "]" ]
Adding the item object to a binary tree with the given key while allow for small key differences close_enough_func that checks if two keys are within threshold
[ "Adding", "the", "item", "object", "to", "a", "binary", "tree", "with", "the", "given", "key", "while", "allow", "for", "small", "key", "differences", "close_enough_func", "that", "checks", "if", "two", "keys", "are", "within", "threshold" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L43-L55
train
236,883
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
collect_table_content
def collect_table_content(table_bboxes, elems): """ Returns a list of elements that are contained inside the corresponding supplied bbox. """ # list of table content chars table_contents = [[] for _ in range(len(table_bboxes))] prev_content = None prev_bbox = None for cid, c in enumerate(elems): # Annotations should not fall outside alone if isinstance(c, LTAnno): if prev_content is not None: prev_content.append(c) continue # Generally speaking table contents should be included sequentially # and we can avoid checking all tables for elems inside # Elements only need to intersect the bbox for table as some # formatting of fonts may result in slightly out of bbox text if prev_bbox is not None and intersect(prev_bbox, c.bbox): prev_content.append(c) continue # Search the rest of the tables for membership when done with # the current one for table_id, table_bbox in enumerate(table_bboxes): if intersect(table_bbox, c.bbox): prev_bbox = table_bbox prev_content = table_contents[table_id] prev_content.append(c) break return table_contents
python
def collect_table_content(table_bboxes, elems): """ Returns a list of elements that are contained inside the corresponding supplied bbox. """ # list of table content chars table_contents = [[] for _ in range(len(table_bboxes))] prev_content = None prev_bbox = None for cid, c in enumerate(elems): # Annotations should not fall outside alone if isinstance(c, LTAnno): if prev_content is not None: prev_content.append(c) continue # Generally speaking table contents should be included sequentially # and we can avoid checking all tables for elems inside # Elements only need to intersect the bbox for table as some # formatting of fonts may result in slightly out of bbox text if prev_bbox is not None and intersect(prev_bbox, c.bbox): prev_content.append(c) continue # Search the rest of the tables for membership when done with # the current one for table_id, table_bbox in enumerate(table_bboxes): if intersect(table_bbox, c.bbox): prev_bbox = table_bbox prev_content = table_contents[table_id] prev_content.append(c) break return table_contents
[ "def", "collect_table_content", "(", "table_bboxes", ",", "elems", ")", ":", "# list of table content chars", "table_contents", "=", "[", "[", "]", "for", "_", "in", "range", "(", "len", "(", "table_bboxes", ")", ")", "]", "prev_content", "=", "None", "prev_bbox", "=", "None", "for", "cid", ",", "c", "in", "enumerate", "(", "elems", ")", ":", "# Annotations should not fall outside alone", "if", "isinstance", "(", "c", ",", "LTAnno", ")", ":", "if", "prev_content", "is", "not", "None", ":", "prev_content", ".", "append", "(", "c", ")", "continue", "# Generally speaking table contents should be included sequentially", "# and we can avoid checking all tables for elems inside", "# Elements only need to intersect the bbox for table as some", "# formatting of fonts may result in slightly out of bbox text", "if", "prev_bbox", "is", "not", "None", "and", "intersect", "(", "prev_bbox", ",", "c", ".", "bbox", ")", ":", "prev_content", ".", "append", "(", "c", ")", "continue", "# Search the rest of the tables for membership when done with", "# the current one", "for", "table_id", ",", "table_bbox", "in", "enumerate", "(", "table_bboxes", ")", ":", "if", "intersect", "(", "table_bbox", ",", "c", ".", "bbox", ")", ":", "prev_bbox", "=", "table_bbox", "prev_content", "=", "table_contents", "[", "table_id", "]", "prev_content", ".", "append", "(", "c", ")", "break", "return", "table_contents" ]
Returns a list of elements that are contained inside the corresponding supplied bbox.
[ "Returns", "a", "list", "of", "elements", "that", "are", "contained", "inside", "the", "corresponding", "supplied", "bbox", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L120-L150
train
236,884
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
project_onto
def project_onto(objs, axis, min_gap_size=4.0): """ Projects object bboxes onto the axis and return the unioned intervals and groups of objects in intervals. """ if axis == "x": axis = 0 if axis == "y": axis = 1 axis_end = axis + 2 if axis == 0: # if projecting onto X axis objs.sort(key=lambda o: o.x0) else: objs.sort(key=lambda o: o.y0) intervals = [] groups = [] start_i = 0 start = objs[0].bbox[axis] end = objs[0].bbox[axis_end] # Use _inf_bbox to trigger the last interval divide for o_i, o in enumerate(chain(objs, [_inf_bbox])): # Get current interval o_start = o.bbox[axis] o_end = o.bbox[axis_end] # start new interval when gap with previous end is big if o_start > end + min_gap_size: # Append new interval coordinates for children intervals.append((start, end)) # Append child object group on page groups.append(objs[start_i:o_i]) # Mark next obj list range start_i = o_i start = o_start # Always check to extend current interval to new end if o_end > end: end = o_end # else do nothing return intervals, groups
python
def project_onto(objs, axis, min_gap_size=4.0): """ Projects object bboxes onto the axis and return the unioned intervals and groups of objects in intervals. """ if axis == "x": axis = 0 if axis == "y": axis = 1 axis_end = axis + 2 if axis == 0: # if projecting onto X axis objs.sort(key=lambda o: o.x0) else: objs.sort(key=lambda o: o.y0) intervals = [] groups = [] start_i = 0 start = objs[0].bbox[axis] end = objs[0].bbox[axis_end] # Use _inf_bbox to trigger the last interval divide for o_i, o in enumerate(chain(objs, [_inf_bbox])): # Get current interval o_start = o.bbox[axis] o_end = o.bbox[axis_end] # start new interval when gap with previous end is big if o_start > end + min_gap_size: # Append new interval coordinates for children intervals.append((start, end)) # Append child object group on page groups.append(objs[start_i:o_i]) # Mark next obj list range start_i = o_i start = o_start # Always check to extend current interval to new end if o_end > end: end = o_end # else do nothing return intervals, groups
[ "def", "project_onto", "(", "objs", ",", "axis", ",", "min_gap_size", "=", "4.0", ")", ":", "if", "axis", "==", "\"x\"", ":", "axis", "=", "0", "if", "axis", "==", "\"y\"", ":", "axis", "=", "1", "axis_end", "=", "axis", "+", "2", "if", "axis", "==", "0", ":", "# if projecting onto X axis", "objs", ".", "sort", "(", "key", "=", "lambda", "o", ":", "o", ".", "x0", ")", "else", ":", "objs", ".", "sort", "(", "key", "=", "lambda", "o", ":", "o", ".", "y0", ")", "intervals", "=", "[", "]", "groups", "=", "[", "]", "start_i", "=", "0", "start", "=", "objs", "[", "0", "]", ".", "bbox", "[", "axis", "]", "end", "=", "objs", "[", "0", "]", ".", "bbox", "[", "axis_end", "]", "# Use _inf_bbox to trigger the last interval divide", "for", "o_i", ",", "o", "in", "enumerate", "(", "chain", "(", "objs", ",", "[", "_inf_bbox", "]", ")", ")", ":", "# Get current interval", "o_start", "=", "o", ".", "bbox", "[", "axis", "]", "o_end", "=", "o", ".", "bbox", "[", "axis_end", "]", "# start new interval when gap with previous end is big", "if", "o_start", ">", "end", "+", "min_gap_size", ":", "# Append new interval coordinates for children", "intervals", ".", "append", "(", "(", "start", ",", "end", ")", ")", "# Append child object group on page", "groups", ".", "append", "(", "objs", "[", "start_i", ":", "o_i", "]", ")", "# Mark next obj list range", "start_i", "=", "o_i", "start", "=", "o_start", "# Always check to extend current interval to new end", "if", "o_end", ">", "end", ":", "end", "=", "o_end", "# else do nothing", "return", "intervals", ",", "groups" ]
Projects object bboxes onto the axis and return the unioned intervals and groups of objects in intervals.
[ "Projects", "object", "bboxes", "onto", "the", "axis", "and", "return", "the", "unioned", "intervals", "and", "groups", "of", "objects", "in", "intervals", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L168-L214
train
236,885
HazyResearch/pdftotree
pdftotree/utils/pdf/render.py
Renderer.draw_rect
def draw_rect(self, bbox, cell_val): """ Fills the bbox with the content values Float bbox values are normalized to have non-zero area """ new_x0 = int(bbox[x0]) new_y0 = int(bbox[y0]) new_x1 = max(new_x0 + 1, int(bbox[x1])) new_y1 = max(new_y0 + 1, int(bbox[y1])) self.grid[new_x0:new_x1, new_y0:new_y1] = cell_val
python
def draw_rect(self, bbox, cell_val): """ Fills the bbox with the content values Float bbox values are normalized to have non-zero area """ new_x0 = int(bbox[x0]) new_y0 = int(bbox[y0]) new_x1 = max(new_x0 + 1, int(bbox[x1])) new_y1 = max(new_y0 + 1, int(bbox[y1])) self.grid[new_x0:new_x1, new_y0:new_y1] = cell_val
[ "def", "draw_rect", "(", "self", ",", "bbox", ",", "cell_val", ")", ":", "new_x0", "=", "int", "(", "bbox", "[", "x0", "]", ")", "new_y0", "=", "int", "(", "bbox", "[", "y0", "]", ")", "new_x1", "=", "max", "(", "new_x0", "+", "1", ",", "int", "(", "bbox", "[", "x1", "]", ")", ")", "new_y1", "=", "max", "(", "new_y0", "+", "1", ",", "int", "(", "bbox", "[", "y1", "]", ")", ")", "self", ".", "grid", "[", "new_x0", ":", "new_x1", ",", "new_y0", ":", "new_y1", "]", "=", "cell_val" ]
Fills the bbox with the content values Float bbox values are normalized to have non-zero area
[ "Fills", "the", "bbox", "with", "the", "content", "values", "Float", "bbox", "values", "are", "normalized", "to", "have", "non", "-", "zero", "area" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/render.py#L57-L67
train
236,886
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_parsers.py
parse_layout
def parse_layout(elems, font_stat, combine=False): """ Parses pdf texts into a hypergraph grouped into rows and columns and then output """ boxes_segments = elems.segments boxes_curves = elems.curves boxes_figures = elems.figures page_width = elems.layout.width # page_height = elems.layout.height boxes = elems.mentions avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat) width = get_page_width(boxes + boxes_segments + boxes_figures + boxes_curves) char_width = get_char_width(boxes) grid_size = avg_font_pts / 2.0 for i, m in enumerate(boxes + elems.figures): m.id = i m.feats = defaultdict(bool) prefix = "" if isinstance(m, LTTextLine) and m.font_name: prefix = m.font_name + "-" + str(m.font_size) + "-" m.xc = (m.x0 + m.x1) / 2.0 m.yc = (m.y0 + m.y1) / 2.0 m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size tbls, tbl_features = cluster_vertically_aligned_boxes( boxes, elems.layout.bbox, avg_font_pts, width, char_width, boxes_segments, boxes_curves, boxes_figures, page_width, combine, ) return tbls, tbl_features
python
def parse_layout(elems, font_stat, combine=False): """ Parses pdf texts into a hypergraph grouped into rows and columns and then output """ boxes_segments = elems.segments boxes_curves = elems.curves boxes_figures = elems.figures page_width = elems.layout.width # page_height = elems.layout.height boxes = elems.mentions avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat) width = get_page_width(boxes + boxes_segments + boxes_figures + boxes_curves) char_width = get_char_width(boxes) grid_size = avg_font_pts / 2.0 for i, m in enumerate(boxes + elems.figures): m.id = i m.feats = defaultdict(bool) prefix = "" if isinstance(m, LTTextLine) and m.font_name: prefix = m.font_name + "-" + str(m.font_size) + "-" m.xc = (m.x0 + m.x1) / 2.0 m.yc = (m.y0 + m.y1) / 2.0 m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size tbls, tbl_features = cluster_vertically_aligned_boxes( boxes, elems.layout.bbox, avg_font_pts, width, char_width, boxes_segments, boxes_curves, boxes_figures, page_width, combine, ) return tbls, tbl_features
[ "def", "parse_layout", "(", "elems", ",", "font_stat", ",", "combine", "=", "False", ")", ":", "boxes_segments", "=", "elems", ".", "segments", "boxes_curves", "=", "elems", ".", "curves", "boxes_figures", "=", "elems", ".", "figures", "page_width", "=", "elems", ".", "layout", ".", "width", "# page_height = elems.layout.height", "boxes", "=", "elems", ".", "mentions", "avg_font_pts", "=", "get_most_common_font_pts", "(", "elems", ".", "mentions", ",", "font_stat", ")", "width", "=", "get_page_width", "(", "boxes", "+", "boxes_segments", "+", "boxes_figures", "+", "boxes_curves", ")", "char_width", "=", "get_char_width", "(", "boxes", ")", "grid_size", "=", "avg_font_pts", "/", "2.0", "for", "i", ",", "m", "in", "enumerate", "(", "boxes", "+", "elems", ".", "figures", ")", ":", "m", ".", "id", "=", "i", "m", ".", "feats", "=", "defaultdict", "(", "bool", ")", "prefix", "=", "\"\"", "if", "isinstance", "(", "m", ",", "LTTextLine", ")", "and", "m", ".", "font_name", ":", "prefix", "=", "m", ".", "font_name", "+", "\"-\"", "+", "str", "(", "m", ".", "font_size", ")", "+", "\"-\"", "m", ".", "xc", "=", "(", "m", ".", "x0", "+", "m", ".", "x1", ")", "/", "2.0", "m", ".", "yc", "=", "(", "m", ".", "y0", "+", "m", ".", "y1", ")", "/", "2.0", "m", ".", "feats", "[", "prefix", "+", "\"x0\"", "]", "=", "m", ".", "x0_grid", "=", "m", ".", "x0", "//", "grid_size", "m", ".", "feats", "[", "prefix", "+", "\"x1\"", "]", "=", "m", ".", "x1_grid", "=", "m", ".", "x1", "//", "grid_size", "m", ".", "feats", "[", "prefix", "+", "\"xc\"", "]", "=", "m", ".", "xc_grid", "=", "m", ".", "xc", "//", "grid_size", "m", ".", "feats", "[", "prefix", "+", "\"yc\"", "]", "=", "m", ".", "yc_grid", "=", "m", ".", "yc", "//", "grid_size", "tbls", ",", "tbl_features", "=", "cluster_vertically_aligned_boxes", "(", "boxes", ",", "elems", ".", "layout", ".", "bbox", ",", "avg_font_pts", ",", "width", ",", "char_width", ",", "boxes_segments", ",", "boxes_curves", ",", "boxes_figures", ",", 
"page_width", ",", "combine", ",", ")", "return", "tbls", ",", "tbl_features" ]
Parses pdf texts into a hypergraph grouped into rows and columns and then output
[ "Parses", "pdf", "texts", "into", "a", "hypergraph", "grouped", "into", "rows", "and", "columns", "and", "then", "output" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_parsers.py#L23-L63
train
236,887
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_parsers.py
merge_nodes
def merge_nodes(nodes, plane, page_stat, merge_indices): """ Merges overlapping nodes """ # Merge inner boxes to the best outer box # nodes.sort(key=Node.area) to_be_removed = set() for inner_idx in range(len(nodes)): inner = nodes[inner_idx] outers = [] outers_indices = [] for outer_idx in range(len(nodes)): outer = nodes[outer_idx] if outer is inner or outer in to_be_removed: continue if intersect(outer.bbox, inner.bbox): outers.append(outer) outers_indices.append(outer_idx) if not outers: continue # Best is defined as min L1 distance to outer center best_outer = min( outers, key=lambda outer: l1(center(outer.bbox), center(inner.bbox)) ) best_outer_idx = outers_indices[outers.index(best_outer)] to_be_removed.add(inner) best_outer.merge(inner) for cid_iter in range(len(merge_indices)): if merge_indices[cid_iter] == merge_indices[inner_idx]: merge_indices[cid_iter] = merge_indices[best_outer_idx] return nodes, merge_indices
python
def merge_nodes(nodes, plane, page_stat, merge_indices): """ Merges overlapping nodes """ # Merge inner boxes to the best outer box # nodes.sort(key=Node.area) to_be_removed = set() for inner_idx in range(len(nodes)): inner = nodes[inner_idx] outers = [] outers_indices = [] for outer_idx in range(len(nodes)): outer = nodes[outer_idx] if outer is inner or outer in to_be_removed: continue if intersect(outer.bbox, inner.bbox): outers.append(outer) outers_indices.append(outer_idx) if not outers: continue # Best is defined as min L1 distance to outer center best_outer = min( outers, key=lambda outer: l1(center(outer.bbox), center(inner.bbox)) ) best_outer_idx = outers_indices[outers.index(best_outer)] to_be_removed.add(inner) best_outer.merge(inner) for cid_iter in range(len(merge_indices)): if merge_indices[cid_iter] == merge_indices[inner_idx]: merge_indices[cid_iter] = merge_indices[best_outer_idx] return nodes, merge_indices
[ "def", "merge_nodes", "(", "nodes", ",", "plane", ",", "page_stat", ",", "merge_indices", ")", ":", "# Merge inner boxes to the best outer box", "# nodes.sort(key=Node.area)", "to_be_removed", "=", "set", "(", ")", "for", "inner_idx", "in", "range", "(", "len", "(", "nodes", ")", ")", ":", "inner", "=", "nodes", "[", "inner_idx", "]", "outers", "=", "[", "]", "outers_indices", "=", "[", "]", "for", "outer_idx", "in", "range", "(", "len", "(", "nodes", ")", ")", ":", "outer", "=", "nodes", "[", "outer_idx", "]", "if", "outer", "is", "inner", "or", "outer", "in", "to_be_removed", ":", "continue", "if", "intersect", "(", "outer", ".", "bbox", ",", "inner", ".", "bbox", ")", ":", "outers", ".", "append", "(", "outer", ")", "outers_indices", ".", "append", "(", "outer_idx", ")", "if", "not", "outers", ":", "continue", "# Best is defined as min L1 distance to outer center", "best_outer", "=", "min", "(", "outers", ",", "key", "=", "lambda", "outer", ":", "l1", "(", "center", "(", "outer", ".", "bbox", ")", ",", "center", "(", "inner", ".", "bbox", ")", ")", ")", "best_outer_idx", "=", "outers_indices", "[", "outers", ".", "index", "(", "best_outer", ")", "]", "to_be_removed", ".", "add", "(", "inner", ")", "best_outer", ".", "merge", "(", "inner", ")", "for", "cid_iter", "in", "range", "(", "len", "(", "merge_indices", ")", ")", ":", "if", "merge_indices", "[", "cid_iter", "]", "==", "merge_indices", "[", "inner_idx", "]", ":", "merge_indices", "[", "cid_iter", "]", "=", "merge_indices", "[", "best_outer_idx", "]", "return", "nodes", ",", "merge_indices" ]
Merges overlapping nodes
[ "Merges", "overlapping", "nodes" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_parsers.py#L1266-L1296
train
236,888
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
_get_cols
def _get_cols(row_content): """ Counting the number columns based on the content of this row """ cols = [] subcell_col = [] prev_bar = None for _coord, item in row_content: if isinstance(item, LTTextLine): subcell_col.append(item) else: # bar, add column content # When there is no content, we count a None column if prev_bar: bar_ranges = (prev_bar, item) col_items = subcell_col if subcell_col else [None] cols.extend([bar_ranges, col_items]) prev_bar = item subcell_col = [] # Remove extra column before first bar return cols
python
def _get_cols(row_content): """ Counting the number columns based on the content of this row """ cols = [] subcell_col = [] prev_bar = None for _coord, item in row_content: if isinstance(item, LTTextLine): subcell_col.append(item) else: # bar, add column content # When there is no content, we count a None column if prev_bar: bar_ranges = (prev_bar, item) col_items = subcell_col if subcell_col else [None] cols.extend([bar_ranges, col_items]) prev_bar = item subcell_col = [] # Remove extra column before first bar return cols
[ "def", "_get_cols", "(", "row_content", ")", ":", "cols", "=", "[", "]", "subcell_col", "=", "[", "]", "prev_bar", "=", "None", "for", "_coord", ",", "item", "in", "row_content", ":", "if", "isinstance", "(", "item", ",", "LTTextLine", ")", ":", "subcell_col", ".", "append", "(", "item", ")", "else", ":", "# bar, add column content", "# When there is no content, we count a None column", "if", "prev_bar", ":", "bar_ranges", "=", "(", "prev_bar", ",", "item", ")", "col_items", "=", "subcell_col", "if", "subcell_col", "else", "[", "None", "]", "cols", ".", "extend", "(", "[", "bar_ranges", ",", "col_items", "]", ")", "prev_bar", "=", "item", "subcell_col", "=", "[", "]", "# Remove extra column before first bar", "return", "cols" ]
Counting the number columns based on the content of this row
[ "Counting", "the", "number", "columns", "based", "on", "the", "content", "of", "this", "row" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L187-L206
train
236,889
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
_one_contains_other
def _one_contains_other(s1, s2): """ Whether one set contains the other """ return min(len(s1), len(s2)) == len(s1 & s2)
python
def _one_contains_other(s1, s2): """ Whether one set contains the other """ return min(len(s1), len(s2)) == len(s1 & s2)
[ "def", "_one_contains_other", "(", "s1", ",", "s2", ")", ":", "return", "min", "(", "len", "(", "s1", ")", ",", "len", "(", "s2", ")", ")", "==", "len", "(", "s1", "&", "s2", ")" ]
Whether one set contains the other
[ "Whether", "one", "set", "contains", "the", "other" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L241-L245
train
236,890
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
Node.is_table
def is_table(self): """ Count the node's number of mention al ignment in both axes to determine if the node is a table. """ if self.type_counts["text"] < 6 or "figure" in self.type_counts: return False for e in self.elems: # Characters written as curve are usually small, discard diagrams here if elem_type(e) == "curve" and e.height * e.width > 100: return False # import re # space_re = '\\s+' # ws_arr = [] # whitespace_aligned = False # for elem in self.elems: # elem_ws = [] # for m in re.finditer(space_re, elem.get_text()): # elem_ws.append(m.start()) # # print elem, elem_ws # if(len(elem_ws)>0): # ws_arr.append(elem_ws) # # print ws_arr # if(len(ws_arr)>0): # count_arr = max([ws_arr.count(i) for i in ws_arr]) # if(float(count_arr)/len(ws_arr) > 0.75): # return True if ( self.sum_elem_bbox / (self.height * self.width) ) > self.table_area_threshold: return False has_many_x_align = False has_many_y_align = False for k, v in six.iteritems(self.feat_counts): font_key = k[0] if ( v >= 2 and "-" in font_key ): # Text row or column with more than 2 elements if font_key[-2] == "x": has_many_x_align = True if font_key[-2] == "y": has_many_y_align = True return has_many_x_align and has_many_y_align
python
def is_table(self): """ Count the node's number of mention al ignment in both axes to determine if the node is a table. """ if self.type_counts["text"] < 6 or "figure" in self.type_counts: return False for e in self.elems: # Characters written as curve are usually small, discard diagrams here if elem_type(e) == "curve" and e.height * e.width > 100: return False # import re # space_re = '\\s+' # ws_arr = [] # whitespace_aligned = False # for elem in self.elems: # elem_ws = [] # for m in re.finditer(space_re, elem.get_text()): # elem_ws.append(m.start()) # # print elem, elem_ws # if(len(elem_ws)>0): # ws_arr.append(elem_ws) # # print ws_arr # if(len(ws_arr)>0): # count_arr = max([ws_arr.count(i) for i in ws_arr]) # if(float(count_arr)/len(ws_arr) > 0.75): # return True if ( self.sum_elem_bbox / (self.height * self.width) ) > self.table_area_threshold: return False has_many_x_align = False has_many_y_align = False for k, v in six.iteritems(self.feat_counts): font_key = k[0] if ( v >= 2 and "-" in font_key ): # Text row or column with more than 2 elements if font_key[-2] == "x": has_many_x_align = True if font_key[-2] == "y": has_many_y_align = True return has_many_x_align and has_many_y_align
[ "def", "is_table", "(", "self", ")", ":", "if", "self", ".", "type_counts", "[", "\"text\"", "]", "<", "6", "or", "\"figure\"", "in", "self", ".", "type_counts", ":", "return", "False", "for", "e", "in", "self", ".", "elems", ":", "# Characters written as curve are usually small, discard diagrams here", "if", "elem_type", "(", "e", ")", "==", "\"curve\"", "and", "e", ".", "height", "*", "e", ".", "width", ">", "100", ":", "return", "False", "# import re", "# space_re = '\\\\s+'", "# ws_arr = []", "# whitespace_aligned = False", "# for elem in self.elems:", "# elem_ws = []", "# for m in re.finditer(space_re, elem.get_text()):", "# elem_ws.append(m.start())", "# # print elem, elem_ws", "# if(len(elem_ws)>0):", "# ws_arr.append(elem_ws)", "# # print ws_arr", "# if(len(ws_arr)>0):", "# count_arr = max([ws_arr.count(i) for i in ws_arr])", "# if(float(count_arr)/len(ws_arr) > 0.75):", "# return True", "if", "(", "self", ".", "sum_elem_bbox", "/", "(", "self", ".", "height", "*", "self", ".", "width", ")", ")", ">", "self", ".", "table_area_threshold", ":", "return", "False", "has_many_x_align", "=", "False", "has_many_y_align", "=", "False", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "self", ".", "feat_counts", ")", ":", "font_key", "=", "k", "[", "0", "]", "if", "(", "v", ">=", "2", "and", "\"-\"", "in", "font_key", ")", ":", "# Text row or column with more than 2 elements", "if", "font_key", "[", "-", "2", "]", "==", "\"x\"", ":", "has_many_x_align", "=", "True", "if", "font_key", "[", "-", "2", "]", "==", "\"y\"", ":", "has_many_y_align", "=", "True", "return", "has_many_x_align", "and", "has_many_y_align" ]
Count the node's number of mention al ignment in both axes to determine if the node is a table.
[ "Count", "the", "node", "s", "number", "of", "mention", "al", "ignment", "in", "both", "axes", "to", "determine", "if", "the", "node", "is", "a", "table", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L73-L115
train
236,891
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
Node.get_grid
def get_grid(self): """ Standardize the layout of the table into grids """ mentions, lines = _split_text_n_lines(self.elems) # Sort mentions in reading order where y values are snapped to half # height-sized grid mentions.sort(key=lambda m: (m.yc_grid, m.xc)) grid = Grid(mentions, lines, self) return grid
python
def get_grid(self): """ Standardize the layout of the table into grids """ mentions, lines = _split_text_n_lines(self.elems) # Sort mentions in reading order where y values are snapped to half # height-sized grid mentions.sort(key=lambda m: (m.yc_grid, m.xc)) grid = Grid(mentions, lines, self) return grid
[ "def", "get_grid", "(", "self", ")", ":", "mentions", ",", "lines", "=", "_split_text_n_lines", "(", "self", ".", "elems", ")", "# Sort mentions in reading order where y values are snapped to half", "# height-sized grid", "mentions", ".", "sort", "(", "key", "=", "lambda", "m", ":", "(", "m", ".", "yc_grid", ",", "m", ".", "xc", ")", ")", "grid", "=", "Grid", "(", "mentions", ",", "lines", ",", "self", ")", "return", "grid" ]
Standardize the layout of the table into grids
[ "Standardize", "the", "layout", "of", "the", "table", "into", "grids" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L118-L128
train
236,892
HazyResearch/pdftotree
pdftotree/utils/img_utils.py
lazy_load_font
def lazy_load_font(font_size=default_font_size): """ Lazy loading font according to system platform """ if font_size not in _font_cache: if _platform.startswith("darwin"): font_path = "/Library/Fonts/Arial.ttf" elif _platform.startswith("linux"): font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf" elif _platform.startswith("win32"): font_path = "C:\\Windows\\Fonts\\arial.ttf" _font_cache[font_size] = ImageFont.truetype(font_path, font_size) return _font_cache[font_size]
python
def lazy_load_font(font_size=default_font_size): """ Lazy loading font according to system platform """ if font_size not in _font_cache: if _platform.startswith("darwin"): font_path = "/Library/Fonts/Arial.ttf" elif _platform.startswith("linux"): font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf" elif _platform.startswith("win32"): font_path = "C:\\Windows\\Fonts\\arial.ttf" _font_cache[font_size] = ImageFont.truetype(font_path, font_size) return _font_cache[font_size]
[ "def", "lazy_load_font", "(", "font_size", "=", "default_font_size", ")", ":", "if", "font_size", "not", "in", "_font_cache", ":", "if", "_platform", ".", "startswith", "(", "\"darwin\"", ")", ":", "font_path", "=", "\"/Library/Fonts/Arial.ttf\"", "elif", "_platform", ".", "startswith", "(", "\"linux\"", ")", ":", "font_path", "=", "\"/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf\"", "elif", "_platform", ".", "startswith", "(", "\"win32\"", ")", ":", "font_path", "=", "\"C:\\\\Windows\\\\Fonts\\\\arial.ttf\"", "_font_cache", "[", "font_size", "]", "=", "ImageFont", ".", "truetype", "(", "font_path", ",", "font_size", ")", "return", "_font_cache", "[", "font_size", "]" ]
Lazy loading font according to system platform
[ "Lazy", "loading", "font", "according", "to", "system", "platform" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/img_utils.py#L24-L36
train
236,893
HazyResearch/pdftotree
pdftotree/utils/img_utils.py
render_debug_img
def render_debug_img( file_name, page_num, elems, nodes=[], scaler=1, print_segments=False, print_curves=True, print_table_bbox=True, print_text_as_rect=True, ): """ Shows an image rendering of the pdf page along with debugging info printed """ # For debugging show the boolean pixels in black white grayscale height = scaler * int(elems.layout.height) width = scaler * int(elems.layout.width) debug_img, draw = create_img((0, 0, width, height)) font = lazy_load_font() large_font = lazy_load_font(24) if print_curves: for i, c in enumerate(elems.curves): if len(c.pts) > 1: draw.polygon(c.pts, outline=blue) draw.rectangle(c.bbox, fill=blue) # for fig in elems.figures: # draw.rectangle(fig.bbox, fill = blue) for i, m in enumerate(elems.mentions): if isinstance(m, LTAnno): continue if print_text_as_rect: fill = "pink" if hasattr(m, "feats") and m.feats["is_cell"] else green # fill = green draw.rectangle(m.bbox, fill=fill) # draw.text(center(m.bbox), str(i), black, font = font) # Draw id draw.text( m.bbox[:2], m.get_text(), black, font=font ) # Draw mention content else: draw.text(m.bbox[:2], m.get_text(), "black", font=font) if print_segments: # draw skeleton for all segments for i, s in enumerate(elems.segments): draw.line(s.bbox, fill="black") if print_table_bbox: for node in nodes: is_table = node.is_table() color = "red" if is_table else "green" draw.rectangle(node.bbox, outline=color) if is_table: # text = 'Borderless' if node.is_borderless() else 'Bordered' text = "Table" draw.rectangle(node.bbox, outline=color) draw.text(node.bbox[:2], text, red, font=large_font) # Water mark with file name so we can identify among multiple images if file_name and page_num is not None: water_mark = ( file_name + ":page " + str(page_num + 1) + "@%dx%d" % (width, height) ) draw.text((10, 10), water_mark, black, font=font) debug_img.show() return debug_img
python
def render_debug_img( file_name, page_num, elems, nodes=[], scaler=1, print_segments=False, print_curves=True, print_table_bbox=True, print_text_as_rect=True, ): """ Shows an image rendering of the pdf page along with debugging info printed """ # For debugging show the boolean pixels in black white grayscale height = scaler * int(elems.layout.height) width = scaler * int(elems.layout.width) debug_img, draw = create_img((0, 0, width, height)) font = lazy_load_font() large_font = lazy_load_font(24) if print_curves: for i, c in enumerate(elems.curves): if len(c.pts) > 1: draw.polygon(c.pts, outline=blue) draw.rectangle(c.bbox, fill=blue) # for fig in elems.figures: # draw.rectangle(fig.bbox, fill = blue) for i, m in enumerate(elems.mentions): if isinstance(m, LTAnno): continue if print_text_as_rect: fill = "pink" if hasattr(m, "feats") and m.feats["is_cell"] else green # fill = green draw.rectangle(m.bbox, fill=fill) # draw.text(center(m.bbox), str(i), black, font = font) # Draw id draw.text( m.bbox[:2], m.get_text(), black, font=font ) # Draw mention content else: draw.text(m.bbox[:2], m.get_text(), "black", font=font) if print_segments: # draw skeleton for all segments for i, s in enumerate(elems.segments): draw.line(s.bbox, fill="black") if print_table_bbox: for node in nodes: is_table = node.is_table() color = "red" if is_table else "green" draw.rectangle(node.bbox, outline=color) if is_table: # text = 'Borderless' if node.is_borderless() else 'Bordered' text = "Table" draw.rectangle(node.bbox, outline=color) draw.text(node.bbox[:2], text, red, font=large_font) # Water mark with file name so we can identify among multiple images if file_name and page_num is not None: water_mark = ( file_name + ":page " + str(page_num + 1) + "@%dx%d" % (width, height) ) draw.text((10, 10), water_mark, black, font=font) debug_img.show() return debug_img
[ "def", "render_debug_img", "(", "file_name", ",", "page_num", ",", "elems", ",", "nodes", "=", "[", "]", ",", "scaler", "=", "1", ",", "print_segments", "=", "False", ",", "print_curves", "=", "True", ",", "print_table_bbox", "=", "True", ",", "print_text_as_rect", "=", "True", ",", ")", ":", "# For debugging show the boolean pixels in black white grayscale", "height", "=", "scaler", "*", "int", "(", "elems", ".", "layout", ".", "height", ")", "width", "=", "scaler", "*", "int", "(", "elems", ".", "layout", ".", "width", ")", "debug_img", ",", "draw", "=", "create_img", "(", "(", "0", ",", "0", ",", "width", ",", "height", ")", ")", "font", "=", "lazy_load_font", "(", ")", "large_font", "=", "lazy_load_font", "(", "24", ")", "if", "print_curves", ":", "for", "i", ",", "c", "in", "enumerate", "(", "elems", ".", "curves", ")", ":", "if", "len", "(", "c", ".", "pts", ")", ">", "1", ":", "draw", ".", "polygon", "(", "c", ".", "pts", ",", "outline", "=", "blue", ")", "draw", ".", "rectangle", "(", "c", ".", "bbox", ",", "fill", "=", "blue", ")", "# for fig in elems.figures:", "# draw.rectangle(fig.bbox, fill = blue)", "for", "i", ",", "m", "in", "enumerate", "(", "elems", ".", "mentions", ")", ":", "if", "isinstance", "(", "m", ",", "LTAnno", ")", ":", "continue", "if", "print_text_as_rect", ":", "fill", "=", "\"pink\"", "if", "hasattr", "(", "m", ",", "\"feats\"", ")", "and", "m", ".", "feats", "[", "\"is_cell\"", "]", "else", "green", "# fill = green", "draw", ".", "rectangle", "(", "m", ".", "bbox", ",", "fill", "=", "fill", ")", "# draw.text(center(m.bbox), str(i), black, font = font) # Draw id", "draw", ".", "text", "(", "m", ".", "bbox", "[", ":", "2", "]", ",", "m", ".", "get_text", "(", ")", ",", "black", ",", "font", "=", "font", ")", "# Draw mention content", "else", ":", "draw", ".", "text", "(", "m", ".", "bbox", "[", ":", "2", "]", ",", "m", ".", "get_text", "(", ")", ",", "\"black\"", ",", "font", "=", "font", ")", "if", "print_segments", ":", "# draw 
skeleton for all segments", "for", "i", ",", "s", "in", "enumerate", "(", "elems", ".", "segments", ")", ":", "draw", ".", "line", "(", "s", ".", "bbox", ",", "fill", "=", "\"black\"", ")", "if", "print_table_bbox", ":", "for", "node", "in", "nodes", ":", "is_table", "=", "node", ".", "is_table", "(", ")", "color", "=", "\"red\"", "if", "is_table", "else", "\"green\"", "draw", ".", "rectangle", "(", "node", ".", "bbox", ",", "outline", "=", "color", ")", "if", "is_table", ":", "# text = 'Borderless' if node.is_borderless() else 'Bordered'", "text", "=", "\"Table\"", "draw", ".", "rectangle", "(", "node", ".", "bbox", ",", "outline", "=", "color", ")", "draw", ".", "text", "(", "node", ".", "bbox", "[", ":", "2", "]", ",", "text", ",", "red", ",", "font", "=", "large_font", ")", "# Water mark with file name so we can identify among multiple images", "if", "file_name", "and", "page_num", "is", "not", "None", ":", "water_mark", "=", "(", "file_name", "+", "\":page \"", "+", "str", "(", "page_num", "+", "1", ")", "+", "\"@%dx%d\"", "%", "(", "width", ",", "height", ")", ")", "draw", ".", "text", "(", "(", "10", ",", "10", ")", ",", "water_mark", ",", "black", ",", "font", "=", "font", ")", "debug_img", ".", "show", "(", ")", "return", "debug_img" ]
Shows an image rendering of the pdf page along with debugging info printed
[ "Shows", "an", "image", "rendering", "of", "the", "pdf", "page", "along", "with", "debugging", "info", "printed" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/img_utils.py#L93-L160
train
236,894
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_partition_estimators
def _partition_estimators(n_estimators, n_jobs): """Private function used to partition estimators between jobs.""" # Compute the number of jobs if n_jobs == -1: n_jobs = min(cpu_count(), n_estimators) else: n_jobs = min(n_jobs, n_estimators) # Partition estimators between jobs n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=np.int) n_estimators_per_job[:n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
python
def _partition_estimators(n_estimators, n_jobs): """Private function used to partition estimators between jobs.""" # Compute the number of jobs if n_jobs == -1: n_jobs = min(cpu_count(), n_estimators) else: n_jobs = min(n_jobs, n_estimators) # Partition estimators between jobs n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=np.int) n_estimators_per_job[:n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
[ "def", "_partition_estimators", "(", "n_estimators", ",", "n_jobs", ")", ":", "# Compute the number of jobs", "if", "n_jobs", "==", "-", "1", ":", "n_jobs", "=", "min", "(", "cpu_count", "(", ")", ",", "n_estimators", ")", "else", ":", "n_jobs", "=", "min", "(", "n_jobs", ",", "n_estimators", ")", "# Partition estimators between jobs", "n_estimators_per_job", "=", "(", "n_estimators", "//", "n_jobs", ")", "*", "np", ".", "ones", "(", "n_jobs", ",", "dtype", "=", "np", ".", "int", ")", "n_estimators_per_job", "[", ":", "n_estimators", "%", "n_jobs", "]", "+=", "1", "starts", "=", "np", ".", "cumsum", "(", "n_estimators_per_job", ")", "return", "n_jobs", ",", "n_estimators_per_job", ".", "tolist", "(", ")", ",", "[", "0", "]", "+", "starts", ".", "tolist", "(", ")" ]
Private function used to partition estimators between jobs.
[ "Private", "function", "used", "to", "partition", "estimators", "between", "jobs", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L36-L51
train
236,895
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_parallel_build_estimators
def _parallel_build_estimators(n_estimators, ensemble, X, y, cost_mat, seeds, verbose): """Private function used to build a batch of estimators within a job.""" # Retrieve settings n_samples, n_features = X.shape max_samples = ensemble.max_samples max_features = ensemble.max_features if (not isinstance(max_samples, (numbers.Integral, np.integer)) and (0.0 < max_samples <= 1.0)): max_samples = int(max_samples * n_samples) if (not isinstance(max_features, (numbers.Integral, np.integer)) and (0.0 < max_features <= 1.0)): max_features = int(max_features * n_features) bootstrap = ensemble.bootstrap bootstrap_features = ensemble.bootstrap_features # Build estimators estimators = [] estimators_samples = [] estimators_features = [] for i in range(n_estimators): if verbose > 1: print(("building estimator %d of %d" % (i + 1, n_estimators))) random_state = check_random_state(seeds[i]) seed = check_random_state(random_state.randint(MAX_INT)) estimator = ensemble._make_estimator(append=False) try: # Not all estimator accept a random_state estimator.set_params(random_state=seed) except ValueError: pass # Draw features if bootstrap_features: features = random_state.randint(0, n_features, max_features) else: features = sample_without_replacement(n_features, max_features, random_state=random_state) # Draw samples, using a mask, and then fit if bootstrap: indices = random_state.randint(0, n_samples, max_samples) else: indices = sample_without_replacement(n_samples, max_samples, random_state=random_state) sample_counts = np.bincount(indices, minlength=n_samples) estimator.fit((X[indices])[:, features], y[indices], cost_mat[indices, :]) samples = sample_counts > 0. estimators.append(estimator) estimators_samples.append(samples) estimators_features.append(features) return estimators, estimators_samples, estimators_features
python
def _parallel_build_estimators(n_estimators, ensemble, X, y, cost_mat, seeds, verbose): """Private function used to build a batch of estimators within a job.""" # Retrieve settings n_samples, n_features = X.shape max_samples = ensemble.max_samples max_features = ensemble.max_features if (not isinstance(max_samples, (numbers.Integral, np.integer)) and (0.0 < max_samples <= 1.0)): max_samples = int(max_samples * n_samples) if (not isinstance(max_features, (numbers.Integral, np.integer)) and (0.0 < max_features <= 1.0)): max_features = int(max_features * n_features) bootstrap = ensemble.bootstrap bootstrap_features = ensemble.bootstrap_features # Build estimators estimators = [] estimators_samples = [] estimators_features = [] for i in range(n_estimators): if verbose > 1: print(("building estimator %d of %d" % (i + 1, n_estimators))) random_state = check_random_state(seeds[i]) seed = check_random_state(random_state.randint(MAX_INT)) estimator = ensemble._make_estimator(append=False) try: # Not all estimator accept a random_state estimator.set_params(random_state=seed) except ValueError: pass # Draw features if bootstrap_features: features = random_state.randint(0, n_features, max_features) else: features = sample_without_replacement(n_features, max_features, random_state=random_state) # Draw samples, using a mask, and then fit if bootstrap: indices = random_state.randint(0, n_samples, max_samples) else: indices = sample_without_replacement(n_samples, max_samples, random_state=random_state) sample_counts = np.bincount(indices, minlength=n_samples) estimator.fit((X[indices])[:, features], y[indices], cost_mat[indices, :]) samples = sample_counts > 0. estimators.append(estimator) estimators_samples.append(samples) estimators_features.append(features) return estimators, estimators_samples, estimators_features
[ "def", "_parallel_build_estimators", "(", "n_estimators", ",", "ensemble", ",", "X", ",", "y", ",", "cost_mat", ",", "seeds", ",", "verbose", ")", ":", "# Retrieve settings", "n_samples", ",", "n_features", "=", "X", ".", "shape", "max_samples", "=", "ensemble", ".", "max_samples", "max_features", "=", "ensemble", ".", "max_features", "if", "(", "not", "isinstance", "(", "max_samples", ",", "(", "numbers", ".", "Integral", ",", "np", ".", "integer", ")", ")", "and", "(", "0.0", "<", "max_samples", "<=", "1.0", ")", ")", ":", "max_samples", "=", "int", "(", "max_samples", "*", "n_samples", ")", "if", "(", "not", "isinstance", "(", "max_features", ",", "(", "numbers", ".", "Integral", ",", "np", ".", "integer", ")", ")", "and", "(", "0.0", "<", "max_features", "<=", "1.0", ")", ")", ":", "max_features", "=", "int", "(", "max_features", "*", "n_features", ")", "bootstrap", "=", "ensemble", ".", "bootstrap", "bootstrap_features", "=", "ensemble", ".", "bootstrap_features", "# Build estimators", "estimators", "=", "[", "]", "estimators_samples", "=", "[", "]", "estimators_features", "=", "[", "]", "for", "i", "in", "range", "(", "n_estimators", ")", ":", "if", "verbose", ">", "1", ":", "print", "(", "(", "\"building estimator %d of %d\"", "%", "(", "i", "+", "1", ",", "n_estimators", ")", ")", ")", "random_state", "=", "check_random_state", "(", "seeds", "[", "i", "]", ")", "seed", "=", "check_random_state", "(", "random_state", ".", "randint", "(", "MAX_INT", ")", ")", "estimator", "=", "ensemble", ".", "_make_estimator", "(", "append", "=", "False", ")", "try", ":", "# Not all estimator accept a random_state", "estimator", ".", "set_params", "(", "random_state", "=", "seed", ")", "except", "ValueError", ":", "pass", "# Draw features", "if", "bootstrap_features", ":", "features", "=", "random_state", ".", "randint", "(", "0", ",", "n_features", ",", "max_features", ")", "else", ":", "features", "=", "sample_without_replacement", "(", "n_features", ",", 
"max_features", ",", "random_state", "=", "random_state", ")", "# Draw samples, using a mask, and then fit", "if", "bootstrap", ":", "indices", "=", "random_state", ".", "randint", "(", "0", ",", "n_samples", ",", "max_samples", ")", "else", ":", "indices", "=", "sample_without_replacement", "(", "n_samples", ",", "max_samples", ",", "random_state", "=", "random_state", ")", "sample_counts", "=", "np", ".", "bincount", "(", "indices", ",", "minlength", "=", "n_samples", ")", "estimator", ".", "fit", "(", "(", "X", "[", "indices", "]", ")", "[", ":", ",", "features", "]", ",", "y", "[", "indices", "]", ",", "cost_mat", "[", "indices", ",", ":", "]", ")", "samples", "=", "sample_counts", ">", "0.", "estimators", ".", "append", "(", "estimator", ")", "estimators_samples", ".", "append", "(", "samples", ")", "estimators_features", ".", "append", "(", "features", ")", "return", "estimators", ",", "estimators_samples", ",", "estimators_features" ]
Private function used to build a batch of estimators within a job.
[ "Private", "function", "used", "to", "build", "a", "batch", "of", "estimators", "within", "a", "job", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L54-L116
train
236,896
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_parallel_predict
def _parallel_predict(estimators, estimators_features, X, n_classes, combination, estimators_weight): """Private function used to compute predictions within a job.""" n_samples = X.shape[0] pred = np.zeros((n_samples, n_classes)) n_estimators = len(estimators) for estimator, features, weight in zip(estimators, estimators_features, estimators_weight): # Resort to voting predictions = estimator.predict(X[:, features]) for i in range(n_samples): if combination == 'weighted_voting': pred[i, int(predictions[i])] += 1 * weight else: pred[i, int(predictions[i])] += 1 return pred
python
def _parallel_predict(estimators, estimators_features, X, n_classes, combination, estimators_weight): """Private function used to compute predictions within a job.""" n_samples = X.shape[0] pred = np.zeros((n_samples, n_classes)) n_estimators = len(estimators) for estimator, features, weight in zip(estimators, estimators_features, estimators_weight): # Resort to voting predictions = estimator.predict(X[:, features]) for i in range(n_samples): if combination == 'weighted_voting': pred[i, int(predictions[i])] += 1 * weight else: pred[i, int(predictions[i])] += 1 return pred
[ "def", "_parallel_predict", "(", "estimators", ",", "estimators_features", ",", "X", ",", "n_classes", ",", "combination", ",", "estimators_weight", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "pred", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "n_classes", ")", ")", "n_estimators", "=", "len", "(", "estimators", ")", "for", "estimator", ",", "features", ",", "weight", "in", "zip", "(", "estimators", ",", "estimators_features", ",", "estimators_weight", ")", ":", "# Resort to voting", "predictions", "=", "estimator", ".", "predict", "(", "X", "[", ":", ",", "features", "]", ")", "for", "i", "in", "range", "(", "n_samples", ")", ":", "if", "combination", "==", "'weighted_voting'", ":", "pred", "[", "i", ",", "int", "(", "predictions", "[", "i", "]", ")", "]", "+=", "1", "*", "weight", "else", ":", "pred", "[", "i", ",", "int", "(", "predictions", "[", "i", "]", ")", "]", "+=", "1", "return", "pred" ]
Private function used to compute predictions within a job.
[ "Private", "function", "used", "to", "compute", "predictions", "within", "a", "job", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L134-L150
train
236,897
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_create_stacking_set
def _create_stacking_set(estimators, estimators_features, estimators_weight, X, combination): """Private function used to create the stacking training set.""" n_samples = X.shape[0] valid_estimators = np.nonzero(estimators_weight)[0] n_valid_estimators = valid_estimators.shape[0] X_stacking = np.zeros((n_samples, n_valid_estimators)) for e in range(n_valid_estimators): if combination in ['stacking', 'stacking_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict(X[:, estimators_features[valid_estimators[e]]]) elif combination in ['stacking_proba', 'stacking_proba_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict_proba(X[:, estimators_features[valid_estimators[e]]])[:, 1] return X_stacking
python
def _create_stacking_set(estimators, estimators_features, estimators_weight, X, combination): """Private function used to create the stacking training set.""" n_samples = X.shape[0] valid_estimators = np.nonzero(estimators_weight)[0] n_valid_estimators = valid_estimators.shape[0] X_stacking = np.zeros((n_samples, n_valid_estimators)) for e in range(n_valid_estimators): if combination in ['stacking', 'stacking_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict(X[:, estimators_features[valid_estimators[e]]]) elif combination in ['stacking_proba', 'stacking_proba_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict_proba(X[:, estimators_features[valid_estimators[e]]])[:, 1] return X_stacking
[ "def", "_create_stacking_set", "(", "estimators", ",", "estimators_features", ",", "estimators_weight", ",", "X", ",", "combination", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "valid_estimators", "=", "np", ".", "nonzero", "(", "estimators_weight", ")", "[", "0", "]", "n_valid_estimators", "=", "valid_estimators", ".", "shape", "[", "0", "]", "X_stacking", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "n_valid_estimators", ")", ")", "for", "e", "in", "range", "(", "n_valid_estimators", ")", ":", "if", "combination", "in", "[", "'stacking'", ",", "'stacking_bmr'", "]", ":", "X_stacking", "[", ":", ",", "e", "]", "=", "estimators", "[", "valid_estimators", "[", "e", "]", "]", ".", "predict", "(", "X", "[", ":", ",", "estimators_features", "[", "valid_estimators", "[", "e", "]", "]", "]", ")", "elif", "combination", "in", "[", "'stacking_proba'", ",", "'stacking_proba_bmr'", "]", ":", "X_stacking", "[", ":", ",", "e", "]", "=", "estimators", "[", "valid_estimators", "[", "e", "]", "]", ".", "predict_proba", "(", "X", "[", ":", ",", "estimators_features", "[", "valid_estimators", "[", "e", "]", "]", "]", ")", "[", ":", ",", "1", "]", "return", "X_stacking" ]
Private function used to create the stacking training set.
[ "Private", "function", "used", "to", "create", "the", "stacking", "training", "set", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L153-L167
train
236,898
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
BaseBagging._fit_bmr_model
def _fit_bmr_model(self, X, y): """Private function used to fit the BayesMinimumRisk model.""" self.f_bmr = BayesMinimumRiskClassifier() X_bmr = self.predict_proba(X) self.f_bmr.fit(y, X_bmr) return self
python
def _fit_bmr_model(self, X, y): """Private function used to fit the BayesMinimumRisk model.""" self.f_bmr = BayesMinimumRiskClassifier() X_bmr = self.predict_proba(X) self.f_bmr.fit(y, X_bmr) return self
[ "def", "_fit_bmr_model", "(", "self", ",", "X", ",", "y", ")", ":", "self", ".", "f_bmr", "=", "BayesMinimumRiskClassifier", "(", ")", "X_bmr", "=", "self", ".", "predict_proba", "(", "X", ")", "self", ".", "f_bmr", ".", "fit", "(", "y", ",", "X_bmr", ")", "return", "self" ]
Private function used to fit the BayesMinimumRisk model.
[ "Private", "function", "used", "to", "fit", "the", "BayesMinimumRisk", "model", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L295-L300
train
236,899