func_code_string
stringlengths
52
1.94M
func_documentation_string
stringlengths
1
47.2k
def get_func(func, aliasing, implementations): try: func_str = aliasing[func] except KeyError: if callable(func): return func else: if func_str in implementations: return func_str if func_str.startswith('nan') and \ func_str[3:] in...
Return the key of a found implementation or the func itself
def minimum_dtype(x, dtype=np.bool_): def check_type(x, dtype): try: converted = dtype.type(x) except (ValueError, OverflowError): return False # False if some overflow has happened return converted == x or np.isnan(x) def type_loop(x, dtype, dtype_di...
returns the "most basic" dtype which represents `x` properly, which provides at least the same value range as the specified dtype.
def input_validation(group_idx, a, size=None, order='C', axis=None, ravel_group_idx=True, check_bounds=True): if not isinstance(a, (int, float, complex)): a = np.asanyarray(a) group_idx = np.asanyarray(group_idx) if not np.issubdtype(group_idx.dtype, np.integer): ra...
Do some fairly extensive checking of group_idx and a, trying to give the user as much help as possible with what is wrong. Also, convert ndim-indexing to 1d indexing.
def multi_arange(n): if n.ndim != 1: raise ValueError("n is supposed to be 1d array.") n_mask = n.astype(bool) n_cumsum = np.cumsum(n) ret = np.ones(n_cumsum[-1] + 1, dtype=int) ret[n_cumsum[n_mask]] -= n[n_mask] ret[0] -= 1 return np.cumsum(ret)[:-1]
By example: # 0 1 2 3 4 5 6 7 8 n = [0, 0, 3, 0, 0, 2, 0, 2, 1] res = [0, 1, 2, 0, 1, 0, 1, 0] That is it is equivalent to something like this : hstack((arange(n_i) for n_i in n)) This version seems quite a bit faster, at least for some possible...
def label_contiguous_1d(X): if X.ndim != 1: raise ValueError("this is for 1d masks only.") is_start = np.empty(len(X), dtype=bool) is_start[0] = X[0] # True if X[0] is True or non-zero if X.dtype.kind == 'b': is_start[1:] = ~X[:-1] & X[1:] M = X else: M = X.asty...
WARNING: API for this function is not liable to change!!! By example: X = [F T T F F T F F F T T T] result = [0 1 1 0 0 2 0 0 0 3 3 3] Or: X = [0 3 3 0 0 5 5 5 1 1 0 2] result = [0 1 1 0 0 2 2 2 3 3 0 4] The ``0`` or ``False`` elements of ``X`` a...
def relabel_groups_unique(group_idx): keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool) keep_group[0] = True keep_group[group_idx] = True return relabel_groups_masked(group_idx, keep_group)
See also ``relabel_groups_masked``. keep_group: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5] ret: [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4] Description of above: unique groups in input was ``1,2,3,5``, i.e. ``4`` was missing, so group 5 was relabled to be ``4``. Relabeling maintains order, just "compres...
def relabel_groups_masked(group_idx, keep_group): keep_group = keep_group.astype(bool, copy=not keep_group[0]) if not keep_group[0]: # ensuring keep_group[0] is True makes life easier keep_group[0] = True relabel = np.zeros(keep_group.size, dtype=group_idx.dtype) relabel[keep_group] = np.a...
group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5] 0 1 2 3 4 5 keep_group: [0 1 0 1 1 1] ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4] Description of above in words: remove group 2, and relabel group 3,4, and 5 to be 2, 3 and 4 respecitvely, in order to fill the gap. Note that group...
def _array(group_idx, a, size, fill_value, dtype=None): if fill_value is not None and not (np.isscalar(fill_value) or len(fill_value) == 0): raise ValueError("fill_value must be None, a scalar or an empty " "sequence") order_group_idx ...
groups a into separate arrays, keeping the order intact.
def _generic_callable(group_idx, a, size, fill_value, dtype=None, func=lambda g: g, **kwargs): groups = _array(group_idx, a, size, ()) ret = np.full(size, fill_value, dtype=dtype or np.float64) for i, grp in enumerate(groups): if np.ndim(grp) == 1 and len(grp) > 0: ...
groups a by inds, and then applies foo to each group in turn, placing the results in an array.
def _cumsum(group_idx, a, size, fill_value=None, dtype=None): sortidx = np.argsort(group_idx, kind='mergesort') invsortidx = np.argsort(sortidx, kind='mergesort') group_idx_srt = group_idx[sortidx] a_srt = a[sortidx] a_srt_cumsum = np.cumsum(a_srt, dtype=dtype) increasing = np.arange(len(a)...
N to N aggregate operation of cumsum. Perform cumulative sum for each group. group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1]) a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8]) _cumsum(group_idx, a, np.max(group_idx) + 1) >>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, ...
def _fill_untouched(idx, ret, fill_value): untouched = np.ones_like(ret, dtype=bool) untouched[idx] = False ret[untouched] = fill_value
any elements of ret not indexed by idx are set to fill_value.
def aggregate_grouploop(*args, **kwargs): extrafuncs = {'allnan': allnan, 'anynan': anynan, 'first': itemgetter(0), 'last': itemgetter(-1), 'nanfirst': nanfirst, 'nanlast': nanlast} func = kwargs.pop('func') func = extrafuncs.get(func, func) if isinstance(func, s...
wraps func in lambda which prevents aggregate_numpy from recognising and optimising it. Instead it groups and loops.
def _prod(group_idx, a, size, fill_value, dtype=None): dtype = minimum_dtype_scalar(fill_value, dtype, a) ret = np.full(size, fill_value, dtype=dtype) if fill_value != 1: ret[group_idx] = 1 # product should start from 1 np.multiply.at(ret, group_idx, a) return ret
Same as aggregate_numpy.py
def c_func(funcname, reverse=False, nans=False, scalar=False): varnames = ['group_idx', 'a', 'ret', 'counter'] codebase = c_base_reverse if reverse else c_base iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname] if scalar: varnames.remove('a') return codebase % dict(init...
Fill c_funcs with constructed code from the templates
def step_indices(group_idx): ilen = step_count(group_idx) + 1 indices = np.empty(ilen, int) indices[0] = 0 indices[-1] = group_idx.size inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args) return indices
Get the edges of areas within group_idx, which are filled with the same value
def __create_proj_mat(self, size): # [1] # return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)], size=size, p=[1 / 6, 2 / 3, 1 / 6]) # [2] s = 1 / self.density return np.random.choice([-np.sqrt(s / self.k), 0, np.sqrt(s / self.k)], size=si...
Create a random projection matrix [1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins. [2] P. Li, et al. Very sparse random projections. http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
def load_movies(data_home, size): all_genres = ['Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', ...
Load movie genres as a context. Returns: dict of movie vectors: item_id -> numpy array (n_genre,)
def load_users(data_home, size): ages = [1, 18, 25, 35, 45, 50, 56, 999] users = {} if size == '100k': all_occupations = ['administrator', 'artist', 'doctor', 'educator', 'engineer', ...
Load user demographics as contexts.User ID -> {sex (M/F), age (7 groupd), occupation(0-20; 21)} Returns: dict of user vectors: user_id -> numpy array (1+1+21,); (sex_flg + age_group + n_occupation, )
def load_ratings(data_home, size): if size == '100k': with open(os.path.join(data_home, 'u.data'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: list(map(int, l.rstrip().split('\t'))), f.readlines())) elif size == '1m': with open(os.path.join(data_home, 'ratings.dat'),...
Load all samples in the dataset.
def delta(d1, d2, opt='d'): delta = 0 if opt == 'm': while True: mdays = monthrange(d1.year, d1.month)[1] d1 += timedelta(days=mdays) if d1 <= d2: delta += 1 else: break else: delta = (d2 - d1).days retu...
Compute difference between given 2 dates in month/day.
def n_feature_hash(feature, dims, seeds): vec = np.zeros(sum(dims)) offset = 0 for seed, dim in zip(seeds, dims): vec[offset:(offset + dim)] = feature_hash(feature, dim, seed) offset += dim return vec
N-hot-encoded feature hashing. Args: feature (str): Target feature represented as string. dims (list of int): Number of dimensions for each hash value. seeds (list of float): Seed of each hash function (mmh3). Returns: numpy 1d array: n-hot-encoded feature vector for `s`.
def feature_hash(feature, dim, seed=123): vec = np.zeros(dim) i = mmh3.hash(feature, seed) % dim vec[i] = 1 return vec
Feature hashing. Args: feature (str): Target feature represented as string. dim (int): Number of dimensions for a hash value. seed (float): Seed of a MurmurHash3 hash function. Returns: numpy 1d array: one-hot-encoded feature vector for `s`.
def count_true_positive(truth, recommend): tp = 0 for r in recommend: if r in truth: tp += 1 return tp
Count number of true positives from given sets of samples. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: int: Number of true positives.
def recall(truth, recommend, k=None): if len(truth) == 0: if len(recommend) == 0: return 1. return 0. if k is None: k = len(recommend) return count_true_positive(truth, recommend[:k]) / float(truth.size)
Recall@k. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: Recall@k.
def precision(truth, recommend, k=None): if len(recommend) == 0: if len(truth) == 0: return 1. return 0. if k is None: k = len(recommend) return count_true_positive(truth, recommend[:k]) / float(k)
Precision@k. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: Precision@k.
def average_precision(truth, recommend): if len(truth) == 0: if len(recommend) == 0: return 1. return 0. tp = accum = 0. for n in range(recommend.size): if recommend[n] in truth: tp += 1. accum += (tp / (n + 1.)) return accum / truth.size
Average Precision (AP). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AP.
def auc(truth, recommend): tp = correct = 0. for r in recommend: if r in truth: # keep track number of true positives placed before tp += 1. else: correct += tp # number of all possible tp-fp pairs pairs = tp * (recommend.size - tp) # if there...
Area under the ROC curve (AUC). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AUC.
def reciprocal_rank(truth, recommend): for n in range(recommend.size): if recommend[n] in truth: return 1. / (n + 1) return 0.
Reciprocal Rank (RR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: RR.
def mpr(truth, recommend): if len(recommend) == 0 and len(truth) == 0: return 0. # best elif len(truth) == 0 or len(truth) == 0: return 100. # worst accum = 0. n_recommend = recommend.size for t in truth: r = np.where(recommend == t)[0][0] / float(n_recommend) ...
Mean Percentile Rank (MPR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: MPR.
def ndcg(truth, recommend, k=None): if k is None: k = len(recommend) def idcg(n_possible_truth): res = 0. for n in range(n_possible_truth): res += 1. / np.log2(n + 2) return res dcg = 0. for n, r in enumerate(recommend[:k]): if r not in truth: ...
Normalized Discounted Cumulative Grain (NDCG). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: NDCG.
def initialize(self, *args): # number of observed users self.n_user = 0 # store user data self.users = {} # number of observed items self.n_item = 0 # store item data self.items = {}
Initialize a recommender by resetting stored users and items.
def register_user(self, user): self.users[user.index] = {'known_items': set()} self.n_user += 1
For new users, append their information into the dictionaries. Args: user (User): User.
def scores2recos(self, scores, candidates, rev=False): sorted_indices = np.argsort(scores) if rev: sorted_indices = sorted_indices[::-1] return candidates[sorted_indices], scores[sorted_indices]
Get recommendation list for a user u_index based on scores. Args: scores (numpy array; (n_target_items,)): Scores for the target items. Smaller score indicates a promising item. candidates (numpy array; (# target items, )): Target items' indices. Only these items are con...
def fit(self, train_events, test_events, n_epoch=1): # make initial status for batch training for e in train_events: self.__validate(e) self.rec.users[e.user.index]['known_items'].add(e.item.index) self.item_buffer.append(e.item.index) # for batch eva...
Train a model using the first 30% positive events to avoid cold-start. Evaluation of this batch training is done by using the next 20% positive events. After the batch SGD training, the models are incrementally updated by using the 20% test events. Args: train_events (list of Event...
def evaluate(self, test_events): for i, e in enumerate(test_events): self.__validate(e) # target items (all or unobserved depending on a detaset) unobserved = set(self.item_buffer) if not self.repeat: unobserved -= self.rec.users[e.user.in...
Iterate recommend/update procedure and compute incremental recall. Args: test_events (list of Event): Positive test events. Returns: list of tuples: (rank, recommend time, update time)
def __batch_update(self, train_events, test_events, n_epoch): for epoch in range(n_epoch): # SGD requires us to shuffle events in each iteration # * if n_epoch == 1 # => shuffle is not required because it is a deterministic training (i.e. matrix sketching) ...
Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training.
def __batch_evaluate(self, test_events): percentiles = np.zeros(len(test_events)) all_items = set(self.item_buffer) for i, e in enumerate(test_events): # check if the data allows users to interact the same items repeatedly unobserved = all_items if no...
Evaluate the current model by using the given test events. Args: test_events (list of Event): Current model is evaluated by these events. Returns: float: Mean Percentile Rank for the test set.
def _scale_x_values(self, values, max_width): if type(values) == dict: values = self._scale_x_values_timestamps(values=values, max_width=max_width) adjusted_values = list(values) if len(adjusted_values) > max_width: def get_position(current_pos): ...
Scale X values to new width
def _scale_x_values_timestamps(self, values, max_width): first_timestamp = float(values[0][0]) last_timestamp = float(values[-1][0]) step_size = (last_timestamp - first_timestamp) / max_width values_by_column = [[] for i in range(max_width)] for timestamp, value in value...
Scale X values to new width based on timestamps
def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True): # Scale Y values - Create a scaled list of values to use for the visual graph scaled_values = [] y_min_value = min(values) if scale_old_from_zero: y_min_value = 0 y_max_value = max...
Take values and transmute them into a new range
def _get_ascii_field(self, values): empty_space = ' ' # This formats as field[x][y] field = [[empty_space for y in range(max(values) + 1)] for x in range(len(values))] # Draw graph into field for x in range(len(values)): y = values[x] y_prev = val...
Create a representation of an ascii graph using two lists in this format: field[x][y] = "char"
def _assign_ascii_character(self, y_prev, y, y_next): # noqa for complexity char = '?' if y_next > y and y_prev > y: char = '-' elif y_next < y and y_prev < y: char = '-' elif y_prev < y and y == y_next: char =...
Assign the character to be placed into the graph
def _draw_ascii_graph(self, field): row_strings = [] for y in range(len(field[0])): row = '' for x in range(len(field)): row += field[x][y] row_strings.insert(0, row) graph_string = '\n'.join(row_strings) return graph_string
Draw graph from field double nested list, format field[x][y] = char
def asciigraph(self, values=None, max_height=None, max_width=None, label=False): result = '' border_fill_char = '*' start_ctime = None end_ctime = None if not max_width: max_width = 180 # If this is a dict of timestamp -> value, sort the data, store t...
Accepts a list of y values and returns an ascii graph Optionally values can also be a dictionary with a key of timestamp, and a value of value. InGraphs returns data in this format for example.
def substitute(expression: Union[Expression, Pattern], substitution: Substitution) -> Replacement: if isinstance(expression, Pattern): expression = expression.expression return _substitute(expression, substitution)[0]
Replaces variables in the given *expression* using the given *substitution*. >>> print(substitute(f(x_), {'x': a})) f(a) If nothing was substituted, the original expression is returned: >>> expression = f(x_) >>> result = substitute(expression, {'y': a}) >>> print(result) f(x_) >>> ex...
def replace(expression: Expression, position: Sequence[int], replacement: Replacement) -> Replacement: r if len(position) == 0: return replacement if not isinstance(expression, Operation): raise IndexError("Invalid position {!r} for expression {!s}".format(position, expression)) if posit...
r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`. The original `expression` itself is not modified, but a modified copy is returned. If the replacement is a list of expressions, it will be expanded into the list of operands of the respective operation: >>...
def replace_many(expression: Expression, replacements: Sequence[Tuple[Sequence[int], Replacement]]) -> Replacement: r if len(replacements) == 0: return expression replacements = sorted(replacements) if len(replacements[0][0]) == 0: if len(replacements) > 1: raise IndexError( ...
r"""Replaces the subexpressions of *expression* at the given positions with the given replacements. The original *expression* itself is not modified, but a modified copy is returned. If the replacement is a sequence of expressions, it will be expanded into the list of operands of the respective operation. ...
def replace_all(expression: Expression, rules: Iterable[ReplacementRule], max_count: int=math.inf) \ -> Union[Expression, Sequence[Expression]]: rules = [ReplacementRule(pattern, replacement) for pattern, replacement in rules] expression = expression replaced = True replace_count = 0 wh...
Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern*, that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever...
def replace_all_post_order(expression: Expression, rules: Iterable[ReplacementRule]) \ -> Union[Expression, Sequence[Expression]]: return _replace_all_post_order(expression, rules)[0]
Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern*, that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever...
def is_match(subject: Expression, pattern: Expression) -> bool: return any(True for _ in match(subject, pattern))
Check whether the given *subject* matches given *pattern*. Args: subject: The subject. pattern: The pattern. Returns: True iff the subject matches the pattern.
def as_graph(self) -> Graph: # pragma: no cover if Graph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Graph() nodes_left = {} # type: Dict[TLeft, str] nodes_right = {} # type: Dict[TRight, str] node_id = 0 ...
Returns a :class:`graphviz.Graph` representation of this bipartite graph.
def find_matching(self) -> Dict[TLeft, TRight]: # The directed graph is represented as a dictionary of edges # The key is the tail of all edges which are represented by the value # The value is a set of heads for the all edges originating from the tail (key) # In addition, the g...
Finds a matching in the bipartite graph. This is done using the Hopcroft-Karp algorithm with an implementation from the `hopcroftkarp` package. Returns: A dictionary where each edge of the matching is represented by a key-value pair with the key being from the left part...
def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1])
Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed.
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': return BipartiteGraph((e2, v) for e2, v in self._edges.items() if edge != e2)
Returns a copy of this bipartite graph with the given edge removed.
def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 in left and n2 in right)
Returns the induced subgraph where only the nodes from the given sets are included.
def as_graph(self) -> Digraph: # pragma: no cover if Digraph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Digraph() subgraphs = [Digraph(graph_attr={'rank': 'same'}), Digraph(graph_attr={'rank': 'same'})] nodes = [{}, {}]...
Returns a :class:`graphviz.Digraph` representation of this directed match graph.
def is_constant(expression): if isinstance(expression, Wildcard): return False if isinstance(expression, Expression): return expression.is_constant if isinstance(expression, Operation): return all(is_constant(o) for o in op_iter(expression)) return True
Check if the given expression is constant, i.e. it does not contain Wildcards.
def is_syntactic(expression): if isinstance(expression, Wildcard): return expression.fixed_size if isinstance(expression, Expression): return expression.is_syntactic if isinstance(expression, (AssociativeOperation, CommutativeOperation)): return False if isinstance(expressio...
Check if the given expression is syntactic, i.e. it does not contain sequence wildcards or associative/commutative operations.
def get_head(expression): if isinstance(expression, Wildcard): if isinstance(expression, SymbolWildcard): return expression.symbol_type return None return type(expression)
Returns the given expression's head.
def match_head(subject, pattern): if isinstance(pattern, Pattern): pattern = pattern.expression pattern_head = get_head(pattern) if pattern_head is None: return True if issubclass(pattern_head, OneIdentityOperation): return True subject_head = get_head(subject) asser...
Checks if the head of subject matches the pattern's head.
def preorder_iter(expression): yield expression if isinstance(expression, Operation): for operand in op_iter(expression): yield from preorder_iter(operand)
Iterate over the expression in preorder.
def preorder_iter_with_position(expression): yield expression, () if isinstance(expression, Operation): for i, operand in enumerate(op_iter(expression)): for child, pos in preorder_iter_with_position(operand): yield child, (i, ) + pos
Iterate over the expression in preorder. Also yields the position of each subexpression.
def is_anonymous(expression): if hasattr(expression, 'variable_name') and expression.variable_name: return False if isinstance(expression, Operation): return all(is_anonymous(o) for o in op_iter(expression)) return True
Returns True iff the expression does not contain any variables.
def contains_variables_from_set(expression, variables): if hasattr(expression, 'variable_name') and expression.variable_name in variables: return True if isinstance(expression, Operation): return any(contains_variables_from_set(o, variables) for o in op_iter(expression)) return False
Returns True iff the expression contains any of the variables from the given set.
def get_variables(expression, variables=None): if variables is None: variables = set() if hasattr(expression, 'variable_name') and expression.variable_name is not None: variables.add(expression.variable_name) if isinstance(expression, Operation): for operand in op_iter(expressio...
Returns the set of variable names in the given expression.
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression: if isinstance(expression, Operation): if hasattr(expression, 'variable_name'): variable_name = renaming.get(expression.variable_name, expression.variable_name) return create_operation_expressio...
Rename the variables in the expression according to the given dictionary. Args: expression: The expression in which the variables are renamed. renaming: The renaming dictionary. Maps old variable names to new ones. Variable names not occuring in the dictionary ar...
def fixed_integer_vector_iter(max_vector: Tuple[int, ...], vector_sum: int) -> Iterator[Tuple[int, ...]]: if vector_sum < 0: raise ValueError("Vector sum must not be negative") if len(max_vector) == 0: if vector_sum == 0: yield tuple() return total = sum(max_vector) ...
Return an iterator over the integer vectors which - are componentwise less than or equal to *max_vector*, and - are non-negative, and where - the sum of their components is exactly *vector_sum*. The iterator yields the vectors in lexicographical order. Examples: List all vectors that are...
def weak_composition_iter(n: int, num_parts: int) -> Iterator[Tuple[int, ...]]: if n < 0: raise ValueError("Total must not be negative") if num_parts < 0: raise ValueError("Number of num_parts must not be negative") if num_parts == 0: if n == 0: yield tuple() ...
Yield all weak compositions of integer *n* into *num_parts* parts. Each composition is yielded as a tuple. The generated partitions are order-dependant and not unique when ignoring the order of the components. The partitions are yielded in lexicographical order. Example: >>> compositions = list(w...
def commutative_sequence_variable_partition_iter(values: Multiset, variables: List[VariableWithCount] ) -> Iterator[Dict[str, Multiset]]: if len(variables) == 1: yield from _commutative_single_variable_partiton_iter(values, variables[0]) return ...
Yield all possible variable substitutions for given values and variables. .. note:: The results are not yielded in any particular order because the algorithm uses dictionaries. Dictionaries until Python 3.6 do not keep track of the insertion order. Example: For a subject like ``fc(a,...
def get_short_lambda_source(lambda_func: LambdaType) -> Optional[str]: try: all_source_lines, lnum = inspect.findsource(lambda_func) source_lines, _ = inspect.getsourcelines(lambda_func) except (IOError, TypeError): return None all_source_lines = [l.rstrip('\r\n') for l in all_s...
Return the source of a (short) lambda function. If it's impossible to obtain, return ``None``. The source is returned without the ``lambda`` and signature parts: >>> get_short_lambda_source(lambda x, y: x < y) 'x < y' This should work well for most lambda definitions, however for multi-line or hi...
def extended_euclid(a: int, b: int) -> Tuple[int, int, int]: if b == 0: return (1, 0, a) x0, y0, d = extended_euclid(b, a % b) x, y = y0, x0 - (a // b) * y0 return (x, y, d)
Extended Euclidean algorithm that computes the Bézout coefficients as well as :math:`gcd(a, b)` Returns ``x, y, d`` where *x* and *y* are a solution to :math:`ax + by = d` and :math:`d = gcd(a, b)`. *x* and *y* are a minimal pair of Bézout's coefficients. See `Extended Euclidean algorithm <https://en.wiki...
def base_solution_linear(a: int, b: int, c: int) -> Iterator[Tuple[int, int]]: r if a <= 0 or b <= 0: raise ValueError('Coefficients a and b must be positive integers.') if c < 0: raise ValueError('Constant c must not be negative.') d = math.gcd(a, math.gcd(b, c)) a = a // d b = ...
r"""Yield solutions for a basic linear Diophantine equation of the form :math:`ax + by = c`. First, the equation is normalized by dividing :math:`a, b, c` by their gcd. Then, the extended Euclidean algorithm (:func:`extended_euclid`) is used to find a base solution :math:`(x_0, y_0)`. All non-negative sol...
def solve_linear_diop(total: int, *coeffs: int) -> Iterator[Tuple[int, ...]]: r if len(coeffs) == 0: if total == 0: yield tuple() return if len(coeffs) == 1: if total % coeffs[0] == 0: yield (total // coeffs[0], ) return if len(coeffs) == 2: ...
r"""Yield non-negative integer solutions of a linear Diophantine equation of the format :math:`c_1 x_1 + \dots + c_n x_n = total`. If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions. Otherwise, the solutions are found recursively, by reducing the number of v...
def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]: generator_count = len(factories) if generator_count == 0: yield initial_data return generators = [None] * generator_count # type: List[Optional[Iterator[T]]] next_data = initial_data ge...
Chain multiple generators together by passing results from one to the next. This helper function allows to create a chain of generator where each generator is constructed by a factory that gets the data yielded by the previous generator. So each generator can generate new data dependant on the data yielded...
def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None: if variable_name not in self: self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement else: existing_value = self[variable_name] i...
Try to add the variable with its replacement to the substitution. This considers an existing replacement and will only succeed if the new replacement can be merged with the old replacement. Merging can occur if either the two replacements are equivalent. Replacements can also be merged if the o...
def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution': new_subst = Substitution(self) new_subst.try_add_variable(variable, replacement) return new_subst
Try to create a new substitution with the given variable added. See :meth:`try_add_variable` for a version of this method that modifies the substitution in place. Args: variable_name: The name of the variable to add. replacement: The subs...
def extract_substitution(self, subject: 'expressions.Expression', pattern: 'expressions.Expression') -> bool: if getattr(pattern, 'variable_name', False): try: self.try_add_variable(pattern.variable_name, subject) except ValueError: return False ...
Extract the variable substitution for the given pattern and subject. This assumes that subject and pattern already match when being considered as linear. Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here. All that this method does is checking whether all th...
def union(self, *others: 'Substitution') -> 'Substitution': new_subst = Substitution(self) for other in others: for variable_name, replacement in other.items(): new_subst.try_add_variable(variable_name, replacement) return new_subst
Try to merge the substitutions. If a variable occurs in multiple substitutions, try to merge the replacements. See :meth:`union_with_variable` to see how replacements are merged. Does not modify any of the original substitutions. Example: >>> subst1 = Substitution({'x': Multi...
def rename(self, renaming: Dict[str, str]) -> 'Substitution': return Substitution((renaming.get(name, name), value) for name, value in self.items())
Return a copy of the substitution with renamed variables. Example: Rename the variable *x* to *y*: >>> subst = Substitution({'x': a}) >>> subst.rename({'x': 'y'}) {'y': Symbol('a')} Args: renaming: A dictionary mapping old v...
def is_operation(term: Any) -> bool:
    """Return True iff the given term is a subclass of :class:`.Operation`."""
    if not isinstance(term, type):
        return False
    return issubclass(term, Operation)
Return True iff the given term is a subclass of :class:`.Operation`.
def is_symbol_wildcard(term: Any) -> bool:
    """Return True iff the given term is a subclass of :class:`.Symbol`."""
    if not isinstance(term, type):
        return False
    return issubclass(term, Symbol)
Return True iff the given term is a subclass of :class:`.Symbol`.
def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]:
    """Return the symbol-wildcard transition key matching *symbol* in *state*.

    Searches the state's transition keys for a symbol wildcard (a subclass of
    :class:`.Symbol`) that *symbol* is an instance of. Returns the first such
    key in iteration order, or None when no such key exists.
    """
    for key in state.keys():
        if is_symbol_wildcard(key) and isinstance(symbol, key):
            return key
    return None
Return the transition target for the given symbol type from the given state or None if it does not exist.
def _term_str(term: TermAtom) -> str: # pragma: no cover if is_operation(term): return term.name + '(' elif is_symbol_wildcard(term): return '*{!s}'.format(term.__name__) elif isinstance(term, Wildcard): return '*{!s}{!s}'.format(term.min_count, (not term.fixed_size) and '+' or...
Return a string representation of a term atom.
def is_syntactic(self):
    """True, iff the flatterm is :term:`syntactic`.

    A flatterm is syntactic when it contains neither variable-length wildcards
    nor associative/commutative operations.
    """
    for atom in self._terms:
        is_var_length_wildcard = isinstance(atom, Wildcard) and not atom.fixed_size
        is_ac_operation = is_operation(atom) and issubclass(atom, (AssociativeOperation, CommutativeOperation))
        if is_var_length_wildcard or is_ac_operation:
            return False
    return True
True, iff the flatterm is :term:`syntactic`.
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':
    """Concatenate the given flatterms into a single flatterm.

    Consecutive wildcards arising at the concatenation points are combined
    via :meth:`_combined_wildcards_iter`.

    Args:
        *flatterms: The flatterms which are concatenated.

    Returns:
        The concatenated flatterms.
    """
    concatenated = sum(flatterms, cls.empty())
    return cls(cls._combined_wildcards_iter(concatenated))
Concatenate the given flatterms to a single flatterm. Args: *flatterms: The flatterms which are concatenated. Returns: The concatenated flatterms.
def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]: if isinstance(expression, Operation): yield type(expression) for operand in op_iter(expression): yield from cls._flatterm_iter(operand) yield OPERATION_END elif isinstance(...
Generator that yields the atoms of the expressions in prefix notation with operation end markers.
def _combined_wildcards_iter(flatterm: Iterator[TermAtom]) -> Iterator[TermAtom]: last_wildcard = None # type: Optional[Wildcard] for term in flatterm: if isinstance(term, Wildcard) and not isinstance(term, SymbolWildcard): if last_wildcard is not None: ...
Combine consecutive wildcards in a flatterm into a single one.
def labels(self) -> Set[TransitionLabel]: labels = set() # type: Set[TransitionLabel] if self.state1 is not None and self.fixed != 1: labels.update(self.state1.keys()) if self.state2 is not None and self.fixed != 2: labels.update(self.state2.keys()) if s...
Return the set of transition labels to examine for this queue state. This is the union of the transition label sets for both states. However, if one of the states is fixed, it is excluded from this union and a wildcard transition is included instead. Also, when already in a failed state (one of...
def add(self, pattern: Union[Pattern, FlatTerm], final_label: T=None) -> int: index = len(self._patterns) self._patterns.append((pattern, final_label)) flatterm = FlatTerm(pattern.expression) if not isinstance(pattern, FlatTerm) else pattern if flatterm.is_syntactic or len(flatt...
Add a pattern to the discrimination net. Args: pattern: The pattern which is added to the DiscriminationNet. If an expression is given, it will be converted to a `FlatTerm` for internal processing. You can also pass a `FlatTerm` directly. final_label: ...
def _generate_net(cls, flatterm: FlatTerm, final_label: T) -> _State[T]: # Capture the last sequence wildcard for every level of operation nesting on a stack # Used to add backtracking edges in case the "match" fails later last_wildcards = [None] # Generate a fail state for ever...
Generates a DFA matching the given pattern.
def match(self, subject: Union[Expression, FlatTerm]) -> Iterator[Tuple[T, Substitution]]: for index in self._match(subject): pattern, label = self._patterns[index] subst = Substitution() if subst.extract_substitution(subject, pattern.expression): for...
Match the given subject against all patterns in the net. Args: subject: The subject that is matched. Must be constant. Yields: A tuple :code:`(final label, substitution)`, where the first component is the final label associated with the pattern as gi...
def is_match(self, subject: Union[Expression, FlatTerm]) -> bool:
    """Check whether any pattern in the net matches the given subject.

    Args:
        subject: The subject that is matched. Must be constant.

    Returns:
        True, if at least one pattern matches the subject.
    """
    for _ in self.match(subject):
        return True
    return False
Check if the given subject matches any pattern in the net. Args: subject: The subject that is matched. Must be constant. Returns: True, if any pattern matches the subject.
def as_graph(self) -> Digraph: # pragma: no cover if Digraph is None: raise ImportError('The graphviz package is required to draw the graph.') dot = Digraph() nodes = set() queue = [self._root] while queue: state = queue.pop(0) if not...
Renders the discrimination net as graphviz digraph.
def add(self, pattern: Pattern) -> int: inner = pattern.expression if self.operation is None: if not isinstance(inner, Operation) or isinstance(inner, CommutativeOperation): raise TypeError("Pattern must be a non-commutative operation.") self.operation = ...
Add a pattern that will be recognized by the matcher. Args: pattern: The pattern to add. Returns: An internal index for the pattern. Raises: ValueError: If the pattern does not have the correct form. TypeError: ...
def can_match(cls, pattern: Pattern) -> bool: if not isinstance(pattern.expression, Operation) or isinstance(pattern.expression, CommutativeOperation): return False if op_len(pattern.expression) < 3: return False first, *_, last = op_iter(pattern.expression) ...
Check if a pattern can be matched with a sequence matcher. Args: pattern: The pattern to check. Returns: True, iff the pattern can be matched with a sequence matcher.
def match(self, subject: Expression) -> Iterator[Tuple[Pattern, Substitution]]: if not isinstance(subject, self.operation): return subjects = list(op_iter(subject)) flatterms = [FlatTerm(o) for o in subjects] for i in range(len(flatterms)): flatterm = Fla...
Match the given subject against all patterns in the sequence matcher. Args: subject: The subject that is matched. Must be constant. Yields: A tuple :code:`(pattern, substitution)` for every matching pattern.
def match(subject: Expression, pattern: Pattern) -> Iterator[Substitution]: r if not is_constant(subject): raise ValueError("The subject for matching must be constant.") global_constraints = [c for c in pattern.constraints if not c.variables] local_constraints = set(c for c in pattern.constraint...
r"""Tries to match the given *pattern* to the given *subject*. Yields each match in form of a substitution. Parameters: subject: An subject to match. pattern: The pattern to match. Yields: All possible match substitutions. Raises: ValueError: ...
def match_anywhere(subject: Expression, pattern: Pattern) -> Iterator[Tuple[Substitution, Tuple[int, ...]]]: if not is_constant(subject): raise ValueError("The subject for matching must be constant.") for child, pos in preorder_iter_with_position(subject): if match_head(child, pattern): ...
Tries to match the given *pattern* to the any subexpression of the given *subject*. Yields each match in form of a substitution and a position tuple. The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself, :code:`(0, )` refers to the first child (operand) of the subject, :c...
def _build_full_partition( optional_parts, sequence_var_partition: Sequence[int], subjects: Sequence[Expression], operation: Operation ) -> List[Sequence[Expression]]: i = 0 var_index = 0 opt_index = 0 result = [] for operand in op_iter(operation): wrap_associative = False ...
Distribute subject operands among pattern operands. Given a partitioning for the variable part of the operands (i.e. a list of how many extra operands each sequence variable gets assigned).
def grouped(self):
    """Yield the matches grouped by their final state in the automaton.

    Structurally identical patterns that differ only in their constraints are
    yielded together. Each group is a list of ``(pattern, substitution)``
    tuples.

    Yields:
        The grouped matches, one list per final state.
    """
    final_states = self._match(self.matcher.root)
    for _final_state in final_states:
        yield list(self._internal_iter())
Yield the matches grouped by their final state in the automaton, i.e. structurally identical patterns only differing in constraints will be yielded together. Each group is yielded as a list of tuples consisting of a pattern and a match substitution. Yields: The grouped matches.