text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _match_class_pos(self): """Return the position of the match class."""
def _match_class_pos(self):
    """Return the column position of the match class (label 1).

    :raise ValueError: If the fitted kernel does not have exactly
        two classes.
    """
    # TODO: add notfitted warnings
    n_classes = self.kernel.classes_.shape[0]
    if n_classes != 2:
        raise ValueError("Number of classes is {}, expected 2.".format(
            n_classes))
    # NOTE(review): assumes binary labels {0, 1} with sorted
    # ``classes_`` so the match class sits at position 1 — as the
    # original hard-coded.
    return 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _nonmatch_class_pos(self): """Return the position of the non-match class."""
def _nonmatch_class_pos(self):
    """Return the column position of the non-match class (label 0).

    :raise ValueError: If the fitted kernel does not have exactly
        two classes.
    """
    # TODO: add notfitted warnings
    n_classes = self.kernel.classes_.shape[0]
    if n_classes != 2:
        raise ValueError("Number of classes is {}, expected 2.".format(
            n_classes))
    # NOTE(review): assumes binary labels {0, 1} with sorted
    # ``classes_`` so the non-match class sits at position 0 — as the
    # original hard-coded.
    return 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log_weights(self): """Log weights as described in the FS framework."""
def log_weights(self):
    """Log weights as described in the FS framework.

    The log weight of each feature is the difference between its log
    probability under the match class and under the non-match class.
    """
    match_pos = self._match_class_pos()
    nonmatch_pos = self._nonmatch_class_pos()
    log_ratio = (self.kernel.feature_log_prob_[match_pos]
                 - self.kernel.feature_log_prob_[nonmatch_pos])
    return self._prob_inverse_transform(log_ratio)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def weights(self): """Weights as described in the FS framework."""
def weights(self):
    """Weights as described in the FS framework.

    The weight of each feature is the likelihood ratio between the
    match and non-match classes (the exponential of the log weight).
    """
    match_pos = self._match_class_pos()
    nonmatch_pos = self._nonmatch_class_pos()
    log_ratio = (self.kernel.feature_log_prob_[match_pos]
                 - self.kernel.feature_log_prob_[nonmatch_pos])
    return self._prob_inverse_transform(numpy.exp(log_ratio))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _initialise_classifier(self, comparison_vectors): """Set the centers of the clusters."""
def _initialise_classifier(self, comparison_vectors):
    """Set the initial cluster centers of the classifier.

    :param comparison_vectors: The comparison vectors whose columns
        determine the dimensionality of the cluster centers.
    """
    n_columns = len(list(comparison_vectors))
    # Start one cluster near "all zeros" (non-matches) and the other
    # near "all ones" (matches).
    self.kernel.init = numpy.array([[0.05] * n_columns,
                                    [0.95] * n_columns])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_label_dataframe(label, df): """check column label existance"""
def is_label_dataframe(label, df):
    """Check whether every given label exists as a column of the frame.

    :param label: Iterable of column labels to check.
    :param df: The pandas.DataFrame whose columns are inspected.
    :return: True when every label is a column of ``df``, else False.
    :rtype: bool
    """
    # Idiom fix: the original computed a set difference and returned
    # True/False through an if/else; a subset test says this directly.
    return set(label).issubset(df.columns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def listify(x, none_value=[]): """Make a list of the argument if it is not a list."""
def listify(x, none_value=[]):
    """Wrap the argument in a list unless it is already list-like.

    :param x: The value to turn into a list.
    :param none_value: The value returned when ``x`` is None.
    :return: ``x`` itself if it is a list, ``list(x)`` for tuples,
        ``none_value`` for None, otherwise ``[x]``.
    """
    # NOTE(review): the mutable default for ``none_value`` is shared
    # between calls (kept for interface compatibility); callers should
    # not mutate the returned default.
    if isinstance(x, list):
        return x
    if isinstance(x, tuple):
        return list(x)
    if x is None:
        return none_value
    return [x]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def multi_index_to_frame(index): """ Replicates MultiIndex.to_frame, which was introduced in pandas 0.21, for the sake of backwards compatibility. """
def multi_index_to_frame(index):
    """Backport of ``MultiIndex.to_frame`` (added in pandas 0.21).

    :param index: The pandas.MultiIndex to convert.
    :return: A DataFrame with one column per index level, indexed by
        the original MultiIndex.
    """
    rows = index.tolist()
    return pandas.DataFrame(rows, index=index, columns=index.names)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index_split(index, chunks): """Function to split pandas.Index and pandas.MultiIndex objects. Split :class:`pandas.Index` and :class:`pandas.MultiIndex` objects into chunks. This function is based on :func:`numpy.array_split`. Parameters index : pandas.Index, pandas.MultiIndex A pandas.Index or pandas.MultiIndex to split into chunks. chunks : int The number of parts to split the index into. Returns ------- list A list with chunked pandas.Index or pandas.MultiIndex objects. """
def index_split(index, chunks):
    """Split a pandas.Index or pandas.MultiIndex into chunks.

    Modelled on :func:`numpy.array_split`: the first ``remainder``
    chunks get one extra element so sizes differ by at most one.

    :param index: The pandas.Index or pandas.MultiIndex to split.
    :param chunks: The number of parts to split the index into.
    :return: List of chunked index objects.
    :raise ValueError: If ``chunks`` is not positive.
    """
    n_total = index.shape[0]
    n_sections = int(chunks)
    if n_sections <= 0:
        raise ValueError('number sections must be larger than 0.')
    quotient, remainder = divmod(n_total, n_sections)
    sizes = ([0] + remainder * [quotient + 1]
             + (n_sections - remainder) * [quotient])
    boundaries = numpy.array(sizes).cumsum()
    return [index[boundaries[i]:boundaries[i + 1]]
            for i in range(n_sections)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frame_indexing(frame, multi_index, level_i, indexing_type='label'): """Index dataframe based on one level of MultiIndex. Arguments --------- frame : pandas.DataFrame The datafrme to select records from. multi_index : pandas.MultiIndex A pandas multiindex were one fo the levels is used to sample the dataframe with. level_i : int, str The level of the multiIndex to index on. indexing_type : str The type of indexing. The value can be 'label' or 'position'. Default 'label'. """
if indexing_type == "label": data = frame.loc[multi_index.get_level_values(level_i)] data.index = multi_index elif indexing_type == "position": data = frame.iloc[multi_index.get_level_values(level_i)] data.index = multi_index else: raise ValueError("indexing_type needs to be 'label' or 'position'") return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fillna(series_or_arr, missing_value=0.0): """Fill missing values in pandas objects and numpy arrays. Arguments --------- series_or_arr : pandas.Series, numpy.ndarray The numpy array or pandas series for which the missing values need to be replaced. missing_value : float, int, str The value to replace the missing value with. Default 0.0. Returns ------- pandas.Series, numpy.ndarray The numpy array or pandas series with the missing values filled. """
def fillna(series_or_arr, missing_value=0.0):
    """Fill missing values in pandas objects and numpy arrays in place.

    :param series_or_arr: The numpy array or pandas series whose
        missing values need to be replaced.
    :param missing_value: The replacement value. Default 0.0. When it
        is itself null, the input is returned untouched.
    :return: The same object with missing values filled.
    """
    if pandas.notnull(missing_value):
        if isinstance(series_or_arr, numpy.ndarray):
            series_or_arr[numpy.isnan(series_or_arr)] = missing_value
        else:
            series_or_arr.fillna(missing_value, inplace=True)
    return series_or_arr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_related_model(field): """Gets the related model from a related field"""
def get_related_model(field):
    """Return the model a related field points to, or None.

    :param field: A Django model field instance.
    :return: The related model class, or None when the field has no
        related model.
    """
    if getattr(field, 'related_model', None):  # pragma: no cover
        return field.related_model
    # Django < 1.8 lacks the related_model API, so fall back to
    # ``rel`` (which was removed again in Django 2.0).
    if getattr(field, 'rel', None):  # pragma: no cover
        return field.rel.to
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_timeseries(self, fieldnames=(), verbose=True, index=None, storage='wide', values=None, pivot_columns=None, freq=None, coerce_float=True, rs_kwargs=None): """ A convenience method for creating a time series DataFrame i.e the DataFrame index will be an instance of DateTime or PeriodIndex Parameters fieldnames: The model field names(columns) to utilise in creating the DataFrame. You can span a relationships in the usual Django ORM way by using the foreign key field name separated by double underscores and refer to a field in a related model. index: specify the field to use for the index. If the index field is not in fieldnames it will be appended. This is mandatory for timeseries. storage: Specify if the queryset uses the ``wide`` format date | col1| col2| col3| 2001-01-01-| 100.5| 23.3| 2.2| 2001-02-01-| 106.3| 17.0| 4.6| 2001-03-01-| 111.7| 11.1| 0.7| or the `long` format. date |values| names| 2001-01-01-| 100.5| col1| 2001-02-01-| 106.3| col1| 2001-03-01-| 111.7| col1| 2001-01-01-| 23.3| col2| 2001-02-01-| 17.0| col2| 2001-01-01-| 23.3| col2| 2001-02-01-| 2.2| col3| 2001-03-01-| 4.6| col3| 2001-03-01-| 0.7| col3| pivot_columns: Required once the you specify `long` format storage. This could either be a list or string identifying the field name or combination of field. 
If the pivot_column is a single column then the unique values in this column become a new columns in the DataFrame If the pivot column is a list the values in these columns are concatenated (using the '-' as a separator) and these values are used for the new timeseries columns values: Also required if you utilize the `long` storage the values column name is use for populating new frame values freq: The offset string or object representing a target conversion rs_kwargs: A dictonary of keyword arguments based on the ``pandas.DataFrame.resample`` method verbose: If this is ``True`` then populate the DataFrame with the human readable versions of any foreign key fields else use the primary keys values else use the actual values set in the model. coerce_float: Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point. """
def to_timeseries(self, fieldnames=(), verbose=True, index=None,
                  storage='wide', values=None, pivot_columns=None,
                  freq=None, coerce_float=True, rs_kwargs=None):
    """Create a time series DataFrame from the queryset.

    The resulting frame is indexed by a DatetimeIndex (or PeriodIndex).

    :param fieldnames: Model field names (columns) to use; related
        fields may be spanned with double-underscore syntax.
    :param index: Field to use for the index (mandatory).
    :param storage: 'wide' (one column per series) or 'long'
        (name/value pairs that are pivoted into columns).
    :param values: Column holding the frame values (required for
        'long' storage).
    :param pivot_columns: Column name or list of names whose values
        become the new frame columns (required for 'long' storage).
    :param freq: Offset string or object for resampling.
    :param rs_kwargs: Keyword arguments for ``DataFrame.resample``.
    :param verbose: Use human readable versions of foreign key fields.
    :param coerce_float: Convert non-numeric values (e.g. Decimal) to
        floating point.
    """
    assert index is not None, 'You must supply an index field'
    assert storage in ('wide', 'long'), 'storage must be wide or long'
    if rs_kwargs is None:
        rs_kwargs = {}

    if storage == 'wide':
        # Wide storage already has one column per series; just index
        # by the datetime field.
        df = self.to_dataframe(fieldnames, verbose=verbose, index=index,
                               coerce_float=coerce_float,
                               datetime_index=True)
    else:
        # Long storage: fetch the flat frame first, then pivot the
        # name/value pairs into columns.
        df = self.to_dataframe(fieldnames, verbose=verbose,
                               coerce_float=coerce_float,
                               datetime_index=True)
        assert values is not None, 'You must specify a values field'
        assert pivot_columns is not None, 'You must specify pivot_columns'
        if isinstance(pivot_columns, (tuple, list)):
            # Multiple pivot columns: concatenate their (upper-cased)
            # values with '.' separators to form the new column keys.
            df['combined_keys'] = ''
            for c in pivot_columns:
                df['combined_keys'] += df[c].str.upper() + '.'
            df['combined_keys'] += values.lower()
            df = df.pivot(index=index, columns='combined_keys',
                          values=values)
        else:
            df = df.pivot(index=index, columns=pivot_columns,
                          values=values)

    if freq is not None:
        df = df.resample(freq, **rs_kwargs)
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dataframe(self, fieldnames=(), verbose=True, index=None, coerce_float=False, datetime_index=False): """ Returns a DataFrame from the queryset Paramaters fieldnames: The model field names(columns) to utilise in creating the DataFrame. You can span a relationships in the usual Django ORM way by using the foreign key field name separated by double underscores and refer to a field in a related model. index: specify the field to use for the index. If the index field is not in fieldnames it will be appended. This is mandatory for timeseries. verbose: If this is ``True`` then populate the DataFrame with the human readable versions for foreign key fields else use the actual values set in the model coerce_float: Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point. datetime_index: specify whether index should be converted to a DateTimeIndex. """
def to_dataframe(self, fieldnames=(), verbose=True, index=None,
                 coerce_float=False, datetime_index=False):
    """Return a DataFrame built from this queryset.

    Thin wrapper around :func:`read_frame`; see that function for the
    parameter semantics (``index`` maps to ``index_col``).
    """
    return read_frame(
        self,
        fieldnames=fieldnames,
        verbose=verbose,
        index_col=index,
        coerce_float=coerce_float,
        datetime_index=datetime_index,
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False, verbose=True, datetime_index=False): """ Returns a dataframe from a QuerySet Optionally specify the field names/columns to utilize and a field as the index Parameters qs: The Django QuerySet. fieldnames: The model field names to use in creating the frame. You can span a relationship in the usual Django way by using double underscores to specify a related field in another model You can span a relationship in the usual Django way by using double underscores to specify a related field in another model index_col: specify the field to use for the index. If the index field is not in the field list it will be appended coerce_float : boolean, default False Attempt to convert values to non-string, non-numeric data (like decimal.Decimal) to floating point, useful for SQL result sets verbose: boolean If this is ``True`` then populate the DataFrame with the human readable versions of any foreign key fields else use the primary keys values. The human readable version of the foreign key field is defined in the ``__unicode__`` or ``__str__`` methods of the related class definition datetime_index: specify whether index should be converted to a DateTimeIndex. """
def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False,
               verbose=True, datetime_index=False):
    """Return a DataFrame built from a Django QuerySet.

    :param qs: The Django QuerySet to read.
    :param fieldnames: Model field names to use as frame columns;
        related fields may be spanned with double-underscore syntax.
    :param index_col: Field to use for the index; appended to the
        field list if missing.
    :param coerce_float: Convert non-numeric values (e.g. Decimal) to
        floating point.
    :param verbose: Populate the frame with human readable versions of
        foreign key fields instead of primary key values.
    :param datetime_index: Convert the index to a DatetimeIndex.
    """
    if fieldnames:
        # Explicit column list: de-duplicate, and make sure the index
        # column is part of the selection.
        fieldnames = pd.unique(fieldnames)
        if index_col is not None and index_col not in fieldnames:
            # Add it to the field names if not already there
            fieldnames = tuple(fieldnames) + (index_col,)
        fields = to_fields(qs, fieldnames)
    elif is_values_queryset(qs):
        # .values() queryset: recover selected / annotated / extra
        # column names from query internals, whose location changed
        # across Django versions.
        if django.VERSION < (1, 9):  # pragma: no cover
            annotation_field_names = list(qs.query.annotation_select)

            if annotation_field_names is None:
                annotation_field_names = []

            extra_field_names = qs.extra_names
            if extra_field_names is None:
                extra_field_names = []

            select_field_names = qs.field_names

        else:  # pragma: no cover
            annotation_field_names = list(qs.query.annotation_select)
            extra_field_names = list(qs.query.extra_select)
            select_field_names = list(qs.query.values_select)

        fieldnames = select_field_names + annotation_field_names + \
            extra_field_names
        # Spanned ('__') names have no local model field; use None so
        # the verbose update skips them.
        fields = [None if '__' in f else qs.model._meta.get_field(f)
                  for f in select_field_names] + \
            [None] * (len(annotation_field_names) + len(extra_field_names))

        # Drop duplicate names, keeping the first occurrence of each
        # (set.add returns None, so the filter admits a name once).
        uniq_fields = set()
        fieldnames, fields = zip(
            *(f for f in zip(fieldnames, fields)
              if f[0] not in uniq_fields and not uniq_fields.add(f[0])))
    else:
        # Default: every concrete model field plus any annotations.
        fields = qs.model._meta.fields
        fieldnames = [f.name for f in fields]
        fieldnames += list(qs.query.annotation_select.keys())

    if is_values_queryset(qs):
        recs = list(qs)
    else:
        recs = list(qs.values_list(*fieldnames))

    df = pd.DataFrame.from_records(recs, columns=fieldnames,
                                   coerce_float=coerce_float)
    if verbose:
        update_with_verbose(df, fieldnames, fields)
    if index_col is not None:
        df.set_index(index_col, inplace=True)
    if datetime_index:
        df.index = pd.to_datetime(df.index, errors="ignore")
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_balanced(root): """Return the height if the binary tree is balanced, -1 otherwise. :param root: Root node of the binary tree. :type root: binarytree.Node | None :return: Height if the binary tree is balanced, -1 otherwise. :rtype: int """
def _is_balanced(root):
    """Return the height if the binary tree is balanced, -1 otherwise.

    :param root: Root node of the binary tree (or None).
    :return: Height of the subtree when balanced, -1 when any subtree
        heights differ by more than one.
    :rtype: int
    """
    if root is None:
        return 0
    left_height = _is_balanced(root.left)
    right_height = _is_balanced(root.right)
    if left_height < 0 or right_height < 0:
        return -1
    if abs(left_height - right_height) > 1:
        return -1
    return max(left_height, right_height) + 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_bst_from_sorted_values(sorted_values): """Recursively build a perfect BST from odd number of sorted values. :param sorted_values: Odd number of sorted values. :type sorted_values: [int | float] :return: Root node of the BST. :rtype: binarytree.Node """
def _build_bst_from_sorted_values(sorted_values):
    """Recursively build a perfect BST from an odd number of sorted values.

    :param sorted_values: Odd number of sorted values.
    :return: Root node of the BST (or None for an empty list).
    :rtype: binarytree.Node
    """
    if not sorted_values:
        return None
    # The middle element becomes the root; the halves on either side
    # recursively form the left and right subtrees.
    mid = len(sorted_values) // 2
    node = Node(sorted_values[mid])
    node.left = _build_bst_from_sorted_values(sorted_values[:mid])
    node.right = _build_bst_from_sorted_values(sorted_values[mid + 1:])
    return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_random_leaf_count(height): """Return a random leaf count for building binary trees. :param height: Height of the binary tree. :type height: int :return: Random leaf count. :rtype: int """
def _generate_random_leaf_count(height):
    """Return a random leaf count for building binary trees.

    :param height: Height of the binary tree.
    :return: Random leaf count between 1 and 2 ** height.
    :rtype: int
    """
    max_leaves = 2 ** height
    half = max_leaves // 2
    # Summing two uniform rolls crudely mimics a normal distribution;
    # fall back to ``half`` if both rolls are zero.
    total = random.randint(0, half) + random.randint(0, max_leaves - half)
    return total or half
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_random_node_values(height): """Return random node values for building binary trees. :param height: Height of the binary tree. :type height: int :return: Randomly generated node values. :rtype: [int] """
def _generate_random_node_values(height):
    """Return shuffled node values for building binary trees.

    :param height: Height of the binary tree.
    :return: The integers 0 .. 2 ** (height + 1) - 2 in random order
        (enough values for a perfect tree of the given height).
    :rtype: [int]
    """
    values = list(range(2 ** (height + 1) - 1))
    random.shuffle(values)
    return values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_tree_string(root, curr_index, index=False, delimiter='-'): """Recursively walk down the binary tree and build a pretty-print string. In each recursive call, a "box" of characters visually representing the current (sub)tree is constructed line by line. Each line is padded with whitespaces to ensure all lines in the box have the same length. Then the box, its width, and start-end positions of its root node value repr string (required for drawing branches) are sent up to the parent call. The parent call then combines its left and right sub-boxes to build a larger box etc. :param root: Root node of the binary tree. :type root: binarytree.Node | None :param curr_index: Level-order_ index of the current node (root node is 0). :type curr_index: int :param index: If set to True, include the level-order_ node indexes using the following format: ``{index}{delimiter}{value}`` (default: False). :type index: bool :param delimiter: Delimiter character between the node index and the node value (default: '-'). :type delimiter: :return: Box of characters visually representing the current subtree, width of the box, and start-end positions of the repr string of the new root node value. :rtype: ([str], int, int, int) .. _Level-order: https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search """
def _build_tree_string(root, curr_index, index=False, delimiter='-'):
    """Recursively build a pretty-print string for the (sub)tree.

    Each recursive call constructs a "box" of characters representing
    the current subtree line by line, padded so every line has the
    same width.  The box, its width, and the start/end positions of
    the root value repr (needed for drawing branches) are returned to
    the parent call, which combines its left and right sub-boxes into
    a larger box, and so on up the tree.

    :param root: Root node of the binary tree (or None).
    :param curr_index: Level-order index of the current node (root 0).
    :param index: If True, include level-order node indexes using the
        format ``{index}{delimiter}{value}`` (default: False).
    :param delimiter: Delimiter between the node index and value.
    :return: Box of characters visually representing the subtree, the
        box width, and start/end positions of the root value repr.
    :rtype: ([str], int, int, int)
    """
    if root is None:
        return [], 0, 0, 0

    line1 = []
    line2 = []
    if index:
        node_repr = '{}{}{}'.format(curr_index, delimiter, root.value)
    else:
        node_repr = str(root.value)

    new_root_width = gap_size = len(node_repr)

    # Get the left and right sub-boxes, their widths, and root repr positions
    l_box, l_box_width, l_root_start, l_root_end = \
        _build_tree_string(root.left, 2 * curr_index + 1, index, delimiter)
    r_box, r_box_width, r_root_start, r_root_end = \
        _build_tree_string(root.right, 2 * curr_index + 2, index, delimiter)

    # Draw the branch connecting the current root node to the left sub-box
    # Pad the line with whitespaces where necessary
    if l_box_width > 0:
        l_root = (l_root_start + l_root_end) // 2 + 1
        line1.append(' ' * (l_root + 1))
        line1.append('_' * (l_box_width - l_root))
        line2.append(' ' * l_root + '/')
        line2.append(' ' * (l_box_width - l_root))
        new_root_start = l_box_width + 1
        gap_size += 1
    else:
        new_root_start = 0

    # Draw the representation of the current root node
    line1.append(node_repr)
    line2.append(' ' * new_root_width)

    # Draw the branch connecting the current root node to the right sub-box
    # Pad the line with whitespaces where necessary
    if r_box_width > 0:
        r_root = (r_root_start + r_root_end) // 2
        line1.append('_' * r_root)
        line1.append(' ' * (r_box_width - r_root + 1))
        line2.append(' ' * r_root + '\\')
        line2.append(' ' * (r_box_width - r_root))
        gap_size += 1
    new_root_end = new_root_start + new_root_width - 1

    # Combine the left and right sub-boxes with the branches drawn above
    gap = ' ' * gap_size
    new_box = [''.join(line1), ''.join(line2)]
    for i in range(max(len(l_box), len(r_box))):
        # Pad the shorter sub-box with blank lines of its own width.
        l_line = l_box[i] if i < len(l_box) else ' ' * l_box_width
        r_line = r_box[i] if i < len(r_box) else ' ' * r_box_width
        new_box.append(l_line + gap + r_line)

    # Return the new box, its width and its root repr positions
    return new_box, len(new_box[0]), new_root_start, new_root_end
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build(values): """Build a tree from `list representation`_ and return its root node. .. _list representation: https://en.wikipedia.org/wiki/Binary_tree#Arrays :param values: List representation of the binary tree, which is a list of node values in breadth-first order starting from the root (current node). If a node is at index i, its left child is always at 2i + 1, right child at 2i + 2, and parent at floor((i - 1) / 2). None indicates absence of a node at that index. See example below for an illustration. :type values: [int | float | None] :return: Root node of the binary tree. :rtype: binarytree.Node :raise binarytree.exceptions.NodeNotFoundError: If the list representation is malformed (e.g. a parent node is missing). **Example**: .. doctest:: <BLANKLINE> __1 / \\ 2 3 \\ 4 <BLANKLINE> .. doctest:: Traceback (most recent call last): NodeNotFoundError: parent node missing at index 0 """
def build(values):
    """Build a tree from its list representation and return the root.

    The list holds node values in breadth-first order: the children of
    the node at index i are at 2i + 1 and 2i + 2; None marks an absent
    node.

    :param values: List representation of the binary tree.
    :return: Root node of the binary tree (None for an empty list).
    :rtype: binarytree.Node
    :raise binarytree.exceptions.NodeNotFoundError: If a child's
        parent slot is empty (malformed representation).
    """
    nodes = [None if value is None else Node(value) for value in values]

    for child_index, child in enumerate(nodes):
        if child_index == 0 or child is None:
            continue
        parent_index = (child_index - 1) // 2
        parent = nodes[parent_index]
        if parent is None:
            raise NodeNotFoundError(
                'parent node missing at index {}'.format(parent_index))
        # Odd indexes are left children, even indexes right children.
        if child_index % 2:
            parent.left = child
        else:
            parent.right = child

    return nodes[0] if nodes else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tree(height=3, is_perfect=False): """Generate a random binary tree and return its root node. :param height: Height of the tree (default: 3, range: 0 - 9 inclusive). :type height: int :param is_perfect: If set to True (default: False), a perfect binary tree with all levels filled is returned. If set to False, a perfect binary tree may still be generated by chance. :type is_perfect: bool :return: Root node of the binary tree. :rtype: binarytree.Node :raise binarytree.exceptions.TreeHeightError: If height is invalid. **Example**: .. doctest:: 3 .. doctest:: 5 True .. doctest:: Traceback (most recent call last): TreeHeightError: height must be an int between 0 - 9 """
def tree(height=3, is_perfect=False):
    """Generate a random binary tree and return its root node.

    :param height: Height of the tree (default: 3, range: 0 - 9).
    :param is_perfect: If True, return a perfect tree with all levels
        filled; if False a perfect tree may still occur by chance.
    :return: Root node of the binary tree.
    :rtype: binarytree.Node
    :raise binarytree.exceptions.TreeHeightError: If height is invalid.
    """
    _validate_tree_height(height)
    values = _generate_random_node_values(height)
    if is_perfect:
        return build(values)

    leaf_count = _generate_random_leaf_count(height)
    root = Node(values.pop(0))
    leaves = set()

    for value in values:
        node = root
        depth = 0
        inserted = False
        # Walk down random branches until a free slot is found or the
        # maximum depth is reached (the value is dropped in that case).
        while depth < height and not inserted:
            attr = random.choice(('left', 'right'))
            if getattr(node, attr) is None:
                setattr(node, attr, Node(value))
                inserted = True
            node = getattr(node, attr)
            depth += 1

        # Track nodes inserted at the bottom level and stop once the
        # target number of bottom-level leaves is reached.
        if inserted and depth == height:
            leaves.add(node)
        if len(leaves) == leaf_count:
            break

    return root
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def heap(height=3, is_max=True, is_perfect=False): """Generate a random heap and return its root node. :param height: Height of the heap (default: 3, range: 0 - 9 inclusive). :type height: int :param is_max: If set to True (default: True), generate a max heap. If set to False, generate a min heap. A binary tree with only the root node is considered both a min and max heap. :type is_max: bool :param is_perfect: If set to True (default: False), a perfect heap with all levels filled is returned. If set to False, a perfect heap may still be generated by chance. :type is_perfect: bool :return: Root node of the heap. :rtype: binarytree.Node :raise binarytree.exceptions.TreeHeightError: If height is invalid. **Example**: .. doctest:: 3 True .. doctest:: 4 True .. doctest:: 5 True True .. doctest:: Traceback (most recent call last): TreeHeightError: height must be an int between 0 - 9 """
def heap(height=3, is_max=True, is_perfect=False):
    """Generate a random heap and return its root node.

    :param height: Height of the heap (default: 3, range: 0 - 9).
    :param is_max: If True (default) generate a max heap, otherwise a
        min heap. A single-node tree is both.
    :param is_perfect: If True, return a perfect heap with all levels
        filled; if False a perfect heap may still occur by chance.
    :return: Root node of the heap.
    :rtype: binarytree.Node
    :raise binarytree.exceptions.TreeHeightError: If height is invalid.
    """
    _validate_tree_height(height)
    values = _generate_random_node_values(height)

    if not is_perfect:
        # Randomly trim trailing values so some leaves disappear while
        # the heap keeps its full height.
        cutoff = random.randint(2 ** height, len(values))
        values = values[:cutoff]

    if is_max:
        # heapq builds min heaps only, so heapify the negated values
        # and negate back to obtain a max heap ordering.
        negated = [-value for value in values]
        heapq.heapify(negated)
        return build([-value for value in negated])

    heapq.heapify(values)
    return build(values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pprint(self, index=False, delimiter='-'): """Pretty-print the binary tree. :param index: If set to True (default: False), display level-order_ indexes using the format: ``{index}{delimiter}{value}``. :type index: bool :param delimiter: Delimiter character between the node index and the node value (default: '-'). :type delimiter: str | unicode **Example**: .. doctest:: <BLANKLINE> __1 / \\ 2 3 \\ 4 <BLANKLINE> <BLANKLINE> _____0-1_ / \\ 1-2_ 2-3 \\ 4-4 <BLANKLINE> .. note:: If you do not need level-order_ indexes in the output string, use :func:`binarytree.Node.__str__` instead. .. _level-order: https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search """
def pprint(self, index=False, delimiter='-'):
    """Pretty-print the binary tree to stdout.

    :param index: If True (default: False), display level-order
        indexes using the format ``{index}{delimiter}{value}``.
    :param delimiter: Delimiter between node index and value
        (default: '-').
    """
    box_lines = _build_tree_string(self, 0, index, delimiter)[0]
    # Strip trailing padding from each line before printing.
    output = '\n'.join(line.rstrip() for line in box_lines)
    print('\n' + output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self): """Check if the binary tree is malformed. :raise binarytree.exceptions.NodeReferenceError: If there is a cyclic reference to a node in the binary tree. :raise binarytree.exceptions.NodeTypeError: If a node is not an instance of :class:`binarytree.Node`. :raise binarytree.exceptions.NodeValueError: If a node value is not a number (e.g. int, float). **Example**: .. doctest:: Traceback (most recent call last): NodeReferenceError: cyclic node reference at index 0 """
def validate(self):
    """Check if the binary tree is malformed.

    Walks the tree level by level, checking every reachable node.

    :raise binarytree.exceptions.NodeReferenceError: If there is a
        cyclic reference to a node in the binary tree.
    :raise binarytree.exceptions.NodeTypeError: If a node is not an
        instance of :class:`binarytree.Node`.
    :raise binarytree.exceptions.NodeValueError: If a node value is
        not a number (e.g. int, float).
    """
    has_more_nodes = True
    visited = set()
    to_visit = [self]
    # ``index`` is the level-order position; it advances for absent
    # (None) slots too so errors report list-representation indexes.
    index = 0

    while has_more_nodes:
        has_more_nodes = False
        next_nodes = []
        for node in to_visit:
            if node is None:
                # Keep the placeholder children so indexes stay aligned.
                next_nodes.extend((None, None))
            else:
                if node in visited:
                    raise NodeReferenceError(
                        'cyclic node reference at index {}'.format(index))
                if not isinstance(node, Node):
                    raise NodeTypeError(
                        'invalid node instance at index {}'.format(index))
                if not isinstance(node.value, numbers.Number):
                    raise NodeValueError(
                        'invalid node value at index {}'.format(index))
                if node.left is not None or node.right is not None:
                    has_more_nodes = True
                visited.add(node)
                next_nodes.extend((node.left, node.right))
            index += 1
        to_visit = next_nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def values(self): """Return the `list representation`_ of the binary tree. .. _list representation: https://en.wikipedia.org/wiki/Binary_tree#Arrays :return: List representation of the binary tree, which is a list of node values in breadth-first order starting from the root (current node). If a node is at index i, its left child is always at 2i + 1, right child at 2i + 2, and parent at index floor((i - 1) / 2). None indicates absence of a node at that index. See example below for an illustration. :rtype: [int | float | None] **Example**: .. doctest:: [1, 2, 3, None, 4] """
def values(self):
    """Return the list representation of the binary tree.

    Node values appear in breadth-first order starting from this node;
    the children of index i sit at 2i + 1 and 2i + 2, and None marks
    an absent node. Trailing None entries are removed.

    :return: List representation of the binary tree.
    :rtype: [int | float | None]
    """
    result = []
    level = [self]
    pending = True

    while pending:
        pending = False
        next_level = []
        for node in level:
            if node is None:
                # Keep placeholders so positions stay aligned.
                result.append(None)
                next_level.extend((None, None))
            else:
                if node.left is not None or node.right is not None:
                    pending = True
                result.append(node.value)
                next_level.extend((node.left, node.right))
        level = next_level

    # Get rid of trailing None's
    while result and result[-1] is None:
        result.pop()

    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def leaves(self): """Return the leaf nodes of the binary tree. A leaf node is any node that does not have child nodes. :return: List of leaf nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: <BLANKLINE> __1 / \\ 2 3 \\ 4 <BLANKLINE> [Node(3), Node(4)] """
def leaves(self):
    """Return the leaf nodes of the binary tree.

    A leaf node is any node without children.

    :return: List of leaf nodes in breadth-first order.
    :rtype: [binarytree.Node]
    """
    leaf_nodes = []
    level = [self]

    while level:
        next_level = []
        for node in level:
            if node.left is None and node.right is None:
                leaf_nodes.append(node)
            else:
                next_level.extend(
                    child for child in (node.left, node.right)
                    if child is not None)
        level = next_level

    return leaf_nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def properties(self): """Return various properties of the binary tree. :return: Binary tree properties. :rtype: dict **Example**: .. doctest:: 2 5 2 1 5 1 3 True False True False True False True """
def properties(self):
    """Return various properties of the binary tree.

    Extends the base property dict with BST and balance checks.

    :return: Binary tree properties.
    :rtype: dict
    """
    tree_properties = _get_tree_properties(self)
    tree_properties['is_bst'] = _is_bst(self)
    # _is_balanced returns the height when balanced, -1 otherwise.
    tree_properties['is_balanced'] = _is_balanced(self) >= 0
    return tree_properties
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inorder(self): """Return the nodes in the binary tree using in-order_ traversal. An in-order_ traversal visits left subtree, root, then right subtree. .. _in-order: https://en.wikipedia.org/wiki/Tree_traversal :return: List of nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: <BLANKLINE> __1 / \\ 2 3 / \\ 4 5 <BLANKLINE> [Node(4), Node(2), Node(5), Node(1), Node(3)] """
def inorder(self):
    """Return the nodes using in-order traversal.

    An in-order traversal visits left subtree, root, then right
    subtree.

    :return: List of nodes.
    :rtype: [binarytree.Node]
    """
    result = []
    stack = []
    current = self

    while current is not None or stack:
        # Descend as far left as possible, stacking ancestors.
        while current is not None:
            stack.append(current)
            current = current.left
        # Visit the node, then traverse its right subtree.
        current = stack.pop()
        result.append(current)
        current = current.right

    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def preorder(self): """Return the nodes in the binary tree using pre-order_ traversal. A pre-order_ traversal visits root, left subtree, then right subtree. .. _pre-order: https://en.wikipedia.org/wiki/Tree_traversal :return: List of nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: <BLANKLINE> __1 / \\ 2 3 / \\ 4 5 <BLANKLINE> [Node(1), Node(2), Node(4), Node(5), Node(3)] """
def preorder(self):
    """Return the nodes using pre-order traversal.

    A pre-order traversal visits root, left subtree, then right
    subtree.

    :return: List of nodes.
    :rtype: [binarytree.Node]
    """
    result = []
    stack = [self]

    while stack:
        node = stack.pop()
        result.append(node)
        # Push right before left so the left subtree is popped first.
        for child in (node.right, node.left):
            if child is not None:
                stack.append(child)

    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def postorder(self): """Return the nodes in the binary tree using post-order_ traversal. A post-order_ traversal visits left subtree, right subtree, then root. .. _post-order: https://en.wikipedia.org/wiki/Tree_traversal :return: List of nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: <BLANKLINE> __1 / \\ 2 3 / \\ 4 5 <BLANKLINE> [Node(4), Node(5), Node(2), Node(3), Node(1)] """
def postorder(self):
    """Return the nodes using iterative post-order traversal.

    A post-order traversal visits left subtree, right subtree, then
    root.

    :return: List of nodes.
    :rtype: [binarytree.Node]
    """
    node_stack = []
    result = []
    node = self
    while True:
        # Descend leftwards, stacking each node's right child (if any)
        # just below the node itself so the right subtree is revisited
        # before the node is emitted.
        while node is not None:
            if node.right is not None:
                node_stack.append(node.right)
            node_stack.append(node)
            node = node.left

        node = node_stack.pop()
        if (node.right is not None and
                len(node_stack) > 0 and
                node_stack[-1] is node.right):
            # Right subtree not yet visited: swap the node with its
            # right child on the stack and traverse the right side.
            node_stack.pop()
            node_stack.append(node)
            node = node.right
        else:
            # Both subtrees done (or absent): emit the node.
            result.append(node)
            node = None

        if len(node_stack) == 0:
            break

    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def levelorder(self): """Return the nodes in the binary tree using level-order_ traversal. A level-order_ traversal visits nodes left to right, level by level. .. _level-order: https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search :return: List of nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: <BLANKLINE> __1 / \\ 2 3 / \\ 4 5 <BLANKLINE> [Node(1), Node(2), Node(3), Node(4), Node(5)] """
def levelorder(self):
    """Return the nodes using level-order (breadth-first) traversal.

    Nodes are visited left to right, level by level.

    :return: List of nodes.
    :rtype: [binarytree.Node]
    """
    result = []
    level = [self]

    while level:
        next_level = []
        for node in level:
            result.append(node)
            if node.left is not None:
                next_level.append(node.left)
            if node.right is not None:
                next_level.append(node.right)
        level = next_level

    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invitation_backend(backend=None, namespace=None): # type: (Optional[Text], Optional[Text]) -> BaseBackend """ Returns a specified invitation backend Args: backend: dotted path to the invitation backend class namespace: URL namespace to use Returns: an instance of an InvitationBackend """
def invitation_backend(backend=None, namespace=None):
    # type: (Optional[Text], Optional[Text]) -> BaseBackend
    """Return an instance of the specified invitation backend.

    :param backend: Dotted path to the invitation backend class;
        defaults to the configured ORGS_INVITATION_BACKEND.
    :param namespace: URL namespace to use.
    :return: An instance of an InvitationBackend.
    """
    dotted_path = backend or ORGS_INVITATION_BACKEND
    module_path, class_name = dotted_path.rsplit(".", 1)
    backend_class = getattr(import_module(module_path), class_name)
    return backend_class(namespace=namespace)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def registration_backend(backend=None, namespace=None): # type: (Optional[Text], Optional[Text]) -> BaseBackend """ Returns a specified registration backend Args: backend: dotted path to the registration backend class namespace: URL namespace to use Returns: an instance of an RegistrationBackend """
def registration_backend(backend=None, namespace=None):
    # type: (Optional[Text], Optional[Text]) -> BaseBackend
    """Return an instance of the specified registration backend.

    :param backend: Dotted path to the registration backend class;
        defaults to the configured ORGS_REGISTRATION_BACKEND.
    :param namespace: URL namespace to use.
    :return: An instance of a RegistrationBackend.
    """
    dotted_path = backend or ORGS_REGISTRATION_BACKEND
    module_path, class_name = dotted_path.rsplit(".", 1)
    backend_class = getattr(import_module(module_path), class_name)
    return backend_class(namespace=namespace)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def org_registration_form(org_model): """ Generates a registration ModelForm for the given organization model class """
class OrganizationRegistrationForm(forms.ModelForm): """Form class for creating new organizations owned by new users.""" email = forms.EmailField() class Meta: model = org_model exclude = ("is_active", "users") def save(self, *args, **kwargs): self.instance.is_active = False super(OrganizationRegistrationForm, self).save(*args, **kwargs) return OrganizationRegistrationForm
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *args, **kwargs): """ The save method should create a new OrganizationUser linking the User matching the provided email address. If not matching User is found it should kick off the registration process. It needs to create a User in order to link it to the Organization. """
try: user = get_user_model().objects.get( email__iexact=self.cleaned_data["email"] ) except get_user_model().MultipleObjectsReturned: raise forms.ValidationError( _("This email address has been used multiple times.") ) except get_user_model().DoesNotExist: user = invitation_backend().invite_by_email( self.cleaned_data["email"], **{ "domain": get_current_site(self.request), "organization": self.organization, "sender": self.request.user, } ) # Send a notification email to this user to inform them that they # have been added to a new organization. invitation_backend().send_notification( user, **{ "domain": get_current_site(self.request), "organization": self.organization, "sender": self.request.user, } ) return OrganizationUser.objects.create( user=user, organization=self.organization, is_admin=self.cleaned_data["is_admin"], )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, **kwargs): """ Create the organization, then get the user, then make the owner. """
is_active = True try: user = get_user_model().objects.get(email=self.cleaned_data["email"]) except get_user_model().DoesNotExist: user = invitation_backend().invite_by_email( self.cleaned_data["email"], **{ "domain": get_current_site(self.request), "organization": self.cleaned_data["name"], "sender": self.request.user, "created": True, } ) is_active = False return create_organization( user, self.cleaned_data["name"], self.cleaned_data["slug"], is_active=is_active, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invite_by_email(self, email, user, organization, **kwargs): # type: (Text, AbstractUser, AbstractBaseOrganization) -> OrganizationInvitationBase """ Primary interface method by which one user invites another to join Args: email: request: **kwargs: Returns: an invitation instance Raises: MultipleObjectsReturned if multiple matching users are found """
try: invitee = self.user_model.objects.get(email__iexact=email) except self.user_model.DoesNotExist: invitee = None # TODO allow sending just the OrganizationUser instance user_invitation = self.invitation_model.objects.create( invitee=invitee, invitee_identifier=email.lower(), invited_by=user, organization=organization, ) self.send_invitation(user_invitation) return user_invitation
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_invitation(self, invitation, **kwargs): # type: (OrganizationInvitationBase) -> bool """ Sends an invitation message for a specific invitation. This could be overridden to do other things, such as sending a confirmation email to the sender. Args: invitation: Returns: """
return self.email_message( invitation.invitee_identifier, self.invitation_subject, self.invitation_body, invitation.invited_by, **kwargs ).send()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def email_message( self, recipient, # type: Text subject_template, # type: Text body_template, # type: Text sender=None, # type: Optional[AbstractUser] message_class=EmailMessage, **kwargs ): """ Returns an invitation email message. This can be easily overridden. For instance, to send an HTML message, use the EmailMultiAlternatives message_class and attach the additional conent. """
from_email = "%s %s <%s>" % ( sender.first_name, sender.last_name, email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1], ) reply_to = "%s %s <%s>" % (sender.first_name, sender.last_name, sender.email) headers = {"Reply-To": reply_to} kwargs.update({"sender": sender, "recipient": recipient}) subject_template = loader.get_template(subject_template) body_template = loader.get_template(body_template) subject = subject_template.render( kwargs ).strip() # Remove stray newline characters body = body_template.render(kwargs) return message_class(subject, body, from_email, [recipient], headers=headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_org(cls, module): """ Adds the `users` field to the organization model """
try: cls.module_registry[module]["OrgModel"]._meta.get_field("users") except FieldDoesNotExist: cls.module_registry[module]["OrgModel"].add_to_class( "users", models.ManyToManyField( USER_MODEL, through=cls.module_registry[module]["OrgUserModel"].__name__, related_name="%(app_label)s_%(class)s", ), ) cls.module_registry[module]["OrgModel"].invitation_model = cls.module_registry[ module ][ "OrgInviteModel" ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_org_users(cls, module): """ Adds the `user` field to the organization user model and the link to the specific organization model. """
try: cls.module_registry[module]["OrgUserModel"]._meta.get_field("user") except FieldDoesNotExist: cls.module_registry[module]["OrgUserModel"].add_to_class( "user", models.ForeignKey( USER_MODEL, related_name="%(app_label)s_%(class)s", on_delete=models.CASCADE, ), ) try: cls.module_registry[module]["OrgUserModel"]._meta.get_field("organization") except FieldDoesNotExist: cls.module_registry[module]["OrgUserModel"].add_to_class( "organization", models.ForeignKey( cls.module_registry[module]["OrgModel"], related_name="organization_users", on_delete=models.CASCADE, ), )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_org_owner(cls, module): """ Creates the links to the organization and organization user for the owner. """
try: cls.module_registry[module]["OrgOwnerModel"]._meta.get_field( "organization_user" ) except FieldDoesNotExist: cls.module_registry[module]["OrgOwnerModel"].add_to_class( "organization_user", models.OneToOneField( cls.module_registry[module]["OrgUserModel"], on_delete=models.CASCADE, ), ) try: cls.module_registry[module]["OrgOwnerModel"]._meta.get_field("organization") except FieldDoesNotExist: cls.module_registry[module]["OrgOwnerModel"].add_to_class( "organization", models.OneToOneField( cls.module_registry[module]["OrgModel"], related_name="owner", on_delete=models.CASCADE, ), )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_org_invite(cls, module): """ Adds the links to the organization and to the organization user """
try: cls.module_registry[module]["OrgInviteModel"]._meta.get_field("invited_by") except FieldDoesNotExist: cls.module_registry[module]["OrgInviteModel"].add_to_class( "invited_by", models.ForeignKey( USER_MODEL, related_name="%(app_label)s_%(class)s_sent_invitations", on_delete=models.CASCADE, ), ) try: cls.module_registry[module]["OrgInviteModel"]._meta.get_field("invitee") except FieldDoesNotExist: cls.module_registry[module]["OrgInviteModel"].add_to_class( "invitee", models.ForeignKey( USER_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_invitations", on_delete=models.CASCADE, ), ) try: cls.module_registry[module]["OrgInviteModel"]._meta.get_field( "organization" ) except FieldDoesNotExist: cls.module_registry[module]["OrgInviteModel"].add_to_class( "organization", models.ForeignKey( cls.module_registry[module]["OrgModel"], related_name="organization_invites", on_delete=models.CASCADE, ), )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_relation_name(self): """ Returns the string name of the related name to the user. This provides a consistent interface across different organization model classes. """
return "{0}_{1}".format( self._meta.app_label.lower(), self.__class__.__name__.lower() )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def activate(self, user): """ Updates the `invitee` value and saves the instance Provided as a way of extending the behavior. Args: user: the newly created user Returns: the linking organization user """
org_user = self.organization.add_user(user, **self.activation_kwargs()) self.invitee = user self.save() return org_user
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_object(self): """ Returns the OrganizationUser object based on the primary keys for both the organization and the organization user. """
if hasattr(self, "organization_user"): return self.organization_user organization_pk = self.kwargs.get("organization_pk", None) user_pk = self.kwargs.get("user_pk", None) self.organization_user = get_object_or_404( self.get_user_model().objects.select_related(), user__pk=user_pk, organization__pk=organization_pk, ) return self.organization_user
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_token(self, user, token): """ Check that a password reset token is correct for a given user. """
# Parse the token try: ts_b36, hash = token.split("-") except ValueError: return False try: ts = base36_to_int(ts_b36) except ValueError: return False # Check that the timestamp/uid has not been tampered with if not constant_time_compare(self._make_token_with_timestamp(user, ts), token): return False # Check the timestamp is within limit if (self._num_days(self._today()) - ts) > REGISTRATION_TIMEOUT_DAYS: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_organization( user, name, slug=None, is_active=None, org_defaults=None, org_user_defaults=None, **kwargs ): """ Returns a new organization, also creating an initial organization user who is the owner. The specific models can be specified if a custom organization app is used. The simplest way would be to use a partial. """
org_model = kwargs.pop("model", None) or kwargs.pop( "org_model", None ) or default_org_model() kwargs.pop("org_user_model", None) # Discard deprecated argument org_owner_model = org_model.owner.related.related_model try: # Django 1.9 org_user_model = org_model.organization_users.rel.related_model except AttributeError: # Django 1.8 org_user_model = org_model.organization_users.related.related_model if org_defaults is None: org_defaults = {} if org_user_defaults is None: if "is_admin" in model_field_names(org_user_model): org_user_defaults = {"is_admin": True} else: org_user_defaults = {} if slug is not None: org_defaults.update({"slug": slug}) if is_active is not None: org_defaults.update({"is_active": is_active}) org_defaults.update({"name": name}) organization = org_model.objects.create(**org_defaults) org_user_defaults.update({"organization": organization, "user": user}) new_user = org_user_model.objects.create(**org_user_defaults) org_owner_model.objects.create( organization=organization, organization_user=new_user ) return organization
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_field_attr(model, model_field, attr): """ Returns the specified attribute for the specified field on the model class. """
fields = dict([(field.name, field) for field in model._meta.fields]) return getattr(fields[model_field], attr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_form(self, **kwargs): """Returns the form for registering or inviting a user"""
if not hasattr(self, "form_class"): raise AttributeError(_("You must define a form_class")) return self.form_class(**kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def activate_organizations(self, user): """ Activates the related organizations for the user. It only activates the related organizations by model type - that is, if there are multiple types of organizations then only organizations in the provided model class are activated. """
try: relation_name = self.org_model().user_relation_name except TypeError: # No org_model specified, raises a TypeError because NoneType is # not callable. This the most sensible default: relation_name = "organizations_organization" organization_set = getattr(user, relation_name) for org in organization_set.filter(is_active=False): org.is_active = True org.save()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def activate_view(self, request, user_id, token): """ View function that activates the given User by setting `is_active` to true if the provided information is verified. """
try: user = self.user_model.objects.get(id=user_id, is_active=False) except self.user_model.DoesNotExist: raise Http404(_("Your URL may have expired.")) if not RegistrationTokenGenerator().check_token(user, token): raise Http404(_("Your URL may have expired.")) form = self.get_form( data=request.POST or None, files=request.FILES or None, instance=user ) if form.is_valid(): form.instance.is_active = True user = form.save() user.set_password(form.cleaned_data["password"]) user.save() self.activate_organizations(user) user = authenticate( username=form.cleaned_data["username"], password=form.cleaned_data["password"], ) login(request, user) return redirect(self.get_success_url()) return render(request, self.registration_form_template, {"form": form})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_reminder(self, user, sender=None, **kwargs): """Sends a reminder email to the specified user"""
if user.is_active: return False token = RegistrationTokenGenerator().make_token(user) kwargs.update({"token": token}) self.email_message( user, self.reminder_subject, self.reminder_body, sender, **kwargs ).send()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def email_message( self, user, subject_template, body_template, sender=None, message_class=EmailMessage, **kwargs ): """ Returns an email message for a new user. This can be easily overridden. For instance, to send an HTML message, use the EmailMultiAlternatives message_class and attach the additional conent. """
if sender: try: display_name = sender.get_full_name() except (AttributeError, TypeError): display_name = sender.get_username() from_email = "%s <%s>" % ( display_name, email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1] ) reply_to = "%s <%s>" % (display_name, sender.email) else: from_email = settings.DEFAULT_FROM_EMAIL reply_to = from_email headers = {"Reply-To": reply_to} kwargs.update({"sender": sender, "user": user}) subject_template = loader.get_template(subject_template) body_template = loader.get_template(body_template) subject = subject_template.render( kwargs ).strip() # Remove stray newline characters body = body_template.render(kwargs) return message_class(subject, body, from_email, [user.email], headers=headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_by_email(self, email, sender=None, request=None, **kwargs): """ Returns a User object filled with dummy data and not active, and sends an invitation email. """
try: user = self.user_model.objects.get(email=email) except self.user_model.DoesNotExist: user = self.user_model.objects.create( username=self.get_username(), email=email, password=self.user_model.objects.make_random_password(), ) user.is_active = False user.save() self.send_activation(user, sender, **kwargs) return user
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_activation(self, user, sender=None, **kwargs): """ Invites a user to join the site """
if user.is_active: return False token = self.get_token(user) kwargs.update({"token": token}) self.email_message( user, self.activation_subject, self.activation_body, sender, **kwargs ).send()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_view(self, request): """ Initiates the organization and user account creation process """
try: if request.user.is_authenticated(): return redirect("organization_add") except TypeError: if request.user.is_authenticated: return redirect("organization_add") form = org_registration_form(self.org_model)(request.POST or None) if form.is_valid(): try: user = self.user_model.objects.get(email=form.cleaned_data["email"]) except self.user_model.DoesNotExist: user = self.user_model.objects.create( username=self.get_username(), email=form.cleaned_data["email"], password=self.user_model.objects.make_random_password(), ) user.is_active = False user.save() else: return redirect("organization_add") organization = create_organization( user, form.cleaned_data["name"], form.cleaned_data["slug"], is_active=False, ) return render( request, self.activation_success_template, {"user": user, "organization": organization}, ) return render(request, self.registration_form_template, {"form": form})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invite_by_email(self, email, sender=None, request=None, **kwargs): """Creates an inactive user with the information we know and then sends an invitation email for that user to complete registration. If your project uses email in a different way then you should make to extend this method as it only checks the `email` attribute for Users. """
try: user = self.user_model.objects.get(email=email) except self.user_model.DoesNotExist: # TODO break out user creation process if "username" in inspect.getargspec( self.user_model.objects.create_user ).args: user = self.user_model.objects.create( username=self.get_username(), email=email, password=self.user_model.objects.make_random_password(), ) else: user = self.user_model.objects.create( email=email, password=self.user_model.objects.make_random_password() ) user.is_active = False user.save() self.send_invitation(user, sender, **kwargs) return user
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_invitation(self, user, sender=None, **kwargs): """An intermediary function for sending an invitation email that selects the templates, generating the token, and ensuring that the user has not already joined the site. """
if user.is_active: return False token = self.get_token(user) kwargs.update({"token": token}) self.email_message( user, self.invitation_subject, self.invitation_body, sender, **kwargs ).send() return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_notification(self, user, sender=None, **kwargs): """ An intermediary function for sending an notification email informing a pre-existing, active user that they have been added to a new organization. """
if not user.is_active: return False self.email_message( user, self.notification_subject, self.notification_body, sender, **kwargs ).send() return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_user(self, user, is_admin=False): """ Adds a new user and if the first user makes the user an admin and the owner. """
users_count = self.users.all().count() if users_count == 0: is_admin = True # TODO get specific org user? org_user = self._org_user_model.objects.create( user=user, organization=self, is_admin=is_admin ) if users_count == 0: # TODO get specific org user? self._org_owner_model.objects.create( organization=self, organization_user=org_user ) # User added signal user_added.send(sender=self, user=user) return org_user
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_user(self, user): """ Deletes a user from an organization. """
org_user = self._org_user_model.objects.get(user=user, organization=self) org_user.delete() # User removed signal user_removed.send(sender=self, user=user)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_or_add_user(self, user, **kwargs): """ Adds a new user to the organization, and if it's the first user makes the user an admin and the owner. Uses the `get_or_create` method to create or return the existing user. `user` should be a user instance, e.g. `auth.User`. Returns the same tuple as the `get_or_create` method, the `OrganizationUser` and a boolean value indicating whether the OrganizationUser was created or not. """
is_admin = kwargs.pop("is_admin", False) users_count = self.users.all().count() if users_count == 0: is_admin = True org_user, created = self._org_user_model.objects.get_or_create( organization=self, user=user, defaults={"is_admin": is_admin} ) if users_count == 0: self._org_owner_model.objects.create( organization=self, organization_user=org_user ) if created: # User added signal user_added.send(sender=self, user=user) return org_user, created
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def change_owner(self, new_owner): """ Changes ownership of an organization. """
old_owner = self.owner.organization_user self.owner.organization_user = new_owner self.owner.save() # Owner changed signal owner_changed.send(sender=self, old=old_owner, new=new_owner)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_admin(self, user): """ Returns True is user is an admin in the organization, otherwise false """
return True if self.organization_users.filter( user=user, is_admin=True ) else False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, using=None): """ If the organization user is also the owner, this should not be deleted unless it's part of a cascade from the Organization. If there is no owner then the deletion should proceed. """
from organizations.exceptions import OwnershipRequired try: if self.organization.owner.organization_user.pk == self.pk: raise OwnershipRequired( _( "Cannot delete organization owner " "before organization or transferring ownership." ) ) # TODO This line presumes that OrgOwner model can't be modified except self._org_owner_model.DoesNotExist: pass super(AbstractBaseOrganizationUser, self).delete(using=using)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *args, **kwargs): """ Extends the default save method by verifying that the chosen organization user is associated with the organization. Method validates against the primary key of the organization because when validating an inherited model it may be checking an instance of `Organization` against an instance of `CustomOrganization`. Mutli-table inheritence means the database keys will be identical though. """
from organizations.exceptions import OrganizationMismatch if self.organization_user.organization.pk != self.organization.pk: raise OrganizationMismatch else: super(AbstractBaseOrganizationOwner, self).save(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _kml_default_colors(x): """ flight mode to color conversion """
x = max([x, 0]) colors_arr = [simplekml.Color.red, simplekml.Color.green, simplekml.Color.blue, simplekml.Color.violet, simplekml.Color.yellow, simplekml.Color.orange, simplekml.Color.burlywood, simplekml.Color.azure, simplekml.Color.lightblue, simplekml.Color.lawngreen, simplekml.Color.indianred, simplekml.Color.hotpink] return colors_arr[x]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _kml_add_camera_triggers(kml, ulog, camera_trigger_topic_name, altitude_offset): """ Add camera trigger points to the map """
data = ulog.data_list topic_instance = 0 cur_dataset = [elem for elem in data if elem.name == camera_trigger_topic_name and elem.multi_id == topic_instance] if len(cur_dataset) > 0: cur_dataset = cur_dataset[0] pos_lon = cur_dataset.data['lon'] pos_lat = cur_dataset.data['lat'] pos_alt = cur_dataset.data['alt'] sequence = cur_dataset.data['seq'] for i in range(len(pos_lon)): pnt = kml.newpoint(name='Camera Trigger '+str(sequence[i])) pnt.coords = [(pos_lon[i], pos_lat[i], pos_alt[i] + altitude_offset)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dataset(self, name, multi_instance=0): """ get a specific dataset. example: try: gyro_data = ulog.get_dataset('sensor_gyro') except (KeyError, IndexError, ValueError) as error: print(type(error), "(sensor_gyro): ", error) :param name: name of the dataset :param multi_instance: the multi_id, defaults to the first :raises KeyError, IndexError, ValueError: if name or instance not found """
return [elem for elem in self._data_list if elem.name == name and elem.multi_id == multi_instance][0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_message_info_multiple(self, msg_info): """ add a message info multiple to self._msg_info_multiple_dict """
if msg_info.key in self._msg_info_multiple_dict: if msg_info.is_continued: self._msg_info_multiple_dict[msg_info.key][-1].append(msg_info.value) else: self._msg_info_multiple_dict[msg_info.key].append([msg_info.value]) else: self._msg_info_multiple_dict[msg_info.key] = [[msg_info.value]]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_file(self, log_file, message_name_filter_list): """ load and parse an ULog file into memory """
if isinstance(log_file, str): self._file_handle = open(log_file, "rb") else: self._file_handle = log_file # parse the whole file self._read_file_header() self._last_timestamp = self._start_timestamp self._read_file_definitions() if self.has_data_appended and len(self._appended_offsets) > 0: if self._debug: print('This file has data appended') for offset in self._appended_offsets: self._read_file_data(message_name_filter_list, read_until=offset) self._file_handle.seek(offset) # read the whole file, or the rest if data appended self._read_file_data(message_name_filter_list) self._file_handle.close() del self._file_handle
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_file_corruption(self, header): """ check for file corruption based on an unknown message type in the header """
# We need to handle 2 cases: # - corrupt file (we do our best to read the rest of the file) # - new ULog message type got added (we just want to skip the message) if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000: if not self._file_corrupt and self._debug: print('File corruption detected') self._file_corrupt = True return self._file_corrupt
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def show_info(ulog, verbose): """Show general information from an ULog"""
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60) h1, m1 = divmod(m1, 60) m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60) h2, m2 = divmod(m2, 60) print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format( h1, m1, s1, h2, m2, s2)) dropout_durations = [dropout.duration for dropout in ulog.dropouts] if len(dropout_durations) == 0: print("No Dropouts") else: print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms" .format(len(dropout_durations), sum(dropout_durations)/1000., max(dropout_durations), int(sum(dropout_durations)/len(dropout_durations)))) version = ulog.get_version_info_str() if not version is None: print('SW Version: {}'.format(version)) print("Info Messages:") for k in sorted(ulog.msg_info_dict): if not k.startswith('perf_') or verbose: print(" {0}: {1}".format(k, ulog.msg_info_dict[k])) if len(ulog.msg_info_multiple_dict) > 0: if verbose: print("Info Multiple Messages:") for k in sorted(ulog.msg_info_multiple_dict): print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k])) else: print("Info Multiple Messages: {}".format( ", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in sorted(ulog.msg_info_multiple_dict)]))) print("") print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)", "number of data points", "total bytes")) data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id)) for d in data_list_sorted: message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data]) num_data_points = len(d.data['timestamp']) name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size) print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points, message_size * num_data_points))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_estimator(self): """return the configured estimator as string from initial parameters"""
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None) if mav_type == 1: # fixed wing always uses EKF2 return 'EKF2' mc_est_group = self._ulog.initial_parameters.get('SYS_MC_EST_GROUP', None) return {0: 'INAV', 1: 'LPE', 2: 'EKF2', 3: 'IEKF'}.get(mc_est_group, 'unknown ({})'.format(mc_est_group))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_configured_rc_input_names(self, channel): """ find all RC mappings to a given channel and return their names :param channel: input channel (0=first) :return: list of strings or None """
ret_val = [] for key in self._ulog.initial_parameters: param_val = self._ulog.initial_parameters[key] if key.startswith('RC_MAP_') and param_val == channel + 1: ret_val.append(key[7:].capitalize()) if len(ret_val) > 0: return ret_val return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_fragments(base_directory, sections, fragment_directory, definitions): """ Sections are a dictonary of section names to paths. """
# Maps section name -> {(ticket, category, counter): fragment text}.
content = OrderedDict()
# Flat list of every fragment file found, for later cleanup/removal.
fragment_filenames = []
for key, val in sections.items():
    # Fragments live either directly in the section directory or in a
    # dedicated subdirectory, depending on configuration.
    if fragment_directory is not None:
        section_dir = os.path.join(base_directory, val, fragment_directory)
    else:
        section_dir = os.path.join(base_directory, val)
    files = os.listdir(section_dir)
    file_content = {}
    for basename in files:
        # Fragment filenames look like "<ticket>.<category>[.<counter>][.ext]".
        parts = basename.split(u".")
        counter = 0
        if len(parts) == 1:
            # No category at all -> not a news fragment, skip it.
            continue
        else:
            ticket, category = parts[:2]
            # If there is a number after the category then use it as a counter,
            # otherwise ignore it.
            # This means 1.feature.1 and 1.feature do not conflict but
            # 1.feature.rst and 1.feature do.
            if len(parts) > 2:
                try:
                    counter = int(parts[2])
                except ValueError:
                    pass
        if category not in definitions:
            # Unknown fragment category -> ignore.
            continue
        full_filename = os.path.join(section_dir, basename)
        fragment_filenames.append(full_filename)
        # Decode with "replace" so a stray non-UTF8 byte cannot abort the build.
        with open(full_filename, "rb") as f:
            data = f.read().decode("utf8", "replace")
        if (ticket, category, counter) in file_content:
            # Two files mapping to the same (ticket, category, counter) key
            # would silently shadow each other -- fail loudly instead.
            raise ValueError(
                "multiple files for {}.{} in {}".format(
                    ticket, category, section_dir
                )
            )
        file_content[ticket, category, counter] = data
    content[key] = file_content
return content, fragment_filenames
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def indent(text, prefix): """ Adds `prefix` to the beginning of non-empty lines in `text`. """
# Based on Python 3's textwrap.indent def prefixed_lines(): for line in text.splitlines(True): yield (prefix + line if line.strip() else line) return u"".join(prefixed_lines())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_fragments(template, issue_format, fragments, definitions, underlines, wrap): """ Render the fragments into a news file. """
# Compile the news-file template; trim_blocks avoids spurious blank lines.
jinja_template = Template(template, trim_blocks=True)

# data: section name -> category name -> {entry text: [rendered issues]}
data = OrderedDict()

for section_name, section_value in fragments.items():

    data[section_name] = OrderedDict()

    for category_name, category_value in section_value.items():
        # Suppose we start with an ordering like this:
        #
        # - Fix the thing (#7, #123, #2)
        # - Fix the other thing (#1)

        # First we sort the issues inside each line:
        #
        # - Fix the thing (#2, #7, #123)
        # - Fix the other thing (#1)
        entries = []
        for text, issues in category_value.items():
            entries.append((text, sorted(issues, key=issue_key)))

        # Then we sort the lines:
        #
        # - Fix the other thing (#1)
        # - Fix the thing (#2, #7, #123)
        entries.sort(key=entry_key)

        # Then we put these nicely sorted entries back in an ordered dict
        # for the template, after formatting each issue number
        categories = OrderedDict()
        for text, issues in entries:
            rendered = [render_issue(issue_format, i) for i in issues]
            categories[text] = rendered

        data[section_name][category_name] = categories

done = []

res = jinja_template.render(
    sections=data, definitions=definitions, underlines=underlines
)
# Optionally hard-wrap each rendered line at 79 columns; continuation
# lines are indented two spaces to align with bullet text.
for line in res.split(u"\n"):
    if wrap:
        done.append(
            textwrap.fill(
                line,
                width=79,
                subsequent_indent=u"  ",
                break_long_words=False,
                break_on_hyphens=False,
            )
        )
    else:
        done.append(line)

# Normalise trailing whitespace to exactly one final newline.
return u"\n".join(done).rstrip() + u"\n"
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ppoints(n, a=0.5): """ Ordinates For Probability Plotting. Numpy analogue or `R`'s `ppoints` function. Parameters n : int Number of points generated a : float Offset fraction (typically between 0 and 1) Returns ------- p : array Sequence of probabilities at which to evaluate the inverse distribution. """
# NOTE(review): the incoming ``a`` argument is unconditionally overwritten
# here, so the documented "offset fraction" parameter has no effect.
# This mirrors R's ppoints(): a = 3/8 for small samples (n <= 10),
# otherwise a = 1/2. Confirm whether honouring a caller-supplied ``a``
# is intended before changing this.
a = 3 / 8 if n <= 10 else 0.5
# Probabilities p_i = (i - a) / (n + 1 - 2a) for i = 1..n.
return (np.arange(n) + 1 - a) / (n + 1 - 2 * a)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def qqplot(x, dist='norm', sparams=(), confidence=.95, figsize=(5, 4), ax=None): """Quantile-Quantile plot. Parameters x : array_like Sample data. dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a `scipy.stats.distributions` instance (i.e. they have a ``ppf`` method) are also accepted. sparams : tuple, optional Distribution-specific shape parameters (shape parameters, location, and scale). See :py:func:`scipy.stats.probplot` for more details. confidence : float Confidence level (.95 = 95%) for point-wise confidence envelope. Pass False for no envelope. figsize : tuple Figsize in inches ax : matplotlib axes Axis on which to draw the plot Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- This function returns a scatter plot of the quantile of the sample data `x` against the theoretical quantiles of the distribution given in `dist` (default = 'norm'). The points plotted in a Q–Q plot are always non-decreasing when viewed from left to right. If the two distributions being compared are identical, the Q–Q plot follows the 45° line y = x. If the two distributions agree after linearly transforming the values in one of the distributions, then the Q–Q plot follows some line, but not necessarily the line y = x. If the general trend of the Q–Q plot is flatter than the line y = x, the distribution plotted on the horizontal axis is more dispersed than the distribution plotted on the vertical axis. Conversely, if the general trend of the Q–Q plot is steeper than the line y = x, the distribution plotted on the vertical axis is more dispersed than the distribution plotted on the horizontal axis. 
Q–Q plots are often arced, or "S" shaped, indicating that one of the distributions is more skewed than the other, or that one of the distributions has heavier tails than the other. In addition, the function also plots a best-fit line (linear regression) for the data and annotates the plot with the coefficient of determination :math:`R^2`. Note that the intercept and slope of the linear regression between the quantiles gives a measure of the relative location and relative scale of the samples. References .. [1] https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot .. [2] https://github.com/cran/car/blob/master/R/qqPlot.R .. [3] Fox, J. (2008), Applied Regression Analysis and Generalized Linear Models, 2nd Ed., Sage Publications, Inc. Examples -------- Q-Q plot using a normal theoretical distribution: .. plot:: Two Q-Q plots using two separate axes: .. plot:: Using custom location / scale parameters as well as another Seaborn style .. plot:: """
# Resolve a distribution name (e.g. 'norm') to the scipy.stats object.
if isinstance(dist, str):
    dist = getattr(stats, dist)

x = np.asarray(x)
x = x[~np.isnan(x)]  # NaN are automatically removed

# Extract quantiles and regression
quantiles = stats.probplot(x, sparams=sparams, dist=dist, fit=False)
theor, observed = quantiles[0], quantiles[1]

# Fit the distribution to the data; scipy returns (shape..., loc, scale).
fit_params = dist.fit(x)
loc = fit_params[-2]
scale = fit_params[-1]
# A 3-tuple means one shape parameter precedes loc/scale.
shape = fit_params[0] if len(fit_params) == 3 else None

# Observed values to observed quantiles
# NOTE(review): this standardisation only runs when loc != 0 AND
# scale != 1 -- a distribution with loc == 0 but scale != 1 is left
# unstandardised; confirm this is intentional.
if loc != 0 and scale != 1:
    observed = (np.sort(observed) - fit_params[-2]) / fit_params[-1]

# Linear regression of observed on theoretical quantiles.
slope, intercept, r, _, _ = stats.linregress(theor, observed)

# Start the plot
if ax is None:
    fig, ax = plt.subplots(1, 1, figsize=figsize)

ax.plot(theor, observed, 'bo')

# NOTE(review): relies on a private scipy helper
# (stats.morestats._add_axis_labels_title) -- may break across scipy
# versions; consider setting labels/title via the Axes API directly.
stats.morestats._add_axis_labels_title(ax, xlabel='Theoretical quantiles',
                                       ylabel='Ordered quantiles',
                                       title='Q-Q Plot')

# Add diagonal line
end_pts = [ax.get_xlim(), ax.get_ylim()]
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(end_pts, end_pts, color='slategrey', lw=1.5)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)

# Add regression line and annotate R2
fit_val = slope * theor + intercept
ax.plot(theor, fit_val, 'r-', lw=2)
# Annotation placed at 60% / 10% of the axis span.
posx = end_pts[0] + 0.60 * (end_pts[1] - end_pts[0])
posy = end_pts[0] + 0.10 * (end_pts[1] - end_pts[0])
ax.text(posx, posy, "$R^2=%.3f$" % r**2)

if confidence is not False:
    # Confidence envelope (point-wise, normal-theory standard errors).
    n = x.size
    P = _ppoints(n)
    crit = stats.norm.ppf(1 - (1 - confidence) / 2)
    pdf = dist.pdf(theor) if shape is None else dist.pdf(theor, shape)
    se = (slope / pdf) * np.sqrt(P * (1 - P) / n)
    upper = fit_val + crit * se
    lower = fit_val - crit * se
    ax.plot(theor, upper, 'r--', lw=1.25)
    ax.plot(theor, lower, 'r--', lw=1.25)

return ax
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_paired(data=None, dv=None, within=None, subject=None, order=None, boxplot=True, figsize=(4, 4), dpi=100, ax=None, colors=['green', 'grey', 'indianred'], pointplot_kwargs={'scale': .6, 'markers': '.'}, boxplot_kwargs={'color': 'lightslategrey', 'width': .2}): """ Paired plot. Parameters data : pandas DataFrame Long-format dataFrame. dv : string Name of column containing the dependant variable. within : string Name of column containing the within-subject factor. Note that ``within`` must have exactly two within-subject levels (= two unique values). subject : string Name of column containing the subject identifier. order : list of str List of values in ``within`` that define the order of elements on the x-axis of the plot. If None, uses alphabetical order. boxplot : boolean If True, add a boxplot to the paired lines using the :py:func:`seaborn.boxplot` function. figsize : tuple Figsize in inches dpi : int Resolution of the figure in dots per inches. ax : matplotlib axes Axis on which to draw the plot. colors : list of str Line colors names. Default is green when value increases from A to B, indianred when value decreases from A to B and grey when the value is the same in both measurements. pointplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.pointplot` function. boxplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.boxplot` function. Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- Data must be a long-format pandas DataFrame. Examples -------- Default paired plot: .. plot:: Paired plot on an existing axis (no boxplot and uniform color): .. plot:: """
from pingouin.utils import _check_dataframe, remove_rm_na # Validate args _check_dataframe(data=data, dv=dv, within=within, subject=subject, effects='within') # Remove NaN values data = remove_rm_na(dv=dv, within=within, subject=subject, data=data) # Extract subjects subj = data[subject].unique() # Extract within-subject level (alphabetical order) x_cat = np.unique(data[within]) assert len(x_cat) == 2, 'Within must have exactly two unique levels.' if order is None: order = x_cat else: assert len(order) == 2, 'Order must have exactly two elements.' # Start the plot if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) for idx, s in enumerate(subj): tmp = data.loc[data[subject] == s, [dv, within, subject]] x_val = tmp[tmp[within] == order[0]][dv].values[0] y_val = tmp[tmp[within] == order[1]][dv].values[0] if x_val < y_val: color = colors[0] elif x_val > y_val: color = colors[2] elif x_val == y_val: color = colors[1] # Plot individual lines using Seaborn sns.pointplot(data=tmp, x=within, y=dv, order=order, color=color, ax=ax, **pointplot_kwargs) if boxplot: sns.boxplot(data=data, x=within, y=dv, order=order, ax=ax, **boxplot_kwargs) # Despine and trim sns.despine(trim=True, ax=ax) return ax
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def anova(dv=None, between=None, data=None, detailed=False, export_filename=None): """One-way and two-way ANOVA. Parameters dv : string Name of column in ``data`` containing the dependent variable. between : string or list with two elements Name of column(s) in ``data`` containing the between-subject factor(s). If ``between`` is a single string, a one-way ANOVA is computed. If ``between`` is a list with two elements (e.g. ['Factor1', 'Factor2']), a two-way ANOVA is computed. data : pandas DataFrame DataFrame. Note that this function can also directly be used as a Pandas method, in which case this argument is no longer needed. detailed : boolean If True, return a detailed ANOVA table (default True for two-way ANOVA). export_filename : string Filename (without extension) for the output file. If None, do not export the table. By default, the file will be created in the current python console directory. To change that, specify the filename with full path. Returns ------- aov : DataFrame ANOVA summary :: 'Source' : Factor names 'SS' : Sums of squares 'DF' : Degrees of freedom 'MS' : Mean squares 'F' : F-values 'p-unc' : uncorrected p-values 'np2' : Partial eta-square effect sizes See Also -------- rm_anova : One-way and two-way repeated measures ANOVA mixed_anova : Two way mixed ANOVA welch_anova : One-way Welch ANOVA kruskal : Non-parametric one-way ANOVA Notes ----- The classic ANOVA is very powerful when the groups are normally distributed and have equal variances. However, when the groups have unequal variances, it is best to use the Welch ANOVA (`welch_anova`) that better controls for type I error (Liu 2015). The homogeneity of variances can be measured with the `homoscedasticity` function. The main idea of ANOVA is to partition the variance (sums of squares) into several components. 
For example, in one-way ANOVA: .. math:: SS_{total} = SS_{treatment} + SS_{error} .. math:: SS_{total} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y})^2 .. math:: SS_{treatment} = \\sum_i n_i (\\overline{Y_i} - \\overline{Y})^2 .. math:: SS_{error} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y}_i)^2 and :math:`n_i` the number of observations for the :math:`i` th group. The F-statistics is then defined as: .. math:: F^* = \\frac{MS_{treatment}}{MS_{error}} = \\frac{SS_{treatment} / (r - 1)}{SS_{error} / (n_t - r)} and the p-value can be calculated using a F-distribution with :math:`r-1, n_t-1` degrees of freedom. When the groups are balanced and have equal variances, the optimal post-hoc test is the Tukey-HSD test (:py:func:`pingouin.pairwise_tukey`). If the groups have unequal variances, the Games-Howell test is more adequate (:py:func:`pingouin.pairwise_gameshowell`). The effect size reported in Pingouin is the partial eta-square. However, one should keep in mind that for one-way ANOVA partial eta-square is the same as eta-square and generalized eta-square. For more details, see Bakeman 2005; Richardson 2011. .. math:: \\eta_p^2 = \\frac{SS_{treatment}}{SS_{treatment} + SS_{error}} Note that missing values are automatically removed. Results have been tested against R, Matlab and JASP. **Important** Versions of Pingouin below 0.2.5 gave wrong results for **unbalanced two-way ANOVA**. This issue has been resolved in Pingouin>=0.2.5. In such cases, a type II ANOVA is calculated via an internal call to the statsmodels package. This latter package is therefore required for two-way ANOVA with unequal sample sizes. References .. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and traditional ANOVA in case of Heterogeneity of Variance." (2015). .. [2] Bakeman, Roger. "Recommended effect size statistics for repeated measures designs." Behavior research methods 37.3 (2005): 379-384. .. [3] Richardson, John TE. 
"Eta squared and partial eta squared as measures of effect size in educational research." Educational Research Review 6.2 (2011): 135-147. Examples -------- One-way ANOVA Source SS DF MS F p-unc np2 0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576 1 Within 1001.800 15 66.787 - - - Note that this function can also directly be used as a Pandas method Source SS DF MS F p-unc np2 0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576 1 Within 1001.800 15 66.787 - - - Two-way ANOVA with balanced design Source SS DF MS F p-unc np2 0 Blend 2.042 1 2.042 0.004 0.952 0.000 1 Crop 2736.583 2 1368.292 2.525 0.108 0.219 2 Blend * Crop 2360.083 2 1180.042 2.178 0.142 0.195 3 residual 9753.250 18 541.847 NaN NaN NaN Two-way ANOVA with unbalanced design (requires statsmodels) Source SS DF MS F p-unc np2 0 Diet 390.625 1.0 390.625 7.423 0.034 0.553 1 Exercise 180.625 1.0 180.625 3.432 0.113 0.364 2 Diet * Exercise 15.625 1.0 15.625 0.297 0.605 0.047 3 residual 315.750 6.0 52.625 NaN NaN NaN """
# Dispatch: a two-element ``between`` list means a two-way ANOVA,
# delegated to anova2(); a one-element list degrades to a string.
if isinstance(between, list):
    if len(between) == 2:
        return anova2(dv=dv, between=between, data=data,
                      export_filename=export_filename)
    elif len(between) == 1:
        between = between[0]

# Check data
_check_dataframe(dv=dv, between=between, data=data, effects='between')

# Drop missing values
data = data[[dv, between]].dropna()

# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)

groups = list(data[between].unique())
n_groups = len(groups)
N = data[dv].size

# Calculate sums of squares
grp = data.groupby(between)[dv]
# Between effect
ssbetween = ((grp.mean() - data[dv].mean())**2 * grp.count()).sum()
# Within effect (= error between)
# = (grp.var(ddof=0) * grp.count()).sum()
sserror = grp.apply(lambda x: (x - x.mean())**2).sum()

# Calculate DOF, MS, F and p-values
ddof1 = n_groups - 1
ddof2 = N - n_groups
msbetween = ssbetween / ddof1
mserror = sserror / ddof2
fval = msbetween / mserror
# ``f`` is presumably scipy.stats.f imported at module level -- the
# survival function gives the upper-tail p-value.
p_unc = f(ddof1, ddof2).sf(fval)

# Calculating partial eta-square
# Similar to (fval * ddof1) / (fval * ddof1 + ddof2)
np2 = ssbetween / (ssbetween + sserror)

# Create output dataframe
if not detailed:
    aov = pd.DataFrame({'Source': between,
                        'ddof1': ddof1,
                        'ddof2': ddof2,
                        'F': fval,
                        'p-unc': p_unc,
                        'np2': np2
                        }, index=[0])
    col_order = ['Source', 'ddof1', 'ddof2', 'F', 'p-unc', 'np2']
else:
    aov = pd.DataFrame({'Source': [between, 'Within'],
                        'SS': np.round([ssbetween, sserror], 3),
                        'DF': [ddof1, ddof2],
                        'MS': np.round([msbetween, mserror], 3),
                        'F': [fval, np.nan],
                        'p-unc': [p_unc, np.nan],
                        'np2': [np2, np.nan]
                        })
    col_order = ['Source', 'SS', 'DF', 'MS', 'F', 'p-unc', 'np2']

# Round
aov[['F', 'np2']] = aov[['F', 'np2']].round(3)

# Replace NaN
aov = aov.fillna('-')
aov = aov.reindex(columns=col_order)
aov.dropna(how='all', axis=1, inplace=True)

# Export to .csv
if export_filename is not None:
    _export_table(aov, export_filename)
return aov
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def welch_anova(dv=None, between=None, data=None, export_filename=None): """One-way Welch ANOVA. Parameters dv : string Name of column containing the dependant variable. between : string Name of column containing the between factor. data : pandas DataFrame DataFrame. Note that this function can also directly be used as a Pandas method, in which case this argument is no longer needed. export_filename : string Filename (without extension) for the output file. If None, do not export the table. By default, the file will be created in the current python console directory. To change that, specify the filename with full path. Returns ------- aov : DataFrame ANOVA summary :: 'Source' : Factor names 'SS' : Sums of squares 'DF' : Degrees of freedom 'MS' : Mean squares 'F' : F-values 'p-unc' : uncorrected p-values 'np2' : Partial eta-square effect sizes See Also -------- anova : One-way ANOVA rm_anova : One-way and two-way repeated measures ANOVA mixed_anova : Two way mixed ANOVA kruskal : Non-parametric one-way ANOVA Notes ----- The classic ANOVA is very powerful when the groups are normally distributed and have equal variances. However, when the groups have unequal variances, it is best to use the Welch ANOVA that better controls for type I error (Liu 2015). The homogeneity of variances can be measured with the `homoscedasticity` function. The two other assumptions of normality and independance remain. The main idea of Welch ANOVA is to use a weight :math:`w_i` to reduce the effect of unequal variances. This weight is calculated using the sample size :math:`n_i` and variance :math:`s_i^2` of each group .. math:: w_i = \\frac{n_i}{s_i^2} Using these weights, the adjusted grand mean of the data is: .. 
math:: \\overline{Y}_{welch} = \\frac{\\sum_{i=1}^r w_i\\overline{Y}_i} {\\sum w} where :math:`\\overline{Y}_i` is the mean of the :math:`i` group. The treatment sums of squares is defined as: .. math:: SS_{treatment} = \\sum_{i=1}^r w_i (\\overline{Y}_i - \\overline{Y}_{welch})^2 We then need to calculate a term lambda: .. math:: \\Lambda = \\frac{3\\sum_{i=1}^r(\\frac{1}{n_i-1}) (1 - \\frac{w_i}{\\sum w})^2}{r^2 - 1} from which the F-value can be calculated: .. math:: F_{welch} = \\frac{SS_{treatment} / (r-1)} {1 + \\frac{2\\Lambda(r-2)}{3}} and the p-value approximated using a F-distribution with :math:`(r-1, 1 / \\Lambda)` degrees of freedom. When the groups are balanced and have equal variances, the optimal post-hoc test is the Tukey-HSD test (`pairwise_tukey`). If the groups have unequal variances, the Games-Howell test is more adequate. Results have been tested against R. References .. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and traditional ANOVA in case of Heterogeneity of Variance." (2015). .. [2] Welch, Bernard Lewis. "On the comparison of several mean values: an alternative approach." Biometrika 38.3/4 (1951): 330-336. Examples -------- 1. One-way Welch ANOVA on the pain threshold dataset. Source ddof1 ddof2 F p-unc 0 Hair color 3 8.33 5.89 0.018813 """
# Check data _check_dataframe(dv=dv, between=between, data=data, effects='between') # Reset index (avoid duplicate axis error) data = data.reset_index(drop=True) # Number of groups r = data[between].nunique() ddof1 = r - 1 # Compute weights and ajusted means grp = data.groupby(between)[dv] weights = grp.count() / grp.var() adj_grandmean = (weights * grp.mean()).sum() / weights.sum() # Treatment sum of squares ss_tr = np.sum(weights * np.square(grp.mean() - adj_grandmean)) ms_tr = ss_tr / ddof1 # Calculate lambda, F-value and p-value lamb = (3 * np.sum((1 / (grp.count() - 1)) * (1 - (weights / weights.sum()))**2)) / (r**2 - 1) fval = ms_tr / (1 + (2 * lamb * (r - 2)) / 3) pval = f.sf(fval, ddof1, 1 / lamb) # Create output dataframe aov = pd.DataFrame({'Source': between, 'ddof1': ddof1, 'ddof2': 1 / lamb, 'F': fval, 'p-unc': pval, }, index=[0]) col_order = ['Source', 'ddof1', 'ddof2', 'F', 'p-unc'] aov = aov.reindex(columns=col_order) aov[['F', 'ddof2']] = aov[['F', 'ddof2']].round(3) # Export to .csv if export_filename is not None: _export_table(aov, export_filename) return aov
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ancovan(dv=None, covar=None, between=None, data=None, export_filename=None): """ANCOVA with n covariates. This is an internal function. The main call to this function should be done by the :py:func:`pingouin.ancova` function. Parameters dv : string Name of column containing the dependant variable. covar : string Name(s) of columns containing the covariates. between : string Name of column containing the between factor. data : pandas DataFrame DataFrame export_filename : string Filename (without extension) for the output file. If None, do not export the table. By default, the file will be created in the current python console directory. To change that, specify the filename with full path. Returns ------- aov : DataFrame ANCOVA summary :: 'Source' : Names of the factor considered 'SS' : Sums of squares 'DF' : Degrees of freedom 'F' : F-values 'p-unc' : Uncorrected p-values """
# Check that stasmodels is installed from pingouin.utils import _is_statsmodels_installed _is_statsmodels_installed(raise_error=True) from statsmodels.api import stats from statsmodels.formula.api import ols # Check that covariates are numeric ('float', 'int') assert all([data[covar[i]].dtype.kind in 'fi' for i in range(len(covar))]) # Fit ANCOVA model formula = dv + ' ~ C(' + between + ')' for c in covar: formula += ' + ' + c model = ols(formula, data=data).fit() aov = stats.anova_lm(model, typ=2).reset_index() aov.rename(columns={'index': 'Source', 'sum_sq': 'SS', 'df': 'DF', 'PR(>F)': 'p-unc'}, inplace=True) aov.loc[0, 'Source'] = between aov['DF'] = aov['DF'].astype(int) aov[['SS', 'F']] = aov[['SS', 'F']].round(3) # Export to .csv if export_filename is not None: _export_table(aov, export_filename) return aov
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_dataset(dname): """Read example datasets. Parameters dname : string Name of dataset to read (without extension). Must be a valid dataset present in pingouin.datasets Returns ------- data : pd.DataFrame Dataset Examples -------- Load the ANOVA dataset """
# Check extension d, ext = op.splitext(dname) if ext.lower() == '.csv': dname = d # Check that dataset exist if dname not in dts['dataset'].values: raise ValueError('Dataset does not exist. Valid datasets names are', dts['dataset'].values) # Load dataset return pd.read_csv(op.join(ddir, dname + '.csv'), sep=',')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _perm_pval(bootstat, estimate, tail='two-sided'): """ Compute p-values from a permutation test. Parameters bootstat : 1D array Permutation distribution. estimate : float or int Point estimate. tail : str 'upper': one-sided p-value (upper tail) 'lower': one-sided p-value (lower tail) 'two-sided': two-sided p-value Returns ------- p : float P-value. """
assert tail in ['two-sided', 'upper', 'lower'], 'Wrong tail argument.' assert isinstance(estimate, (int, float)) bootstat = np.asarray(bootstat) assert bootstat.ndim == 1, 'bootstat must be a 1D array.' n_boot = bootstat.size assert n_boot >= 1, 'bootstat must have at least one value.' if tail == 'upper': p = np.greater_equal(bootstat, estimate).sum() / n_boot elif tail == 'lower': p = np.less_equal(bootstat, estimate).sum() / n_boot else: p = np.greater_equal(np.fabs(bootstat), abs(estimate)).sum() / n_boot return p
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_table(df, floatfmt=".3f", tablefmt='simple'): """Pretty display of table. See: https://pypi.org/project/tabulate/. Parameters df : DataFrame Dataframe to print (e.g. ANOVA summary) floatfmt : string Decimal number formatting tablefmt : string Table format (e.g. 'simple', 'plain', 'html', 'latex', 'grid') """
# Print a header banner depending on the table type: an 'F' column
# marks an ANOVA summary, an 'A' column marks a post-hoc table.
columns = df.keys()
if 'F' in columns:
    print('\n=============\nANOVA SUMMARY\n=============\n')
if 'A' in columns:
    print('\n==============\nPOST HOC TESTS\n==============\n')

# Render the DataFrame with tabulate and finish with a blank line.
table_str = tabulate(df, headers="keys", showindex=False,
                     floatfmt=floatfmt, tablefmt=tablefmt)
print(table_str)
print('')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _export_table(table, fname): """Export DataFrame to .csv"""
import os.path as op
# Append a .csv extension when the caller supplied a bare filename.
root, extension = op.splitext(fname.lower())
if not extension:
    fname = fname + '.csv'
# Write without the index, using 4-decimal floats and '.' as the
# decimal separator.
table.to_csv(fname, index=None, sep=',', encoding='utf-8',
             float_format='%.4f', decimal='.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _remove_na_single(x, axis='rows'): """Remove NaN in a single array. This is an internal Pingouin function. """
if x.ndim == 1:
    # 1D arrays: keep positions that are not NaN.
    x_mask = ~np.isnan(x)
else:
    # 2D arrays: a row (resp. column) is kept only if it contains no
    # NaN at all. Note the axis flip: dropping ROWS means scanning for
    # NaN ALONG axis=1 (across each row), and vice versa.
    ax = 1 if axis == 'rows' else 0
    x_mask = ~np.any(np.isnan(x), axis=ax)
# Check if missing values are present
if ~x_mask.all():
    # compress() removes the masked-out slices; the compression axis is
    # the one named by ``axis`` (and always 0 for 1D input).
    ax = 0 if axis == 'rows' else 1
    ax = 0 if x.ndim == 1 else ax
    x = x.compress(x_mask, axis=ax)
return x
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_rm_na(dv=None, within=None, subject=None, data=None, aggregate='mean'): """Remove missing values in long-format repeated-measures dataframe. Parameters dv : string or list Dependent variable(s), from which the missing values should be removed. If ``dv`` is not specified, all the columns in the dataframe are considered. ``dv`` must be numeric. within : string or list Within-subject factor(s). subject : string Subject identifier. data : dataframe Long-format dataframe. aggregate : string Aggregation method if there are more within-factors in the data than specified in the ``within`` argument. Can be `mean`, `median`, `sum`, `first`, `last`, or any other function accepted by :py:meth:`pandas.DataFrame.groupby`. Returns ------- data : dataframe Dataframe without the missing values. Notes ----- If multiple factors are specified, the missing values are removed on the last factor, so the order of ``within`` is important. In addition, if there are more within-factors in the data than specified in the ``within`` argument, data will be aggregated using the function specified in ``aggregate``. Note that in the default case (aggregation using the mean), all the non-numeric column(s) will be dropped. """
# Safety checks assert isinstance(aggregate, str), 'aggregate must be a str.' assert isinstance(within, (str, list)), 'within must be str or list.' assert isinstance(subject, str), 'subject must be a string.' assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame.' idx_cols = _flatten_list([subject, within]) all_cols = data.columns if data[idx_cols].isnull().any().any(): raise ValueError("NaN are present in the within-factors or in the " "subject column. Please remove them manually.") # Check if more within-factors are present and if so, aggregate if (data.groupby(idx_cols).count() > 1).any().any(): # Make sure that we keep the non-numeric columns when aggregating # This is disabled by default to avoid any confusion. # all_others = all_cols.difference(idx_cols) # all_num = data[all_others].select_dtypes(include='number').columns # agg = {c: aggregate if c in all_num else 'first' for c in all_others} data = data.groupby(idx_cols).agg(aggregate) else: # Set subject + within factors as index. # Sorting is done to avoid performance warning when dropping. data = data.set_index(idx_cols).sort_index() # Find index with missing values if dv is None: iloc_nan = data.isnull().values.nonzero()[0] else: iloc_nan = data[dv].isnull().values.nonzero()[0] # Drop the last within level idx_nan = data.index[iloc_nan].droplevel(-1) # Drop and re-order data = data.drop(idx_nan).reset_index(drop=False) return data.reindex(columns=all_cols).dropna(how='all', axis=1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _flatten_list(x): """Flatten an arbitrarily nested list into a new list. This can be useful to select pandas DataFrame columns. From https://stackoverflow.com/a/16176969/10581531 Examples -------- ['X1', 'M1', 'M2', 'Y1', 'Y2'] ['Xaa', 'Xbb', 'Xcc'] """
result = [] # Remove None x = list(filter(None.__ne__, x)) for el in x: x_is_iter = isinstance(x, collections.Iterable) if x_is_iter and not isinstance(el, (str, tuple)): result.extend(_flatten_list(el)) else: result.append(el) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _format_bf(bf, precision=3, trim='0'): """Format BF10 to floating point or scientific notation. """
if bf >= 1e4 or bf <= 1e-4: out = np.format_float_scientific(bf, precision=precision, trim=trim) else: out = np.format_float_positional(bf, precision=precision, trim=trim) return out
def bayesfactor_pearson(r, n):
    """Bayes Factor of a Pearson correlation.

    Parameters
    ----------
    r : float
        Pearson correlation coefficient.
    n : int
        Sample size.

    Returns
    -------
    bf : str
        Bayes Factor (BF10), quantifying the evidence in favour of the
        alternative hypothesis.

    Notes
    -----
    The JZS Bayes Factor is approximated by numerically integrating the
    formula of Wetzels & Wagenmakers (2012):

    .. math::

        BF_{10} = \\frac{\\sqrt{n/2}}{\\gamma(1/2)}
        \\int_{0}^{\\infty} \\exp\\Big[\\frac{n-2}{2}\\log(1+g)
        - \\frac{n-1}{2}\\log(1+(1-r^2)g)
        - \\frac{3}{2}\\log g - \\frac{n}{2g}\\Big] dg

    Adapted from a Matlab implementation:
    https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m

    References
    ----------
    .. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
       hypothesis test for correlations and partial correlations.
       Psychon. Bull. Rev. 19, 1057-1064.
       https://doi.org/10.3758/s13423-012-0295-x
    """
    from scipy.special import gamma

    # Integrand of the JZS Bayes Factor, written on the log scale and
    # exponentiated at the end for numerical stability.
    def integrand(g, r, n):
        log_term = (((n - 2) / 2) * np.log(1 + g)
                    - ((n - 1) / 2) * np.log(1 + (1 - r ** 2) * g)
                    - (3 / 2) * np.log(g)
                    - n / (2 * g))
        return np.exp(log_term)

    # Numerical integration over g in (0, inf).
    integral = quad(integrand, 0, np.inf, args=(r, n))[0]
    bf10 = np.sqrt(n / 2) / gamma(1 / 2) * integral
    return _format_bf(bf10)
def normality(*args, alpha=.05):
    """Shapiro-Wilk univariate normality test.

    Parameters
    ----------
    *args : array_like
        One or more arrays of sample data. They may have different lengths.
    alpha : float
        Significance level.

    Returns
    -------
    normal : bool or np.ndarray of bool
        True if the corresponding sample looks normally distributed
        (i.e. its p-value exceeds ``alpha``).
    p : float or np.ndarray
        Shapiro-Wilk p-value(s), rounded to 3 decimals. Scalars are
        returned when a single array is passed.

    See Also
    --------
    homoscedasticity : Test equality of variance.
    sphericity : Mauchly's test for sphericity.

    Notes
    -----
    The null hypothesis is that the data come from a normal distribution.
    Interpret with caution for large sample sizes: trivially small
    departures from normality become statistically significant.

    References
    ----------
    .. [1] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance
       test for normality (complete samples). Biometrika, 52(3/4), 591-611.
    .. [2] https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test
    """
    from scipy.stats import shapiro

    # One Shapiro-Wilk p-value per input sample.
    pvals = np.array([shapiro(sample)[1] for sample in args])
    normal = pvals > alpha
    if len(args) == 1:
        # Single sample: return plain scalars instead of 1-element arrays.
        return bool(normal[0]), np.round(float(pvals[0]), 3)
    return normal, np.round(pvals, 3)
def homoscedasticity(*args, alpha=.05):
    """Test equality of variance.

    Parameters
    ----------
    *args : array_like
        Two or more arrays of sample data. They may have different lengths.
    alpha : float
        Significance level.

    Returns
    -------
    equal_var : bool
        True if the samples have equal variance (p-value above ``alpha``).
    p : float
        P-value of the selected test, rounded to 3 decimals.

    See Also
    --------
    normality : Test the univariate normality of one or more array(s).
    sphericity : Mauchly's test for sphericity.

    Notes
    -----
    The data are first checked for normality with the Shapiro-Wilk test.
    If all samples look normal, the Bartlett test is used; otherwise the
    Levene (1960) test, which is less sensitive to departures from
    normality, is used instead.

    References
    ----------
    .. [1] Bartlett, M. S. (1937). Properties of sufficiency and
       statistical tests. Proc. R. Soc. Lond. A, 160(901), 268-282.
    .. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the
       equality of variances. J. Am. Stat. Assoc., 69(346), 364-367.
    """
    from scipy.stats import bartlett, levene

    if len(args) < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    # Bartlett's test assumes normality; fall back to Levene's test when
    # at least one sample fails the Shapiro-Wilk check.
    normal, _ = normality(*args)
    all_normal = np.count_nonzero(normal) == normal.size
    test = bartlett if all_normal else levene
    _, p = test(*args)
    equal_var = True if p > alpha else False
    return equal_var, np.round(p, 3)
def anderson(*args, dist='norm'):
    """Anderson-Darling test of distribution.

    Parameters
    ----------
    *args : array_like
        One or more arrays of sample data. They may have different lengths.
    dist : string
        Distribution ('norm', 'expon', 'logistic', 'gumbel').

    Returns
    -------
    from_dist : bool or np.ndarray of bool
        Flag computed from the Anderson-Darling statistic and critical
        values (see note below).
    sig_level : float or np.ndarray
        Significance level (in %) whose critical value is closest to the
        test statistic. See :py:func:`scipy.stats.anderson` for details.
    """
    from scipy.stats import anderson as ads

    n_samples = len(args)
    from_dist = np.zeros(n_samples, 'bool')
    sig_level = np.zeros(n_samples)
    for j, sample in enumerate(args):
        stat, crit, sig = ads(sample, dist=dist)
        # NOTE(review): a statistic *above* a critical value is evidence
        # against the hypothesized distribution, yet True is stored here.
        # The docstring examples document this behavior, so it is kept
        # as-is -- confirm the intended semantics of ``from_dist``.
        from_dist[j] = bool((stat > crit).any())
        # Report the significance level whose critical value lies closest
        # to the observed statistic.
        sig_level[j] = sig[np.argmin(np.abs(stat - crit))]
    if n_samples == 1:
        # Single sample: return plain scalars.
        return bool(from_dist), float(sig_level)
    return from_dist, sig_level
def epsilon(data, correction='gg'):
    """Epsilon adjustement factor for repeated measures.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame containing the repeated measurements, in wide format.
    correction : string
        Which epsilon to compute::

            'gg' : Greenhouse-Geisser
            'hf' : Huynh-Feldt
            'lb' : Lower bound

    Returns
    -------
    eps : float
        Epsilon adjustement factor (capped at 1).

    Notes
    -----
    The lower bound is :math:`1 / (k - 1)` with :math:`k` the number of
    conditions. The Greenhouse-Geisser epsilon is computed from the
    sample covariance matrix, and the Huynh-Feldt epsilon is a
    correction of the Greenhouse-Geisser value using the number of
    subjects :math:`n`.

    References
    ----------
    .. [1] http://www.real-statistics.com/anova-repeated-measures/sphericity/
    """
    cov = data.cov()
    n, k = data.shape

    if correction == 'lb':
        if cov.columns.nlevels == 1:
            # One within factor.
            return 1 / (k - 1)
        elif cov.columns.nlevels == 2:
            # Two within factors: use the product of (levels - 1).
            n_a = cov.columns.levels[0].size
            n_b = cov.columns.levels[1].size
            return 1 / ((n_a - 1) * (n_b - 1))
        # NOTE(review): more than two column levels falls through to the
        # Greenhouse-Geisser computation below (original behavior).

    # Greenhouse-Geisser epsilon from the covariance matrix.
    var_mean = np.diag(cov).mean()       # mean of the variances
    grand_mean = cov.mean().mean()       # grand mean of the covariance matrix
    sum_sq = (cov ** 2).sum().sum()
    row_means_sq = (cov.mean(1) ** 2).sum().sum()
    numer = (k * (var_mean - grand_mean)) ** 2
    denom = (k - 1) * (sum_sq - 2 * k * row_means_sq
                       + k ** 2 * grand_mean ** 2)
    eps = np.min([numer / denom, 1])

    if correction == 'hf':
        # Huynh-Feldt correction of the Greenhouse-Geisser epsilon.
        numer = n * (k - 1) * eps - 2
        denom = (k - 1) * (n - 1 - (k - 1) * eps)
        eps = np.min([numer / denom, 1])
    return eps
def sphericity(data, method='mauchly', alpha=.05):
    """Mauchly and JNS test for sphericity.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame containing the repeated measurements, in wide format.
    method : str
        Method to compute sphericity::

            'jns' : John, Nagao and Sugiura test.
            'mauchly' : Mauchly test.
    alpha : float
        Significance level.

    Returns
    -------
    spher : bool
        True if the data have the sphericity property (p-value > alpha).
    W : float
        Test statistic, rounded to 3 decimals.
    chi_sq : float
        Chi-square statistic, rounded to 3 decimals.
    ddof : int
        Degrees of freedom.
    p : float
        P-value.

    See Also
    --------
    homoscedasticity : Test equality of variance.
    normality : Test the univariate normality of one or more array(s).

    Notes
    -----
    Both statistics are computed from the eigenvalues of the
    double-centered sample covariance matrix and approximated with a
    chi-square distribution with :math:`r(r-1)/2 - 1` degrees of freedom,
    where :math:`r` is the number of conditions.

    References
    ----------
    .. [1] Mauchly, J. W. (1940). Significance test for sphericity of a
       normal n-variate distribution. Ann. Math. Stat., 11(2), 204-209.
    .. [2] John, S. (1972). The distribution of a statistic used for
       testing sphericity of normal distributions. Biometrika, 59(1),
       169-173.
    .. [3] http://www.real-statistics.com/anova-repeated-measures/sphericity/
    """
    from scipy.stats import chi2

    S = data.cov().values
    n = data.shape[0]
    p = data.shape[1]
    d = p - 1

    # Estimate of the population covariance (double-centered matrix).
    S_pop = S - S.mean(0)[:, np.newaxis] - S.mean(1)[np.newaxis, :] + S.mean()

    # p - 1 eigenvalues, sorted ascending; the smallest one (~0 after
    # double centering) is dropped.
    eig = np.linalg.eigvalsh(S_pop)[1:]

    if method == 'jns':
        # John, Nagao and Sugiura statistic.
        W = eig.sum()**2 / np.square(eig).sum()
        chi_sq = 0.5 * n * d**2 * (W - 1 / d)

    if method == 'mauchly':
        # Mauchly's statistic (ratio of geometric to arithmetic mean of
        # the eigenvalues, raised to the power d).
        # FIX: np.product was deprecated and removed in NumPy 2.0;
        # np.prod is the supported equivalent.
        W = np.prod(eig) / (eig.sum() / d)**d
        # Chi-square approximation.
        f = (2 * d**2 + p + 1) / (6 * d * (n - 1))
        chi_sq = (f - 1) * (n - 1) * np.log(W)

    # Degrees of freedom of the chi-square approximation.
    ddof = 0.5 * d * p - 1
    # Ensure that dof is not zero.
    ddof = 1 if ddof == 0 else ddof
    pval = chi2.sf(chi_sq, ddof)

    # Renamed from ``sphericity`` to avoid shadowing the function name.
    spher = True if pval > alpha else False
    return spher, np.round(W, 3), np.round(chi_sq, 3), int(ddof), pval