repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
PX4/pyulog | pyulog/core.py | ULog._check_file_corruption | def _check_file_corruption(self, header):
""" check for file corruption based on an unknown message type in the header """
# We need to handle 2 cases:
# - corrupt file (we do our best to read the rest of the file)
# - new ULog message type got added (we just want to skip the message)
if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:
if not self._file_corrupt and self._debug:
print('File corruption detected')
self._file_corrupt = True
return self._file_corrupt | python | def _check_file_corruption(self, header):
""" check for file corruption based on an unknown message type in the header """
# We need to handle 2 cases:
# - corrupt file (we do our best to read the rest of the file)
# - new ULog message type got added (we just want to skip the message)
if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:
if not self._file_corrupt and self._debug:
print('File corruption detected')
self._file_corrupt = True
return self._file_corrupt | [
"def",
"_check_file_corruption",
"(",
"self",
",",
"header",
")",
":",
"# We need to handle 2 cases:",
"# - corrupt file (we do our best to read the rest of the file)",
"# - new ULog message type got added (we just want to skip the message)",
"if",
"header",
".",
"msg_type",
"==",
"0... | check for file corruption based on an unknown message type in the header | [
"check",
"for",
"file",
"corruption",
"based",
"on",
"an",
"unknown",
"message",
"type",
"in",
"the",
"header"
] | 3bc4f9338d30e2e0a0dfbed58f54d200967e5056 | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/core.py#L602-L612 | train | 206,600 |
PX4/pyulog | pyulog/info.py | show_info | def show_info(ulog, verbose):
"""Show general information from an ULog"""
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)
h2, m2 = divmod(m2, 60)
print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format(
h1, m1, s1, h2, m2, s2))
dropout_durations = [dropout.duration for dropout in ulog.dropouts]
if len(dropout_durations) == 0:
print("No Dropouts")
else:
print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms"
.format(len(dropout_durations), sum(dropout_durations)/1000.,
max(dropout_durations),
int(sum(dropout_durations)/len(dropout_durations))))
version = ulog.get_version_info_str()
if not version is None:
print('SW Version: {}'.format(version))
print("Info Messages:")
for k in sorted(ulog.msg_info_dict):
if not k.startswith('perf_') or verbose:
print(" {0}: {1}".format(k, ulog.msg_info_dict[k]))
if len(ulog.msg_info_multiple_dict) > 0:
if verbose:
print("Info Multiple Messages:")
for k in sorted(ulog.msg_info_multiple_dict):
print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k]))
else:
print("Info Multiple Messages: {}".format(
", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in
sorted(ulog.msg_info_multiple_dict)])))
print("")
print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
"number of data points", "total bytes"))
data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
for d in data_list_sorted:
message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
num_data_points = len(d.data['timestamp'])
name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size)
print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points,
message_size * num_data_points)) | python | def show_info(ulog, verbose):
"""Show general information from an ULog"""
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)
h2, m2 = divmod(m2, 60)
print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format(
h1, m1, s1, h2, m2, s2))
dropout_durations = [dropout.duration for dropout in ulog.dropouts]
if len(dropout_durations) == 0:
print("No Dropouts")
else:
print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms"
.format(len(dropout_durations), sum(dropout_durations)/1000.,
max(dropout_durations),
int(sum(dropout_durations)/len(dropout_durations))))
version = ulog.get_version_info_str()
if not version is None:
print('SW Version: {}'.format(version))
print("Info Messages:")
for k in sorted(ulog.msg_info_dict):
if not k.startswith('perf_') or verbose:
print(" {0}: {1}".format(k, ulog.msg_info_dict[k]))
if len(ulog.msg_info_multiple_dict) > 0:
if verbose:
print("Info Multiple Messages:")
for k in sorted(ulog.msg_info_multiple_dict):
print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k]))
else:
print("Info Multiple Messages: {}".format(
", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in
sorted(ulog.msg_info_multiple_dict)])))
print("")
print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
"number of data points", "total bytes"))
data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
for d in data_list_sorted:
message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
num_data_points = len(d.data['timestamp'])
name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size)
print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points,
message_size * num_data_points)) | [
"def",
"show_info",
"(",
"ulog",
",",
"verbose",
")",
":",
"m1",
",",
"s1",
"=",
"divmod",
"(",
"int",
"(",
"ulog",
".",
"start_timestamp",
"/",
"1e6",
")",
",",
"60",
")",
"h1",
",",
"m1",
"=",
"divmod",
"(",
"m1",
",",
"60",
")",
"m2",
",",
... | Show general information from an ULog | [
"Show",
"general",
"information",
"from",
"an",
"ULog"
] | 3bc4f9338d30e2e0a0dfbed58f54d200967e5056 | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/info.py#L15-L65 | train | 206,601 |
PX4/pyulog | pyulog/px4.py | PX4ULog.get_estimator | def get_estimator(self):
"""return the configured estimator as string from initial parameters"""
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
if mav_type == 1: # fixed wing always uses EKF2
return 'EKF2'
mc_est_group = self._ulog.initial_parameters.get('SYS_MC_EST_GROUP', None)
return {0: 'INAV',
1: 'LPE',
2: 'EKF2',
3: 'IEKF'}.get(mc_est_group, 'unknown ({})'.format(mc_est_group)) | python | def get_estimator(self):
"""return the configured estimator as string from initial parameters"""
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
if mav_type == 1: # fixed wing always uses EKF2
return 'EKF2'
mc_est_group = self._ulog.initial_parameters.get('SYS_MC_EST_GROUP', None)
return {0: 'INAV',
1: 'LPE',
2: 'EKF2',
3: 'IEKF'}.get(mc_est_group, 'unknown ({})'.format(mc_est_group)) | [
"def",
"get_estimator",
"(",
"self",
")",
":",
"mav_type",
"=",
"self",
".",
"_ulog",
".",
"initial_parameters",
".",
"get",
"(",
"'MAV_TYPE'",
",",
"None",
")",
"if",
"mav_type",
"==",
"1",
":",
"# fixed wing always uses EKF2",
"return",
"'EKF2'",
"mc_est_gro... | return the configured estimator as string from initial parameters | [
"return",
"the",
"configured",
"estimator",
"as",
"string",
"from",
"initial",
"parameters"
] | 3bc4f9338d30e2e0a0dfbed58f54d200967e5056 | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/px4.py#L54-L65 | train | 206,602 |
PX4/pyulog | pyulog/px4.py | PX4ULog.get_configured_rc_input_names | def get_configured_rc_input_names(self, channel):
"""
find all RC mappings to a given channel and return their names
:param channel: input channel (0=first)
:return: list of strings or None
"""
ret_val = []
for key in self._ulog.initial_parameters:
param_val = self._ulog.initial_parameters[key]
if key.startswith('RC_MAP_') and param_val == channel + 1:
ret_val.append(key[7:].capitalize())
if len(ret_val) > 0:
return ret_val
return None | python | def get_configured_rc_input_names(self, channel):
"""
find all RC mappings to a given channel and return their names
:param channel: input channel (0=first)
:return: list of strings or None
"""
ret_val = []
for key in self._ulog.initial_parameters:
param_val = self._ulog.initial_parameters[key]
if key.startswith('RC_MAP_') and param_val == channel + 1:
ret_val.append(key[7:].capitalize())
if len(ret_val) > 0:
return ret_val
return None | [
"def",
"get_configured_rc_input_names",
"(",
"self",
",",
"channel",
")",
":",
"ret_val",
"=",
"[",
"]",
"for",
"key",
"in",
"self",
".",
"_ulog",
".",
"initial_parameters",
":",
"param_val",
"=",
"self",
".",
"_ulog",
".",
"initial_parameters",
"[",
"key",
... | find all RC mappings to a given channel and return their names
:param channel: input channel (0=first)
:return: list of strings or None | [
"find",
"all",
"RC",
"mappings",
"to",
"a",
"given",
"channel",
"and",
"return",
"their",
"names"
] | 3bc4f9338d30e2e0a0dfbed58f54d200967e5056 | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/px4.py#L96-L111 | train | 206,603 |
hawkowl/towncrier | src/towncrier/_builder.py | find_fragments | def find_fragments(base_directory, sections, fragment_directory, definitions):
"""
Sections are a dictonary of section names to paths.
"""
content = OrderedDict()
fragment_filenames = []
for key, val in sections.items():
if fragment_directory is not None:
section_dir = os.path.join(base_directory, val, fragment_directory)
else:
section_dir = os.path.join(base_directory, val)
files = os.listdir(section_dir)
file_content = {}
for basename in files:
parts = basename.split(u".")
counter = 0
if len(parts) == 1:
continue
else:
ticket, category = parts[:2]
# If there is a number after the category then use it as a counter,
# otherwise ignore it.
# This means 1.feature.1 and 1.feature do not conflict but
# 1.feature.rst and 1.feature do.
if len(parts) > 2:
try:
counter = int(parts[2])
except ValueError:
pass
if category not in definitions:
continue
full_filename = os.path.join(section_dir, basename)
fragment_filenames.append(full_filename)
with open(full_filename, "rb") as f:
data = f.read().decode("utf8", "replace")
if (ticket, category, counter) in file_content:
raise ValueError(
"multiple files for {}.{} in {}".format(
ticket, category, section_dir
)
)
file_content[ticket, category, counter] = data
content[key] = file_content
return content, fragment_filenames | python | def find_fragments(base_directory, sections, fragment_directory, definitions):
"""
Sections are a dictonary of section names to paths.
"""
content = OrderedDict()
fragment_filenames = []
for key, val in sections.items():
if fragment_directory is not None:
section_dir = os.path.join(base_directory, val, fragment_directory)
else:
section_dir = os.path.join(base_directory, val)
files = os.listdir(section_dir)
file_content = {}
for basename in files:
parts = basename.split(u".")
counter = 0
if len(parts) == 1:
continue
else:
ticket, category = parts[:2]
# If there is a number after the category then use it as a counter,
# otherwise ignore it.
# This means 1.feature.1 and 1.feature do not conflict but
# 1.feature.rst and 1.feature do.
if len(parts) > 2:
try:
counter = int(parts[2])
except ValueError:
pass
if category not in definitions:
continue
full_filename = os.path.join(section_dir, basename)
fragment_filenames.append(full_filename)
with open(full_filename, "rb") as f:
data = f.read().decode("utf8", "replace")
if (ticket, category, counter) in file_content:
raise ValueError(
"multiple files for {}.{} in {}".format(
ticket, category, section_dir
)
)
file_content[ticket, category, counter] = data
content[key] = file_content
return content, fragment_filenames | [
"def",
"find_fragments",
"(",
"base_directory",
",",
"sections",
",",
"fragment_directory",
",",
"definitions",
")",
":",
"content",
"=",
"OrderedDict",
"(",
")",
"fragment_filenames",
"=",
"[",
"]",
"for",
"key",
",",
"val",
"in",
"sections",
".",
"items",
... | Sections are a dictonary of section names to paths. | [
"Sections",
"are",
"a",
"dictonary",
"of",
"section",
"names",
"to",
"paths",
"."
] | ecd438c9c0ef132a92aba2eecc4dc672ccf9ec63 | https://github.com/hawkowl/towncrier/blob/ecd438c9c0ef132a92aba2eecc4dc672ccf9ec63/src/towncrier/_builder.py#L29-L84 | train | 206,604 |
hawkowl/towncrier | src/towncrier/_builder.py | indent | def indent(text, prefix):
"""
Adds `prefix` to the beginning of non-empty lines in `text`.
"""
# Based on Python 3's textwrap.indent
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if line.strip() else line)
return u"".join(prefixed_lines()) | python | def indent(text, prefix):
"""
Adds `prefix` to the beginning of non-empty lines in `text`.
"""
# Based on Python 3's textwrap.indent
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if line.strip() else line)
return u"".join(prefixed_lines()) | [
"def",
"indent",
"(",
"text",
",",
"prefix",
")",
":",
"# Based on Python 3's textwrap.indent",
"def",
"prefixed_lines",
"(",
")",
":",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
"True",
")",
":",
"yield",
"(",
"prefix",
"+",
"line",
"if",
"line"... | Adds `prefix` to the beginning of non-empty lines in `text`. | [
"Adds",
"prefix",
"to",
"the",
"beginning",
"of",
"non",
"-",
"empty",
"lines",
"in",
"text",
"."
] | ecd438c9c0ef132a92aba2eecc4dc672ccf9ec63 | https://github.com/hawkowl/towncrier/blob/ecd438c9c0ef132a92aba2eecc4dc672ccf9ec63/src/towncrier/_builder.py#L87-L95 | train | 206,605 |
hawkowl/towncrier | src/towncrier/_builder.py | render_fragments | def render_fragments(template, issue_format, fragments, definitions, underlines, wrap):
"""
Render the fragments into a news file.
"""
jinja_template = Template(template, trim_blocks=True)
data = OrderedDict()
for section_name, section_value in fragments.items():
data[section_name] = OrderedDict()
for category_name, category_value in section_value.items():
# Suppose we start with an ordering like this:
#
# - Fix the thing (#7, #123, #2)
# - Fix the other thing (#1)
# First we sort the issues inside each line:
#
# - Fix the thing (#2, #7, #123)
# - Fix the other thing (#1)
entries = []
for text, issues in category_value.items():
entries.append((text, sorted(issues, key=issue_key)))
# Then we sort the lines:
#
# - Fix the other thing (#1)
# - Fix the thing (#2, #7, #123)
entries.sort(key=entry_key)
# Then we put these nicely sorted entries back in an ordered dict
# for the template, after formatting each issue number
categories = OrderedDict()
for text, issues in entries:
rendered = [render_issue(issue_format, i) for i in issues]
categories[text] = rendered
data[section_name][category_name] = categories
done = []
res = jinja_template.render(
sections=data, definitions=definitions, underlines=underlines
)
for line in res.split(u"\n"):
if wrap:
done.append(
textwrap.fill(
line,
width=79,
subsequent_indent=u" ",
break_long_words=False,
break_on_hyphens=False,
)
)
else:
done.append(line)
return u"\n".join(done).rstrip() + u"\n" | python | def render_fragments(template, issue_format, fragments, definitions, underlines, wrap):
"""
Render the fragments into a news file.
"""
jinja_template = Template(template, trim_blocks=True)
data = OrderedDict()
for section_name, section_value in fragments.items():
data[section_name] = OrderedDict()
for category_name, category_value in section_value.items():
# Suppose we start with an ordering like this:
#
# - Fix the thing (#7, #123, #2)
# - Fix the other thing (#1)
# First we sort the issues inside each line:
#
# - Fix the thing (#2, #7, #123)
# - Fix the other thing (#1)
entries = []
for text, issues in category_value.items():
entries.append((text, sorted(issues, key=issue_key)))
# Then we sort the lines:
#
# - Fix the other thing (#1)
# - Fix the thing (#2, #7, #123)
entries.sort(key=entry_key)
# Then we put these nicely sorted entries back in an ordered dict
# for the template, after formatting each issue number
categories = OrderedDict()
for text, issues in entries:
rendered = [render_issue(issue_format, i) for i in issues]
categories[text] = rendered
data[section_name][category_name] = categories
done = []
res = jinja_template.render(
sections=data, definitions=definitions, underlines=underlines
)
for line in res.split(u"\n"):
if wrap:
done.append(
textwrap.fill(
line,
width=79,
subsequent_indent=u" ",
break_long_words=False,
break_on_hyphens=False,
)
)
else:
done.append(line)
return u"\n".join(done).rstrip() + u"\n" | [
"def",
"render_fragments",
"(",
"template",
",",
"issue_format",
",",
"fragments",
",",
"definitions",
",",
"underlines",
",",
"wrap",
")",
":",
"jinja_template",
"=",
"Template",
"(",
"template",
",",
"trim_blocks",
"=",
"True",
")",
"data",
"=",
"OrderedDict... | Render the fragments into a news file. | [
"Render",
"the",
"fragments",
"into",
"a",
"news",
"file",
"."
] | ecd438c9c0ef132a92aba2eecc4dc672ccf9ec63 | https://github.com/hawkowl/towncrier/blob/ecd438c9c0ef132a92aba2eecc4dc672ccf9ec63/src/towncrier/_builder.py#L156-L218 | train | 206,606 |
raphaelvallat/pingouin | pingouin/plotting.py | _ppoints | def _ppoints(n, a=0.5):
"""
Ordinates For Probability Plotting.
Numpy analogue or `R`'s `ppoints` function.
Parameters
----------
n : int
Number of points generated
a : float
Offset fraction (typically between 0 and 1)
Returns
-------
p : array
Sequence of probabilities at which to evaluate the inverse
distribution.
"""
a = 3 / 8 if n <= 10 else 0.5
return (np.arange(n) + 1 - a) / (n + 1 - 2 * a) | python | def _ppoints(n, a=0.5):
"""
Ordinates For Probability Plotting.
Numpy analogue or `R`'s `ppoints` function.
Parameters
----------
n : int
Number of points generated
a : float
Offset fraction (typically between 0 and 1)
Returns
-------
p : array
Sequence of probabilities at which to evaluate the inverse
distribution.
"""
a = 3 / 8 if n <= 10 else 0.5
return (np.arange(n) + 1 - a) / (n + 1 - 2 * a) | [
"def",
"_ppoints",
"(",
"n",
",",
"a",
"=",
"0.5",
")",
":",
"a",
"=",
"3",
"/",
"8",
"if",
"n",
"<=",
"10",
"else",
"0.5",
"return",
"(",
"np",
".",
"arange",
"(",
"n",
")",
"+",
"1",
"-",
"a",
")",
"/",
"(",
"n",
"+",
"1",
"-",
"2",
... | Ordinates For Probability Plotting.
Numpy analogue or `R`'s `ppoints` function.
Parameters
----------
n : int
Number of points generated
a : float
Offset fraction (typically between 0 and 1)
Returns
-------
p : array
Sequence of probabilities at which to evaluate the inverse
distribution. | [
"Ordinates",
"For",
"Probability",
"Plotting",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/plotting.py#L298-L318 | train | 206,607 |
raphaelvallat/pingouin | pingouin/plotting.py | qqplot | def qqplot(x, dist='norm', sparams=(), confidence=.95, figsize=(5, 4),
ax=None):
"""Quantile-Quantile plot.
Parameters
----------
x : array_like
Sample data.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
`scipy.stats.distributions` instance (i.e. they have a ``ppf`` method)
are also accepted.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters, location,
and scale). See :py:func:`scipy.stats.probplot` for more details.
confidence : float
Confidence level (.95 = 95%) for point-wise confidence envelope.
Pass False for no envelope.
figsize : tuple
Figsize in inches
ax : matplotlib axes
Axis on which to draw the plot
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
This function returns a scatter plot of the quantile of the sample data `x`
against the theoretical quantiles of the distribution given in `dist`
(default = 'norm').
The points plotted in a Q–Q plot are always non-decreasing when viewed
from left to right. If the two distributions being compared are identical,
the Q–Q plot follows the 45° line y = x. If the two distributions agree
after linearly transforming the values in one of the distributions,
then the Q–Q plot follows some line, but not necessarily the line y = x.
If the general trend of the Q–Q plot is flatter than the line y = x,
the distribution plotted on the horizontal axis is more dispersed than
the distribution plotted on the vertical axis. Conversely, if the general
trend of the Q–Q plot is steeper than the line y = x, the distribution
plotted on the vertical axis is more dispersed than the distribution
plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped,
indicating that one of the distributions is more skewed than the other,
or that one of the distributions has heavier tails than the other.
In addition, the function also plots a best-fit line (linear regression)
for the data and annotates the plot with the coefficient of
determination :math:`R^2`. Note that the intercept and slope of the
linear regression between the quantiles gives a measure of the relative
location and relative scale of the samples.
References
----------
.. [1] https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot
.. [2] https://github.com/cran/car/blob/master/R/qqPlot.R
.. [3] Fox, J. (2008), Applied Regression Analysis and Generalized Linear
Models, 2nd Ed., Sage Publications, Inc.
Examples
--------
Q-Q plot using a normal theoretical distribution:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> ax = pg.qqplot(x, dist='norm')
Two Q-Q plots using two separate axes:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> x_exp = np.random.exponential(size=50)
>>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
>>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False)
>>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2)
Using custom location / scale parameters as well as another Seaborn style
.. plot::
>>> import numpy as np
>>> import seaborn as sns
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> mean, std = 0, 0.8
>>> sns.set_style('darkgrid')
>>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std))
"""
if isinstance(dist, str):
dist = getattr(stats, dist)
x = np.asarray(x)
x = x[~np.isnan(x)] # NaN are automatically removed
# Extract quantiles and regression
quantiles = stats.probplot(x, sparams=sparams, dist=dist, fit=False)
theor, observed = quantiles[0], quantiles[1]
fit_params = dist.fit(x)
loc = fit_params[-2]
scale = fit_params[-1]
shape = fit_params[0] if len(fit_params) == 3 else None
# Observed values to observed quantiles
if loc != 0 and scale != 1:
observed = (np.sort(observed) - fit_params[-2]) / fit_params[-1]
# Linear regression
slope, intercept, r, _, _ = stats.linregress(theor, observed)
# Start the plot
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(theor, observed, 'bo')
stats.morestats._add_axis_labels_title(ax,
xlabel='Theoretical quantiles',
ylabel='Ordered quantiles',
title='Q-Q Plot')
# Add diagonal line
end_pts = [ax.get_xlim(), ax.get_ylim()]
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(end_pts, end_pts, color='slategrey', lw=1.5)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)
# Add regression line and annotate R2
fit_val = slope * theor + intercept
ax.plot(theor, fit_val, 'r-', lw=2)
posx = end_pts[0] + 0.60 * (end_pts[1] - end_pts[0])
posy = end_pts[0] + 0.10 * (end_pts[1] - end_pts[0])
ax.text(posx, posy, "$R^2=%.3f$" % r**2)
if confidence is not False:
# Confidence envelope
n = x.size
P = _ppoints(n)
crit = stats.norm.ppf(1 - (1 - confidence) / 2)
pdf = dist.pdf(theor) if shape is None else dist.pdf(theor, shape)
se = (slope / pdf) * np.sqrt(P * (1 - P) / n)
upper = fit_val + crit * se
lower = fit_val - crit * se
ax.plot(theor, upper, 'r--', lw=1.25)
ax.plot(theor, lower, 'r--', lw=1.25)
return ax | python | def qqplot(x, dist='norm', sparams=(), confidence=.95, figsize=(5, 4),
ax=None):
"""Quantile-Quantile plot.
Parameters
----------
x : array_like
Sample data.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
`scipy.stats.distributions` instance (i.e. they have a ``ppf`` method)
are also accepted.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters, location,
and scale). See :py:func:`scipy.stats.probplot` for more details.
confidence : float
Confidence level (.95 = 95%) for point-wise confidence envelope.
Pass False for no envelope.
figsize : tuple
Figsize in inches
ax : matplotlib axes
Axis on which to draw the plot
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
This function returns a scatter plot of the quantile of the sample data `x`
against the theoretical quantiles of the distribution given in `dist`
(default = 'norm').
The points plotted in a Q–Q plot are always non-decreasing when viewed
from left to right. If the two distributions being compared are identical,
the Q–Q plot follows the 45° line y = x. If the two distributions agree
after linearly transforming the values in one of the distributions,
then the Q–Q plot follows some line, but not necessarily the line y = x.
If the general trend of the Q–Q plot is flatter than the line y = x,
the distribution plotted on the horizontal axis is more dispersed than
the distribution plotted on the vertical axis. Conversely, if the general
trend of the Q–Q plot is steeper than the line y = x, the distribution
plotted on the vertical axis is more dispersed than the distribution
plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped,
indicating that one of the distributions is more skewed than the other,
or that one of the distributions has heavier tails than the other.
In addition, the function also plots a best-fit line (linear regression)
for the data and annotates the plot with the coefficient of
determination :math:`R^2`. Note that the intercept and slope of the
linear regression between the quantiles gives a measure of the relative
location and relative scale of the samples.
References
----------
.. [1] https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot
.. [2] https://github.com/cran/car/blob/master/R/qqPlot.R
.. [3] Fox, J. (2008), Applied Regression Analysis and Generalized Linear
Models, 2nd Ed., Sage Publications, Inc.
Examples
--------
Q-Q plot using a normal theoretical distribution:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> ax = pg.qqplot(x, dist='norm')
Two Q-Q plots using two separate axes:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> x_exp = np.random.exponential(size=50)
>>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
>>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False)
>>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2)
Using custom location / scale parameters as well as another Seaborn style
.. plot::
>>> import numpy as np
>>> import seaborn as sns
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> mean, std = 0, 0.8
>>> sns.set_style('darkgrid')
>>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std))
"""
if isinstance(dist, str):
dist = getattr(stats, dist)
x = np.asarray(x)
x = x[~np.isnan(x)] # NaN are automatically removed
# Extract quantiles and regression
quantiles = stats.probplot(x, sparams=sparams, dist=dist, fit=False)
theor, observed = quantiles[0], quantiles[1]
fit_params = dist.fit(x)
loc = fit_params[-2]
scale = fit_params[-1]
shape = fit_params[0] if len(fit_params) == 3 else None
# Observed values to observed quantiles
if loc != 0 and scale != 1:
observed = (np.sort(observed) - fit_params[-2]) / fit_params[-1]
# Linear regression
slope, intercept, r, _, _ = stats.linregress(theor, observed)
# Start the plot
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(theor, observed, 'bo')
stats.morestats._add_axis_labels_title(ax,
xlabel='Theoretical quantiles',
ylabel='Ordered quantiles',
title='Q-Q Plot')
# Add diagonal line
end_pts = [ax.get_xlim(), ax.get_ylim()]
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(end_pts, end_pts, color='slategrey', lw=1.5)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)
# Add regression line and annotate R2
fit_val = slope * theor + intercept
ax.plot(theor, fit_val, 'r-', lw=2)
posx = end_pts[0] + 0.60 * (end_pts[1] - end_pts[0])
posy = end_pts[0] + 0.10 * (end_pts[1] - end_pts[0])
ax.text(posx, posy, "$R^2=%.3f$" % r**2)
if confidence is not False:
# Confidence envelope
n = x.size
P = _ppoints(n)
crit = stats.norm.ppf(1 - (1 - confidence) / 2)
pdf = dist.pdf(theor) if shape is None else dist.pdf(theor, shape)
se = (slope / pdf) * np.sqrt(P * (1 - P) / n)
upper = fit_val + crit * se
lower = fit_val - crit * se
ax.plot(theor, upper, 'r--', lw=1.25)
ax.plot(theor, lower, 'r--', lw=1.25)
return ax | [
"def",
"qqplot",
"(",
"x",
",",
"dist",
"=",
"'norm'",
",",
"sparams",
"=",
"(",
")",
",",
"confidence",
"=",
".95",
",",
"figsize",
"=",
"(",
"5",
",",
"4",
")",
",",
"ax",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"dist",
",",
"str",
... | Quantile-Quantile plot.
Parameters
----------
x : array_like
Sample data.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
`scipy.stats.distributions` instance (i.e. they have a ``ppf`` method)
are also accepted.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters, location,
and scale). See :py:func:`scipy.stats.probplot` for more details.
confidence : float
Confidence level (.95 = 95%) for point-wise confidence envelope.
Pass False for no envelope.
figsize : tuple
Figsize in inches
ax : matplotlib axes
Axis on which to draw the plot
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
This function returns a scatter plot of the quantile of the sample data `x`
against the theoretical quantiles of the distribution given in `dist`
(default = 'norm').
The points plotted in a Q–Q plot are always non-decreasing when viewed
from left to right. If the two distributions being compared are identical,
the Q–Q plot follows the 45° line y = x. If the two distributions agree
after linearly transforming the values in one of the distributions,
then the Q–Q plot follows some line, but not necessarily the line y = x.
If the general trend of the Q–Q plot is flatter than the line y = x,
the distribution plotted on the horizontal axis is more dispersed than
the distribution plotted on the vertical axis. Conversely, if the general
trend of the Q–Q plot is steeper than the line y = x, the distribution
plotted on the vertical axis is more dispersed than the distribution
plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped,
indicating that one of the distributions is more skewed than the other,
or that one of the distributions has heavier tails than the other.
In addition, the function also plots a best-fit line (linear regression)
for the data and annotates the plot with the coefficient of
determination :math:`R^2`. Note that the intercept and slope of the
linear regression between the quantiles gives a measure of the relative
location and relative scale of the samples.
References
----------
.. [1] https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot
.. [2] https://github.com/cran/car/blob/master/R/qqPlot.R
.. [3] Fox, J. (2008), Applied Regression Analysis and Generalized Linear
Models, 2nd Ed., Sage Publications, Inc.
Examples
--------
Q-Q plot using a normal theoretical distribution:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> ax = pg.qqplot(x, dist='norm')
Two Q-Q plots using two separate axes:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> x_exp = np.random.exponential(size=50)
>>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
>>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False)
>>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2)
Using custom location / scale parameters as well as another Seaborn style
.. plot::
>>> import numpy as np
>>> import seaborn as sns
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> mean, std = 0, 0.8
>>> sns.set_style('darkgrid')
>>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std)) | [
"Quantile",
"-",
"Quantile",
"plot",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/plotting.py#L321-L486 | train | 206,608 |
raphaelvallat/pingouin | pingouin/plotting.py | plot_paired | def plot_paired(data=None, dv=None, within=None, subject=None, order=None,
boxplot=True, figsize=(4, 4), dpi=100, ax=None,
colors=['green', 'grey', 'indianred'],
pointplot_kwargs={'scale': .6, 'markers': '.'},
boxplot_kwargs={'color': 'lightslategrey', 'width': .2}):
"""
Paired plot.
Parameters
----------
data : pandas DataFrame
Long-format dataFrame.
dv : string
Name of column containing the dependant variable.
within : string
Name of column containing the within-subject factor. Note that
``within`` must have exactly two within-subject levels
(= two unique values).
subject : string
Name of column containing the subject identifier.
order : list of str
List of values in ``within`` that define the order of elements on the
x-axis of the plot. If None, uses alphabetical order.
boxplot : boolean
If True, add a boxplot to the paired lines using the
:py:func:`seaborn.boxplot` function.
figsize : tuple
Figsize in inches
dpi : int
Resolution of the figure in dots per inches.
ax : matplotlib axes
Axis on which to draw the plot.
colors : list of str
Line colors names. Default is green when value increases from A to B,
indianred when value decreases from A to B and grey when the value is
the same in both measurements.
pointplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.pointplot` function.
boxplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.boxplot` function.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
Data must be a long-format pandas DataFrame.
Examples
--------
Default paired plot:
.. plot::
>>> from pingouin import read_dataset
>>> df = read_dataset('mixed_anova')
>>> df = df.query("Group == 'Meditation' and Subject > 40")
>>> df = df.query("Time == 'August' or Time == 'June'")
>>> import pingouin as pg
>>> ax = pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', dpi=150)
Paired plot on an existing axis (no boxplot and uniform color):
.. plot::
>>> from pingouin import read_dataset
>>> df = read_dataset('mixed_anova').query("Time != 'January'")
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4))
>>> pg.plot_paired(data=df[df['Group'] == 'Meditation'],
... dv='Scores', within='Time', subject='Subject',
... ax=ax1, boxplot=False,
... colors=['grey', 'grey', 'grey']) # doctest: +SKIP
"""
from pingouin.utils import _check_dataframe, remove_rm_na
# Validate args
_check_dataframe(data=data, dv=dv, within=within, subject=subject,
effects='within')
# Remove NaN values
data = remove_rm_na(dv=dv, within=within, subject=subject, data=data)
# Extract subjects
subj = data[subject].unique()
# Extract within-subject level (alphabetical order)
x_cat = np.unique(data[within])
assert len(x_cat) == 2, 'Within must have exactly two unique levels.'
if order is None:
order = x_cat
else:
assert len(order) == 2, 'Order must have exactly two elements.'
# Start the plot
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
for idx, s in enumerate(subj):
tmp = data.loc[data[subject] == s, [dv, within, subject]]
x_val = tmp[tmp[within] == order[0]][dv].values[0]
y_val = tmp[tmp[within] == order[1]][dv].values[0]
if x_val < y_val:
color = colors[0]
elif x_val > y_val:
color = colors[2]
elif x_val == y_val:
color = colors[1]
# Plot individual lines using Seaborn
sns.pointplot(data=tmp, x=within, y=dv, order=order, color=color,
ax=ax, **pointplot_kwargs)
if boxplot:
sns.boxplot(data=data, x=within, y=dv, order=order, ax=ax,
**boxplot_kwargs)
# Despine and trim
sns.despine(trim=True, ax=ax)
return ax | python | def plot_paired(data=None, dv=None, within=None, subject=None, order=None,
boxplot=True, figsize=(4, 4), dpi=100, ax=None,
colors=['green', 'grey', 'indianred'],
pointplot_kwargs={'scale': .6, 'markers': '.'},
boxplot_kwargs={'color': 'lightslategrey', 'width': .2}):
"""
Paired plot.
Parameters
----------
data : pandas DataFrame
Long-format dataFrame.
dv : string
Name of column containing the dependant variable.
within : string
Name of column containing the within-subject factor. Note that
``within`` must have exactly two within-subject levels
(= two unique values).
subject : string
Name of column containing the subject identifier.
order : list of str
List of values in ``within`` that define the order of elements on the
x-axis of the plot. If None, uses alphabetical order.
boxplot : boolean
If True, add a boxplot to the paired lines using the
:py:func:`seaborn.boxplot` function.
figsize : tuple
Figsize in inches
dpi : int
Resolution of the figure in dots per inches.
ax : matplotlib axes
Axis on which to draw the plot.
colors : list of str
Line colors names. Default is green when value increases from A to B,
indianred when value decreases from A to B and grey when the value is
the same in both measurements.
pointplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.pointplot` function.
boxplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.boxplot` function.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
Data must be a long-format pandas DataFrame.
Examples
--------
Default paired plot:
.. plot::
>>> from pingouin import read_dataset
>>> df = read_dataset('mixed_anova')
>>> df = df.query("Group == 'Meditation' and Subject > 40")
>>> df = df.query("Time == 'August' or Time == 'June'")
>>> import pingouin as pg
>>> ax = pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', dpi=150)
Paired plot on an existing axis (no boxplot and uniform color):
.. plot::
>>> from pingouin import read_dataset
>>> df = read_dataset('mixed_anova').query("Time != 'January'")
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4))
>>> pg.plot_paired(data=df[df['Group'] == 'Meditation'],
... dv='Scores', within='Time', subject='Subject',
... ax=ax1, boxplot=False,
... colors=['grey', 'grey', 'grey']) # doctest: +SKIP
"""
from pingouin.utils import _check_dataframe, remove_rm_na
# Validate args
_check_dataframe(data=data, dv=dv, within=within, subject=subject,
effects='within')
# Remove NaN values
data = remove_rm_na(dv=dv, within=within, subject=subject, data=data)
# Extract subjects
subj = data[subject].unique()
# Extract within-subject level (alphabetical order)
x_cat = np.unique(data[within])
assert len(x_cat) == 2, 'Within must have exactly two unique levels.'
if order is None:
order = x_cat
else:
assert len(order) == 2, 'Order must have exactly two elements.'
# Start the plot
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
for idx, s in enumerate(subj):
tmp = data.loc[data[subject] == s, [dv, within, subject]]
x_val = tmp[tmp[within] == order[0]][dv].values[0]
y_val = tmp[tmp[within] == order[1]][dv].values[0]
if x_val < y_val:
color = colors[0]
elif x_val > y_val:
color = colors[2]
elif x_val == y_val:
color = colors[1]
# Plot individual lines using Seaborn
sns.pointplot(data=tmp, x=within, y=dv, order=order, color=color,
ax=ax, **pointplot_kwargs)
if boxplot:
sns.boxplot(data=data, x=within, y=dv, order=order, ax=ax,
**boxplot_kwargs)
# Despine and trim
sns.despine(trim=True, ax=ax)
return ax | [
"def",
"plot_paired",
"(",
"data",
"=",
"None",
",",
"dv",
"=",
"None",
",",
"within",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"order",
"=",
"None",
",",
"boxplot",
"=",
"True",
",",
"figsize",
"=",
"(",
"4",
",",
"4",
")",
",",
"dpi",
"... | Paired plot.
Parameters
----------
data : pandas DataFrame
Long-format dataFrame.
dv : string
Name of column containing the dependant variable.
within : string
Name of column containing the within-subject factor. Note that
``within`` must have exactly two within-subject levels
(= two unique values).
subject : string
Name of column containing the subject identifier.
order : list of str
List of values in ``within`` that define the order of elements on the
x-axis of the plot. If None, uses alphabetical order.
boxplot : boolean
If True, add a boxplot to the paired lines using the
:py:func:`seaborn.boxplot` function.
figsize : tuple
Figsize in inches
dpi : int
Resolution of the figure in dots per inches.
ax : matplotlib axes
Axis on which to draw the plot.
colors : list of str
Line colors names. Default is green when value increases from A to B,
indianred when value decreases from A to B and grey when the value is
the same in both measurements.
pointplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.pointplot` function.
boxplot_kwargs : dict
Dictionnary of optional arguments that are passed to the
:py:func:`seaborn.boxplot` function.
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
Data must be a long-format pandas DataFrame.
Examples
--------
Default paired plot:
.. plot::
>>> from pingouin import read_dataset
>>> df = read_dataset('mixed_anova')
>>> df = df.query("Group == 'Meditation' and Subject > 40")
>>> df = df.query("Time == 'August' or Time == 'June'")
>>> import pingouin as pg
>>> ax = pg.plot_paired(data=df, dv='Scores', within='Time',
... subject='Subject', dpi=150)
Paired plot on an existing axis (no boxplot and uniform color):
.. plot::
>>> from pingouin import read_dataset
>>> df = read_dataset('mixed_anova').query("Time != 'January'")
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4))
>>> pg.plot_paired(data=df[df['Group'] == 'Meditation'],
... dv='Scores', within='Time', subject='Subject',
... ax=ax1, boxplot=False,
... colors=['grey', 'grey', 'grey']) # doctest: +SKIP | [
"Paired",
"plot",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/plotting.py#L489-L617 | train | 206,609 |
raphaelvallat/pingouin | pingouin/parametric.py | anova | def anova(dv=None, between=None, data=None, detailed=False,
export_filename=None):
"""One-way and two-way ANOVA.
Parameters
----------
dv : string
Name of column in ``data`` containing the dependent variable.
between : string or list with two elements
Name of column(s) in ``data`` containing the between-subject factor(s).
If ``between`` is a single string, a one-way ANOVA is computed.
If ``between`` is a list with two elements
(e.g. ['Factor1', 'Factor2']), a two-way ANOVA is computed.
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
detailed : boolean
If True, return a detailed ANOVA table
(default True for two-way ANOVA).
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANOVA summary ::
'Source' : Factor names
'SS' : Sums of squares
'DF' : Degrees of freedom
'MS' : Mean squares
'F' : F-values
'p-unc' : uncorrected p-values
'np2' : Partial eta-square effect sizes
See Also
--------
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
welch_anova : One-way Welch ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA (`welch_anova`) that better controls for
type I error (Liu 2015). The homogeneity of variances can be measured with
the `homoscedasticity` function.
The main idea of ANOVA is to partition the variance (sums of squares)
into several components. For example, in one-way ANOVA:
.. math:: SS_{total} = SS_{treatment} + SS_{error}
.. math:: SS_{total} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y})^2
.. math:: SS_{treatment} = \\sum_i n_i (\\overline{Y_i} - \\overline{Y})^2
.. math:: SS_{error} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y}_i)^2
where :math:`i=1,...,r; j=1,...,n_i`, :math:`r` is the number of groups,
and :math:`n_i` the number of observations for the :math:`i` th group.
The F-statistics is then defined as:
.. math::
F^* = \\frac{MS_{treatment}}{MS_{error}} = \\frac{SS_{treatment}
/ (r - 1)}{SS_{error} / (n_t - r)}
and the p-value can be calculated using a F-distribution with
:math:`r-1, n_t-1` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (:py:func:`pingouin.pairwise_tukey`).
If the groups have unequal variances, the Games-Howell test is more
adequate (:py:func:`pingouin.pairwise_gameshowell`).
The effect size reported in Pingouin is the partial eta-square.
However, one should keep in mind that for one-way ANOVA
partial eta-square is the same as eta-square and generalized eta-square.
For more details, see Bakeman 2005; Richardson 2011.
.. math:: \\eta_p^2 = \\frac{SS_{treatment}}{SS_{treatment} + SS_{error}}
Note that missing values are automatically removed. Results have been
tested against R, Matlab and JASP.
**Important**
Versions of Pingouin below 0.2.5 gave wrong results for **unbalanced
two-way ANOVA**. This issue has been resolved in Pingouin>=0.2.5. In such
cases, a type II ANOVA is calculated via an internal call to the
statsmodels package. This latter package is therefore required for two-way
ANOVA with unequal sample sizes.
References
----------
.. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and
traditional ANOVA in case of Heterogeneity of Variance." (2015).
.. [2] Bakeman, Roger. "Recommended effect size statistics for repeated
measures designs." Behavior research methods 37.3 (2005): 379-384.
.. [3] Richardson, John TE. "Eta squared and partial eta squared as
measures of effect size in educational research." Educational
Research Review 6.2 (2011): 135-147.
Examples
--------
One-way ANOVA
>>> import pingouin as pg
>>> df = pg.read_dataset('anova')
>>> aov = pg.anova(dv='Pain threshold', between='Hair color', data=df,
... detailed=True)
>>> aov
Source SS DF MS F p-unc np2
0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576
1 Within 1001.800 15 66.787 - - -
Note that this function can also directly be used as a Pandas method
>>> df.anova(dv='Pain threshold', between='Hair color', detailed=True)
Source SS DF MS F p-unc np2
0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576
1 Within 1001.800 15 66.787 - - -
Two-way ANOVA with balanced design
>>> data = pg.read_dataset('anova2')
>>> data.anova(dv="Yield", between=["Blend", "Crop"]).round(3)
Source SS DF MS F p-unc np2
0 Blend 2.042 1 2.042 0.004 0.952 0.000
1 Crop 2736.583 2 1368.292 2.525 0.108 0.219
2 Blend * Crop 2360.083 2 1180.042 2.178 0.142 0.195
3 residual 9753.250 18 541.847 NaN NaN NaN
Two-way ANOVA with unbalanced design (requires statsmodels)
>>> data = pg.read_dataset('anova2_unbalanced')
>>> data.anova(dv="Scores", between=["Diet", "Exercise"]).round(3)
Source SS DF MS F p-unc np2
0 Diet 390.625 1.0 390.625 7.423 0.034 0.553
1 Exercise 180.625 1.0 180.625 3.432 0.113 0.364
2 Diet * Exercise 15.625 1.0 15.625 0.297 0.605 0.047
3 residual 315.750 6.0 52.625 NaN NaN NaN
"""
if isinstance(between, list):
if len(between) == 2:
return anova2(dv=dv, between=between, data=data,
export_filename=export_filename)
elif len(between) == 1:
between = between[0]
# Check data
_check_dataframe(dv=dv, between=between, data=data, effects='between')
# Drop missing values
data = data[[dv, between]].dropna()
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
groups = list(data[between].unique())
n_groups = len(groups)
N = data[dv].size
# Calculate sums of squares
grp = data.groupby(between)[dv]
# Between effect
ssbetween = ((grp.mean() - data[dv].mean())**2 * grp.count()).sum()
# Within effect (= error between)
# = (grp.var(ddof=0) * grp.count()).sum()
sserror = grp.apply(lambda x: (x - x.mean())**2).sum()
# Calculate DOF, MS, F and p-values
ddof1 = n_groups - 1
ddof2 = N - n_groups
msbetween = ssbetween / ddof1
mserror = sserror / ddof2
fval = msbetween / mserror
p_unc = f(ddof1, ddof2).sf(fval)
# Calculating partial eta-square
# Similar to (fval * ddof1) / (fval * ddof1 + ddof2)
np2 = ssbetween / (ssbetween + sserror)
# Create output dataframe
if not detailed:
aov = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'ddof2': ddof2,
'F': fval,
'p-unc': p_unc,
'np2': np2
}, index=[0])
col_order = ['Source', 'ddof1', 'ddof2', 'F', 'p-unc', 'np2']
else:
aov = pd.DataFrame({'Source': [between, 'Within'],
'SS': np.round([ssbetween, sserror], 3),
'DF': [ddof1, ddof2],
'MS': np.round([msbetween, mserror], 3),
'F': [fval, np.nan],
'p-unc': [p_unc, np.nan],
'np2': [np2, np.nan]
})
col_order = ['Source', 'SS', 'DF', 'MS', 'F', 'p-unc', 'np2']
# Round
aov[['F', 'np2']] = aov[['F', 'np2']].round(3)
# Replace NaN
aov = aov.fillna('-')
aov = aov.reindex(columns=col_order)
aov.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(aov, export_filename)
return aov | python | def anova(dv=None, between=None, data=None, detailed=False,
export_filename=None):
"""One-way and two-way ANOVA.
Parameters
----------
dv : string
Name of column in ``data`` containing the dependent variable.
between : string or list with two elements
Name of column(s) in ``data`` containing the between-subject factor(s).
If ``between`` is a single string, a one-way ANOVA is computed.
If ``between`` is a list with two elements
(e.g. ['Factor1', 'Factor2']), a two-way ANOVA is computed.
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
detailed : boolean
If True, return a detailed ANOVA table
(default True for two-way ANOVA).
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANOVA summary ::
'Source' : Factor names
'SS' : Sums of squares
'DF' : Degrees of freedom
'MS' : Mean squares
'F' : F-values
'p-unc' : uncorrected p-values
'np2' : Partial eta-square effect sizes
See Also
--------
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
welch_anova : One-way Welch ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA (`welch_anova`) that better controls for
type I error (Liu 2015). The homogeneity of variances can be measured with
the `homoscedasticity` function.
The main idea of ANOVA is to partition the variance (sums of squares)
into several components. For example, in one-way ANOVA:
.. math:: SS_{total} = SS_{treatment} + SS_{error}
.. math:: SS_{total} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y})^2
.. math:: SS_{treatment} = \\sum_i n_i (\\overline{Y_i} - \\overline{Y})^2
.. math:: SS_{error} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y}_i)^2
where :math:`i=1,...,r; j=1,...,n_i`, :math:`r` is the number of groups,
and :math:`n_i` the number of observations for the :math:`i` th group.
The F-statistics is then defined as:
.. math::
F^* = \\frac{MS_{treatment}}{MS_{error}} = \\frac{SS_{treatment}
/ (r - 1)}{SS_{error} / (n_t - r)}
and the p-value can be calculated using a F-distribution with
:math:`r-1, n_t-1` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (:py:func:`pingouin.pairwise_tukey`).
If the groups have unequal variances, the Games-Howell test is more
adequate (:py:func:`pingouin.pairwise_gameshowell`).
The effect size reported in Pingouin is the partial eta-square.
However, one should keep in mind that for one-way ANOVA
partial eta-square is the same as eta-square and generalized eta-square.
For more details, see Bakeman 2005; Richardson 2011.
.. math:: \\eta_p^2 = \\frac{SS_{treatment}}{SS_{treatment} + SS_{error}}
Note that missing values are automatically removed. Results have been
tested against R, Matlab and JASP.
**Important**
Versions of Pingouin below 0.2.5 gave wrong results for **unbalanced
two-way ANOVA**. This issue has been resolved in Pingouin>=0.2.5. In such
cases, a type II ANOVA is calculated via an internal call to the
statsmodels package. This latter package is therefore required for two-way
ANOVA with unequal sample sizes.
References
----------
.. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and
traditional ANOVA in case of Heterogeneity of Variance." (2015).
.. [2] Bakeman, Roger. "Recommended effect size statistics for repeated
measures designs." Behavior research methods 37.3 (2005): 379-384.
.. [3] Richardson, John TE. "Eta squared and partial eta squared as
measures of effect size in educational research." Educational
Research Review 6.2 (2011): 135-147.
Examples
--------
One-way ANOVA
>>> import pingouin as pg
>>> df = pg.read_dataset('anova')
>>> aov = pg.anova(dv='Pain threshold', between='Hair color', data=df,
... detailed=True)
>>> aov
Source SS DF MS F p-unc np2
0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576
1 Within 1001.800 15 66.787 - - -
Note that this function can also directly be used as a Pandas method
>>> df.anova(dv='Pain threshold', between='Hair color', detailed=True)
Source SS DF MS F p-unc np2
0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576
1 Within 1001.800 15 66.787 - - -
Two-way ANOVA with balanced design
>>> data = pg.read_dataset('anova2')
>>> data.anova(dv="Yield", between=["Blend", "Crop"]).round(3)
Source SS DF MS F p-unc np2
0 Blend 2.042 1 2.042 0.004 0.952 0.000
1 Crop 2736.583 2 1368.292 2.525 0.108 0.219
2 Blend * Crop 2360.083 2 1180.042 2.178 0.142 0.195
3 residual 9753.250 18 541.847 NaN NaN NaN
Two-way ANOVA with unbalanced design (requires statsmodels)
>>> data = pg.read_dataset('anova2_unbalanced')
>>> data.anova(dv="Scores", between=["Diet", "Exercise"]).round(3)
Source SS DF MS F p-unc np2
0 Diet 390.625 1.0 390.625 7.423 0.034 0.553
1 Exercise 180.625 1.0 180.625 3.432 0.113 0.364
2 Diet * Exercise 15.625 1.0 15.625 0.297 0.605 0.047
3 residual 315.750 6.0 52.625 NaN NaN NaN
"""
if isinstance(between, list):
if len(between) == 2:
return anova2(dv=dv, between=between, data=data,
export_filename=export_filename)
elif len(between) == 1:
between = between[0]
# Check data
_check_dataframe(dv=dv, between=between, data=data, effects='between')
# Drop missing values
data = data[[dv, between]].dropna()
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
groups = list(data[between].unique())
n_groups = len(groups)
N = data[dv].size
# Calculate sums of squares
grp = data.groupby(between)[dv]
# Between effect
ssbetween = ((grp.mean() - data[dv].mean())**2 * grp.count()).sum()
# Within effect (= error between)
# = (grp.var(ddof=0) * grp.count()).sum()
sserror = grp.apply(lambda x: (x - x.mean())**2).sum()
# Calculate DOF, MS, F and p-values
ddof1 = n_groups - 1
ddof2 = N - n_groups
msbetween = ssbetween / ddof1
mserror = sserror / ddof2
fval = msbetween / mserror
p_unc = f(ddof1, ddof2).sf(fval)
# Calculating partial eta-square
# Similar to (fval * ddof1) / (fval * ddof1 + ddof2)
np2 = ssbetween / (ssbetween + sserror)
# Create output dataframe
if not detailed:
aov = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'ddof2': ddof2,
'F': fval,
'p-unc': p_unc,
'np2': np2
}, index=[0])
col_order = ['Source', 'ddof1', 'ddof2', 'F', 'p-unc', 'np2']
else:
aov = pd.DataFrame({'Source': [between, 'Within'],
'SS': np.round([ssbetween, sserror], 3),
'DF': [ddof1, ddof2],
'MS': np.round([msbetween, mserror], 3),
'F': [fval, np.nan],
'p-unc': [p_unc, np.nan],
'np2': [np2, np.nan]
})
col_order = ['Source', 'SS', 'DF', 'MS', 'F', 'p-unc', 'np2']
# Round
aov[['F', 'np2']] = aov[['F', 'np2']].round(3)
# Replace NaN
aov = aov.fillna('-')
aov = aov.reindex(columns=col_order)
aov.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(aov, export_filename)
return aov | [
"def",
"anova",
"(",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"data",
"=",
"None",
",",
"detailed",
"=",
"False",
",",
"export_filename",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"between",
",",
"list",
")",
":",
"if",
"len",
"("... | One-way and two-way ANOVA.
Parameters
----------
dv : string
Name of column in ``data`` containing the dependent variable.
between : string or list with two elements
Name of column(s) in ``data`` containing the between-subject factor(s).
If ``between`` is a single string, a one-way ANOVA is computed.
If ``between`` is a list with two elements
(e.g. ['Factor1', 'Factor2']), a two-way ANOVA is computed.
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
detailed : boolean
If True, return a detailed ANOVA table
(default True for two-way ANOVA).
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANOVA summary ::
'Source' : Factor names
'SS' : Sums of squares
'DF' : Degrees of freedom
'MS' : Mean squares
'F' : F-values
'p-unc' : uncorrected p-values
'np2' : Partial eta-square effect sizes
See Also
--------
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
welch_anova : One-way Welch ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA (`welch_anova`) that better controls for
type I error (Liu 2015). The homogeneity of variances can be measured with
the `homoscedasticity` function.
The main idea of ANOVA is to partition the variance (sums of squares)
into several components. For example, in one-way ANOVA:
.. math:: SS_{total} = SS_{treatment} + SS_{error}
.. math:: SS_{total} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y})^2
.. math:: SS_{treatment} = \\sum_i n_i (\\overline{Y_i} - \\overline{Y})^2
.. math:: SS_{error} = \\sum_i \\sum_j (Y_{ij} - \\overline{Y}_i)^2
where :math:`i=1,...,r; j=1,...,n_i`, :math:`r` is the number of groups,
and :math:`n_i` the number of observations for the :math:`i` th group.
The F-statistics is then defined as:
.. math::
F^* = \\frac{MS_{treatment}}{MS_{error}} = \\frac{SS_{treatment}
/ (r - 1)}{SS_{error} / (n_t - r)}
and the p-value can be calculated using a F-distribution with
:math:`r-1, n_t-1` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (:py:func:`pingouin.pairwise_tukey`).
If the groups have unequal variances, the Games-Howell test is more
adequate (:py:func:`pingouin.pairwise_gameshowell`).
The effect size reported in Pingouin is the partial eta-square.
However, one should keep in mind that for one-way ANOVA
partial eta-square is the same as eta-square and generalized eta-square.
For more details, see Bakeman 2005; Richardson 2011.
.. math:: \\eta_p^2 = \\frac{SS_{treatment}}{SS_{treatment} + SS_{error}}
Note that missing values are automatically removed. Results have been
tested against R, Matlab and JASP.
**Important**
Versions of Pingouin below 0.2.5 gave wrong results for **unbalanced
two-way ANOVA**. This issue has been resolved in Pingouin>=0.2.5. In such
cases, a type II ANOVA is calculated via an internal call to the
statsmodels package. This latter package is therefore required for two-way
ANOVA with unequal sample sizes.
References
----------
.. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and
traditional ANOVA in case of Heterogeneity of Variance." (2015).
.. [2] Bakeman, Roger. "Recommended effect size statistics for repeated
measures designs." Behavior research methods 37.3 (2005): 379-384.
.. [3] Richardson, John TE. "Eta squared and partial eta squared as
measures of effect size in educational research." Educational
Research Review 6.2 (2011): 135-147.
Examples
--------
One-way ANOVA
>>> import pingouin as pg
>>> df = pg.read_dataset('anova')
>>> aov = pg.anova(dv='Pain threshold', between='Hair color', data=df,
... detailed=True)
>>> aov
Source SS DF MS F p-unc np2
0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576
1 Within 1001.800 15 66.787 - - -
Note that this function can also directly be used as a Pandas method
>>> df.anova(dv='Pain threshold', between='Hair color', detailed=True)
Source SS DF MS F p-unc np2
0 Hair color 1360.726 3 453.575 6.791 0.00411423 0.576
1 Within 1001.800 15 66.787 - - -
Two-way ANOVA with balanced design
>>> data = pg.read_dataset('anova2')
>>> data.anova(dv="Yield", between=["Blend", "Crop"]).round(3)
Source SS DF MS F p-unc np2
0 Blend 2.042 1 2.042 0.004 0.952 0.000
1 Crop 2736.583 2 1368.292 2.525 0.108 0.219
2 Blend * Crop 2360.083 2 1180.042 2.178 0.142 0.195
3 residual 9753.250 18 541.847 NaN NaN NaN
Two-way ANOVA with unbalanced design (requires statsmodels)
>>> data = pg.read_dataset('anova2_unbalanced')
>>> data.anova(dv="Scores", between=["Diet", "Exercise"]).round(3)
Source SS DF MS F p-unc np2
0 Diet 390.625 1.0 390.625 7.423 0.034 0.553
1 Exercise 180.625 1.0 180.625 3.432 0.113 0.364
2 Diet * Exercise 15.625 1.0 15.625 0.297 0.605 0.047
3 residual 315.750 6.0 52.625 NaN NaN NaN | [
"One",
"-",
"way",
"and",
"two",
"-",
"way",
"ANOVA",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/parametric.py#L731-L953 | train | 206,610 |
raphaelvallat/pingouin | pingouin/parametric.py | welch_anova | def welch_anova(dv=None, between=None, data=None, export_filename=None):
"""One-way Welch ANOVA.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANOVA summary ::
'Source' : Factor names
'SS' : Sums of squares
'DF' : Degrees of freedom
'MS' : Mean squares
'F' : F-values
'p-unc' : uncorrected p-values
'np2' : Partial eta-square effect sizes
See Also
--------
anova : One-way ANOVA
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA that better controls for
type I error (Liu 2015). The homogeneity of variances can be measured with
the `homoscedasticity` function. The two other assumptions of
normality and independance remain.
The main idea of Welch ANOVA is to use a weight :math:`w_i` to reduce
the effect of unequal variances. This weight is calculated using the sample
size :math:`n_i` and variance :math:`s_i^2` of each group
:math:`i=1,...,r`:
.. math:: w_i = \\frac{n_i}{s_i^2}
Using these weights, the adjusted grand mean of the data is:
.. math::
\\overline{Y}_{welch} = \\frac{\\sum_{i=1}^r w_i\\overline{Y}_i}
{\\sum w}
where :math:`\\overline{Y}_i` is the mean of the :math:`i` group.
The treatment sums of squares is defined as:
.. math::
SS_{treatment} = \\sum_{i=1}^r w_i
(\\overline{Y}_i - \\overline{Y}_{welch})^2
We then need to calculate a term lambda:
.. math::
\\Lambda = \\frac{3\\sum_{i=1}^r(\\frac{1}{n_i-1})
(1 - \\frac{w_i}{\\sum w})^2}{r^2 - 1}
from which the F-value can be calculated:
.. math::
F_{welch} = \\frac{SS_{treatment} / (r-1)}
{1 + \\frac{2\\Lambda(r-2)}{3}}
and the p-value approximated using a F-distribution with
:math:`(r-1, 1 / \\Lambda)` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (`pairwise_tukey`). If the groups have unequal
variances, the Games-Howell test is more adequate.
Results have been tested against R.
References
----------
.. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and
traditional ANOVA in case of Heterogeneity of Variance." (2015).
.. [2] Welch, Bernard Lewis. "On the comparison of several mean values:
an alternative approach." Biometrika 38.3/4 (1951): 330-336.
Examples
--------
1. One-way Welch ANOVA on the pain threshold dataset.
>>> from pingouin import welch_anova, read_dataset
>>> df = read_dataset('anova')
>>> aov = welch_anova(dv='Pain threshold', between='Hair color',
... data=df, export_filename='pain_anova.csv')
>>> aov
Source ddof1 ddof2 F p-unc
0 Hair color 3 8.33 5.89 0.018813
"""
# Check data
_check_dataframe(dv=dv, between=between, data=data, effects='between')
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Number of groups
r = data[between].nunique()
ddof1 = r - 1
# Compute weights and ajusted means
grp = data.groupby(between)[dv]
weights = grp.count() / grp.var()
adj_grandmean = (weights * grp.mean()).sum() / weights.sum()
# Treatment sum of squares
ss_tr = np.sum(weights * np.square(grp.mean() - adj_grandmean))
ms_tr = ss_tr / ddof1
# Calculate lambda, F-value and p-value
lamb = (3 * np.sum((1 / (grp.count() - 1)) *
(1 - (weights / weights.sum()))**2)) / (r**2 - 1)
fval = ms_tr / (1 + (2 * lamb * (r - 2)) / 3)
pval = f.sf(fval, ddof1, 1 / lamb)
# Create output dataframe
aov = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'ddof2': 1 / lamb,
'F': fval,
'p-unc': pval,
}, index=[0])
col_order = ['Source', 'ddof1', 'ddof2', 'F', 'p-unc']
aov = aov.reindex(columns=col_order)
aov[['F', 'ddof2']] = aov[['F', 'ddof2']].round(3)
# Export to .csv
if export_filename is not None:
_export_table(aov, export_filename)
return aov | python | def welch_anova(dv=None, between=None, data=None, export_filename=None):
"""One-way Welch ANOVA.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANOVA summary ::
'Source' : Factor names
'SS' : Sums of squares
'DF' : Degrees of freedom
'MS' : Mean squares
'F' : F-values
'p-unc' : uncorrected p-values
'np2' : Partial eta-square effect sizes
See Also
--------
anova : One-way ANOVA
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA that better controls for
type I error (Liu 2015). The homogeneity of variances can be measured with
the `homoscedasticity` function. The two other assumptions of
normality and independance remain.
The main idea of Welch ANOVA is to use a weight :math:`w_i` to reduce
the effect of unequal variances. This weight is calculated using the sample
size :math:`n_i` and variance :math:`s_i^2` of each group
:math:`i=1,...,r`:
.. math:: w_i = \\frac{n_i}{s_i^2}
Using these weights, the adjusted grand mean of the data is:
.. math::
\\overline{Y}_{welch} = \\frac{\\sum_{i=1}^r w_i\\overline{Y}_i}
{\\sum w}
where :math:`\\overline{Y}_i` is the mean of the :math:`i` group.
The treatment sums of squares is defined as:
.. math::
SS_{treatment} = \\sum_{i=1}^r w_i
(\\overline{Y}_i - \\overline{Y}_{welch})^2
We then need to calculate a term lambda:
.. math::
\\Lambda = \\frac{3\\sum_{i=1}^r(\\frac{1}{n_i-1})
(1 - \\frac{w_i}{\\sum w})^2}{r^2 - 1}
from which the F-value can be calculated:
.. math::
F_{welch} = \\frac{SS_{treatment} / (r-1)}
{1 + \\frac{2\\Lambda(r-2)}{3}}
and the p-value approximated using a F-distribution with
:math:`(r-1, 1 / \\Lambda)` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (`pairwise_tukey`). If the groups have unequal
variances, the Games-Howell test is more adequate.
Results have been tested against R.
References
----------
.. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and
traditional ANOVA in case of Heterogeneity of Variance." (2015).
.. [2] Welch, Bernard Lewis. "On the comparison of several mean values:
an alternative approach." Biometrika 38.3/4 (1951): 330-336.
Examples
--------
1. One-way Welch ANOVA on the pain threshold dataset.
>>> from pingouin import welch_anova, read_dataset
>>> df = read_dataset('anova')
>>> aov = welch_anova(dv='Pain threshold', between='Hair color',
... data=df, export_filename='pain_anova.csv')
>>> aov
Source ddof1 ddof2 F p-unc
0 Hair color 3 8.33 5.89 0.018813
"""
# Check data
_check_dataframe(dv=dv, between=between, data=data, effects='between')
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Number of groups
r = data[between].nunique()
ddof1 = r - 1
# Compute weights and ajusted means
grp = data.groupby(between)[dv]
weights = grp.count() / grp.var()
adj_grandmean = (weights * grp.mean()).sum() / weights.sum()
# Treatment sum of squares
ss_tr = np.sum(weights * np.square(grp.mean() - adj_grandmean))
ms_tr = ss_tr / ddof1
# Calculate lambda, F-value and p-value
lamb = (3 * np.sum((1 / (grp.count() - 1)) *
(1 - (weights / weights.sum()))**2)) / (r**2 - 1)
fval = ms_tr / (1 + (2 * lamb * (r - 2)) / 3)
pval = f.sf(fval, ddof1, 1 / lamb)
# Create output dataframe
aov = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'ddof2': 1 / lamb,
'F': fval,
'p-unc': pval,
}, index=[0])
col_order = ['Source', 'ddof1', 'ddof2', 'F', 'p-unc']
aov = aov.reindex(columns=col_order)
aov[['F', 'ddof2']] = aov[['F', 'ddof2']].round(3)
# Export to .csv
if export_filename is not None:
_export_table(aov, export_filename)
return aov | [
"def",
"welch_anova",
"(",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"data",
"=",
"None",
",",
"export_filename",
"=",
"None",
")",
":",
"# Check data",
"_check_dataframe",
"(",
"dv",
"=",
"dv",
",",
"between",
"=",
"between",
",",
"data",
"... | One-way Welch ANOVA.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANOVA summary ::
'Source' : Factor names
'SS' : Sums of squares
'DF' : Degrees of freedom
'MS' : Mean squares
'F' : F-values
'p-unc' : uncorrected p-values
'np2' : Partial eta-square effect sizes
See Also
--------
anova : One-way ANOVA
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA that better controls for
type I error (Liu 2015). The homogeneity of variances can be measured with
the `homoscedasticity` function. The two other assumptions of
normality and independance remain.
The main idea of Welch ANOVA is to use a weight :math:`w_i` to reduce
the effect of unequal variances. This weight is calculated using the sample
size :math:`n_i` and variance :math:`s_i^2` of each group
:math:`i=1,...,r`:
.. math:: w_i = \\frac{n_i}{s_i^2}
Using these weights, the adjusted grand mean of the data is:
.. math::
\\overline{Y}_{welch} = \\frac{\\sum_{i=1}^r w_i\\overline{Y}_i}
{\\sum w}
where :math:`\\overline{Y}_i` is the mean of the :math:`i` group.
The treatment sums of squares is defined as:
.. math::
SS_{treatment} = \\sum_{i=1}^r w_i
(\\overline{Y}_i - \\overline{Y}_{welch})^2
We then need to calculate a term lambda:
.. math::
\\Lambda = \\frac{3\\sum_{i=1}^r(\\frac{1}{n_i-1})
(1 - \\frac{w_i}{\\sum w})^2}{r^2 - 1}
from which the F-value can be calculated:
.. math::
F_{welch} = \\frac{SS_{treatment} / (r-1)}
{1 + \\frac{2\\Lambda(r-2)}{3}}
and the p-value approximated using a F-distribution with
:math:`(r-1, 1 / \\Lambda)` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (`pairwise_tukey`). If the groups have unequal
variances, the Games-Howell test is more adequate.
Results have been tested against R.
References
----------
.. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and
traditional ANOVA in case of Heterogeneity of Variance." (2015).
.. [2] Welch, Bernard Lewis. "On the comparison of several mean values:
an alternative approach." Biometrika 38.3/4 (1951): 330-336.
Examples
--------
1. One-way Welch ANOVA on the pain threshold dataset.
>>> from pingouin import welch_anova, read_dataset
>>> df = read_dataset('anova')
>>> aov = welch_anova(dv='Pain threshold', between='Hair color',
... data=df, export_filename='pain_anova.csv')
>>> aov
Source ddof1 ddof2 F p-unc
0 Hair color 3 8.33 5.89 0.018813 | [
"One",
"-",
"way",
"Welch",
"ANOVA",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/parametric.py#L1082-L1235 | train | 206,611 |
raphaelvallat/pingouin | pingouin/parametric.py | ancovan | def ancovan(dv=None, covar=None, between=None, data=None,
export_filename=None):
"""ANCOVA with n covariates.
This is an internal function. The main call to this function should be done
by the :py:func:`pingouin.ancova` function.
Parameters
----------
dv : string
Name of column containing the dependant variable.
covar : string
Name(s) of columns containing the covariates.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANCOVA summary ::
'Source' : Names of the factor considered
'SS' : Sums of squares
'DF' : Degrees of freedom
'F' : F-values
'p-unc' : Uncorrected p-values
"""
# Check that stasmodels is installed
from pingouin.utils import _is_statsmodels_installed
_is_statsmodels_installed(raise_error=True)
from statsmodels.api import stats
from statsmodels.formula.api import ols
# Check that covariates are numeric ('float', 'int')
assert all([data[covar[i]].dtype.kind in 'fi' for i in range(len(covar))])
# Fit ANCOVA model
formula = dv + ' ~ C(' + between + ')'
for c in covar:
formula += ' + ' + c
model = ols(formula, data=data).fit()
aov = stats.anova_lm(model, typ=2).reset_index()
aov.rename(columns={'index': 'Source', 'sum_sq': 'SS',
'df': 'DF', 'PR(>F)': 'p-unc'}, inplace=True)
aov.loc[0, 'Source'] = between
aov['DF'] = aov['DF'].astype(int)
aov[['SS', 'F']] = aov[['SS', 'F']].round(3)
# Export to .csv
if export_filename is not None:
_export_table(aov, export_filename)
return aov | python | def ancovan(dv=None, covar=None, between=None, data=None,
export_filename=None):
"""ANCOVA with n covariates.
This is an internal function. The main call to this function should be done
by the :py:func:`pingouin.ancova` function.
Parameters
----------
dv : string
Name of column containing the dependant variable.
covar : string
Name(s) of columns containing the covariates.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANCOVA summary ::
'Source' : Names of the factor considered
'SS' : Sums of squares
'DF' : Degrees of freedom
'F' : F-values
'p-unc' : Uncorrected p-values
"""
# Check that stasmodels is installed
from pingouin.utils import _is_statsmodels_installed
_is_statsmodels_installed(raise_error=True)
from statsmodels.api import stats
from statsmodels.formula.api import ols
# Check that covariates are numeric ('float', 'int')
assert all([data[covar[i]].dtype.kind in 'fi' for i in range(len(covar))])
# Fit ANCOVA model
formula = dv + ' ~ C(' + between + ')'
for c in covar:
formula += ' + ' + c
model = ols(formula, data=data).fit()
aov = stats.anova_lm(model, typ=2).reset_index()
aov.rename(columns={'index': 'Source', 'sum_sq': 'SS',
'df': 'DF', 'PR(>F)': 'p-unc'}, inplace=True)
aov.loc[0, 'Source'] = between
aov['DF'] = aov['DF'].astype(int)
aov[['SS', 'F']] = aov[['SS', 'F']].round(3)
# Export to .csv
if export_filename is not None:
_export_table(aov, export_filename)
return aov | [
"def",
"ancovan",
"(",
"dv",
"=",
"None",
",",
"covar",
"=",
"None",
",",
"between",
"=",
"None",
",",
"data",
"=",
"None",
",",
"export_filename",
"=",
"None",
")",
":",
"# Check that stasmodels is installed",
"from",
"pingouin",
".",
"utils",
"import",
"... | ANCOVA with n covariates.
This is an internal function. The main call to this function should be done
by the :py:func:`pingouin.ancova` function.
Parameters
----------
dv : string
Name of column containing the dependant variable.
covar : string
Name(s) of columns containing the covariates.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANCOVA summary ::
'Source' : Names of the factor considered
'SS' : Sums of squares
'DF' : Degrees of freedom
'F' : F-values
'p-unc' : Uncorrected p-values | [
"ANCOVA",
"with",
"n",
"covariates",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/parametric.py#L1564-L1626 | train | 206,612 |
raphaelvallat/pingouin | pingouin/datasets/__init__.py | read_dataset | def read_dataset(dname):
"""Read example datasets.
Parameters
----------
dname : string
Name of dataset to read (without extension).
Must be a valid dataset present in pingouin.datasets
Returns
-------
data : pd.DataFrame
Dataset
Examples
--------
Load the ANOVA dataset
>>> from pingouin import read_dataset
>>> df = read_dataset('anova')
"""
# Check extension
d, ext = op.splitext(dname)
if ext.lower() == '.csv':
dname = d
# Check that dataset exist
if dname not in dts['dataset'].values:
raise ValueError('Dataset does not exist. Valid datasets names are',
dts['dataset'].values)
# Load dataset
return pd.read_csv(op.join(ddir, dname + '.csv'), sep=',') | python | def read_dataset(dname):
"""Read example datasets.
Parameters
----------
dname : string
Name of dataset to read (without extension).
Must be a valid dataset present in pingouin.datasets
Returns
-------
data : pd.DataFrame
Dataset
Examples
--------
Load the ANOVA dataset
>>> from pingouin import read_dataset
>>> df = read_dataset('anova')
"""
# Check extension
d, ext = op.splitext(dname)
if ext.lower() == '.csv':
dname = d
# Check that dataset exist
if dname not in dts['dataset'].values:
raise ValueError('Dataset does not exist. Valid datasets names are',
dts['dataset'].values)
# Load dataset
return pd.read_csv(op.join(ddir, dname + '.csv'), sep=',') | [
"def",
"read_dataset",
"(",
"dname",
")",
":",
"# Check extension",
"d",
",",
"ext",
"=",
"op",
".",
"splitext",
"(",
"dname",
")",
"if",
"ext",
".",
"lower",
"(",
")",
"==",
"'.csv'",
":",
"dname",
"=",
"d",
"# Check that dataset exist",
"if",
"dname",
... | Read example datasets.
Parameters
----------
dname : string
Name of dataset to read (without extension).
Must be a valid dataset present in pingouin.datasets
Returns
-------
data : pd.DataFrame
Dataset
Examples
--------
Load the ANOVA dataset
>>> from pingouin import read_dataset
>>> df = read_dataset('anova') | [
"Read",
"example",
"datasets",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/datasets/__init__.py#L10-L40 | train | 206,613 |
raphaelvallat/pingouin | pingouin/utils.py | _perm_pval | def _perm_pval(bootstat, estimate, tail='two-sided'):
"""
Compute p-values from a permutation test.
Parameters
----------
bootstat : 1D array
Permutation distribution.
estimate : float or int
Point estimate.
tail : str
'upper': one-sided p-value (upper tail)
'lower': one-sided p-value (lower tail)
'two-sided': two-sided p-value
Returns
-------
p : float
P-value.
"""
assert tail in ['two-sided', 'upper', 'lower'], 'Wrong tail argument.'
assert isinstance(estimate, (int, float))
bootstat = np.asarray(bootstat)
assert bootstat.ndim == 1, 'bootstat must be a 1D array.'
n_boot = bootstat.size
assert n_boot >= 1, 'bootstat must have at least one value.'
if tail == 'upper':
p = np.greater_equal(bootstat, estimate).sum() / n_boot
elif tail == 'lower':
p = np.less_equal(bootstat, estimate).sum() / n_boot
else:
p = np.greater_equal(np.fabs(bootstat), abs(estimate)).sum() / n_boot
return p | python | def _perm_pval(bootstat, estimate, tail='two-sided'):
"""
Compute p-values from a permutation test.
Parameters
----------
bootstat : 1D array
Permutation distribution.
estimate : float or int
Point estimate.
tail : str
'upper': one-sided p-value (upper tail)
'lower': one-sided p-value (lower tail)
'two-sided': two-sided p-value
Returns
-------
p : float
P-value.
"""
assert tail in ['two-sided', 'upper', 'lower'], 'Wrong tail argument.'
assert isinstance(estimate, (int, float))
bootstat = np.asarray(bootstat)
assert bootstat.ndim == 1, 'bootstat must be a 1D array.'
n_boot = bootstat.size
assert n_boot >= 1, 'bootstat must have at least one value.'
if tail == 'upper':
p = np.greater_equal(bootstat, estimate).sum() / n_boot
elif tail == 'lower':
p = np.less_equal(bootstat, estimate).sum() / n_boot
else:
p = np.greater_equal(np.fabs(bootstat), abs(estimate)).sum() / n_boot
return p | [
"def",
"_perm_pval",
"(",
"bootstat",
",",
"estimate",
",",
"tail",
"=",
"'two-sided'",
")",
":",
"assert",
"tail",
"in",
"[",
"'two-sided'",
",",
"'upper'",
",",
"'lower'",
"]",
",",
"'Wrong tail argument.'",
"assert",
"isinstance",
"(",
"estimate",
",",
"(... | Compute p-values from a permutation test.
Parameters
----------
bootstat : 1D array
Permutation distribution.
estimate : float or int
Point estimate.
tail : str
'upper': one-sided p-value (upper tail)
'lower': one-sided p-value (lower tail)
'two-sided': two-sided p-value
Returns
-------
p : float
P-value. | [
"Compute",
"p",
"-",
"values",
"from",
"a",
"permutation",
"test",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/utils.py#L13-L45 | train | 206,614 |
raphaelvallat/pingouin | pingouin/utils.py | print_table | def print_table(df, floatfmt=".3f", tablefmt='simple'):
"""Pretty display of table.
See: https://pypi.org/project/tabulate/.
Parameters
----------
df : DataFrame
Dataframe to print (e.g. ANOVA summary)
floatfmt : string
Decimal number formatting
tablefmt : string
Table format (e.g. 'simple', 'plain', 'html', 'latex', 'grid')
"""
if 'F' in df.keys():
print('\n=============\nANOVA SUMMARY\n=============\n')
if 'A' in df.keys():
print('\n==============\nPOST HOC TESTS\n==============\n')
print(tabulate(df, headers="keys", showindex=False, floatfmt=floatfmt,
tablefmt=tablefmt))
print('') | python | def print_table(df, floatfmt=".3f", tablefmt='simple'):
"""Pretty display of table.
See: https://pypi.org/project/tabulate/.
Parameters
----------
df : DataFrame
Dataframe to print (e.g. ANOVA summary)
floatfmt : string
Decimal number formatting
tablefmt : string
Table format (e.g. 'simple', 'plain', 'html', 'latex', 'grid')
"""
if 'F' in df.keys():
print('\n=============\nANOVA SUMMARY\n=============\n')
if 'A' in df.keys():
print('\n==============\nPOST HOC TESTS\n==============\n')
print(tabulate(df, headers="keys", showindex=False, floatfmt=floatfmt,
tablefmt=tablefmt))
print('') | [
"def",
"print_table",
"(",
"df",
",",
"floatfmt",
"=",
"\".3f\"",
",",
"tablefmt",
"=",
"'simple'",
")",
":",
"if",
"'F'",
"in",
"df",
".",
"keys",
"(",
")",
":",
"print",
"(",
"'\\n=============\\nANOVA SUMMARY\\n=============\\n'",
")",
"if",
"'A'",
"in",
... | Pretty display of table.
See: https://pypi.org/project/tabulate/.
Parameters
----------
df : DataFrame
Dataframe to print (e.g. ANOVA summary)
floatfmt : string
Decimal number formatting
tablefmt : string
Table format (e.g. 'simple', 'plain', 'html', 'latex', 'grid') | [
"Pretty",
"display",
"of",
"table",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/utils.py#L52-L73 | train | 206,615 |
raphaelvallat/pingouin | pingouin/utils.py | _export_table | def _export_table(table, fname):
"""Export DataFrame to .csv"""
import os.path as op
extension = op.splitext(fname.lower())[1]
if extension == '':
fname = fname + '.csv'
table.to_csv(fname, index=None, sep=',', encoding='utf-8',
float_format='%.4f', decimal='.') | python | def _export_table(table, fname):
"""Export DataFrame to .csv"""
import os.path as op
extension = op.splitext(fname.lower())[1]
if extension == '':
fname = fname + '.csv'
table.to_csv(fname, index=None, sep=',', encoding='utf-8',
float_format='%.4f', decimal='.') | [
"def",
"_export_table",
"(",
"table",
",",
"fname",
")",
":",
"import",
"os",
".",
"path",
"as",
"op",
"extension",
"=",
"op",
".",
"splitext",
"(",
"fname",
".",
"lower",
"(",
")",
")",
"[",
"1",
"]",
"if",
"extension",
"==",
"''",
":",
"fname",
... | Export DataFrame to .csv | [
"Export",
"DataFrame",
"to",
".",
"csv"
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/utils.py#L76-L83 | train | 206,616 |
raphaelvallat/pingouin | pingouin/utils.py | _remove_na_single | def _remove_na_single(x, axis='rows'):
"""Remove NaN in a single array.
This is an internal Pingouin function.
"""
if x.ndim == 1:
# 1D arrays
x_mask = ~np.isnan(x)
else:
# 2D arrays
ax = 1 if axis == 'rows' else 0
x_mask = ~np.any(np.isnan(x), axis=ax)
# Check if missing values are present
if ~x_mask.all():
ax = 0 if axis == 'rows' else 1
ax = 0 if x.ndim == 1 else ax
x = x.compress(x_mask, axis=ax)
return x | python | def _remove_na_single(x, axis='rows'):
"""Remove NaN in a single array.
This is an internal Pingouin function.
"""
if x.ndim == 1:
# 1D arrays
x_mask = ~np.isnan(x)
else:
# 2D arrays
ax = 1 if axis == 'rows' else 0
x_mask = ~np.any(np.isnan(x), axis=ax)
# Check if missing values are present
if ~x_mask.all():
ax = 0 if axis == 'rows' else 1
ax = 0 if x.ndim == 1 else ax
x = x.compress(x_mask, axis=ax)
return x | [
"def",
"_remove_na_single",
"(",
"x",
",",
"axis",
"=",
"'rows'",
")",
":",
"if",
"x",
".",
"ndim",
"==",
"1",
":",
"# 1D arrays",
"x_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"x",
")",
"else",
":",
"# 2D arrays",
"ax",
"=",
"1",
"if",
"axis",
"=... | Remove NaN in a single array.
This is an internal Pingouin function. | [
"Remove",
"NaN",
"in",
"a",
"single",
"array",
".",
"This",
"is",
"an",
"internal",
"Pingouin",
"function",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/utils.py#L90-L106 | train | 206,617 |
raphaelvallat/pingouin | pingouin/utils.py | remove_rm_na | def remove_rm_na(dv=None, within=None, subject=None, data=None,
aggregate='mean'):
"""Remove missing values in long-format repeated-measures dataframe.
Parameters
----------
dv : string or list
Dependent variable(s), from which the missing values should be removed.
If ``dv`` is not specified, all the columns in the dataframe are
considered. ``dv`` must be numeric.
within : string or list
Within-subject factor(s).
subject : string
Subject identifier.
data : dataframe
Long-format dataframe.
aggregate : string
Aggregation method if there are more within-factors in the data than
specified in the ``within`` argument. Can be `mean`, `median`, `sum`,
`first`, `last`, or any other function accepted by
:py:meth:`pandas.DataFrame.groupby`.
Returns
-------
data : dataframe
Dataframe without the missing values.
Notes
-----
If multiple factors are specified, the missing values are removed on the
last factor, so the order of ``within`` is important.
In addition, if there are more within-factors in the data than specified in
the ``within`` argument, data will be aggregated using the function
specified in ``aggregate``. Note that in the default case (aggregation
using the mean), all the non-numeric column(s) will be dropped.
"""
# Safety checks
assert isinstance(aggregate, str), 'aggregate must be a str.'
assert isinstance(within, (str, list)), 'within must be str or list.'
assert isinstance(subject, str), 'subject must be a string.'
assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame.'
idx_cols = _flatten_list([subject, within])
all_cols = data.columns
if data[idx_cols].isnull().any().any():
raise ValueError("NaN are present in the within-factors or in the "
"subject column. Please remove them manually.")
# Check if more within-factors are present and if so, aggregate
if (data.groupby(idx_cols).count() > 1).any().any():
# Make sure that we keep the non-numeric columns when aggregating
# This is disabled by default to avoid any confusion.
# all_others = all_cols.difference(idx_cols)
# all_num = data[all_others].select_dtypes(include='number').columns
# agg = {c: aggregate if c in all_num else 'first' for c in all_others}
data = data.groupby(idx_cols).agg(aggregate)
else:
# Set subject + within factors as index.
# Sorting is done to avoid performance warning when dropping.
data = data.set_index(idx_cols).sort_index()
# Find index with missing values
if dv is None:
iloc_nan = data.isnull().values.nonzero()[0]
else:
iloc_nan = data[dv].isnull().values.nonzero()[0]
# Drop the last within level
idx_nan = data.index[iloc_nan].droplevel(-1)
# Drop and re-order
data = data.drop(idx_nan).reset_index(drop=False)
return data.reindex(columns=all_cols).dropna(how='all', axis=1) | python | def remove_rm_na(dv=None, within=None, subject=None, data=None,
aggregate='mean'):
"""Remove missing values in long-format repeated-measures dataframe.
Parameters
----------
dv : string or list
Dependent variable(s), from which the missing values should be removed.
If ``dv`` is not specified, all the columns in the dataframe are
considered. ``dv`` must be numeric.
within : string or list
Within-subject factor(s).
subject : string
Subject identifier.
data : dataframe
Long-format dataframe.
aggregate : string
Aggregation method if there are more within-factors in the data than
specified in the ``within`` argument. Can be `mean`, `median`, `sum`,
`first`, `last`, or any other function accepted by
:py:meth:`pandas.DataFrame.groupby`.
Returns
-------
data : dataframe
Dataframe without the missing values.
Notes
-----
If multiple factors are specified, the missing values are removed on the
last factor, so the order of ``within`` is important.
In addition, if there are more within-factors in the data than specified in
the ``within`` argument, data will be aggregated using the function
specified in ``aggregate``. Note that in the default case (aggregation
using the mean), all the non-numeric column(s) will be dropped.
"""
# Safety checks
assert isinstance(aggregate, str), 'aggregate must be a str.'
assert isinstance(within, (str, list)), 'within must be str or list.'
assert isinstance(subject, str), 'subject must be a string.'
assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame.'
idx_cols = _flatten_list([subject, within])
all_cols = data.columns
if data[idx_cols].isnull().any().any():
raise ValueError("NaN are present in the within-factors or in the "
"subject column. Please remove them manually.")
# Check if more within-factors are present and if so, aggregate
if (data.groupby(idx_cols).count() > 1).any().any():
# Make sure that we keep the non-numeric columns when aggregating
# This is disabled by default to avoid any confusion.
# all_others = all_cols.difference(idx_cols)
# all_num = data[all_others].select_dtypes(include='number').columns
# agg = {c: aggregate if c in all_num else 'first' for c in all_others}
data = data.groupby(idx_cols).agg(aggregate)
else:
# Set subject + within factors as index.
# Sorting is done to avoid performance warning when dropping.
data = data.set_index(idx_cols).sort_index()
# Find index with missing values
if dv is None:
iloc_nan = data.isnull().values.nonzero()[0]
else:
iloc_nan = data[dv].isnull().values.nonzero()[0]
# Drop the last within level
idx_nan = data.index[iloc_nan].droplevel(-1)
# Drop and re-order
data = data.drop(idx_nan).reset_index(drop=False)
return data.reindex(columns=all_cols).dropna(how='all', axis=1) | [
"def",
"remove_rm_na",
"(",
"dv",
"=",
"None",
",",
"within",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"data",
"=",
"None",
",",
"aggregate",
"=",
"'mean'",
")",
":",
"# Safety checks",
"assert",
"isinstance",
"(",
"aggregate",
",",
"str",
")",
"... | Remove missing values in long-format repeated-measures dataframe.
Parameters
----------
dv : string or list
Dependent variable(s), from which the missing values should be removed.
If ``dv`` is not specified, all the columns in the dataframe are
considered. ``dv`` must be numeric.
within : string or list
Within-subject factor(s).
subject : string
Subject identifier.
data : dataframe
Long-format dataframe.
aggregate : string
Aggregation method if there are more within-factors in the data than
specified in the ``within`` argument. Can be `mean`, `median`, `sum`,
`first`, `last`, or any other function accepted by
:py:meth:`pandas.DataFrame.groupby`.
Returns
-------
data : dataframe
Dataframe without the missing values.
Notes
-----
If multiple factors are specified, the missing values are removed on the
last factor, so the order of ``within`` is important.
In addition, if there are more within-factors in the data than specified in
the ``within`` argument, data will be aggregated using the function
specified in ``aggregate``. Note that in the default case (aggregation
using the mean), all the non-numeric column(s) will be dropped. | [
"Remove",
"missing",
"values",
"in",
"long",
"-",
"format",
"repeated",
"-",
"measures",
"dataframe",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/utils.py#L193-L267 | train | 206,618 |
raphaelvallat/pingouin | pingouin/utils.py | _flatten_list | def _flatten_list(x):
"""Flatten an arbitrarily nested list into a new list.
This can be useful to select pandas DataFrame columns.
From https://stackoverflow.com/a/16176969/10581531
Examples
--------
>>> from pingouin.utils import _flatten_list
>>> x = ['X1', ['M1', 'M2'], 'Y1', ['Y2']]
>>> _flatten_list(x)
['X1', 'M1', 'M2', 'Y1', 'Y2']
>>> x = ['Xaa', 'Xbb', 'Xcc']
>>> _flatten_list(x)
['Xaa', 'Xbb', 'Xcc']
"""
result = []
# Remove None
x = list(filter(None.__ne__, x))
for el in x:
x_is_iter = isinstance(x, collections.Iterable)
if x_is_iter and not isinstance(el, (str, tuple)):
result.extend(_flatten_list(el))
else:
result.append(el)
return result | python | def _flatten_list(x):
"""Flatten an arbitrarily nested list into a new list.
This can be useful to select pandas DataFrame columns.
From https://stackoverflow.com/a/16176969/10581531
Examples
--------
>>> from pingouin.utils import _flatten_list
>>> x = ['X1', ['M1', 'M2'], 'Y1', ['Y2']]
>>> _flatten_list(x)
['X1', 'M1', 'M2', 'Y1', 'Y2']
>>> x = ['Xaa', 'Xbb', 'Xcc']
>>> _flatten_list(x)
['Xaa', 'Xbb', 'Xcc']
"""
result = []
# Remove None
x = list(filter(None.__ne__, x))
for el in x:
x_is_iter = isinstance(x, collections.Iterable)
if x_is_iter and not isinstance(el, (str, tuple)):
result.extend(_flatten_list(el))
else:
result.append(el)
return result | [
"def",
"_flatten_list",
"(",
"x",
")",
":",
"result",
"=",
"[",
"]",
"# Remove None",
"x",
"=",
"list",
"(",
"filter",
"(",
"None",
".",
"__ne__",
",",
"x",
")",
")",
"for",
"el",
"in",
"x",
":",
"x_is_iter",
"=",
"isinstance",
"(",
"x",
",",
"co... | Flatten an arbitrarily nested list into a new list.
This can be useful to select pandas DataFrame columns.
From https://stackoverflow.com/a/16176969/10581531
Examples
--------
>>> from pingouin.utils import _flatten_list
>>> x = ['X1', ['M1', 'M2'], 'Y1', ['Y2']]
>>> _flatten_list(x)
['X1', 'M1', 'M2', 'Y1', 'Y2']
>>> x = ['Xaa', 'Xbb', 'Xcc']
>>> _flatten_list(x)
['Xaa', 'Xbb', 'Xcc'] | [
"Flatten",
"an",
"arbitrarily",
"nested",
"list",
"into",
"a",
"new",
"list",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/utils.py#L274-L301 | train | 206,619 |
raphaelvallat/pingouin | pingouin/bayesian.py | _format_bf | def _format_bf(bf, precision=3, trim='0'):
"""Format BF10 to floating point or scientific notation.
"""
if bf >= 1e4 or bf <= 1e-4:
out = np.format_float_scientific(bf, precision=precision, trim=trim)
else:
out = np.format_float_positional(bf, precision=precision, trim=trim)
return out | python | def _format_bf(bf, precision=3, trim='0'):
"""Format BF10 to floating point or scientific notation.
"""
if bf >= 1e4 or bf <= 1e-4:
out = np.format_float_scientific(bf, precision=precision, trim=trim)
else:
out = np.format_float_positional(bf, precision=precision, trim=trim)
return out | [
"def",
"_format_bf",
"(",
"bf",
",",
"precision",
"=",
"3",
",",
"trim",
"=",
"'0'",
")",
":",
"if",
"bf",
">=",
"1e4",
"or",
"bf",
"<=",
"1e-4",
":",
"out",
"=",
"np",
".",
"format_float_scientific",
"(",
"bf",
",",
"precision",
"=",
"precision",
... | Format BF10 to floating point or scientific notation. | [
"Format",
"BF10",
"to",
"floating",
"point",
"or",
"scientific",
"notation",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/bayesian.py#L9-L16 | train | 206,620 |
raphaelvallat/pingouin | pingouin/bayesian.py | bayesfactor_pearson | def bayesfactor_pearson(r, n):
"""
Bayes Factor of a Pearson correlation.
Parameters
----------
r : float
Pearson correlation coefficient
n : int
Sample size
Returns
-------
bf : str
Bayes Factor (BF10).
The Bayes Factor quantifies the evidence in favour of the alternative
hypothesis.
Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m
If you would like to compute the Bayes Factor directly from the raw data
instead of from the correlation coefficient, use the
:py:func:`pingouin.corr` function.
The JZS Bayes Factor is approximated using the formula described in
ref [1]_:
.. math::
BF_{10} = \\frac{\\sqrt{n/2}}{\\gamma(1/2)}*
\\int_{0}^{\\infty}e((n-2)/2)*
log(1+g)+(-(n-1)/2)log(1+(1-r^2)*g)+(-3/2)log(g)-n/2g
where **n** is the sample size and **r** is the Pearson correlation
coefficient.
References
----------
.. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
hypothesis test for correlations and partial correlations.
Psychon. Bull. Rev. 19, 1057–1064.
https://doi.org/10.3758/s13423-012-0295-x
Examples
--------
Bayes Factor of a Pearson correlation
>>> from pingouin import bayesfactor_pearson
>>> bf = bayesfactor_pearson(0.6, 20)
>>> print("Bayes Factor: %s" % bf)
Bayes Factor: 8.221
"""
from scipy.special import gamma
# Function to be integrated
def fun(g, r, n):
return np.exp(((n - 2) / 2) * np.log(1 + g) + (-(n - 1) / 2)
* np.log(1 + (1 - r**2) * g) + (-3 / 2)
* np.log(g) + - n / (2 * g))
# JZS Bayes factor calculation
integr = quad(fun, 0, np.inf, args=(r, n))[0]
bf10 = np.sqrt((n / 2)) / gamma(1 / 2) * integr
return _format_bf(bf10) | python | def bayesfactor_pearson(r, n):
"""
Bayes Factor of a Pearson correlation.
Parameters
----------
r : float
Pearson correlation coefficient
n : int
Sample size
Returns
-------
bf : str
Bayes Factor (BF10).
The Bayes Factor quantifies the evidence in favour of the alternative
hypothesis.
Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m
If you would like to compute the Bayes Factor directly from the raw data
instead of from the correlation coefficient, use the
:py:func:`pingouin.corr` function.
The JZS Bayes Factor is approximated using the formula described in
ref [1]_:
.. math::
BF_{10} = \\frac{\\sqrt{n/2}}{\\gamma(1/2)}*
\\int_{0}^{\\infty}e((n-2)/2)*
log(1+g)+(-(n-1)/2)log(1+(1-r^2)*g)+(-3/2)log(g)-n/2g
where **n** is the sample size and **r** is the Pearson correlation
coefficient.
References
----------
.. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
hypothesis test for correlations and partial correlations.
Psychon. Bull. Rev. 19, 1057–1064.
https://doi.org/10.3758/s13423-012-0295-x
Examples
--------
Bayes Factor of a Pearson correlation
>>> from pingouin import bayesfactor_pearson
>>> bf = bayesfactor_pearson(0.6, 20)
>>> print("Bayes Factor: %s" % bf)
Bayes Factor: 8.221
"""
from scipy.special import gamma
# Function to be integrated
def fun(g, r, n):
return np.exp(((n - 2) / 2) * np.log(1 + g) + (-(n - 1) / 2)
* np.log(1 + (1 - r**2) * g) + (-3 / 2)
* np.log(g) + - n / (2 * g))
# JZS Bayes factor calculation
integr = quad(fun, 0, np.inf, args=(r, n))[0]
bf10 = np.sqrt((n / 2)) / gamma(1 / 2) * integr
return _format_bf(bf10) | [
"def",
"bayesfactor_pearson",
"(",
"r",
",",
"n",
")",
":",
"from",
"scipy",
".",
"special",
"import",
"gamma",
"# Function to be integrated",
"def",
"fun",
"(",
"g",
",",
"r",
",",
"n",
")",
":",
"return",
"np",
".",
"exp",
"(",
"(",
"(",
"n",
"-",
... | Bayes Factor of a Pearson correlation.
Parameters
----------
r : float
Pearson correlation coefficient
n : int
Sample size
Returns
-------
bf : str
Bayes Factor (BF10).
The Bayes Factor quantifies the evidence in favour of the alternative
hypothesis.
Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m
If you would like to compute the Bayes Factor directly from the raw data
instead of from the correlation coefficient, use the
:py:func:`pingouin.corr` function.
The JZS Bayes Factor is approximated using the formula described in
ref [1]_:
.. math::
BF_{10} = \\frac{\\sqrt{n/2}}{\\gamma(1/2)}*
\\int_{0}^{\\infty}e((n-2)/2)*
log(1+g)+(-(n-1)/2)log(1+(1-r^2)*g)+(-3/2)log(g)-n/2g
where **n** is the sample size and **r** is the Pearson correlation
coefficient.
References
----------
.. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
hypothesis test for correlations and partial correlations.
Psychon. Bull. Rev. 19, 1057–1064.
https://doi.org/10.3758/s13423-012-0295-x
Examples
--------
Bayes Factor of a Pearson correlation
>>> from pingouin import bayesfactor_pearson
>>> bf = bayesfactor_pearson(0.6, 20)
>>> print("Bayes Factor: %s" % bf)
Bayes Factor: 8.221 | [
"Bayes",
"Factor",
"of",
"a",
"Pearson",
"correlation",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/bayesian.py#L124-L192 | train | 206,621 |
raphaelvallat/pingouin | pingouin/distribution.py | normality | def normality(*args, alpha=.05):
"""Shapiro-Wilk univariate normality test.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be of different lengths.
Returns
-------
normal : boolean
True if x comes from a normal distribution.
p : float
P-value.
See Also
--------
homoscedasticity : Test equality of variance.
sphericity : Mauchly's test for sphericity.
Notes
-----
The Shapiro-Wilk test calculates a :math:`W` statistic that tests whether a
random sample :math:`x_1, x_2, ..., x_n` comes from a normal distribution.
The :math:`W` statistic is calculated as follows:
.. math::
W = \\frac{(\\sum_{i=1}^n a_i x_{i})^2}
{\\sum_{i=1}^n (x_i - \\overline{x})^2}
where the :math:`x_i` are the ordered sample values (in ascending
order) and the :math:`a_i` are constants generated from the means,
variances and covariances of the order statistics of a sample of size
:math:`n` from a standard normal distribution. Specifically:
.. math:: (a_1, ..., a_n) = \\frac{m^TV^{-1}}{(m^TV^{-1}V^{-1}m)^{1/2}}
with :math:`m = (m_1, ..., m_n)^T` and :math:`(m_1, ..., m_n)` are the
expected values of the order statistics of independent and identically
distributed random variables sampled from the standard normal distribution,
and :math:`V` is the covariance matrix of those order statistics.
The null-hypothesis of this test is that the population is normally
distributed. Thus, if the p-value is less than the
chosen alpha level (typically set at 0.05), then the null hypothesis is
rejected and there is evidence that the data tested are not normally
distributed.
The result of the Shapiro-Wilk test should be interpreted with caution in
the case of large sample sizes. Indeed, quoting from Wikipedia:
*"Like most statistical significance tests, if the sample size is
sufficiently large this test may detect even trivial departures from the
null hypothesis (i.e., although there may be some statistically significant
effect, it may be too small to be of any practical significance); thus,
additional investigation of the effect size is typically advisable,
e.g., a Q–Q plot in this case."*
References
----------
.. [1] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
.. [2] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [3] https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test
Examples
--------
1. Test the normality of one array.
>>> import numpy as np
>>> from pingouin import normality
>>> np.random.seed(123)
>>> x = np.random.normal(size=100)
>>> normal, p = normality(x, alpha=.05)
>>> print(normal, p)
True 0.275
2. Test the normality of two arrays.
>>> import numpy as np
>>> from pingouin import normality
>>> np.random.seed(123)
>>> x = np.random.normal(size=100)
>>> y = np.random.rand(100)
>>> normal, p = normality(x, y, alpha=.05)
>>> print(normal, p)
[ True False] [0.275 0.001]
"""
from scipy.stats import shapiro
k = len(args)
p = np.zeros(k)
normal = np.zeros(k, 'bool')
for j in range(k):
_, p[j] = shapiro(args[j])
normal[j] = True if p[j] > alpha else False
if k == 1:
normal = bool(normal)
p = float(p)
return normal, np.round(p, 3) | python | def normality(*args, alpha=.05):
"""Shapiro-Wilk univariate normality test.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be of different lengths.
Returns
-------
normal : boolean
True if x comes from a normal distribution.
p : float
P-value.
See Also
--------
homoscedasticity : Test equality of variance.
sphericity : Mauchly's test for sphericity.
Notes
-----
The Shapiro-Wilk test calculates a :math:`W` statistic that tests whether a
random sample :math:`x_1, x_2, ..., x_n` comes from a normal distribution.
The :math:`W` statistic is calculated as follows:
.. math::
W = \\frac{(\\sum_{i=1}^n a_i x_{i})^2}
{\\sum_{i=1}^n (x_i - \\overline{x})^2}
where the :math:`x_i` are the ordered sample values (in ascending
order) and the :math:`a_i` are constants generated from the means,
variances and covariances of the order statistics of a sample of size
:math:`n` from a standard normal distribution. Specifically:
.. math:: (a_1, ..., a_n) = \\frac{m^TV^{-1}}{(m^TV^{-1}V^{-1}m)^{1/2}}
with :math:`m = (m_1, ..., m_n)^T` and :math:`(m_1, ..., m_n)` are the
expected values of the order statistics of independent and identically
distributed random variables sampled from the standard normal distribution,
and :math:`V` is the covariance matrix of those order statistics.
The null-hypothesis of this test is that the population is normally
distributed. Thus, if the p-value is less than the
chosen alpha level (typically set at 0.05), then the null hypothesis is
rejected and there is evidence that the data tested are not normally
distributed.
The result of the Shapiro-Wilk test should be interpreted with caution in
the case of large sample sizes. Indeed, quoting from Wikipedia:
*"Like most statistical significance tests, if the sample size is
sufficiently large this test may detect even trivial departures from the
null hypothesis (i.e., although there may be some statistically significant
effect, it may be too small to be of any practical significance); thus,
additional investigation of the effect size is typically advisable,
e.g., a Q–Q plot in this case."*
References
----------
.. [1] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
.. [2] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [3] https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test
Examples
--------
1. Test the normality of one array.
>>> import numpy as np
>>> from pingouin import normality
>>> np.random.seed(123)
>>> x = np.random.normal(size=100)
>>> normal, p = normality(x, alpha=.05)
>>> print(normal, p)
True 0.275
2. Test the normality of two arrays.
>>> import numpy as np
>>> from pingouin import normality
>>> np.random.seed(123)
>>> x = np.random.normal(size=100)
>>> y = np.random.rand(100)
>>> normal, p = normality(x, y, alpha=.05)
>>> print(normal, p)
[ True False] [0.275 0.001]
"""
from scipy.stats import shapiro
k = len(args)
p = np.zeros(k)
normal = np.zeros(k, 'bool')
for j in range(k):
_, p[j] = shapiro(args[j])
normal[j] = True if p[j] > alpha else False
if k == 1:
normal = bool(normal)
p = float(p)
return normal, np.round(p, 3) | [
"def",
"normality",
"(",
"*",
"args",
",",
"alpha",
"=",
".05",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"shapiro",
"k",
"=",
"len",
"(",
"args",
")",
"p",
"=",
"np",
".",
"zeros",
"(",
"k",
")",
"normal",
"=",
"np",
".",
"zeros",
"("... | Shapiro-Wilk univariate normality test.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be of different lengths.
Returns
-------
normal : boolean
True if x comes from a normal distribution.
p : float
P-value.
See Also
--------
homoscedasticity : Test equality of variance.
sphericity : Mauchly's test for sphericity.
Notes
-----
The Shapiro-Wilk test calculates a :math:`W` statistic that tests whether a
random sample :math:`x_1, x_2, ..., x_n` comes from a normal distribution.
The :math:`W` statistic is calculated as follows:
.. math::
W = \\frac{(\\sum_{i=1}^n a_i x_{i})^2}
{\\sum_{i=1}^n (x_i - \\overline{x})^2}
where the :math:`x_i` are the ordered sample values (in ascending
order) and the :math:`a_i` are constants generated from the means,
variances and covariances of the order statistics of a sample of size
:math:`n` from a standard normal distribution. Specifically:
.. math:: (a_1, ..., a_n) = \\frac{m^TV^{-1}}{(m^TV^{-1}V^{-1}m)^{1/2}}
with :math:`m = (m_1, ..., m_n)^T` and :math:`(m_1, ..., m_n)` are the
expected values of the order statistics of independent and identically
distributed random variables sampled from the standard normal distribution,
and :math:`V` is the covariance matrix of those order statistics.
The null-hypothesis of this test is that the population is normally
distributed. Thus, if the p-value is less than the
chosen alpha level (typically set at 0.05), then the null hypothesis is
rejected and there is evidence that the data tested are not normally
distributed.
The result of the Shapiro-Wilk test should be interpreted with caution in
the case of large sample sizes. Indeed, quoting from Wikipedia:
*"Like most statistical significance tests, if the sample size is
sufficiently large this test may detect even trivial departures from the
null hypothesis (i.e., although there may be some statistically significant
effect, it may be too small to be of any practical significance); thus,
additional investigation of the effect size is typically advisable,
e.g., a Q–Q plot in this case."*
References
----------
.. [1] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test
for normality (complete samples). Biometrika, 52(3/4), 591-611.
.. [2] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [3] https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test
Examples
--------
1. Test the normality of one array.
>>> import numpy as np
>>> from pingouin import normality
>>> np.random.seed(123)
>>> x = np.random.normal(size=100)
>>> normal, p = normality(x, alpha=.05)
>>> print(normal, p)
True 0.275
2. Test the normality of two arrays.
>>> import numpy as np
>>> from pingouin import normality
>>> np.random.seed(123)
>>> x = np.random.normal(size=100)
>>> y = np.random.rand(100)
>>> normal, p = normality(x, y, alpha=.05)
>>> print(normal, p)
[ True False] [0.275 0.001] | [
"Shapiro",
"-",
"Wilk",
"univariate",
"normality",
"test",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/distribution.py#L58-L162 | train | 206,622 |
raphaelvallat/pingouin | pingouin/distribution.py | homoscedasticity | def homoscedasticity(*args, alpha=.05):
"""Test equality of variance.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
Returns
-------
equal_var : boolean
True if data have equal variance.
p : float
P-value.
See Also
--------
normality : Test the univariate normality of one or more array(s).
sphericity : Mauchly's test for sphericity.
Notes
-----
This function first tests if the data are normally distributed using the
Shapiro-Wilk test. If yes, then the homogeneity of variances is measured
using the Bartlett test. If the data are not normally distributed, the
Levene (1960) test, which is less sensitive to departure from
normality, is used.
The **Bartlett** :math:`T` statistic is defined as:
.. math::
T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1)
\\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))}
- 1/(N-k))}
where :math:`s_i^2` is the variance of the :math:`i^{th}` group,
:math:`N` is the total sample size, :math:`N_i` is the sample size of the
:math:`i^{th}` group, :math:`k` is the number of groups,
and :math:`s_p^2` is the pooled variance.
The pooled variance is a weighted average of the group variances and is
defined as:
.. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k)
The p-value is then computed using a chi-square distribution:
.. math:: T \\sim \\chi^2(k-1)
The **Levene** :math:`W` statistic is defined as:
.. math::
W = \\frac{(N-k)} {(k-1)}
\\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} }
{\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} }
where :math:`Z_{ij} = |Y_{ij} - median({Y}_{i.})|`,
:math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and
:math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`.
The p-value is then computed using a F-distribution:
.. math:: W \\sim F(k-1, N-k)
References
----------
.. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical
tests. Proc. R. Soc. Lond. A, 160(901), 268-282.
.. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the
equality of variances. Journal of the American Statistical
Association, 69(346), 364-367.
.. [3] NIST/SEMATECH e-Handbook of Statistical Methods,
http://www.itl.nist.gov/div898/handbook/
Examples
--------
Test the homoscedasticity of two arrays.
>>> import numpy as np
>>> from pingouin import homoscedasticity
>>> np.random.seed(123)
>>> # Scale = standard deviation of the distribution.
>>> x = np.random.normal(loc=0, scale=1., size=100)
>>> y = np.random.normal(loc=0, scale=0.8,size=100)
>>> equal_var, p = homoscedasticity(x, y, alpha=.05)
>>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p)
1.273 0.602 False 0.0
"""
from scipy.stats import levene, bartlett
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
# Test normality of data
normal, _ = normality(*args)
if np.count_nonzero(normal) != normal.size:
# print('Data are not normally distributed. Using Levene test.')
_, p = levene(*args)
else:
_, p = bartlett(*args)
equal_var = True if p > alpha else False
return equal_var, np.round(p, 3) | python | def homoscedasticity(*args, alpha=.05):
"""Test equality of variance.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
Returns
-------
equal_var : boolean
True if data have equal variance.
p : float
P-value.
See Also
--------
normality : Test the univariate normality of one or more array(s).
sphericity : Mauchly's test for sphericity.
Notes
-----
This function first tests if the data are normally distributed using the
Shapiro-Wilk test. If yes, then the homogeneity of variances is measured
using the Bartlett test. If the data are not normally distributed, the
Levene (1960) test, which is less sensitive to departure from
normality, is used.
The **Bartlett** :math:`T` statistic is defined as:
.. math::
T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1)
\\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))}
- 1/(N-k))}
where :math:`s_i^2` is the variance of the :math:`i^{th}` group,
:math:`N` is the total sample size, :math:`N_i` is the sample size of the
:math:`i^{th}` group, :math:`k` is the number of groups,
and :math:`s_p^2` is the pooled variance.
The pooled variance is a weighted average of the group variances and is
defined as:
.. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k)
The p-value is then computed using a chi-square distribution:
.. math:: T \\sim \\chi^2(k-1)
The **Levene** :math:`W` statistic is defined as:
.. math::
W = \\frac{(N-k)} {(k-1)}
\\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} }
{\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} }
where :math:`Z_{ij} = |Y_{ij} - median({Y}_{i.})|`,
:math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and
:math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`.
The p-value is then computed using a F-distribution:
.. math:: W \\sim F(k-1, N-k)
References
----------
.. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical
tests. Proc. R. Soc. Lond. A, 160(901), 268-282.
.. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the
equality of variances. Journal of the American Statistical
Association, 69(346), 364-367.
.. [3] NIST/SEMATECH e-Handbook of Statistical Methods,
http://www.itl.nist.gov/div898/handbook/
Examples
--------
Test the homoscedasticity of two arrays.
>>> import numpy as np
>>> from pingouin import homoscedasticity
>>> np.random.seed(123)
>>> # Scale = standard deviation of the distribution.
>>> x = np.random.normal(loc=0, scale=1., size=100)
>>> y = np.random.normal(loc=0, scale=0.8,size=100)
>>> equal_var, p = homoscedasticity(x, y, alpha=.05)
>>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p)
1.273 0.602 False 0.0
"""
from scipy.stats import levene, bartlett
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
# Test normality of data
normal, _ = normality(*args)
if np.count_nonzero(normal) != normal.size:
# print('Data are not normally distributed. Using Levene test.')
_, p = levene(*args)
else:
_, p = bartlett(*args)
equal_var = True if p > alpha else False
return equal_var, np.round(p, 3) | [
"def",
"homoscedasticity",
"(",
"*",
"args",
",",
"alpha",
"=",
".05",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"levene",
",",
"bartlett",
"k",
"=",
"len",
"(",
"args",
")",
"if",
"k",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Must ente... | Test equality of variance.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
Returns
-------
equal_var : boolean
True if data have equal variance.
p : float
P-value.
See Also
--------
normality : Test the univariate normality of one or more array(s).
sphericity : Mauchly's test for sphericity.
Notes
-----
This function first tests if the data are normally distributed using the
Shapiro-Wilk test. If yes, then the homogeneity of variances is measured
using the Bartlett test. If the data are not normally distributed, the
Levene (1960) test, which is less sensitive to departure from
normality, is used.
The **Bartlett** :math:`T` statistic is defined as:
.. math::
T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1)
\\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))}
- 1/(N-k))}
where :math:`s_i^2` is the variance of the :math:`i^{th}` group,
:math:`N` is the total sample size, :math:`N_i` is the sample size of the
:math:`i^{th}` group, :math:`k` is the number of groups,
and :math:`s_p^2` is the pooled variance.
The pooled variance is a weighted average of the group variances and is
defined as:
.. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k)
The p-value is then computed using a chi-square distribution:
.. math:: T \\sim \\chi^2(k-1)
The **Levene** :math:`W` statistic is defined as:
.. math::
W = \\frac{(N-k)} {(k-1)}
\\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} }
{\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} }
where :math:`Z_{ij} = |Y_{ij} - median({Y}_{i.})|`,
:math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and
:math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`.
The p-value is then computed using a F-distribution:
.. math:: W \\sim F(k-1, N-k)
References
----------
.. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical
tests. Proc. R. Soc. Lond. A, 160(901), 268-282.
.. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the
equality of variances. Journal of the American Statistical
Association, 69(346), 364-367.
.. [3] NIST/SEMATECH e-Handbook of Statistical Methods,
http://www.itl.nist.gov/div898/handbook/
Examples
--------
Test the homoscedasticity of two arrays.
>>> import numpy as np
>>> from pingouin import homoscedasticity
>>> np.random.seed(123)
>>> # Scale = standard deviation of the distribution.
>>> x = np.random.normal(loc=0, scale=1., size=100)
>>> y = np.random.normal(loc=0, scale=0.8,size=100)
>>> equal_var, p = homoscedasticity(x, y, alpha=.05)
>>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p)
1.273 0.602 False 0.0 | [
"Test",
"equality",
"of",
"variance",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/distribution.py#L165-L271 | train | 206,623 |
raphaelvallat/pingouin | pingouin/distribution.py | anderson | def anderson(*args, dist='norm'):
"""Anderson-Darling test of distribution.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
dist : string
Distribution ('norm', 'expon', 'logistic', 'gumbel')
Returns
-------
from_dist : boolean
True if data comes from this distribution.
sig_level : float
The significance levels for the corresponding critical values in %.
(See :py:func:`scipy.stats.anderson` for more details)
Examples
--------
1. Test that an array comes from a normal distribution
>>> from pingouin import anderson
>>> x = [2.3, 5.1, 4.3, 2.6, 7.8, 9.2, 1.4]
>>> anderson(x, dist='norm')
(False, 15.0)
2. Test that two arrays comes from an exponential distribution
>>> y = [2.8, 12.4, 28.3, 3.2, 16.3, 14.2]
>>> anderson(x, y, dist='expon')
(array([False, False]), array([15., 15.]))
"""
from scipy.stats import anderson as ads
k = len(args)
from_dist = np.zeros(k, 'bool')
sig_level = np.zeros(k)
for j in range(k):
st, cr, sig = ads(args[j], dist=dist)
from_dist[j] = True if (st > cr).any() else False
sig_level[j] = sig[np.argmin(np.abs(st - cr))]
if k == 1:
from_dist = bool(from_dist)
sig_level = float(sig_level)
return from_dist, sig_level | python | def anderson(*args, dist='norm'):
"""Anderson-Darling test of distribution.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
dist : string
Distribution ('norm', 'expon', 'logistic', 'gumbel')
Returns
-------
from_dist : boolean
True if data comes from this distribution.
sig_level : float
The significance levels for the corresponding critical values in %.
(See :py:func:`scipy.stats.anderson` for more details)
Examples
--------
1. Test that an array comes from a normal distribution
>>> from pingouin import anderson
>>> x = [2.3, 5.1, 4.3, 2.6, 7.8, 9.2, 1.4]
>>> anderson(x, dist='norm')
(False, 15.0)
2. Test that two arrays comes from an exponential distribution
>>> y = [2.8, 12.4, 28.3, 3.2, 16.3, 14.2]
>>> anderson(x, y, dist='expon')
(array([False, False]), array([15., 15.]))
"""
from scipy.stats import anderson as ads
k = len(args)
from_dist = np.zeros(k, 'bool')
sig_level = np.zeros(k)
for j in range(k):
st, cr, sig = ads(args[j], dist=dist)
from_dist[j] = True if (st > cr).any() else False
sig_level[j] = sig[np.argmin(np.abs(st - cr))]
if k == 1:
from_dist = bool(from_dist)
sig_level = float(sig_level)
return from_dist, sig_level | [
"def",
"anderson",
"(",
"*",
"args",
",",
"dist",
"=",
"'norm'",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"anderson",
"as",
"ads",
"k",
"=",
"len",
"(",
"args",
")",
"from_dist",
"=",
"np",
".",
"zeros",
"(",
"k",
",",
"'bool'",
")",
"s... | Anderson-Darling test of distribution.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
dist : string
Distribution ('norm', 'expon', 'logistic', 'gumbel')
Returns
-------
from_dist : boolean
True if data comes from this distribution.
sig_level : float
The significance levels for the corresponding critical values in %.
(See :py:func:`scipy.stats.anderson` for more details)
Examples
--------
1. Test that an array comes from a normal distribution
>>> from pingouin import anderson
>>> x = [2.3, 5.1, 4.3, 2.6, 7.8, 9.2, 1.4]
>>> anderson(x, dist='norm')
(False, 15.0)
2. Test that two arrays comes from an exponential distribution
>>> y = [2.8, 12.4, 28.3, 3.2, 16.3, 14.2]
>>> anderson(x, y, dist='expon')
(array([False, False]), array([15., 15.])) | [
"Anderson",
"-",
"Darling",
"test",
"of",
"distribution",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/distribution.py#L274-L319 | train | 206,624 |
raphaelvallat/pingouin | pingouin/distribution.py | epsilon | def epsilon(data, correction='gg'):
"""Epsilon adjustement factor for repeated measures.
Parameters
----------
data : pd.DataFrame
DataFrame containing the repeated measurements.
``data`` must be in wide-format. To convert from wide to long format,
use the :py:func:`pandas.pivot_table` function.
correction : string
Specify the epsilon version ::
'gg' : Greenhouse-Geisser
'hf' : Huynh-Feldt
'lb' : Lower bound
Returns
-------
eps : float
Epsilon adjustement factor.
Notes
-----
The **lower bound** for epsilon is:
.. math:: lb = \\frac{1}{k - 1}
where :math:`k` is the number of groups (= data.shape[1]).
The **Greenhouse-Geisser epsilon** is given by:
.. math::
\\epsilon_{GG} = \\frac{k^2(\\overline{diag(S)} - \\overline{S})^2}
{(k-1)(\\sum_{i=1}^{k}\\sum_{j=1}^{k}s_{ij}^2 - 2k\\sum_{j=1}^{k}
\\overline{s_i}^2 + k^2\\overline{S}^2)}
where :math:`S` is the covariance matrix, :math:`\\overline{S}` the
grandmean of S and :math:`\\overline{diag(S)}` the mean of all the elements
on the diagonal of S (i.e. mean of the variances).
The **Huynh-Feldt epsilon** is given by:
.. math::
\\epsilon_{HF} = \\frac{n(k-1)\\epsilon_{GG}-2}{(k-1)
(n-1-(k-1)\\epsilon_{GG})}
where :math:`n` is the number of subjects.
References
----------
.. [1] http://www.real-statistics.com/anova-repeated-measures/sphericity/
Examples
--------
>>> import pandas as pd
>>> from pingouin import epsilon
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> epsilon(data, correction='gg')
0.5587754577585018
>>> epsilon(data, correction='hf')
0.6223448311539781
>>> epsilon(data, correction='lb')
0.5
"""
# Covariance matrix
S = data.cov()
n = data.shape[0]
k = data.shape[1]
# Lower bound
if correction == 'lb':
if S.columns.nlevels == 1:
return 1 / (k - 1)
elif S.columns.nlevels == 2:
ka = S.columns.levels[0].size
kb = S.columns.levels[1].size
return 1 / ((ka - 1) * (kb - 1))
# Compute GGEpsilon
# - Method 1
mean_var = np.diag(S).mean()
S_mean = S.mean().mean()
ss_mat = (S**2).sum().sum()
ss_rows = (S.mean(1)**2).sum().sum()
num = (k * (mean_var - S_mean))**2
den = (k - 1) * (ss_mat - 2 * k * ss_rows + k**2 * S_mean**2)
eps = np.min([num / den, 1])
# - Method 2
# S_pop = S - S.mean(0)[:, None] - S.mean(1)[None, :] + S.mean()
# eig = np.linalg.eigvalsh(S_pop)[1:]
# V = eig.sum()**2 / np.sum(eig**2)
# eps = V / (k - 1)
# Huynh-Feldt
if correction == 'hf':
num = n * (k - 1) * eps - 2
den = (k - 1) * (n - 1 - (k - 1) * eps)
eps = np.min([num / den, 1])
return eps | python | def epsilon(data, correction='gg'):
"""Epsilon adjustement factor for repeated measures.
Parameters
----------
data : pd.DataFrame
DataFrame containing the repeated measurements.
``data`` must be in wide-format. To convert from wide to long format,
use the :py:func:`pandas.pivot_table` function.
correction : string
Specify the epsilon version ::
'gg' : Greenhouse-Geisser
'hf' : Huynh-Feldt
'lb' : Lower bound
Returns
-------
eps : float
Epsilon adjustement factor.
Notes
-----
The **lower bound** for epsilon is:
.. math:: lb = \\frac{1}{k - 1}
where :math:`k` is the number of groups (= data.shape[1]).
The **Greenhouse-Geisser epsilon** is given by:
.. math::
\\epsilon_{GG} = \\frac{k^2(\\overline{diag(S)} - \\overline{S})^2}
{(k-1)(\\sum_{i=1}^{k}\\sum_{j=1}^{k}s_{ij}^2 - 2k\\sum_{j=1}^{k}
\\overline{s_i}^2 + k^2\\overline{S}^2)}
where :math:`S` is the covariance matrix, :math:`\\overline{S}` the
grandmean of S and :math:`\\overline{diag(S)}` the mean of all the elements
on the diagonal of S (i.e. mean of the variances).
The **Huynh-Feldt epsilon** is given by:
.. math::
\\epsilon_{HF} = \\frac{n(k-1)\\epsilon_{GG}-2}{(k-1)
(n-1-(k-1)\\epsilon_{GG})}
where :math:`n` is the number of subjects.
References
----------
.. [1] http://www.real-statistics.com/anova-repeated-measures/sphericity/
Examples
--------
>>> import pandas as pd
>>> from pingouin import epsilon
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> epsilon(data, correction='gg')
0.5587754577585018
>>> epsilon(data, correction='hf')
0.6223448311539781
>>> epsilon(data, correction='lb')
0.5
"""
# Covariance matrix
S = data.cov()
n = data.shape[0]
k = data.shape[1]
# Lower bound
if correction == 'lb':
if S.columns.nlevels == 1:
return 1 / (k - 1)
elif S.columns.nlevels == 2:
ka = S.columns.levels[0].size
kb = S.columns.levels[1].size
return 1 / ((ka - 1) * (kb - 1))
# Compute GGEpsilon
# - Method 1
mean_var = np.diag(S).mean()
S_mean = S.mean().mean()
ss_mat = (S**2).sum().sum()
ss_rows = (S.mean(1)**2).sum().sum()
num = (k * (mean_var - S_mean))**2
den = (k - 1) * (ss_mat - 2 * k * ss_rows + k**2 * S_mean**2)
eps = np.min([num / den, 1])
# - Method 2
# S_pop = S - S.mean(0)[:, None] - S.mean(1)[None, :] + S.mean()
# eig = np.linalg.eigvalsh(S_pop)[1:]
# V = eig.sum()**2 / np.sum(eig**2)
# eps = V / (k - 1)
# Huynh-Feldt
if correction == 'hf':
num = n * (k - 1) * eps - 2
den = (k - 1) * (n - 1 - (k - 1) * eps)
eps = np.min([num / den, 1])
return eps | [
"def",
"epsilon",
"(",
"data",
",",
"correction",
"=",
"'gg'",
")",
":",
"# Covariance matrix",
"S",
"=",
"data",
".",
"cov",
"(",
")",
"n",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"k",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"# Lower bound",
... | Epsilon adjustement factor for repeated measures.
Parameters
----------
data : pd.DataFrame
DataFrame containing the repeated measurements.
``data`` must be in wide-format. To convert from wide to long format,
use the :py:func:`pandas.pivot_table` function.
correction : string
Specify the epsilon version ::
'gg' : Greenhouse-Geisser
'hf' : Huynh-Feldt
'lb' : Lower bound
Returns
-------
eps : float
Epsilon adjustement factor.
Notes
-----
The **lower bound** for epsilon is:
.. math:: lb = \\frac{1}{k - 1}
where :math:`k` is the number of groups (= data.shape[1]).
The **Greenhouse-Geisser epsilon** is given by:
.. math::
\\epsilon_{GG} = \\frac{k^2(\\overline{diag(S)} - \\overline{S})^2}
{(k-1)(\\sum_{i=1}^{k}\\sum_{j=1}^{k}s_{ij}^2 - 2k\\sum_{j=1}^{k}
\\overline{s_i}^2 + k^2\\overline{S}^2)}
where :math:`S` is the covariance matrix, :math:`\\overline{S}` the
grandmean of S and :math:`\\overline{diag(S)}` the mean of all the elements
on the diagonal of S (i.e. mean of the variances).
The **Huynh-Feldt epsilon** is given by:
.. math::
\\epsilon_{HF} = \\frac{n(k-1)\\epsilon_{GG}-2}{(k-1)
(n-1-(k-1)\\epsilon_{GG})}
where :math:`n` is the number of subjects.
References
----------
.. [1] http://www.real-statistics.com/anova-repeated-measures/sphericity/
Examples
--------
>>> import pandas as pd
>>> from pingouin import epsilon
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> epsilon(data, correction='gg')
0.5587754577585018
>>> epsilon(data, correction='hf')
0.6223448311539781
>>> epsilon(data, correction='lb')
0.5 | [
"Epsilon",
"adjustement",
"factor",
"for",
"repeated",
"measures",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/distribution.py#L322-L427 | train | 206,625 |
raphaelvallat/pingouin | pingouin/distribution.py | sphericity | def sphericity(data, method='mauchly', alpha=.05):
"""Mauchly and JNS test for sphericity.
Parameters
----------
data : pd.DataFrame
DataFrame containing the repeated measurements.
``data`` must be in wide-format. To convert from wide to long format,
use the :py:func:`pandas.pivot_table` function.
method : str
Method to compute sphericity ::
'jns' : John, Nagao and Sugiura test.
'mauchly' : Mauchly test.
alpha : float
Significance level
Returns
-------
spher : boolean
True if data have the sphericity property.
W : float
Test statistic
chi_sq : float
Chi-square statistic
ddof : int
Degrees of freedom
p : float
P-value.
See Also
--------
homoscedasticity : Test equality of variance.
normality : Test the univariate normality of one or more array(s).
Notes
-----
The **Mauchly** :math:`W` statistic is defined by:
.. math::
W = \\frac{\\prod_{j=1}^{r-1} \\lambda_j}{(\\frac{1}{r-1}
\\cdot \\sum_{j=1}^{^{r-1}} \\lambda_j)^{r-1}}
where :math:`\\lambda_j` are the eigenvalues of the population
covariance matrix (= double-centered sample covariance matrix) and
:math:`r` is the number of conditions.
From then, the :math:`W` statistic is transformed into a chi-square
score using the number of observations per condition :math:`n`
.. math:: f = \\frac{2(r-1)^2+r+1}{6(r-1)(n-1)}
.. math:: \\chi_w^2 = (f-1)(n-1) log(W)
The p-value is then approximated using a chi-square distribution:
.. math:: \\chi_w^2 \\sim \\chi^2(\\frac{r(r-1)}{2}-1)
The **JNS** :math:`V` statistic is defined by:
.. math::
V = \\frac{(\\sum_j^{r-1} \\lambda_j)^2}{\\sum_j^{r-1} \\lambda_j^2}
.. math:: \\chi_v^2 = \\frac{n}{2} (r-1)^2 (V - \\frac{1}{r-1})
and the p-value approximated using a chi-square distribution
.. math:: \\chi_v^2 \\sim \\chi^2(\\frac{r(r-1)}{2}-1)
References
----------
.. [1] Mauchly, J. W. (1940). Significance test for sphericity of a normal
n-variate distribution. The Annals of Mathematical Statistics,
11(2), 204-209.
.. [2] Nagao, H. (1973). On some test criteria for covariance matrix.
The Annals of Statistics, 700-709.
.. [3] Sugiura, N. (1972). Locally best invariant test for sphericity and
the limiting distributions. The Annals of Mathematical Statistics,
1312-1316.
.. [4] John, S. (1972). The distribution of a statistic used for testing
sphericity of normal distributions. Biometrika, 59(1), 169-173.
.. [5] http://www.real-statistics.com/anova-repeated-measures/sphericity/
Examples
--------
1. Mauchly test for sphericity
>>> import pandas as pd
>>> from pingouin import sphericity
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> sphericity(data)
(True, 0.21, 4.677, 2, 0.09649016283209666)
2. JNS test for sphericity
>>> sphericity(data, method='jns')
(False, 1.118, 6.176, 2, 0.04560424030751982)
"""
from scipy.stats import chi2
S = data.cov().values
n = data.shape[0]
p = data.shape[1]
d = p - 1
# Estimate of the population covariance (= double-centered)
S_pop = S - S.mean(0)[:, np.newaxis] - S.mean(1)[np.newaxis, :] + S.mean()
# p - 1 eigenvalues (sorted by ascending importance)
eig = np.linalg.eigvalsh(S_pop)[1:]
if method == 'jns':
# eps = epsilon(data, correction='gg')
# W = eps * d
W = eig.sum()**2 / np.square(eig).sum()
chi_sq = 0.5 * n * d ** 2 * (W - 1 / d)
if method == 'mauchly':
# Mauchly's statistic
W = np.product(eig) / (eig.sum() / d)**d
# Chi-square
f = (2 * d**2 + p + 1) / (6 * d * (n - 1))
chi_sq = (f - 1) * (n - 1) * np.log(W)
# Compute dof and pval
ddof = 0.5 * d * p - 1
# Ensure that dof is not zero
ddof = 1 if ddof == 0 else ddof
pval = chi2.sf(chi_sq, ddof)
# Second order approximation
# pval2 = chi2.sf(chi_sq, ddof + 4)
# w2 = (d + 2) * (d - 1) * (d - 2) * (2 * d**3 + 6 * d * d + 3 * d + 2) / \
# (288 * d * d * nr * nr * dd * dd)
# pval += w2 * (pval2 - pval)
sphericity = True if pval > alpha else False
return sphericity, np.round(W, 3), np.round(chi_sq, 3), int(ddof), pval | python | def sphericity(data, method='mauchly', alpha=.05):
"""Mauchly and JNS test for sphericity.
Parameters
----------
data : pd.DataFrame
DataFrame containing the repeated measurements.
``data`` must be in wide-format. To convert from wide to long format,
use the :py:func:`pandas.pivot_table` function.
method : str
Method to compute sphericity ::
'jns' : John, Nagao and Sugiura test.
'mauchly' : Mauchly test.
alpha : float
Significance level
Returns
-------
spher : boolean
True if data have the sphericity property.
W : float
Test statistic
chi_sq : float
Chi-square statistic
ddof : int
Degrees of freedom
p : float
P-value.
See Also
--------
homoscedasticity : Test equality of variance.
normality : Test the univariate normality of one or more array(s).
Notes
-----
The **Mauchly** :math:`W` statistic is defined by:
.. math::
W = \\frac{\\prod_{j=1}^{r-1} \\lambda_j}{(\\frac{1}{r-1}
\\cdot \\sum_{j=1}^{^{r-1}} \\lambda_j)^{r-1}}
where :math:`\\lambda_j` are the eigenvalues of the population
covariance matrix (= double-centered sample covariance matrix) and
:math:`r` is the number of conditions.
From then, the :math:`W` statistic is transformed into a chi-square
score using the number of observations per condition :math:`n`
.. math:: f = \\frac{2(r-1)^2+r+1}{6(r-1)(n-1)}
.. math:: \\chi_w^2 = (f-1)(n-1) log(W)
The p-value is then approximated using a chi-square distribution:
.. math:: \\chi_w^2 \\sim \\chi^2(\\frac{r(r-1)}{2}-1)
The **JNS** :math:`V` statistic is defined by:
.. math::
V = \\frac{(\\sum_j^{r-1} \\lambda_j)^2}{\\sum_j^{r-1} \\lambda_j^2}
.. math:: \\chi_v^2 = \\frac{n}{2} (r-1)^2 (V - \\frac{1}{r-1})
and the p-value approximated using a chi-square distribution
.. math:: \\chi_v^2 \\sim \\chi^2(\\frac{r(r-1)}{2}-1)
References
----------
.. [1] Mauchly, J. W. (1940). Significance test for sphericity of a normal
n-variate distribution. The Annals of Mathematical Statistics,
11(2), 204-209.
.. [2] Nagao, H. (1973). On some test criteria for covariance matrix.
The Annals of Statistics, 700-709.
.. [3] Sugiura, N. (1972). Locally best invariant test for sphericity and
the limiting distributions. The Annals of Mathematical Statistics,
1312-1316.
.. [4] John, S. (1972). The distribution of a statistic used for testing
sphericity of normal distributions. Biometrika, 59(1), 169-173.
.. [5] http://www.real-statistics.com/anova-repeated-measures/sphericity/
Examples
--------
1. Mauchly test for sphericity
>>> import pandas as pd
>>> from pingouin import sphericity
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> sphericity(data)
(True, 0.21, 4.677, 2, 0.09649016283209666)
2. JNS test for sphericity
>>> sphericity(data, method='jns')
(False, 1.118, 6.176, 2, 0.04560424030751982)
"""
from scipy.stats import chi2
S = data.cov().values
n = data.shape[0]
p = data.shape[1]
d = p - 1
# Estimate of the population covariance (= double-centered)
S_pop = S - S.mean(0)[:, np.newaxis] - S.mean(1)[np.newaxis, :] + S.mean()
# p - 1 eigenvalues (sorted by ascending importance)
eig = np.linalg.eigvalsh(S_pop)[1:]
if method == 'jns':
# eps = epsilon(data, correction='gg')
# W = eps * d
W = eig.sum()**2 / np.square(eig).sum()
chi_sq = 0.5 * n * d ** 2 * (W - 1 / d)
if method == 'mauchly':
# Mauchly's statistic
W = np.product(eig) / (eig.sum() / d)**d
# Chi-square
f = (2 * d**2 + p + 1) / (6 * d * (n - 1))
chi_sq = (f - 1) * (n - 1) * np.log(W)
# Compute dof and pval
ddof = 0.5 * d * p - 1
# Ensure that dof is not zero
ddof = 1 if ddof == 0 else ddof
pval = chi2.sf(chi_sq, ddof)
# Second order approximation
# pval2 = chi2.sf(chi_sq, ddof + 4)
# w2 = (d + 2) * (d - 1) * (d - 2) * (2 * d**3 + 6 * d * d + 3 * d + 2) / \
# (288 * d * d * nr * nr * dd * dd)
# pval += w2 * (pval2 - pval)
sphericity = True if pval > alpha else False
return sphericity, np.round(W, 3), np.round(chi_sq, 3), int(ddof), pval | [
"def",
"sphericity",
"(",
"data",
",",
"method",
"=",
"'mauchly'",
",",
"alpha",
"=",
".05",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"chi2",
"S",
"=",
"data",
".",
"cov",
"(",
")",
".",
"values",
"n",
"=",
"data",
".",
"shape",
"[",
"0... | Mauchly and JNS test for sphericity.
Parameters
----------
data : pd.DataFrame
DataFrame containing the repeated measurements.
``data`` must be in wide-format. To convert from wide to long format,
use the :py:func:`pandas.pivot_table` function.
method : str
Method to compute sphericity ::
'jns' : John, Nagao and Sugiura test.
'mauchly' : Mauchly test.
alpha : float
Significance level
Returns
-------
spher : boolean
True if data have the sphericity property.
W : float
Test statistic
chi_sq : float
Chi-square statistic
ddof : int
Degrees of freedom
p : float
P-value.
See Also
--------
homoscedasticity : Test equality of variance.
normality : Test the univariate normality of one or more array(s).
Notes
-----
The **Mauchly** :math:`W` statistic is defined by:
.. math::
W = \\frac{\\prod_{j=1}^{r-1} \\lambda_j}{(\\frac{1}{r-1}
\\cdot \\sum_{j=1}^{^{r-1}} \\lambda_j)^{r-1}}
where :math:`\\lambda_j` are the eigenvalues of the population
covariance matrix (= double-centered sample covariance matrix) and
:math:`r` is the number of conditions.
From then, the :math:`W` statistic is transformed into a chi-square
score using the number of observations per condition :math:`n`
.. math:: f = \\frac{2(r-1)^2+r+1}{6(r-1)(n-1)}
.. math:: \\chi_w^2 = (f-1)(n-1) log(W)
The p-value is then approximated using a chi-square distribution:
.. math:: \\chi_w^2 \\sim \\chi^2(\\frac{r(r-1)}{2}-1)
The **JNS** :math:`V` statistic is defined by:
.. math::
V = \\frac{(\\sum_j^{r-1} \\lambda_j)^2}{\\sum_j^{r-1} \\lambda_j^2}
.. math:: \\chi_v^2 = \\frac{n}{2} (r-1)^2 (V - \\frac{1}{r-1})
and the p-value approximated using a chi-square distribution
.. math:: \\chi_v^2 \\sim \\chi^2(\\frac{r(r-1)}{2}-1)
References
----------
.. [1] Mauchly, J. W. (1940). Significance test for sphericity of a normal
n-variate distribution. The Annals of Mathematical Statistics,
11(2), 204-209.
.. [2] Nagao, H. (1973). On some test criteria for covariance matrix.
The Annals of Statistics, 700-709.
.. [3] Sugiura, N. (1972). Locally best invariant test for sphericity and
the limiting distributions. The Annals of Mathematical Statistics,
1312-1316.
.. [4] John, S. (1972). The distribution of a statistic used for testing
sphericity of normal distributions. Biometrika, 59(1), 169-173.
.. [5] http://www.real-statistics.com/anova-repeated-measures/sphericity/
Examples
--------
1. Mauchly test for sphericity
>>> import pandas as pd
>>> from pingouin import sphericity
>>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
... 'B': [1.1, 2.5, 4.1, 5.2, 6.4],
... 'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
>>> sphericity(data)
(True, 0.21, 4.677, 2, 0.09649016283209666)
2. JNS test for sphericity
>>> sphericity(data, method='jns')
(False, 1.118, 6.176, 2, 0.04560424030751982) | [
"Mauchly",
"and",
"JNS",
"test",
"for",
"sphericity",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/distribution.py#L430-L575 | train | 206,626 |
raphaelvallat/pingouin | pingouin/effsize.py | compute_esci | def compute_esci(stat=None, nx=None, ny=None, paired=False, eftype='cohen',
confidence=.95, decimals=2):
"""Parametric confidence intervals around a Cohen d or a
correlation coefficient.
Parameters
----------
stat : float
Original effect size. Must be either a correlation coefficient or a
Cohen-type effect size (Cohen d or Hedges g).
nx, ny : int
Length of vector x and y.
paired : bool
Indicates if the effect size was estimated from a paired sample.
This is only relevant for cohen or hedges effect size.
eftype : string
Effect size type. Must be 'r' (correlation) or 'cohen'
(Cohen d or Hedges g).
confidence : float
Confidence level (0.95 = 95%)
decimals : int
Number of rounded decimals.
Returns
-------
ci : array
Desired converted effect size
Notes
-----
To compute the parametric confidence interval around a
**Pearson r correlation** coefficient, one must first apply a
Fisher's r-to-z transformation:
.. math:: z = 0.5 \\cdot \\ln \\frac{1 + r}{1 - r} = \\text{arctanh}(r)
and compute the standard deviation:
.. math:: se = \\frac{1}{\\sqrt{n - 3}}
where :math:`n` is the sample size.
The lower and upper confidence intervals - *in z-space* - are then
given by:
.. math:: ci_z = z \\pm crit \\cdot se
where :math:`crit` is the critical value of the nomal distribution
corresponding to the desired confidence level (e.g. 1.96 in case of a 95%
confidence interval).
These confidence intervals can then be easily converted back to *r-space*:
.. math::
ci_r = \\frac{\\exp(2 \\cdot ci_z) - 1}{\\exp(2 \\cdot ci_z) + 1} =
\\text{tanh}(ci_z)
A formula for calculating the confidence interval for a
**Cohen d effect size** is given by Hedges and Olkin (1985, p86).
If the effect size estimate from the sample is :math:`d`, then it is
normally distributed, with standard deviation:
.. math::
se = \\sqrt{\\frac{n_x + n_y}{n_x \\cdot n_y} +
\\frac{d^2}{2 (n_x + n_y)}}
where :math:`n_x` and :math:`n_y` are the sample sizes of the two groups.
In one-sample test or paired test, this becomes:
.. math::
se = \\sqrt{\\frac{1}{n_x} + \\frac{d^2}{2 \\cdot n_x}}
The lower and upper confidence intervals are then given by:
.. math:: ci_d = d \\pm crit \\cdot se
where :math:`crit` is the critical value of the nomal distribution
corresponding to the desired confidence level (e.g. 1.96 in case of a 95%
confidence interval).
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher_transformation
.. [2] Hedges, L., and Ingram Olkin. "Statistical models for
meta-analysis." (1985).
.. [3] http://www.leeds.ac.uk/educol/documents/00002182.htm
Examples
--------
1. Confidence interval of a Pearson correlation coefficient
>>> import pingouin as pg
>>> x = [3, 4, 6, 7, 5, 6, 7, 3, 5, 4, 2]
>>> y = [4, 6, 6, 7, 6, 5, 5, 2, 3, 4, 1]
>>> nx, ny = len(x), len(y)
>>> stat = np.corrcoef(x, y)[0][1]
>>> ci = pg.compute_esci(stat=stat, nx=nx, ny=ny, eftype='r')
>>> print(stat, ci)
0.7468280049029223 [0.27 0.93]
2. Confidence interval of a Cohen d
>>> import pingouin as pg
>>> x = [3, 4, 6, 7, 5, 6, 7, 3, 5, 4, 2]
>>> y = [4, 6, 6, 7, 6, 5, 5, 2, 3, 4, 1]
>>> nx, ny = len(x), len(y)
>>> stat = pg.compute_effsize(x, y, eftype='cohen')
>>> ci = pg.compute_esci(stat=stat, nx=nx, ny=ny, eftype='cohen')
>>> print(stat, ci)
0.1537753990658328 [-0.68 0.99]
"""
from scipy.stats import norm
# Safety check
assert eftype.lower() in['r', 'pearson', 'spearman', 'cohen',
'd', 'g', 'hedges']
assert stat is not None and nx is not None
assert isinstance(confidence, float)
assert 0 < confidence < 1
# Note that we are using a normal dist and not a T dist:
# from scipy.stats import t
# crit = np.abs(t.ppf((1 - confidence) / 2), dof)
crit = np.abs(norm.ppf((1 - confidence) / 2))
if eftype.lower() in ['r', 'pearson', 'spearman']:
# Standardize correlation coefficient
z = np.arctanh(stat)
se = 1 / np.sqrt(nx - 3)
ci_z = np.array([z - crit * se, z + crit * se])
# Transform back to r
ci = np.tanh(ci_z)
else:
if ny == 1 or paired:
# One sample or paired
se = np.sqrt(1 / nx + stat**2 / (2 * nx))
else:
# Two-sample test
se = np.sqrt(((nx + ny) / (nx * ny)) + (stat**2) / (2 * (nx + ny)))
ci = np.array([stat - crit * se, stat + crit * se])
return np.round(ci, decimals) | python | def compute_esci(stat=None, nx=None, ny=None, paired=False, eftype='cohen',
confidence=.95, decimals=2):
"""Parametric confidence intervals around a Cohen d or a
correlation coefficient.
Parameters
----------
stat : float
Original effect size. Must be either a correlation coefficient or a
Cohen-type effect size (Cohen d or Hedges g).
nx, ny : int
Length of vector x and y.
paired : bool
Indicates if the effect size was estimated from a paired sample.
This is only relevant for cohen or hedges effect size.
eftype : string
Effect size type. Must be 'r' (correlation) or 'cohen'
(Cohen d or Hedges g).
confidence : float
Confidence level (0.95 = 95%)
decimals : int
Number of rounded decimals.
Returns
-------
ci : array
Desired converted effect size
Notes
-----
To compute the parametric confidence interval around a
**Pearson r correlation** coefficient, one must first apply a
Fisher's r-to-z transformation:
.. math:: z = 0.5 \\cdot \\ln \\frac{1 + r}{1 - r} = \\text{arctanh}(r)
and compute the standard deviation:
.. math:: se = \\frac{1}{\\sqrt{n - 3}}
where :math:`n` is the sample size.
The lower and upper confidence intervals - *in z-space* - are then
given by:
.. math:: ci_z = z \\pm crit \\cdot se
where :math:`crit` is the critical value of the nomal distribution
corresponding to the desired confidence level (e.g. 1.96 in case of a 95%
confidence interval).
These confidence intervals can then be easily converted back to *r-space*:
.. math::
ci_r = \\frac{\\exp(2 \\cdot ci_z) - 1}{\\exp(2 \\cdot ci_z) + 1} =
\\text{tanh}(ci_z)
A formula for calculating the confidence interval for a
**Cohen d effect size** is given by Hedges and Olkin (1985, p86).
If the effect size estimate from the sample is :math:`d`, then it is
normally distributed, with standard deviation:
.. math::
se = \\sqrt{\\frac{n_x + n_y}{n_x \\cdot n_y} +
\\frac{d^2}{2 (n_x + n_y)}}
where :math:`n_x` and :math:`n_y` are the sample sizes of the two groups.
In one-sample test or paired test, this becomes:
.. math::
se = \\sqrt{\\frac{1}{n_x} + \\frac{d^2}{2 \\cdot n_x}}
The lower and upper confidence intervals are then given by:
.. math:: ci_d = d \\pm crit \\cdot se
where :math:`crit` is the critical value of the nomal distribution
corresponding to the desired confidence level (e.g. 1.96 in case of a 95%
confidence interval).
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher_transformation
.. [2] Hedges, L., and Ingram Olkin. "Statistical models for
meta-analysis." (1985).
.. [3] http://www.leeds.ac.uk/educol/documents/00002182.htm
Examples
--------
1. Confidence interval of a Pearson correlation coefficient
>>> import pingouin as pg
>>> x = [3, 4, 6, 7, 5, 6, 7, 3, 5, 4, 2]
>>> y = [4, 6, 6, 7, 6, 5, 5, 2, 3, 4, 1]
>>> nx, ny = len(x), len(y)
>>> stat = np.corrcoef(x, y)[0][1]
>>> ci = pg.compute_esci(stat=stat, nx=nx, ny=ny, eftype='r')
>>> print(stat, ci)
0.7468280049029223 [0.27 0.93]
2. Confidence interval of a Cohen d
>>> import pingouin as pg
>>> x = [3, 4, 6, 7, 5, 6, 7, 3, 5, 4, 2]
>>> y = [4, 6, 6, 7, 6, 5, 5, 2, 3, 4, 1]
>>> nx, ny = len(x), len(y)
>>> stat = pg.compute_effsize(x, y, eftype='cohen')
>>> ci = pg.compute_esci(stat=stat, nx=nx, ny=ny, eftype='cohen')
>>> print(stat, ci)
0.1537753990658328 [-0.68 0.99]
"""
from scipy.stats import norm
# Safety check
assert eftype.lower() in['r', 'pearson', 'spearman', 'cohen',
'd', 'g', 'hedges']
assert stat is not None and nx is not None
assert isinstance(confidence, float)
assert 0 < confidence < 1
# Note that we are using a normal dist and not a T dist:
# from scipy.stats import t
# crit = np.abs(t.ppf((1 - confidence) / 2), dof)
crit = np.abs(norm.ppf((1 - confidence) / 2))
if eftype.lower() in ['r', 'pearson', 'spearman']:
# Standardize correlation coefficient
z = np.arctanh(stat)
se = 1 / np.sqrt(nx - 3)
ci_z = np.array([z - crit * se, z + crit * se])
# Transform back to r
ci = np.tanh(ci_z)
else:
if ny == 1 or paired:
# One sample or paired
se = np.sqrt(1 / nx + stat**2 / (2 * nx))
else:
# Two-sample test
se = np.sqrt(((nx + ny) / (nx * ny)) + (stat**2) / (2 * (nx + ny)))
ci = np.array([stat - crit * se, stat + crit * se])
return np.round(ci, decimals) | [
"def",
"compute_esci",
"(",
"stat",
"=",
"None",
",",
"nx",
"=",
"None",
",",
"ny",
"=",
"None",
",",
"paired",
"=",
"False",
",",
"eftype",
"=",
"'cohen'",
",",
"confidence",
"=",
".95",
",",
"decimals",
"=",
"2",
")",
":",
"from",
"scipy",
".",
... | Parametric confidence intervals around a Cohen d or a
correlation coefficient.
Parameters
----------
stat : float
Original effect size. Must be either a correlation coefficient or a
Cohen-type effect size (Cohen d or Hedges g).
nx, ny : int
Length of vector x and y.
paired : bool
Indicates if the effect size was estimated from a paired sample.
This is only relevant for cohen or hedges effect size.
eftype : string
Effect size type. Must be 'r' (correlation) or 'cohen'
(Cohen d or Hedges g).
confidence : float
Confidence level (0.95 = 95%)
decimals : int
Number of rounded decimals.
Returns
-------
ci : array
Desired converted effect size
Notes
-----
To compute the parametric confidence interval around a
**Pearson r correlation** coefficient, one must first apply a
Fisher's r-to-z transformation:
.. math:: z = 0.5 \\cdot \\ln \\frac{1 + r}{1 - r} = \\text{arctanh}(r)
and compute the standard deviation:
.. math:: se = \\frac{1}{\\sqrt{n - 3}}
where :math:`n` is the sample size.
The lower and upper confidence intervals - *in z-space* - are then
given by:
.. math:: ci_z = z \\pm crit \\cdot se
where :math:`crit` is the critical value of the nomal distribution
corresponding to the desired confidence level (e.g. 1.96 in case of a 95%
confidence interval).
These confidence intervals can then be easily converted back to *r-space*:
.. math::
ci_r = \\frac{\\exp(2 \\cdot ci_z) - 1}{\\exp(2 \\cdot ci_z) + 1} =
\\text{tanh}(ci_z)
A formula for calculating the confidence interval for a
**Cohen d effect size** is given by Hedges and Olkin (1985, p86).
If the effect size estimate from the sample is :math:`d`, then it is
normally distributed, with standard deviation:
.. math::
se = \\sqrt{\\frac{n_x + n_y}{n_x \\cdot n_y} +
\\frac{d^2}{2 (n_x + n_y)}}
where :math:`n_x` and :math:`n_y` are the sample sizes of the two groups.
In one-sample test or paired test, this becomes:
.. math::
se = \\sqrt{\\frac{1}{n_x} + \\frac{d^2}{2 \\cdot n_x}}
The lower and upper confidence intervals are then given by:
.. math:: ci_d = d \\pm crit \\cdot se
where :math:`crit` is the critical value of the nomal distribution
corresponding to the desired confidence level (e.g. 1.96 in case of a 95%
confidence interval).
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher_transformation
.. [2] Hedges, L., and Ingram Olkin. "Statistical models for
meta-analysis." (1985).
.. [3] http://www.leeds.ac.uk/educol/documents/00002182.htm
Examples
--------
1. Confidence interval of a Pearson correlation coefficient
>>> import pingouin as pg
>>> x = [3, 4, 6, 7, 5, 6, 7, 3, 5, 4, 2]
>>> y = [4, 6, 6, 7, 6, 5, 5, 2, 3, 4, 1]
>>> nx, ny = len(x), len(y)
>>> stat = np.corrcoef(x, y)[0][1]
>>> ci = pg.compute_esci(stat=stat, nx=nx, ny=ny, eftype='r')
>>> print(stat, ci)
0.7468280049029223 [0.27 0.93]
2. Confidence interval of a Cohen d
>>> import pingouin as pg
>>> x = [3, 4, 6, 7, 5, 6, 7, 3, 5, 4, 2]
>>> y = [4, 6, 6, 7, 6, 5, 5, 2, 3, 4, 1]
>>> nx, ny = len(x), len(y)
>>> stat = pg.compute_effsize(x, y, eftype='cohen')
>>> ci = pg.compute_esci(stat=stat, nx=nx, ny=ny, eftype='cohen')
>>> print(stat, ci)
0.1537753990658328 [-0.68 0.99] | [
"Parametric",
"confidence",
"intervals",
"around",
"a",
"Cohen",
"d",
"or",
"a",
"correlation",
"coefficient",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/effsize.py#L13-L158 | train | 206,627 |
raphaelvallat/pingouin | pingouin/effsize.py | convert_effsize | def convert_effsize(ef, input_type, output_type, nx=None, ny=None):
"""Conversion between effect sizes.
Parameters
----------
ef : float
Original effect size
input_type : string
Effect size type of ef. Must be 'r' or 'd'.
output_type : string
Desired effect size type.
Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
nx, ny : int, optional
Length of vector x and y.
nx and ny are required to convert to Hedges g
Returns
-------
ef : float
Desired converted effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
The formula to convert **r** to **d** is given in ref [1]:
.. math:: d = \\frac{2r}{\\sqrt{1 - r^2}}
The formula to convert **d** to **r** is given in ref [2]:
.. math::
r = \\frac{d}{\\sqrt{d^2 + \\frac{(n_x + n_y)^2 - 2(n_x + n_y)}
{n_xn_y}}}
The formula to convert **d** to :math:`\\eta^2` is given in ref [3]:
.. math:: \\eta^2 = \\frac{(0.5 * d)^2}{1 + (0.5 * d)^2}
The formula to convert **d** to an odds-ratio is given in ref [4]:
.. math:: OR = e(\\frac{d * \\pi}{\\sqrt{3}})
The formula to convert **d** to area under the curve is given in ref [5]:
.. math:: AUC = \\mathcal{N}_{cdf}(\\frac{d}{\\sqrt{2}})
References
----------
.. [1] Rosenthal, Robert. "Parametric measures of effect size."
The handbook of research synthesis 621 (1994): 231-244.
.. [2] McGrath, Robert E., and Gregory J. Meyer. "When effect sizes
disagree: the case of r and d." Psychological methods 11.4 (2006): 386.
.. [3] Cohen, Jacob. "Statistical power analysis for the behavioral
sciences. 2nd." (1988).
.. [4] Borenstein, Michael, et al. "Effect sizes for continuous data."
The handbook of research synthesis and meta-analysis 2 (2009): 221-235.
.. [5] Ruscio, John. "A probability-based measure of effect size:
Robustness to base rates and other factors." Psychological methods 1
3.1 (2008): 19.
Examples
--------
1. Convert from Cohen d to eta-square
>>> from pingouin import convert_effsize
>>> d = .45
>>> eta = convert_effsize(d, 'cohen', 'eta-square')
>>> print(eta)
0.048185603807257595
2. Convert from Cohen d to Hegdes g (requires the sample sizes of each
group)
>>> d = .45
>>> g = convert_effsize(d, 'cohen', 'hedges', nx=10, ny=10)
>>> print(g)
0.4309859154929578
3. Convert Pearson r to Cohen d
>>> r = 0.40
>>> d = convert_effsize(r, 'r', 'cohen')
>>> print(d)
0.8728715609439696
4. Reverse operation: convert Cohen d to Pearson r
>>> d = 0.873
>>> r = convert_effsize(d, 'cohen', 'r')
>>> print(r)
0.40004943911648533
"""
it = input_type.lower()
ot = output_type.lower()
# Check input and output type
for input in [it, ot]:
if not _check_eftype(input):
err = "Could not interpret input '{}'".format(input)
raise ValueError(err)
if it not in ['r', 'cohen']:
raise ValueError("Input type must be 'r' or 'cohen'")
if it == ot:
return ef
d = (2 * ef) / np.sqrt(1 - ef**2) if it == 'r' else ef # Rosenthal 1994
# Then convert to the desired output type
if ot == 'cohen':
return d
elif ot == 'hedges':
if all(v is not None for v in [nx, ny]):
return d * (1 - (3 / (4 * (nx + ny) - 9)))
else:
# If shapes of x and y are not known, return cohen's d
warnings.warn("You need to pass nx and ny arguments to compute "
"Hedges g. Returning Cohen's d instead")
return d
elif ot == 'glass':
warnings.warn("Returning original effect size instead of Glass "
"because variance is not known.")
return ef
elif ot == 'r':
# McGrath and Meyer 2006
if all(v is not None for v in [nx, ny]):
a = ((nx + ny)**2 - 2 * (nx + ny)) / (nx * ny)
else:
a = 4
return d / np.sqrt(d**2 + a)
elif ot == 'eta-square':
# Cohen 1988
return (d / 2)**2 / (1 + (d / 2)**2)
elif ot == 'odds-ratio':
# Borenstein et al. 2009
return np.exp(d * np.pi / np.sqrt(3))
elif ot in ['auc', 'cles']:
# Ruscio 2008
from scipy.stats import norm
return norm.cdf(d / np.sqrt(2))
else:
return None | python | def convert_effsize(ef, input_type, output_type, nx=None, ny=None):
"""Conversion between effect sizes.
Parameters
----------
ef : float
Original effect size
input_type : string
Effect size type of ef. Must be 'r' or 'd'.
output_type : string
Desired effect size type.
Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
nx, ny : int, optional
Length of vector x and y.
nx and ny are required to convert to Hedges g
Returns
-------
ef : float
Desired converted effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
The formula to convert **r** to **d** is given in ref [1]:
.. math:: d = \\frac{2r}{\\sqrt{1 - r^2}}
The formula to convert **d** to **r** is given in ref [2]:
.. math::
r = \\frac{d}{\\sqrt{d^2 + \\frac{(n_x + n_y)^2 - 2(n_x + n_y)}
{n_xn_y}}}
The formula to convert **d** to :math:`\\eta^2` is given in ref [3]:
.. math:: \\eta^2 = \\frac{(0.5 * d)^2}{1 + (0.5 * d)^2}
The formula to convert **d** to an odds-ratio is given in ref [4]:
.. math:: OR = e(\\frac{d * \\pi}{\\sqrt{3}})
The formula to convert **d** to area under the curve is given in ref [5]:
.. math:: AUC = \\mathcal{N}_{cdf}(\\frac{d}{\\sqrt{2}})
References
----------
.. [1] Rosenthal, Robert. "Parametric measures of effect size."
The handbook of research synthesis 621 (1994): 231-244.
.. [2] McGrath, Robert E., and Gregory J. Meyer. "When effect sizes
disagree: the case of r and d." Psychological methods 11.4 (2006): 386.
.. [3] Cohen, Jacob. "Statistical power analysis for the behavioral
sciences. 2nd." (1988).
.. [4] Borenstein, Michael, et al. "Effect sizes for continuous data."
The handbook of research synthesis and meta-analysis 2 (2009): 221-235.
.. [5] Ruscio, John. "A probability-based measure of effect size:
Robustness to base rates and other factors." Psychological methods 1
3.1 (2008): 19.
Examples
--------
1. Convert from Cohen d to eta-square
>>> from pingouin import convert_effsize
>>> d = .45
>>> eta = convert_effsize(d, 'cohen', 'eta-square')
>>> print(eta)
0.048185603807257595
2. Convert from Cohen d to Hegdes g (requires the sample sizes of each
group)
>>> d = .45
>>> g = convert_effsize(d, 'cohen', 'hedges', nx=10, ny=10)
>>> print(g)
0.4309859154929578
3. Convert Pearson r to Cohen d
>>> r = 0.40
>>> d = convert_effsize(r, 'r', 'cohen')
>>> print(d)
0.8728715609439696
4. Reverse operation: convert Cohen d to Pearson r
>>> d = 0.873
>>> r = convert_effsize(d, 'cohen', 'r')
>>> print(r)
0.40004943911648533
"""
it = input_type.lower()
ot = output_type.lower()
# Check input and output type
for input in [it, ot]:
if not _check_eftype(input):
err = "Could not interpret input '{}'".format(input)
raise ValueError(err)
if it not in ['r', 'cohen']:
raise ValueError("Input type must be 'r' or 'cohen'")
if it == ot:
return ef
d = (2 * ef) / np.sqrt(1 - ef**2) if it == 'r' else ef # Rosenthal 1994
# Then convert to the desired output type
if ot == 'cohen':
return d
elif ot == 'hedges':
if all(v is not None for v in [nx, ny]):
return d * (1 - (3 / (4 * (nx + ny) - 9)))
else:
# If shapes of x and y are not known, return cohen's d
warnings.warn("You need to pass nx and ny arguments to compute "
"Hedges g. Returning Cohen's d instead")
return d
elif ot == 'glass':
warnings.warn("Returning original effect size instead of Glass "
"because variance is not known.")
return ef
elif ot == 'r':
# McGrath and Meyer 2006
if all(v is not None for v in [nx, ny]):
a = ((nx + ny)**2 - 2 * (nx + ny)) / (nx * ny)
else:
a = 4
return d / np.sqrt(d**2 + a)
elif ot == 'eta-square':
# Cohen 1988
return (d / 2)**2 / (1 + (d / 2)**2)
elif ot == 'odds-ratio':
# Borenstein et al. 2009
return np.exp(d * np.pi / np.sqrt(3))
elif ot in ['auc', 'cles']:
# Ruscio 2008
from scipy.stats import norm
return norm.cdf(d / np.sqrt(2))
else:
return None | [
"def",
"convert_effsize",
"(",
"ef",
",",
"input_type",
",",
"output_type",
",",
"nx",
"=",
"None",
",",
"ny",
"=",
"None",
")",
":",
"it",
"=",
"input_type",
".",
"lower",
"(",
")",
"ot",
"=",
"output_type",
".",
"lower",
"(",
")",
"# Check input and ... | Conversion between effect sizes.
Parameters
----------
ef : float
Original effect size
input_type : string
Effect size type of ef. Must be 'r' or 'd'.
output_type : string
Desired effect size type.
Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
nx, ny : int, optional
Length of vector x and y.
nx and ny are required to convert to Hedges g
Returns
-------
ef : float
Desired converted effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
The formula to convert **r** to **d** is given in ref [1]:
.. math:: d = \\frac{2r}{\\sqrt{1 - r^2}}
The formula to convert **d** to **r** is given in ref [2]:
.. math::
r = \\frac{d}{\\sqrt{d^2 + \\frac{(n_x + n_y)^2 - 2(n_x + n_y)}
{n_xn_y}}}
The formula to convert **d** to :math:`\\eta^2` is given in ref [3]:
.. math:: \\eta^2 = \\frac{(0.5 * d)^2}{1 + (0.5 * d)^2}
The formula to convert **d** to an odds-ratio is given in ref [4]:
.. math:: OR = e(\\frac{d * \\pi}{\\sqrt{3}})
The formula to convert **d** to area under the curve is given in ref [5]:
.. math:: AUC = \\mathcal{N}_{cdf}(\\frac{d}{\\sqrt{2}})
References
----------
.. [1] Rosenthal, Robert. "Parametric measures of effect size."
The handbook of research synthesis 621 (1994): 231-244.
.. [2] McGrath, Robert E., and Gregory J. Meyer. "When effect sizes
disagree: the case of r and d." Psychological methods 11.4 (2006): 386.
.. [3] Cohen, Jacob. "Statistical power analysis for the behavioral
sciences. 2nd." (1988).
.. [4] Borenstein, Michael, et al. "Effect sizes for continuous data."
The handbook of research synthesis and meta-analysis 2 (2009): 221-235.
.. [5] Ruscio, John. "A probability-based measure of effect size:
Robustness to base rates and other factors." Psychological methods 1
3.1 (2008): 19.
Examples
--------
1. Convert from Cohen d to eta-square
>>> from pingouin import convert_effsize
>>> d = .45
>>> eta = convert_effsize(d, 'cohen', 'eta-square')
>>> print(eta)
0.048185603807257595
2. Convert from Cohen d to Hegdes g (requires the sample sizes of each
group)
>>> d = .45
>>> g = convert_effsize(d, 'cohen', 'hedges', nx=10, ny=10)
>>> print(g)
0.4309859154929578
3. Convert Pearson r to Cohen d
>>> r = 0.40
>>> d = convert_effsize(r, 'r', 'cohen')
>>> print(d)
0.8728715609439696
4. Reverse operation: convert Cohen d to Pearson r
>>> d = 0.873
>>> r = convert_effsize(d, 'cohen', 'r')
>>> print(r)
0.40004943911648533 | [
"Conversion",
"between",
"effect",
"sizes",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/effsize.py#L381-L539 | train | 206,628 |
raphaelvallat/pingouin | pingouin/effsize.py | compute_effsize | def compute_effsize(x, y, paired=False, eftype='cohen'):
"""Calculate effect size between two set of observations.
Parameters
----------
x : np.array or list
First set of observations.
y : np.array or list
Second set of observations.
paired : boolean
If True, uses Cohen d-avg formula to correct for repeated measurements
(Cumming 2012)
eftype : string
Desired output effect size.
Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'r' : correlation coefficient
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
'CLES' : Common language effect size
Returns
-------
ef : float
Effect size
See Also
--------
convert_effsize : Conversion between effect sizes.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
Missing values are automatically removed from the data. If ``x`` and ``y``
are paired, the entire row is removed.
If ``x`` and ``y`` are independent, the Cohen's d is:
.. math::
d = \\frac{\\overline{X} - \\overline{Y}}
{\\sqrt{\\frac{(n_{1} - 1)\\sigma_{1}^{2} + (n_{2} - 1)
\\sigma_{2}^{2}}{n1 + n2 - 2}}}
If ``x`` and ``y`` are paired, the Cohen :math:`d_{avg}` is computed:
.. math::
d_{avg} = \\frac{\\overline{X} - \\overline{Y}}
{0.5 * (\\sigma_1 + \\sigma_2)}
The Cohen’s d is a biased estimate of the population effect size,
especially for small samples (n < 20). It is often preferable
to use the corrected effect size, or Hedges’g, instead:
.. math:: g = d * (1 - \\frac{3}{4(n_1 + n_2) - 9})
If eftype = 'glass', the Glass :math:`\\delta` is reported, using the
group with the lowest variance as the control group:
.. math::
\\delta = \\frac{\\overline{X} - \\overline{Y}}{\\sigma_{control}}
References
----------
.. [1] Lakens, D., 2013. Calculating and reporting effect sizes to
facilitate cumulative science: a practical primer for t-tests and
ANOVAs. Front. Psychol. 4, 863. https://doi.org/10.3389/fpsyg.2013.00863
.. [2] Cumming, Geoff. Understanding the new statistics: Effect sizes,
confidence intervals, and meta-analysis. Routledge, 2013.
Examples
--------
1. Compute Cohen d from two independent set of observations.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> np.random.seed(123)
>>> x = np.random.normal(2, size=100)
>>> y = np.random.normal(2.3, size=95)
>>> d = compute_effsize(x=x, y=y, eftype='cohen', paired=False)
>>> print(d)
-0.2835170152506578
2. Compute Hedges g from two paired set of observations.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> x = [1.62, 2.21, 3.79, 1.66, 1.86, 1.87, 4.51, 4.49, 3.3 , 2.69]
>>> y = [0.91, 3., 2.28, 0.49, 1.42, 3.65, -0.43, 1.57, 3.27, 1.13]
>>> g = compute_effsize(x=x, y=y, eftype='hedges', paired=True)
>>> print(g)
0.8370985097811404
3. Compute Glass delta from two independent set of observations. The group
with the lowest variance will automatically be selected as the control.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> np.random.seed(123)
>>> x = np.random.normal(2, scale=1, size=50)
>>> y = np.random.normal(2, scale=2, size=45)
>>> d = compute_effsize(x=x, y=y, eftype='glass')
>>> print(d)
-0.1170721973604153
"""
# Check arguments
if not _check_eftype(eftype):
err = "Could not interpret input '{}'".format(eftype)
raise ValueError(err)
x = np.asarray(x)
y = np.asarray(y)
if x.size != y.size and paired:
warnings.warn("x and y have unequal sizes. Switching to "
"paired == False.")
paired = False
# Remove rows with missing values
x, y = remove_na(x, y, paired=paired)
nx, ny = x.size, y.size
if ny == 1:
# Case 1: One-sample Test
d = (x.mean() - y) / x.std(ddof=1)
return d
if eftype.lower() == 'glass':
# Find group with lowest variance
sd_control = np.min([x.std(ddof=1), y.std(ddof=1)])
d = (x.mean() - y.mean()) / sd_control
return d
elif eftype.lower() == 'r':
# Return correlation coefficient (useful for CI bootstrapping)
from scipy.stats import pearsonr
r, _ = pearsonr(x, y)
return r
elif eftype.lower() == 'cles':
# Compute exact CLES
diff = x[:, None] - y
return max((diff < 0).sum(), (diff > 0).sum()) / diff.size
else:
# Test equality of variance of data with a stringent threshold
# equal_var, p = homoscedasticity(x, y, alpha=.001)
# if not equal_var:
# print('Unequal variances (p<.001). You should report',
# 'Glass delta instead.')
# Compute unbiased Cohen's d effect size
if not paired:
# https://en.wikipedia.org/wiki/Effect_size
dof = nx + ny - 2
poolsd = np.sqrt(((nx - 1) * x.var(ddof=1)
+ (ny - 1) * y.var(ddof=1)) / dof)
d = (x.mean() - y.mean()) / poolsd
else:
# Report Cohen d-avg (Cumming 2012; Lakens 2013)
d = (x.mean() - y.mean()) / (.5 * (x.std(ddof=1)
+ y.std(ddof=1)))
return convert_effsize(d, 'cohen', eftype, nx=nx, ny=ny) | python | def compute_effsize(x, y, paired=False, eftype='cohen'):
"""Calculate effect size between two set of observations.
Parameters
----------
x : np.array or list
First set of observations.
y : np.array or list
Second set of observations.
paired : boolean
If True, uses Cohen d-avg formula to correct for repeated measurements
(Cumming 2012)
eftype : string
Desired output effect size.
Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'r' : correlation coefficient
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
'CLES' : Common language effect size
Returns
-------
ef : float
Effect size
See Also
--------
convert_effsize : Conversion between effect sizes.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
Missing values are automatically removed from the data. If ``x`` and ``y``
are paired, the entire row is removed.
If ``x`` and ``y`` are independent, the Cohen's d is:
.. math::
d = \\frac{\\overline{X} - \\overline{Y}}
{\\sqrt{\\frac{(n_{1} - 1)\\sigma_{1}^{2} + (n_{2} - 1)
\\sigma_{2}^{2}}{n1 + n2 - 2}}}
If ``x`` and ``y`` are paired, the Cohen :math:`d_{avg}` is computed:
.. math::
d_{avg} = \\frac{\\overline{X} - \\overline{Y}}
{0.5 * (\\sigma_1 + \\sigma_2)}
The Cohen’s d is a biased estimate of the population effect size,
especially for small samples (n < 20). It is often preferable
to use the corrected effect size, or Hedges’g, instead:
.. math:: g = d * (1 - \\frac{3}{4(n_1 + n_2) - 9})
If eftype = 'glass', the Glass :math:`\\delta` is reported, using the
group with the lowest variance as the control group:
.. math::
\\delta = \\frac{\\overline{X} - \\overline{Y}}{\\sigma_{control}}
References
----------
.. [1] Lakens, D., 2013. Calculating and reporting effect sizes to
facilitate cumulative science: a practical primer for t-tests and
ANOVAs. Front. Psychol. 4, 863. https://doi.org/10.3389/fpsyg.2013.00863
.. [2] Cumming, Geoff. Understanding the new statistics: Effect sizes,
confidence intervals, and meta-analysis. Routledge, 2013.
Examples
--------
1. Compute Cohen d from two independent set of observations.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> np.random.seed(123)
>>> x = np.random.normal(2, size=100)
>>> y = np.random.normal(2.3, size=95)
>>> d = compute_effsize(x=x, y=y, eftype='cohen', paired=False)
>>> print(d)
-0.2835170152506578
2. Compute Hedges g from two paired set of observations.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> x = [1.62, 2.21, 3.79, 1.66, 1.86, 1.87, 4.51, 4.49, 3.3 , 2.69]
>>> y = [0.91, 3., 2.28, 0.49, 1.42, 3.65, -0.43, 1.57, 3.27, 1.13]
>>> g = compute_effsize(x=x, y=y, eftype='hedges', paired=True)
>>> print(g)
0.8370985097811404
3. Compute Glass delta from two independent set of observations. The group
with the lowest variance will automatically be selected as the control.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> np.random.seed(123)
>>> x = np.random.normal(2, scale=1, size=50)
>>> y = np.random.normal(2, scale=2, size=45)
>>> d = compute_effsize(x=x, y=y, eftype='glass')
>>> print(d)
-0.1170721973604153
"""
# Check arguments
if not _check_eftype(eftype):
err = "Could not interpret input '{}'".format(eftype)
raise ValueError(err)
x = np.asarray(x)
y = np.asarray(y)
if x.size != y.size and paired:
warnings.warn("x and y have unequal sizes. Switching to "
"paired == False.")
paired = False
# Remove rows with missing values
x, y = remove_na(x, y, paired=paired)
nx, ny = x.size, y.size
if ny == 1:
# Case 1: One-sample Test
d = (x.mean() - y) / x.std(ddof=1)
return d
if eftype.lower() == 'glass':
# Find group with lowest variance
sd_control = np.min([x.std(ddof=1), y.std(ddof=1)])
d = (x.mean() - y.mean()) / sd_control
return d
elif eftype.lower() == 'r':
# Return correlation coefficient (useful for CI bootstrapping)
from scipy.stats import pearsonr
r, _ = pearsonr(x, y)
return r
elif eftype.lower() == 'cles':
# Compute exact CLES
diff = x[:, None] - y
return max((diff < 0).sum(), (diff > 0).sum()) / diff.size
else:
# Test equality of variance of data with a stringent threshold
# equal_var, p = homoscedasticity(x, y, alpha=.001)
# if not equal_var:
# print('Unequal variances (p<.001). You should report',
# 'Glass delta instead.')
# Compute unbiased Cohen's d effect size
if not paired:
# https://en.wikipedia.org/wiki/Effect_size
dof = nx + ny - 2
poolsd = np.sqrt(((nx - 1) * x.var(ddof=1)
+ (ny - 1) * y.var(ddof=1)) / dof)
d = (x.mean() - y.mean()) / poolsd
else:
# Report Cohen d-avg (Cumming 2012; Lakens 2013)
d = (x.mean() - y.mean()) / (.5 * (x.std(ddof=1)
+ y.std(ddof=1)))
return convert_effsize(d, 'cohen', eftype, nx=nx, ny=ny) | [
"def",
"compute_effsize",
"(",
"x",
",",
"y",
",",
"paired",
"=",
"False",
",",
"eftype",
"=",
"'cohen'",
")",
":",
"# Check arguments",
"if",
"not",
"_check_eftype",
"(",
"eftype",
")",
":",
"err",
"=",
"\"Could not interpret input '{}'\"",
".",
"format",
"... | Calculate effect size between two set of observations.
Parameters
----------
x : np.array or list
First set of observations.
y : np.array or list
Second set of observations.
paired : boolean
If True, uses Cohen d-avg formula to correct for repeated measurements
(Cumming 2012)
eftype : string
Desired output effect size.
Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'r' : correlation coefficient
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
'CLES' : Common language effect size
Returns
-------
ef : float
Effect size
See Also
--------
convert_effsize : Conversion between effect sizes.
compute_effsize_from_t : Convert a T-statistic to an effect size.
Notes
-----
Missing values are automatically removed from the data. If ``x`` and ``y``
are paired, the entire row is removed.
If ``x`` and ``y`` are independent, the Cohen's d is:
.. math::
d = \\frac{\\overline{X} - \\overline{Y}}
{\\sqrt{\\frac{(n_{1} - 1)\\sigma_{1}^{2} + (n_{2} - 1)
\\sigma_{2}^{2}}{n1 + n2 - 2}}}
If ``x`` and ``y`` are paired, the Cohen :math:`d_{avg}` is computed:
.. math::
d_{avg} = \\frac{\\overline{X} - \\overline{Y}}
{0.5 * (\\sigma_1 + \\sigma_2)}
The Cohen’s d is a biased estimate of the population effect size,
especially for small samples (n < 20). It is often preferable
to use the corrected effect size, or Hedges’g, instead:
.. math:: g = d * (1 - \\frac{3}{4(n_1 + n_2) - 9})
If eftype = 'glass', the Glass :math:`\\delta` is reported, using the
group with the lowest variance as the control group:
.. math::
\\delta = \\frac{\\overline{X} - \\overline{Y}}{\\sigma_{control}}
References
----------
.. [1] Lakens, D., 2013. Calculating and reporting effect sizes to
facilitate cumulative science: a practical primer for t-tests and
ANOVAs. Front. Psychol. 4, 863. https://doi.org/10.3389/fpsyg.2013.00863
.. [2] Cumming, Geoff. Understanding the new statistics: Effect sizes,
confidence intervals, and meta-analysis. Routledge, 2013.
Examples
--------
1. Compute Cohen d from two independent set of observations.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> np.random.seed(123)
>>> x = np.random.normal(2, size=100)
>>> y = np.random.normal(2.3, size=95)
>>> d = compute_effsize(x=x, y=y, eftype='cohen', paired=False)
>>> print(d)
-0.2835170152506578
2. Compute Hedges g from two paired set of observations.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> x = [1.62, 2.21, 3.79, 1.66, 1.86, 1.87, 4.51, 4.49, 3.3 , 2.69]
>>> y = [0.91, 3., 2.28, 0.49, 1.42, 3.65, -0.43, 1.57, 3.27, 1.13]
>>> g = compute_effsize(x=x, y=y, eftype='hedges', paired=True)
>>> print(g)
0.8370985097811404
3. Compute Glass delta from two independent set of observations. The group
with the lowest variance will automatically be selected as the control.
>>> import numpy as np
>>> from pingouin import compute_effsize
>>> np.random.seed(123)
>>> x = np.random.normal(2, scale=1, size=50)
>>> y = np.random.normal(2, scale=2, size=45)
>>> d = compute_effsize(x=x, y=y, eftype='glass')
>>> print(d)
-0.1170721973604153 | [
"Calculate",
"effect",
"size",
"between",
"two",
"set",
"of",
"observations",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/effsize.py#L542-L708 | train | 206,629 |
raphaelvallat/pingouin | pingouin/effsize.py | compute_effsize_from_t | def compute_effsize_from_t(tval, nx=None, ny=None, N=None, eftype='cohen'):
"""Compute effect size from a T-value.
Parameters
----------
tval : float
T-value
nx, ny : int, optional
Group sample sizes.
N : int, optional
Total sample size (will not be used if nx and ny are specified)
eftype : string, optional
desired output effect size
Returns
-------
ef : float
Effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
convert_effsize : Conversion between effect sizes.
Notes
-----
If both nx and ny are specified, the formula to convert from *t* to *d* is:
.. math:: d = t * \\sqrt{\\frac{1}{n_x} + \\frac{1}{n_y}}
If only N (total sample size) is specified, the formula is:
.. math:: d = \\frac{2t}{\\sqrt{N}}
Examples
--------
1. Compute effect size from a T-value when both sample sizes are known.
>>> from pingouin import compute_effsize_from_t
>>> tval, nx, ny = 2.90, 35, 25
>>> d = compute_effsize_from_t(tval, nx=nx, ny=ny, eftype='cohen')
>>> print(d)
0.7593982580212534
2. Compute effect size when only total sample size is known (nx+ny)
>>> tval, N = 2.90, 60
>>> d = compute_effsize_from_t(tval, N=N, eftype='cohen')
>>> print(d)
0.7487767802667672
"""
if not _check_eftype(eftype):
err = "Could not interpret input '{}'".format(eftype)
raise ValueError(err)
if not isinstance(tval, float):
err = "T-value must be float"
raise ValueError(err)
# Compute Cohen d (Lakens, 2013)
if nx is not None and ny is not None:
d = tval * np.sqrt(1 / nx + 1 / ny)
elif N is not None:
d = 2 * tval / np.sqrt(N)
else:
raise ValueError('You must specify either nx + ny, or just N')
return convert_effsize(d, 'cohen', eftype, nx=nx, ny=ny) | python | def compute_effsize_from_t(tval, nx=None, ny=None, N=None, eftype='cohen'):
"""Compute effect size from a T-value.
Parameters
----------
tval : float
T-value
nx, ny : int, optional
Group sample sizes.
N : int, optional
Total sample size (will not be used if nx and ny are specified)
eftype : string, optional
desired output effect size
Returns
-------
ef : float
Effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
convert_effsize : Conversion between effect sizes.
Notes
-----
If both nx and ny are specified, the formula to convert from *t* to *d* is:
.. math:: d = t * \\sqrt{\\frac{1}{n_x} + \\frac{1}{n_y}}
If only N (total sample size) is specified, the formula is:
.. math:: d = \\frac{2t}{\\sqrt{N}}
Examples
--------
1. Compute effect size from a T-value when both sample sizes are known.
>>> from pingouin import compute_effsize_from_t
>>> tval, nx, ny = 2.90, 35, 25
>>> d = compute_effsize_from_t(tval, nx=nx, ny=ny, eftype='cohen')
>>> print(d)
0.7593982580212534
2. Compute effect size when only total sample size is known (nx+ny)
>>> tval, N = 2.90, 60
>>> d = compute_effsize_from_t(tval, N=N, eftype='cohen')
>>> print(d)
0.7487767802667672
"""
if not _check_eftype(eftype):
err = "Could not interpret input '{}'".format(eftype)
raise ValueError(err)
if not isinstance(tval, float):
err = "T-value must be float"
raise ValueError(err)
# Compute Cohen d (Lakens, 2013)
if nx is not None and ny is not None:
d = tval * np.sqrt(1 / nx + 1 / ny)
elif N is not None:
d = 2 * tval / np.sqrt(N)
else:
raise ValueError('You must specify either nx + ny, or just N')
return convert_effsize(d, 'cohen', eftype, nx=nx, ny=ny) | [
"def",
"compute_effsize_from_t",
"(",
"tval",
",",
"nx",
"=",
"None",
",",
"ny",
"=",
"None",
",",
"N",
"=",
"None",
",",
"eftype",
"=",
"'cohen'",
")",
":",
"if",
"not",
"_check_eftype",
"(",
"eftype",
")",
":",
"err",
"=",
"\"Could not interpret input ... | Compute effect size from a T-value.
Parameters
----------
tval : float
T-value
nx, ny : int, optional
Group sample sizes.
N : int, optional
Total sample size (will not be used if nx and ny are specified)
eftype : string, optional
desired output effect size
Returns
-------
ef : float
Effect size
See Also
--------
compute_effsize : Calculate effect size between two set of observations.
convert_effsize : Conversion between effect sizes.
Notes
-----
If both nx and ny are specified, the formula to convert from *t* to *d* is:
.. math:: d = t * \\sqrt{\\frac{1}{n_x} + \\frac{1}{n_y}}
If only N (total sample size) is specified, the formula is:
.. math:: d = \\frac{2t}{\\sqrt{N}}
Examples
--------
1. Compute effect size from a T-value when both sample sizes are known.
>>> from pingouin import compute_effsize_from_t
>>> tval, nx, ny = 2.90, 35, 25
>>> d = compute_effsize_from_t(tval, nx=nx, ny=ny, eftype='cohen')
>>> print(d)
0.7593982580212534
2. Compute effect size when only total sample size is known (nx+ny)
>>> tval, N = 2.90, 60
>>> d = compute_effsize_from_t(tval, N=N, eftype='cohen')
>>> print(d)
0.7487767802667672 | [
"Compute",
"effect",
"size",
"from",
"a",
"T",
"-",
"value",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/effsize.py#L711-L778 | train | 206,630 |
raphaelvallat/pingouin | pingouin/correlation.py | bsmahal | def bsmahal(a, b, n_boot=200):
"""
Bootstraps Mahalanobis distances for Shepherd's pi correlation.
Parameters
----------
a : ndarray (shape=(n, 2))
Data
b : ndarray (shape=(n, 2))
Data
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
m : ndarray (shape=(n,))
Mahalanobis distance for each row in a, averaged across all the
bootstrap resamples.
"""
n, m = b.shape
MD = np.zeros((n, n_boot))
nr = np.arange(n)
xB = np.random.choice(nr, size=(n_boot, n), replace=True)
# Bootstrap the MD
for i in np.arange(n_boot):
s1 = b[xB[i, :], 0]
s2 = b[xB[i, :], 1]
X = np.column_stack((s1, s2))
mu = X.mean(0)
_, R = np.linalg.qr(X - mu)
sol = np.linalg.solve(R.T, (a - mu).T)
MD[:, i] = np.sum(sol**2, 0) * (n - 1)
# Average across all bootstraps
return MD.mean(1) | python | def bsmahal(a, b, n_boot=200):
"""
Bootstraps Mahalanobis distances for Shepherd's pi correlation.
Parameters
----------
a : ndarray (shape=(n, 2))
Data
b : ndarray (shape=(n, 2))
Data
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
m : ndarray (shape=(n,))
Mahalanobis distance for each row in a, averaged across all the
bootstrap resamples.
"""
n, m = b.shape
MD = np.zeros((n, n_boot))
nr = np.arange(n)
xB = np.random.choice(nr, size=(n_boot, n), replace=True)
# Bootstrap the MD
for i in np.arange(n_boot):
s1 = b[xB[i, :], 0]
s2 = b[xB[i, :], 1]
X = np.column_stack((s1, s2))
mu = X.mean(0)
_, R = np.linalg.qr(X - mu)
sol = np.linalg.solve(R.T, (a - mu).T)
MD[:, i] = np.sum(sol**2, 0) * (n - 1)
# Average across all bootstraps
return MD.mean(1) | [
"def",
"bsmahal",
"(",
"a",
",",
"b",
",",
"n_boot",
"=",
"200",
")",
":",
"n",
",",
"m",
"=",
"b",
".",
"shape",
"MD",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n_boot",
")",
")",
"nr",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"xB",
... | Bootstraps Mahalanobis distances for Shepherd's pi correlation.
Parameters
----------
a : ndarray (shape=(n, 2))
Data
b : ndarray (shape=(n, 2))
Data
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
m : ndarray (shape=(n,))
Mahalanobis distance for each row in a, averaged across all the
bootstrap resamples. | [
"Bootstraps",
"Mahalanobis",
"distances",
"for",
"Shepherd",
"s",
"pi",
"correlation",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L110-L145 | train | 206,631 |
raphaelvallat/pingouin | pingouin/correlation.py | shepherd | def shepherd(x, y, n_boot=200):
"""
Shepherd's Pi correlation, equivalent to Spearman's rho after outliers
removal.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
r : float
Pi correlation coefficient
pval : float
Two-tailed adjusted p-value.
outliers : array of bool
Indicate if value is an outlier or not
Notes
-----
It first bootstraps the Mahalanobis distances, removes all observations
with m >= 6 and finally calculates the correlation of the remaining data.
Pi is Spearman's Rho after outlier removal.
"""
from scipy.stats import spearmanr
X = np.column_stack((x, y))
# Bootstrapping on Mahalanobis distance
m = bsmahal(X, X, n_boot)
# Determine outliers
outliers = (m >= 6)
# Compute correlation
r, pval = spearmanr(x[~outliers], y[~outliers])
# (optional) double the p-value to achieve a nominal false alarm rate
# pval *= 2
# pval = 1 if pval > 1 else pval
return r, pval, outliers | python | def shepherd(x, y, n_boot=200):
"""
Shepherd's Pi correlation, equivalent to Spearman's rho after outliers
removal.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
r : float
Pi correlation coefficient
pval : float
Two-tailed adjusted p-value.
outliers : array of bool
Indicate if value is an outlier or not
Notes
-----
It first bootstraps the Mahalanobis distances, removes all observations
with m >= 6 and finally calculates the correlation of the remaining data.
Pi is Spearman's Rho after outlier removal.
"""
from scipy.stats import spearmanr
X = np.column_stack((x, y))
# Bootstrapping on Mahalanobis distance
m = bsmahal(X, X, n_boot)
# Determine outliers
outliers = (m >= 6)
# Compute correlation
r, pval = spearmanr(x[~outliers], y[~outliers])
# (optional) double the p-value to achieve a nominal false alarm rate
# pval *= 2
# pval = 1 if pval > 1 else pval
return r, pval, outliers | [
"def",
"shepherd",
"(",
"x",
",",
"y",
",",
"n_boot",
"=",
"200",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"spearmanr",
"X",
"=",
"np",
".",
"column_stack",
"(",
"(",
"x",
",",
"y",
")",
")",
"# Bootstrapping on Mahalanobis distance",
"m",
"=... | Shepherd's Pi correlation, equivalent to Spearman's rho after outliers
removal.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
n_boot : int
Number of bootstrap samples to calculate.
Returns
-------
r : float
Pi correlation coefficient
pval : float
Two-tailed adjusted p-value.
outliers : array of bool
Indicate if value is an outlier or not
Notes
-----
It first bootstraps the Mahalanobis distances, removes all observations
with m >= 6 and finally calculates the correlation of the remaining data.
Pi is Spearman's Rho after outlier removal. | [
"Shepherd",
"s",
"Pi",
"correlation",
"equivalent",
"to",
"Spearman",
"s",
"rho",
"after",
"outliers",
"removal",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L148-L193 | train | 206,632 |
raphaelvallat/pingouin | pingouin/correlation.py | rm_corr | def rm_corr(data=None, x=None, y=None, subject=None, tail='two-sided'):
"""Repeated measures correlation.
Parameters
----------
data : pd.DataFrame
Dataframe.
x, y : string
Name of columns in ``data`` containing the two dependent variables.
subject : string
Name of column in ``data`` containing the subject indicator.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'r' : Repeated measures correlation coefficient
'dof' : Degrees of freedom
'pval' : one or two tailed p-value
'CI95' : 95% parametric confidence intervals
'power' : achieved power of the test (= 1 - type II error).
Notes
-----
Repeated measures correlation (rmcorr) is a statistical technique
for determining the common within-individual association for paired
measures assessed on two or more occasions for multiple individuals.
From Bakdash and Marusich (2017):
"Rmcorr accounts for non-independence among observations using analysis
of covariance (ANCOVA) to statistically adjust for inter-individual
variability. By removing measured variance between-participants,
rmcorr provides the best linear fit for each participant using parallel
regression lines (the same slope) with varying intercepts.
Like a Pearson correlation coefficient, the rmcorr coefficient
is bounded by − 1 to 1 and represents the strength of the linear
association between two variables."
Results have been tested against the `rmcorr` R package.
Please note that NaN are automatically removed from the dataframe
(listwise deletion).
References
----------
.. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation.
Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456
.. [2] Bland, J. M., & Altman, D. G. (1995). Statistics notes: Calculating
correlation coefficients with repeated observations:
Part 1—correlation within subjects. Bmj, 310(6977), 446.
.. [3] https://github.com/cran/rmcorr
Examples
--------
>>> import pingouin as pg
>>> df = pg.read_dataset('rm_corr')
>>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
r dof pval CI95% power
rm_corr -0.507 38 0.000847 [-0.71, -0.23] 0.93
"""
from pingouin import ancova, power_corr
# Safety checks
assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame'
assert x in data, 'The %s column is not in data.' % x
assert y in data, 'The %s column is not in data.' % y
assert subject in data, 'The %s column is not in data.' % subject
if data[subject].nunique() < 3:
raise ValueError('rm_corr requires at least 3 unique subjects.')
# Remove missing values
data = data[[x, y, subject]].dropna(axis=0)
# Using PINGOUIN
aov, bw = ancova(dv=y, covar=x, between=subject, data=data,
return_bw=True)
sign = np.sign(bw)
dof = int(aov.loc[2, 'DF'])
n = dof + 2
ssfactor = aov.loc[1, 'SS']
sserror = aov.loc[2, 'SS']
rm = sign * np.sqrt(ssfactor / (ssfactor + sserror))
pval = aov.loc[1, 'p-unc']
pval *= 0.5 if tail == 'one-sided' else 1
ci = compute_esci(stat=rm, nx=n, eftype='pearson').tolist()
pwr = power_corr(r=rm, n=n, tail=tail)
# Convert to Dataframe
stats = pd.DataFrame({"r": round(rm, 3), "dof": int(dof),
"pval": pval, "CI95%": str(ci),
"power": round(pwr, 3)}, index=["rm_corr"])
return stats | python | def rm_corr(data=None, x=None, y=None, subject=None, tail='two-sided'):
"""Repeated measures correlation.
Parameters
----------
data : pd.DataFrame
Dataframe.
x, y : string
Name of columns in ``data`` containing the two dependent variables.
subject : string
Name of column in ``data`` containing the subject indicator.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'r' : Repeated measures correlation coefficient
'dof' : Degrees of freedom
'pval' : one or two tailed p-value
'CI95' : 95% parametric confidence intervals
'power' : achieved power of the test (= 1 - type II error).
Notes
-----
Repeated measures correlation (rmcorr) is a statistical technique
for determining the common within-individual association for paired
measures assessed on two or more occasions for multiple individuals.
From Bakdash and Marusich (2017):
"Rmcorr accounts for non-independence among observations using analysis
of covariance (ANCOVA) to statistically adjust for inter-individual
variability. By removing measured variance between-participants,
rmcorr provides the best linear fit for each participant using parallel
regression lines (the same slope) with varying intercepts.
Like a Pearson correlation coefficient, the rmcorr coefficient
is bounded by − 1 to 1 and represents the strength of the linear
association between two variables."
Results have been tested against the `rmcorr` R package.
Please note that NaN are automatically removed from the dataframe
(listwise deletion).
References
----------
.. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation.
Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456
.. [2] Bland, J. M., & Altman, D. G. (1995). Statistics notes: Calculating
correlation coefficients with repeated observations:
Part 1—correlation within subjects. Bmj, 310(6977), 446.
.. [3] https://github.com/cran/rmcorr
Examples
--------
>>> import pingouin as pg
>>> df = pg.read_dataset('rm_corr')
>>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
r dof pval CI95% power
rm_corr -0.507 38 0.000847 [-0.71, -0.23] 0.93
"""
from pingouin import ancova, power_corr
# Safety checks
assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame'
assert x in data, 'The %s column is not in data.' % x
assert y in data, 'The %s column is not in data.' % y
assert subject in data, 'The %s column is not in data.' % subject
if data[subject].nunique() < 3:
raise ValueError('rm_corr requires at least 3 unique subjects.')
# Remove missing values
data = data[[x, y, subject]].dropna(axis=0)
# Using PINGOUIN
aov, bw = ancova(dv=y, covar=x, between=subject, data=data,
return_bw=True)
sign = np.sign(bw)
dof = int(aov.loc[2, 'DF'])
n = dof + 2
ssfactor = aov.loc[1, 'SS']
sserror = aov.loc[2, 'SS']
rm = sign * np.sqrt(ssfactor / (ssfactor + sserror))
pval = aov.loc[1, 'p-unc']
pval *= 0.5 if tail == 'one-sided' else 1
ci = compute_esci(stat=rm, nx=n, eftype='pearson').tolist()
pwr = power_corr(r=rm, n=n, tail=tail)
# Convert to Dataframe
stats = pd.DataFrame({"r": round(rm, 3), "dof": int(dof),
"pval": pval, "CI95%": str(ci),
"power": round(pwr, 3)}, index=["rm_corr"])
return stats | [
"def",
"rm_corr",
"(",
"data",
"=",
"None",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"tail",
"=",
"'two-sided'",
")",
":",
"from",
"pingouin",
"import",
"ancova",
",",
"power_corr",
"# Safety checks",
"assert",
"i... | Repeated measures correlation.
Parameters
----------
data : pd.DataFrame
Dataframe.
x, y : string
Name of columns in ``data`` containing the two dependent variables.
subject : string
Name of column in ``data`` containing the subject indicator.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'r' : Repeated measures correlation coefficient
'dof' : Degrees of freedom
'pval' : one or two tailed p-value
'CI95' : 95% parametric confidence intervals
'power' : achieved power of the test (= 1 - type II error).
Notes
-----
Repeated measures correlation (rmcorr) is a statistical technique
for determining the common within-individual association for paired
measures assessed on two or more occasions for multiple individuals.
From Bakdash and Marusich (2017):
"Rmcorr accounts for non-independence among observations using analysis
of covariance (ANCOVA) to statistically adjust for inter-individual
variability. By removing measured variance between-participants,
rmcorr provides the best linear fit for each participant using parallel
regression lines (the same slope) with varying intercepts.
Like a Pearson correlation coefficient, the rmcorr coefficient
is bounded by − 1 to 1 and represents the strength of the linear
association between two variables."
Results have been tested against the `rmcorr` R package.
Please note that NaN are automatically removed from the dataframe
(listwise deletion).
References
----------
.. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation.
Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456
.. [2] Bland, J. M., & Altman, D. G. (1995). Statistics notes: Calculating
correlation coefficients with repeated observations:
Part 1—correlation within subjects. Bmj, 310(6977), 446.
.. [3] https://github.com/cran/rmcorr
Examples
--------
>>> import pingouin as pg
>>> df = pg.read_dataset('rm_corr')
>>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject')
r dof pval CI95% power
rm_corr -0.507 38 0.000847 [-0.71, -0.23] 0.93 | [
"Repeated",
"measures",
"correlation",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L664-L758 | train | 206,633 |
raphaelvallat/pingouin | pingouin/correlation.py | _dcorr | def _dcorr(y, n2, A, dcov2_xx):
"""Helper function for distance correlation bootstrapping.
"""
# Pairwise Euclidean distances
b = squareform(pdist(y, metric='euclidean'))
# Double centering
B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
# Compute squared distance covariances
dcov2_yy = np.vdot(B, B) / n2
dcov2_xy = np.vdot(A, B) / n2
return np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy)) | python | def _dcorr(y, n2, A, dcov2_xx):
"""Helper function for distance correlation bootstrapping.
"""
# Pairwise Euclidean distances
b = squareform(pdist(y, metric='euclidean'))
# Double centering
B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
# Compute squared distance covariances
dcov2_yy = np.vdot(B, B) / n2
dcov2_xy = np.vdot(A, B) / n2
return np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy)) | [
"def",
"_dcorr",
"(",
"y",
",",
"n2",
",",
"A",
",",
"dcov2_xx",
")",
":",
"# Pairwise Euclidean distances",
"b",
"=",
"squareform",
"(",
"pdist",
"(",
"y",
",",
"metric",
"=",
"'euclidean'",
")",
")",
"# Double centering",
"B",
"=",
"b",
"-",
"b",
"."... | Helper function for distance correlation bootstrapping. | [
"Helper",
"function",
"for",
"distance",
"correlation",
"bootstrapping",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L761-L771 | train | 206,634 |
raphaelvallat/pingouin | pingouin/correlation.py | distance_corr | def distance_corr(x, y, tail='upper', n_boot=1000, seed=None):
"""Distance correlation between two arrays.
Statistical significance (p-value) is evaluated with a permutation test.
Parameters
----------
x, y : np.ndarray
1D or 2D input arrays, shape (n_samples, n_features).
x and y must have the same number of samples and must not
contain missing values.
tail : str
Tail for p-value ::
'upper' : one-sided (upper tail)
'lower' : one-sided (lower tail)
'two-sided' : two-sided
n_boot : int or None
Number of bootstrap to perform.
If None, no bootstrapping is performed and the function
only returns the distance correlation (no p-value).
Default is 1000 (thus giving a precision of 0.001).
seed : int or None
Random state seed.
Returns
-------
dcor : float
Sample distance correlation (range from 0 to 1).
pval : float
P-value
Notes
-----
From Wikipedia:
*Distance correlation is a measure of dependence between two paired
random vectors of arbitrary, not necessarily equal, dimension. The
distance correlation coefficient is zero if and only if the random vectors
are independent. Thus, distance correlation measures both linear and
nonlinear association between two random variables or random vectors.
This is in contrast to Pearson's correlation, which can only detect
linear association between two random variables.*
The distance correlation of two random variables is obtained by
dividing their distance covariance by the product of their distance
standard deviations:
.. math::
\\text{dCor}(X, Y) = \\frac{\\text{dCov}(X, Y)}
{\\sqrt{\\text{dVar}(X) \\cdot \\text{dVar}(Y)}}
where :math:`\\text{dCov}(X, Y)` is the square root of the arithmetic
average of the product of the double-centered pairwise Euclidean distance
matrices.
Note that by contrast to Pearson's correlation, the distance correlation
cannot be negative, i.e :math:`0 \\leq \\text{dCor} \\leq 1`.
Results have been tested against the 'energy' R package. To be consistent
with this latter, only the one-sided p-value is computed, i.e. the upper
tail of the T-statistic.
References
----------
.. [1] https://en.wikipedia.org/wiki/Distance_correlation
.. [2] Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
Measuring and testing dependence by correlation of distances.
The annals of statistics, 35(6), 2769-2794.
.. [3] https://gist.github.com/satra/aa3d19a12b74e9ab7941
.. [4] https://gist.github.com/wladston/c931b1495184fbb99bec
.. [5] https://cran.r-project.org/web/packages/energy/energy.pdf
Examples
--------
1. With two 1D vectors
>>> from pingouin import distance_corr
>>> a = [1, 2, 3, 4, 5]
>>> b = [1, 2, 9, 4, 4]
>>> distance_corr(a, b, seed=9)
(0.7626762424168667, 0.312)
2. With two 2D arrays and no p-value
>>> import numpy as np
>>> np.random.seed(123)
>>> from pingouin import distance_corr
>>> a = np.random.random((10, 10))
>>> b = np.random.random((10, 10))
>>> distance_corr(a, b, n_boot=None)
0.8799633012275321
"""
assert tail in ['upper', 'lower', 'two-sided'], 'Wrong tail argument.'
x = np.asarray(x)
y = np.asarray(y)
# Check for NaN values
if any([np.isnan(np.min(x)), np.isnan(np.min(y))]):
raise ValueError('Input arrays must not contain NaN values.')
if x.ndim == 1:
x = x[:, None]
if y.ndim == 1:
y = y[:, None]
assert x.shape[0] == y.shape[0], 'x and y must have same number of samples'
# Extract number of samples
n = x.shape[0]
n2 = n**2
# Process first array to avoid redundancy when performing bootstrap
a = squareform(pdist(x, metric='euclidean'))
A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
dcov2_xx = np.vdot(A, A) / n2
# Process second array and compute final distance correlation
dcor = _dcorr(y, n2, A, dcov2_xx)
# Compute one-sided p-value using a bootstrap procedure
if n_boot is not None and n_boot > 1:
# Define random seed and permutation
rng = np.random.RandomState(seed)
bootsam = rng.random_sample((n_boot, n)).argsort(axis=1)
bootstat = np.empty(n_boot)
for i in range(n_boot):
bootstat[i] = _dcorr(y[bootsam[i, :]], n2, A, dcov2_xx)
pval = _perm_pval(bootstat, dcor, tail=tail)
return dcor, pval
else:
return dcor | python | def distance_corr(x, y, tail='upper', n_boot=1000, seed=None):
"""Distance correlation between two arrays.
Statistical significance (p-value) is evaluated with a permutation test.
Parameters
----------
x, y : np.ndarray
1D or 2D input arrays, shape (n_samples, n_features).
x and y must have the same number of samples and must not
contain missing values.
tail : str
Tail for p-value ::
'upper' : one-sided (upper tail)
'lower' : one-sided (lower tail)
'two-sided' : two-sided
n_boot : int or None
Number of bootstrap to perform.
If None, no bootstrapping is performed and the function
only returns the distance correlation (no p-value).
Default is 1000 (thus giving a precision of 0.001).
seed : int or None
Random state seed.
Returns
-------
dcor : float
Sample distance correlation (range from 0 to 1).
pval : float
P-value
Notes
-----
From Wikipedia:
*Distance correlation is a measure of dependence between two paired
random vectors of arbitrary, not necessarily equal, dimension. The
distance correlation coefficient is zero if and only if the random vectors
are independent. Thus, distance correlation measures both linear and
nonlinear association between two random variables or random vectors.
This is in contrast to Pearson's correlation, which can only detect
linear association between two random variables.*
The distance correlation of two random variables is obtained by
dividing their distance covariance by the product of their distance
standard deviations:
.. math::
\\text{dCor}(X, Y) = \\frac{\\text{dCov}(X, Y)}
{\\sqrt{\\text{dVar}(X) \\cdot \\text{dVar}(Y)}}
where :math:`\\text{dCov}(X, Y)` is the square root of the arithmetic
average of the product of the double-centered pairwise Euclidean distance
matrices.
Note that by contrast to Pearson's correlation, the distance correlation
cannot be negative, i.e :math:`0 \\leq \\text{dCor} \\leq 1`.
Results have been tested against the 'energy' R package. To be consistent
with this latter, only the one-sided p-value is computed, i.e. the upper
tail of the T-statistic.
References
----------
.. [1] https://en.wikipedia.org/wiki/Distance_correlation
.. [2] Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
Measuring and testing dependence by correlation of distances.
The annals of statistics, 35(6), 2769-2794.
.. [3] https://gist.github.com/satra/aa3d19a12b74e9ab7941
.. [4] https://gist.github.com/wladston/c931b1495184fbb99bec
.. [5] https://cran.r-project.org/web/packages/energy/energy.pdf
Examples
--------
1. With two 1D vectors
>>> from pingouin import distance_corr
>>> a = [1, 2, 3, 4, 5]
>>> b = [1, 2, 9, 4, 4]
>>> distance_corr(a, b, seed=9)
(0.7626762424168667, 0.312)
2. With two 2D arrays and no p-value
>>> import numpy as np
>>> np.random.seed(123)
>>> from pingouin import distance_corr
>>> a = np.random.random((10, 10))
>>> b = np.random.random((10, 10))
>>> distance_corr(a, b, n_boot=None)
0.8799633012275321
"""
assert tail in ['upper', 'lower', 'two-sided'], 'Wrong tail argument.'
x = np.asarray(x)
y = np.asarray(y)
# Check for NaN values
if any([np.isnan(np.min(x)), np.isnan(np.min(y))]):
raise ValueError('Input arrays must not contain NaN values.')
if x.ndim == 1:
x = x[:, None]
if y.ndim == 1:
y = y[:, None]
assert x.shape[0] == y.shape[0], 'x and y must have same number of samples'
# Extract number of samples
n = x.shape[0]
n2 = n**2
# Process first array to avoid redundancy when performing bootstrap
a = squareform(pdist(x, metric='euclidean'))
A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
dcov2_xx = np.vdot(A, A) / n2
# Process second array and compute final distance correlation
dcor = _dcorr(y, n2, A, dcov2_xx)
# Compute one-sided p-value using a bootstrap procedure
if n_boot is not None and n_boot > 1:
# Define random seed and permutation
rng = np.random.RandomState(seed)
bootsam = rng.random_sample((n_boot, n)).argsort(axis=1)
bootstat = np.empty(n_boot)
for i in range(n_boot):
bootstat[i] = _dcorr(y[bootsam[i, :]], n2, A, dcov2_xx)
pval = _perm_pval(bootstat, dcor, tail=tail)
return dcor, pval
else:
return dcor | [
"def",
"distance_corr",
"(",
"x",
",",
"y",
",",
"tail",
"=",
"'upper'",
",",
"n_boot",
"=",
"1000",
",",
"seed",
"=",
"None",
")",
":",
"assert",
"tail",
"in",
"[",
"'upper'",
",",
"'lower'",
",",
"'two-sided'",
"]",
",",
"'Wrong tail argument.'",
"x"... | Distance correlation between two arrays.
Statistical significance (p-value) is evaluated with a permutation test.
Parameters
----------
x, y : np.ndarray
1D or 2D input arrays, shape (n_samples, n_features).
x and y must have the same number of samples and must not
contain missing values.
tail : str
Tail for p-value ::
'upper' : one-sided (upper tail)
'lower' : one-sided (lower tail)
'two-sided' : two-sided
n_boot : int or None
Number of bootstrap to perform.
If None, no bootstrapping is performed and the function
only returns the distance correlation (no p-value).
Default is 1000 (thus giving a precision of 0.001).
seed : int or None
Random state seed.
Returns
-------
dcor : float
Sample distance correlation (range from 0 to 1).
pval : float
P-value
Notes
-----
From Wikipedia:
*Distance correlation is a measure of dependence between two paired
random vectors of arbitrary, not necessarily equal, dimension. The
distance correlation coefficient is zero if and only if the random vectors
are independent. Thus, distance correlation measures both linear and
nonlinear association between two random variables or random vectors.
This is in contrast to Pearson's correlation, which can only detect
linear association between two random variables.*
The distance correlation of two random variables is obtained by
dividing their distance covariance by the product of their distance
standard deviations:
.. math::
\\text{dCor}(X, Y) = \\frac{\\text{dCov}(X, Y)}
{\\sqrt{\\text{dVar}(X) \\cdot \\text{dVar}(Y)}}
where :math:`\\text{dCov}(X, Y)` is the square root of the arithmetic
average of the product of the double-centered pairwise Euclidean distance
matrices.
Note that by contrast to Pearson's correlation, the distance correlation
cannot be negative, i.e :math:`0 \\leq \\text{dCor} \\leq 1`.
Results have been tested against the 'energy' R package. To be consistent
with this latter, only the one-sided p-value is computed, i.e. the upper
tail of the T-statistic.
References
----------
.. [1] https://en.wikipedia.org/wiki/Distance_correlation
.. [2] Székely, G. J., Rizzo, M. L., & Bakirov, N. K. (2007).
Measuring and testing dependence by correlation of distances.
The annals of statistics, 35(6), 2769-2794.
.. [3] https://gist.github.com/satra/aa3d19a12b74e9ab7941
.. [4] https://gist.github.com/wladston/c931b1495184fbb99bec
.. [5] https://cran.r-project.org/web/packages/energy/energy.pdf
Examples
--------
1. With two 1D vectors
>>> from pingouin import distance_corr
>>> a = [1, 2, 3, 4, 5]
>>> b = [1, 2, 9, 4, 4]
>>> distance_corr(a, b, seed=9)
(0.7626762424168667, 0.312)
2. With two 2D arrays and no p-value
>>> import numpy as np
>>> np.random.seed(123)
>>> from pingouin import distance_corr
>>> a = np.random.random((10, 10))
>>> b = np.random.random((10, 10))
>>> distance_corr(a, b, n_boot=None)
0.8799633012275321 | [
"Distance",
"correlation",
"between",
"two",
"arrays",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L774-L909 | train | 206,635 |
raphaelvallat/pingouin | pingouin/regression.py | _point_estimate | def _point_estimate(X_val, XM_val, M_val, y_val, idx, n_mediator,
mtype='linear'):
"""Point estimate of indirect effect based on bootstrap sample."""
# Mediator(s) model (M(j) ~ X + covar)
beta_m = []
for j in range(n_mediator):
if mtype == 'linear':
beta_m.append(linear_regression(X_val[idx], M_val[idx, j],
coef_only=True)[1])
else:
beta_m.append(logistic_regression(X_val[idx], M_val[idx, j],
coef_only=True)[1])
# Full model (Y ~ X + M + covar)
beta_y = linear_regression(XM_val[idx], y_val[idx],
coef_only=True)[2:(2 + n_mediator)]
# Point estimate
return beta_m * beta_y | python | def _point_estimate(X_val, XM_val, M_val, y_val, idx, n_mediator,
mtype='linear'):
"""Point estimate of indirect effect based on bootstrap sample."""
# Mediator(s) model (M(j) ~ X + covar)
beta_m = []
for j in range(n_mediator):
if mtype == 'linear':
beta_m.append(linear_regression(X_val[idx], M_val[idx, j],
coef_only=True)[1])
else:
beta_m.append(logistic_regression(X_val[idx], M_val[idx, j],
coef_only=True)[1])
# Full model (Y ~ X + M + covar)
beta_y = linear_regression(XM_val[idx], y_val[idx],
coef_only=True)[2:(2 + n_mediator)]
# Point estimate
return beta_m * beta_y | [
"def",
"_point_estimate",
"(",
"X_val",
",",
"XM_val",
",",
"M_val",
",",
"y_val",
",",
"idx",
",",
"n_mediator",
",",
"mtype",
"=",
"'linear'",
")",
":",
"# Mediator(s) model (M(j) ~ X + covar)",
"beta_m",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"n... | Point estimate of indirect effect based on bootstrap sample. | [
"Point",
"estimate",
"of",
"indirect",
"effect",
"based",
"on",
"bootstrap",
"sample",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/regression.py#L414-L432 | train | 206,636 |
raphaelvallat/pingouin | pingouin/regression.py | _pval_from_bootci | def _pval_from_bootci(boot, estimate):
"""Compute p-value from bootstrap distribution.
Similar to the pval function in the R package mediation.
Note that this is less accurate than a permutation test because the
bootstrap distribution is not conditioned on a true null hypothesis.
"""
if estimate == 0:
out = 1
else:
out = 2 * min(sum(boot > 0), sum(boot < 0)) / len(boot)
return min(out, 1) | python | def _pval_from_bootci(boot, estimate):
"""Compute p-value from bootstrap distribution.
Similar to the pval function in the R package mediation.
Note that this is less accurate than a permutation test because the
bootstrap distribution is not conditioned on a true null hypothesis.
"""
if estimate == 0:
out = 1
else:
out = 2 * min(sum(boot > 0), sum(boot < 0)) / len(boot)
return min(out, 1) | [
"def",
"_pval_from_bootci",
"(",
"boot",
",",
"estimate",
")",
":",
"if",
"estimate",
"==",
"0",
":",
"out",
"=",
"1",
"else",
":",
"out",
"=",
"2",
"*",
"min",
"(",
"sum",
"(",
"boot",
">",
"0",
")",
",",
"sum",
"(",
"boot",
"<",
"0",
")",
"... | Compute p-value from bootstrap distribution.
Similar to the pval function in the R package mediation.
Note that this is less accurate than a permutation test because the
bootstrap distribution is not conditioned on a true null hypothesis. | [
"Compute",
"p",
"-",
"value",
"from",
"bootstrap",
"distribution",
".",
"Similar",
"to",
"the",
"pval",
"function",
"in",
"the",
"R",
"package",
"mediation",
".",
"Note",
"that",
"this",
"is",
"less",
"accurate",
"than",
"a",
"permutation",
"test",
"because"... | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/regression.py#L469-L479 | train | 206,637 |
raphaelvallat/pingouin | pingouin/pandas.py | _anova | def _anova(self, dv=None, between=None, detailed=False, export_filename=None):
"""Return one-way and two-way ANOVA."""
aov = anova(data=self, dv=dv, between=between, detailed=detailed,
export_filename=export_filename)
return aov | python | def _anova(self, dv=None, between=None, detailed=False, export_filename=None):
"""Return one-way and two-way ANOVA."""
aov = anova(data=self, dv=dv, between=between, detailed=detailed,
export_filename=export_filename)
return aov | [
"def",
"_anova",
"(",
"self",
",",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"detailed",
"=",
"False",
",",
"export_filename",
"=",
"None",
")",
":",
"aov",
"=",
"anova",
"(",
"data",
"=",
"self",
",",
"dv",
"=",
"dv",
",",
"between",
... | Return one-way and two-way ANOVA. | [
"Return",
"one",
"-",
"way",
"and",
"two",
"-",
"way",
"ANOVA",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pandas.py#L18-L22 | train | 206,638 |
raphaelvallat/pingouin | pingouin/pandas.py | _welch_anova | def _welch_anova(self, dv=None, between=None, export_filename=None):
"""Return one-way Welch ANOVA."""
aov = welch_anova(data=self, dv=dv, between=between,
export_filename=export_filename)
return aov | python | def _welch_anova(self, dv=None, between=None, export_filename=None):
"""Return one-way Welch ANOVA."""
aov = welch_anova(data=self, dv=dv, between=between,
export_filename=export_filename)
return aov | [
"def",
"_welch_anova",
"(",
"self",
",",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"export_filename",
"=",
"None",
")",
":",
"aov",
"=",
"welch_anova",
"(",
"data",
"=",
"self",
",",
"dv",
"=",
"dv",
",",
"between",
"=",
"between",
",",
... | Return one-way Welch ANOVA. | [
"Return",
"one",
"-",
"way",
"Welch",
"ANOVA",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pandas.py#L29-L33 | train | 206,639 |
raphaelvallat/pingouin | pingouin/pandas.py | _mixed_anova | def _mixed_anova(self, dv=None, between=None, within=None, subject=None,
correction=False, export_filename=None):
"""Two-way mixed ANOVA."""
aov = mixed_anova(data=self, dv=dv, between=between, within=within,
subject=subject, correction=correction,
export_filename=export_filename)
return aov | python | def _mixed_anova(self, dv=None, between=None, within=None, subject=None,
correction=False, export_filename=None):
"""Two-way mixed ANOVA."""
aov = mixed_anova(data=self, dv=dv, between=between, within=within,
subject=subject, correction=correction,
export_filename=export_filename)
return aov | [
"def",
"_mixed_anova",
"(",
"self",
",",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"within",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"correction",
"=",
"False",
",",
"export_filename",
"=",
"None",
")",
":",
"aov",
"=",
"mixed_anova",... | Two-way mixed ANOVA. | [
"Two",
"-",
"way",
"mixed",
"ANOVA",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pandas.py#L71-L77 | train | 206,640 |
raphaelvallat/pingouin | pingouin/pandas.py | _mediation_analysis | def _mediation_analysis(self, x=None, m=None, y=None, covar=None,
alpha=0.05, n_boot=500, seed=None, return_dist=False):
"""Mediation analysis."""
stats = mediation_analysis(data=self, x=x, m=m, y=y, covar=covar,
alpha=alpha, n_boot=n_boot, seed=seed,
return_dist=return_dist)
return stats | python | def _mediation_analysis(self, x=None, m=None, y=None, covar=None,
alpha=0.05, n_boot=500, seed=None, return_dist=False):
"""Mediation analysis."""
stats = mediation_analysis(data=self, x=x, m=m, y=y, covar=covar,
alpha=alpha, n_boot=n_boot, seed=seed,
return_dist=return_dist)
return stats | [
"def",
"_mediation_analysis",
"(",
"self",
",",
"x",
"=",
"None",
",",
"m",
"=",
"None",
",",
"y",
"=",
"None",
",",
"covar",
"=",
"None",
",",
"alpha",
"=",
"0.05",
",",
"n_boot",
"=",
"500",
",",
"seed",
"=",
"None",
",",
"return_dist",
"=",
"F... | Mediation analysis. | [
"Mediation",
"analysis",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pandas.py#L168-L174 | train | 206,641 |
raphaelvallat/pingouin | pingouin/nonparametric.py | mad | def mad(a, normalize=True, axis=0):
"""
Median Absolute Deviation along given axis of an array.
Parameters
----------
a : array-like
Input array.
normalize : boolean.
If True, scale by a normalization constant (~0.67)
axis : int, optional
The defaul is 0. Can also be None.
Returns
-------
mad : float
mad = median(abs(a - median(a))) / c
References
----------
.. [1] https://en.wikipedia.org/wiki/Median_absolute_deviation
Examples
--------
>>> from pingouin import mad
>>> a = [1.2, 5.4, 3.2, 7.8, 2.5]
>>> mad(a)
2.965204437011204
>>> mad(a, normalize=False)
2.0
"""
from scipy.stats import norm
a = np.asarray(a)
c = norm.ppf(3 / 4.) if normalize else 1
center = np.apply_over_axes(np.median, a, axis)
return np.median((np.fabs(a - center)) / c, axis=axis) | python | def mad(a, normalize=True, axis=0):
"""
Median Absolute Deviation along given axis of an array.
Parameters
----------
a : array-like
Input array.
normalize : boolean.
If True, scale by a normalization constant (~0.67)
axis : int, optional
The defaul is 0. Can also be None.
Returns
-------
mad : float
mad = median(abs(a - median(a))) / c
References
----------
.. [1] https://en.wikipedia.org/wiki/Median_absolute_deviation
Examples
--------
>>> from pingouin import mad
>>> a = [1.2, 5.4, 3.2, 7.8, 2.5]
>>> mad(a)
2.965204437011204
>>> mad(a, normalize=False)
2.0
"""
from scipy.stats import norm
a = np.asarray(a)
c = norm.ppf(3 / 4.) if normalize else 1
center = np.apply_over_axes(np.median, a, axis)
return np.median((np.fabs(a - center)) / c, axis=axis) | [
"def",
"mad",
"(",
"a",
",",
"normalize",
"=",
"True",
",",
"axis",
"=",
"0",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"norm",
"a",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"c",
"=",
"norm",
".",
"ppf",
"(",
"3",
"/",
"4.",
")",
... | Median Absolute Deviation along given axis of an array.
Parameters
----------
a : array-like
Input array.
normalize : boolean.
If True, scale by a normalization constant (~0.67)
axis : int, optional
The defaul is 0. Can also be None.
Returns
-------
mad : float
mad = median(abs(a - median(a))) / c
References
----------
.. [1] https://en.wikipedia.org/wiki/Median_absolute_deviation
Examples
--------
>>> from pingouin import mad
>>> a = [1.2, 5.4, 3.2, 7.8, 2.5]
>>> mad(a)
2.965204437011204
>>> mad(a, normalize=False)
2.0 | [
"Median",
"Absolute",
"Deviation",
"along",
"given",
"axis",
"of",
"an",
"array",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/nonparametric.py#L11-L47 | train | 206,642 |
raphaelvallat/pingouin | pingouin/nonparametric.py | madmedianrule | def madmedianrule(a):
"""Outlier detection based on the MAD-median rule.
Parameters
----------
a : array-like
Input array.
Returns
-------
outliers: boolean (same shape as a)
Boolean array indicating whether each sample is an outlier (True) or
not (False).
References
----------
.. [1] Hall, P., Welsh, A.H., 1985. Limit theorems for the median
deviation. Ann. Inst. Stat. Math. 37, 27–36.
https://doi.org/10.1007/BF02481078
Examples
--------
>>> from pingouin import madmedianrule
>>> a = [-1.09, 1., 0.28, -1.51, -0.58, 6.61, -2.43, -0.43]
>>> madmedianrule(a)
array([False, False, False, False, False, True, False, False])
"""
from scipy.stats import chi2
a = np.asarray(a)
k = np.sqrt(chi2.ppf(0.975, 1))
return (np.fabs(a - np.median(a)) / mad(a)) > k | python | def madmedianrule(a):
"""Outlier detection based on the MAD-median rule.
Parameters
----------
a : array-like
Input array.
Returns
-------
outliers: boolean (same shape as a)
Boolean array indicating whether each sample is an outlier (True) or
not (False).
References
----------
.. [1] Hall, P., Welsh, A.H., 1985. Limit theorems for the median
deviation. Ann. Inst. Stat. Math. 37, 27–36.
https://doi.org/10.1007/BF02481078
Examples
--------
>>> from pingouin import madmedianrule
>>> a = [-1.09, 1., 0.28, -1.51, -0.58, 6.61, -2.43, -0.43]
>>> madmedianrule(a)
array([False, False, False, False, False, True, False, False])
"""
from scipy.stats import chi2
a = np.asarray(a)
k = np.sqrt(chi2.ppf(0.975, 1))
return (np.fabs(a - np.median(a)) / mad(a)) > k | [
"def",
"madmedianrule",
"(",
"a",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"chi2",
"a",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"k",
"=",
"np",
".",
"sqrt",
"(",
"chi2",
".",
"ppf",
"(",
"0.975",
",",
"1",
")",
")",
"return",
"(",
... | Outlier detection based on the MAD-median rule.
Parameters
----------
a : array-like
Input array.
Returns
-------
outliers: boolean (same shape as a)
Boolean array indicating whether each sample is an outlier (True) or
not (False).
References
----------
.. [1] Hall, P., Welsh, A.H., 1985. Limit theorems for the median
deviation. Ann. Inst. Stat. Math. 37, 27–36.
https://doi.org/10.1007/BF02481078
Examples
--------
>>> from pingouin import madmedianrule
>>> a = [-1.09, 1., 0.28, -1.51, -0.58, 6.61, -2.43, -0.43]
>>> madmedianrule(a)
array([False, False, False, False, False, True, False, False]) | [
"Outlier",
"detection",
"based",
"on",
"the",
"MAD",
"-",
"median",
"rule",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/nonparametric.py#L50-L80 | train | 206,643 |
raphaelvallat/pingouin | pingouin/nonparametric.py | wilcoxon | def wilcoxon(x, y, tail='two-sided'):
"""Wilcoxon signed-rank test. It is the non-parametric version of the
paired T-test.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be related (e.g
repeated measures).
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'W-val' : W-value
'p-val' : p-value
'RBC' : matched pairs rank-biserial correlation (effect size)
'CLES' : common language effect size
Notes
-----
The Wilcoxon signed-rank test tests the null hypothesis that two related
paired samples come from the same distribution.
A continuity correction is applied by default
(see :py:func:`scipy.stats.wilcoxon` for details).
The rank biserial correlation is the difference between the proportion of
favorable evidence minus the proportion of unfavorable evidence
(see Kerby 2014).
The common language effect size is the probability (from 0 to 1) that a
randomly selected observation from the first sample will be greater than a
randomly selected observation from the second sample.
References
----------
.. [1] Wilcoxon, F. (1945). Individual comparisons by ranking methods.
Biometrics bulletin, 1(6), 80-83.
.. [2] Kerby, D. S. (2014). The simple difference formula: An approach to
teaching nonparametric correlation. Comprehensive Psychology,
3, 11-IT.
.. [3] McGraw, K. O., & Wong, S. P. (1992). A common language effect size
statistic. Psychological bulletin, 111(2), 361.
Examples
--------
1. Wilcoxon test on two related samples.
>>> import numpy as np
>>> from pingouin import wilcoxon
>>> x = [20, 22, 19, 20, 22, 18, 24, 20, 19, 24, 26, 13]
>>> y = [38, 37, 33, 29, 14, 12, 20, 22, 17, 25, 26, 16]
>>> wilcoxon(x, y, tail='two-sided')
W-val p-val RBC CLES
Wilcoxon 20.5 0.070844 0.333 0.583
"""
from scipy.stats import wilcoxon
x = np.asarray(x)
y = np.asarray(y)
# Remove NA
x, y = remove_na(x, y, paired=True)
# Compute test
wval, pval = wilcoxon(x, y, zero_method='wilcox', correction=False)
pval *= .5 if tail == 'one-sided' else pval
# Effect size 1: common language effect size (McGraw and Wong 1992)
diff = x[:, None] - y
cles = max((diff < 0).sum(), (diff > 0).sum()) / diff.size
# Effect size 2: matched-pairs rank biserial correlation (Kerby 2014)
rank = np.arange(x.size, 0, -1)
rsum = rank.sum()
fav = rank[np.sign(y - x) > 0].sum()
unfav = rank[np.sign(y - x) < 0].sum()
rbc = fav / rsum - unfav / rsum
# Fill output DataFrame
stats = pd.DataFrame({}, index=['Wilcoxon'])
stats['W-val'] = round(wval, 3)
stats['p-val'] = pval
stats['RBC'] = round(rbc, 3)
stats['CLES'] = round(cles, 3)
col_order = ['W-val', 'p-val', 'RBC', 'CLES']
stats = stats.reindex(columns=col_order)
return stats | python | def wilcoxon(x, y, tail='two-sided'):
"""Wilcoxon signed-rank test. It is the non-parametric version of the
paired T-test.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be related (e.g
repeated measures).
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'W-val' : W-value
'p-val' : p-value
'RBC' : matched pairs rank-biserial correlation (effect size)
'CLES' : common language effect size
Notes
-----
The Wilcoxon signed-rank test tests the null hypothesis that two related
paired samples come from the same distribution.
A continuity correction is applied by default
(see :py:func:`scipy.stats.wilcoxon` for details).
The rank biserial correlation is the difference between the proportion of
favorable evidence minus the proportion of unfavorable evidence
(see Kerby 2014).
The common language effect size is the probability (from 0 to 1) that a
randomly selected observation from the first sample will be greater than a
randomly selected observation from the second sample.
References
----------
.. [1] Wilcoxon, F. (1945). Individual comparisons by ranking methods.
Biometrics bulletin, 1(6), 80-83.
.. [2] Kerby, D. S. (2014). The simple difference formula: An approach to
teaching nonparametric correlation. Comprehensive Psychology,
3, 11-IT.
.. [3] McGraw, K. O., & Wong, S. P. (1992). A common language effect size
statistic. Psychological bulletin, 111(2), 361.
Examples
--------
1. Wilcoxon test on two related samples.
>>> import numpy as np
>>> from pingouin import wilcoxon
>>> x = [20, 22, 19, 20, 22, 18, 24, 20, 19, 24, 26, 13]
>>> y = [38, 37, 33, 29, 14, 12, 20, 22, 17, 25, 26, 16]
>>> wilcoxon(x, y, tail='two-sided')
W-val p-val RBC CLES
Wilcoxon 20.5 0.070844 0.333 0.583
"""
from scipy.stats import wilcoxon
x = np.asarray(x)
y = np.asarray(y)
# Remove NA
x, y = remove_na(x, y, paired=True)
# Compute test
wval, pval = wilcoxon(x, y, zero_method='wilcox', correction=False)
pval *= .5 if tail == 'one-sided' else pval
# Effect size 1: common language effect size (McGraw and Wong 1992)
diff = x[:, None] - y
cles = max((diff < 0).sum(), (diff > 0).sum()) / diff.size
# Effect size 2: matched-pairs rank biserial correlation (Kerby 2014)
rank = np.arange(x.size, 0, -1)
rsum = rank.sum()
fav = rank[np.sign(y - x) > 0].sum()
unfav = rank[np.sign(y - x) < 0].sum()
rbc = fav / rsum - unfav / rsum
# Fill output DataFrame
stats = pd.DataFrame({}, index=['Wilcoxon'])
stats['W-val'] = round(wval, 3)
stats['p-val'] = pval
stats['RBC'] = round(rbc, 3)
stats['CLES'] = round(cles, 3)
col_order = ['W-val', 'p-val', 'RBC', 'CLES']
stats = stats.reindex(columns=col_order)
return stats | [
"def",
"wilcoxon",
"(",
"x",
",",
"y",
",",
"tail",
"=",
"'two-sided'",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"wilcoxon",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"y",
"=",
"np",
".",
"asarray",
"(",
"y",
")",
"# Remove NA",
"x... | Wilcoxon signed-rank test. It is the non-parametric version of the
paired T-test.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be related (e.g
repeated measures).
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'W-val' : W-value
'p-val' : p-value
'RBC' : matched pairs rank-biserial correlation (effect size)
'CLES' : common language effect size
Notes
-----
The Wilcoxon signed-rank test tests the null hypothesis that two related
paired samples come from the same distribution.
A continuity correction is applied by default
(see :py:func:`scipy.stats.wilcoxon` for details).
The rank biserial correlation is the difference between the proportion of
favorable evidence minus the proportion of unfavorable evidence
(see Kerby 2014).
The common language effect size is the probability (from 0 to 1) that a
randomly selected observation from the first sample will be greater than a
randomly selected observation from the second sample.
References
----------
.. [1] Wilcoxon, F. (1945). Individual comparisons by ranking methods.
Biometrics bulletin, 1(6), 80-83.
.. [2] Kerby, D. S. (2014). The simple difference formula: An approach to
teaching nonparametric correlation. Comprehensive Psychology,
3, 11-IT.
.. [3] McGraw, K. O., & Wong, S. P. (1992). A common language effect size
statistic. Psychological bulletin, 111(2), 361.
Examples
--------
1. Wilcoxon test on two related samples.
>>> import numpy as np
>>> from pingouin import wilcoxon
>>> x = [20, 22, 19, 20, 22, 18, 24, 20, 19, 24, 26, 13]
>>> y = [38, 37, 33, 29, 14, 12, 20, 22, 17, 25, 26, 16]
>>> wilcoxon(x, y, tail='two-sided')
W-val p-val RBC CLES
Wilcoxon 20.5 0.070844 0.333 0.583 | [
"Wilcoxon",
"signed",
"-",
"rank",
"test",
".",
"It",
"is",
"the",
"non",
"-",
"parametric",
"version",
"of",
"the",
"paired",
"T",
"-",
"test",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/nonparametric.py#L175-L267 | train | 206,644 |
raphaelvallat/pingouin | pingouin/nonparametric.py | kruskal | def kruskal(dv=None, between=None, data=None, detailed=False,
export_filename=None):
"""Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172
"""
from scipy.stats import chi2, rankdata, tiecorrect
# Check data
_check_dataframe(dv=dv, between=between, data=data,
effects='between')
# Remove NaN values
data = data.dropna()
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Extract number of groups and total sample size
groups = list(data[between].unique())
n_groups = len(groups)
n = data[dv].size
# Rank data, dealing with ties appropriately
data['rank'] = rankdata(data[dv])
# Find the total of rank per groups
grp = data.groupby(between)['rank']
sum_rk_grp = grp.sum().values
n_per_grp = grp.count().values
# Calculate chi-square statistic (H)
H = (12 / (n * (n + 1)) * np.sum(sum_rk_grp**2 / n_per_grp)) - 3 * (n + 1)
# Correct for ties
H /= tiecorrect(data['rank'].values)
# Calculate DOF and p-value
ddof1 = n_groups - 1
p_unc = chi2.sf(H, ddof1)
# Create output dataframe
stats = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'H': np.round(H, 3),
'p-unc': p_unc,
}, index=['Kruskal'])
col_order = ['Source', 'ddof1', 'H', 'p-unc']
stats = stats.reindex(columns=col_order)
stats.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename)
return stats | python | def kruskal(dv=None, between=None, data=None, detailed=False,
export_filename=None):
"""Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172
"""
from scipy.stats import chi2, rankdata, tiecorrect
# Check data
_check_dataframe(dv=dv, between=between, data=data,
effects='between')
# Remove NaN values
data = data.dropna()
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Extract number of groups and total sample size
groups = list(data[between].unique())
n_groups = len(groups)
n = data[dv].size
# Rank data, dealing with ties appropriately
data['rank'] = rankdata(data[dv])
# Find the total of rank per groups
grp = data.groupby(between)['rank']
sum_rk_grp = grp.sum().values
n_per_grp = grp.count().values
# Calculate chi-square statistic (H)
H = (12 / (n * (n + 1)) * np.sum(sum_rk_grp**2 / n_per_grp)) - 3 * (n + 1)
# Correct for ties
H /= tiecorrect(data['rank'].values)
# Calculate DOF and p-value
ddof1 = n_groups - 1
p_unc = chi2.sf(H, ddof1)
# Create output dataframe
stats = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'H': np.round(H, 3),
'p-unc': p_unc,
}, index=['Kruskal'])
col_order = ['Source', 'ddof1', 'H', 'p-unc']
stats = stats.reindex(columns=col_order)
stats.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename)
return stats | [
"def",
"kruskal",
"(",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"data",
"=",
"None",
",",
"detailed",
"=",
"False",
",",
"export_filename",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"chi2",
",",
"rankdata",
",",
"tie... | Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172 | [
"Kruskal",
"-",
"Wallis",
"H",
"-",
"test",
"for",
"independent",
"samples",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/nonparametric.py#L270-L370 | train | 206,645 |
raphaelvallat/pingouin | pingouin/nonparametric.py | friedman | def friedman(dv=None, within=None, subject=None, data=None,
export_filename=None):
"""Friedman test for repeated measurements.
Parameters
----------
dv : string
Name of column containing the dependant variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'Q' : The Friedman Q statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Friedman test is used for one-way repeated measures ANOVA by ranks.
Data are expected to be in long-format.
Note that if the dataset contains one or more other within subject
factors, an automatic collapsing to the mean is applied on the dependant
variable (same behavior as the ezANOVA R package). As such, results can
differ from those of JASP. If you can, always double-check the results.
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than 6
repeated measurements.
NaN values are automatically removed.
Examples
--------
Compute the Friedman test for repeated measurements.
>>> from pingouin import friedman, read_dataset
>>> df = read_dataset('rm_anova')
>>> friedman(dv='DesireToKill', within='Disgustingness',
... subject='Subject', data=df)
Source ddof1 Q p-unc
Friedman Disgustingness 1 9.228 0.002384
"""
from scipy.stats import rankdata, chi2, find_repeats
# Check data
_check_dataframe(dv=dv, within=within, data=data, subject=subject,
effects='within')
# Collapse to the mean
data = data.groupby([subject, within]).mean().reset_index()
# Remove NaN
if data[dv].isnull().any():
data = remove_rm_na(dv=dv, within=within, subject=subject,
data=data[[subject, within, dv]])
# Extract number of groups and total sample size
grp = data.groupby(within)[dv]
rm = list(data[within].unique())
k = len(rm)
X = np.array([grp.get_group(r).values for r in rm]).T
n = X.shape[0]
# Rank per subject
ranked = np.zeros(X.shape)
for i in range(n):
ranked[i] = rankdata(X[i, :])
ssbn = (ranked.sum(axis=0)**2).sum()
# Compute the test statistic
Q = (12 / (n * k * (k + 1))) * ssbn - 3 * n * (k + 1)
# Correct for ties
ties = 0
for i in range(n):
replist, repnum = find_repeats(X[i])
for t in repnum:
ties += t * (t * t - 1)
c = 1 - ties / float(k * (k * k - 1) * n)
Q /= c
# Approximate the p-value
ddof1 = k - 1
p_unc = chi2.sf(Q, ddof1)
# Create output dataframe
stats = pd.DataFrame({'Source': within,
'ddof1': ddof1,
'Q': np.round(Q, 3),
'p-unc': p_unc,
}, index=['Friedman'])
col_order = ['Source', 'ddof1', 'Q', 'p-unc']
stats = stats.reindex(columns=col_order)
stats.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename)
return stats | python | def friedman(dv=None, within=None, subject=None, data=None,
export_filename=None):
"""Friedman test for repeated measurements.
Parameters
----------
dv : string
Name of column containing the dependant variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'Q' : The Friedman Q statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Friedman test is used for one-way repeated measures ANOVA by ranks.
Data are expected to be in long-format.
Note that if the dataset contains one or more other within subject
factors, an automatic collapsing to the mean is applied on the dependant
variable (same behavior as the ezANOVA R package). As such, results can
differ from those of JASP. If you can, always double-check the results.
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than 6
repeated measurements.
NaN values are automatically removed.
Examples
--------
Compute the Friedman test for repeated measurements.
>>> from pingouin import friedman, read_dataset
>>> df = read_dataset('rm_anova')
>>> friedman(dv='DesireToKill', within='Disgustingness',
... subject='Subject', data=df)
Source ddof1 Q p-unc
Friedman Disgustingness 1 9.228 0.002384
"""
from scipy.stats import rankdata, chi2, find_repeats
# Check data
_check_dataframe(dv=dv, within=within, data=data, subject=subject,
effects='within')
# Collapse to the mean
data = data.groupby([subject, within]).mean().reset_index()
# Remove NaN
if data[dv].isnull().any():
data = remove_rm_na(dv=dv, within=within, subject=subject,
data=data[[subject, within, dv]])
# Extract number of groups and total sample size
grp = data.groupby(within)[dv]
rm = list(data[within].unique())
k = len(rm)
X = np.array([grp.get_group(r).values for r in rm]).T
n = X.shape[0]
# Rank per subject
ranked = np.zeros(X.shape)
for i in range(n):
ranked[i] = rankdata(X[i, :])
ssbn = (ranked.sum(axis=0)**2).sum()
# Compute the test statistic
Q = (12 / (n * k * (k + 1))) * ssbn - 3 * n * (k + 1)
# Correct for ties
ties = 0
for i in range(n):
replist, repnum = find_repeats(X[i])
for t in repnum:
ties += t * (t * t - 1)
c = 1 - ties / float(k * (k * k - 1) * n)
Q /= c
# Approximate the p-value
ddof1 = k - 1
p_unc = chi2.sf(Q, ddof1)
# Create output dataframe
stats = pd.DataFrame({'Source': within,
'ddof1': ddof1,
'Q': np.round(Q, 3),
'p-unc': p_unc,
}, index=['Friedman'])
col_order = ['Source', 'ddof1', 'Q', 'p-unc']
stats = stats.reindex(columns=col_order)
stats.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename)
return stats | [
"def",
"friedman",
"(",
"dv",
"=",
"None",
",",
"within",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"data",
"=",
"None",
",",
"export_filename",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"rankdata",
",",
"chi2",
",",
"find_... | Friedman test for repeated measurements.
Parameters
----------
dv : string
Name of column containing the dependant variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'Q' : The Friedman Q statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Friedman test is used for one-way repeated measures ANOVA by ranks.
Data are expected to be in long-format.
Note that if the dataset contains one or more other within subject
factors, an automatic collapsing to the mean is applied on the dependant
variable (same behavior as the ezANOVA R package). As such, results can
differ from those of JASP. If you can, always double-check the results.
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than 6
repeated measurements.
NaN values are automatically removed.
Examples
--------
Compute the Friedman test for repeated measurements.
>>> from pingouin import friedman, read_dataset
>>> df = read_dataset('rm_anova')
>>> friedman(dv='DesireToKill', within='Disgustingness',
... subject='Subject', data=df)
Source ddof1 Q p-unc
Friedman Disgustingness 1 9.228 0.002384 | [
"Friedman",
"test",
"for",
"repeated",
"measurements",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/nonparametric.py#L373-L490 | train | 206,646 |
raphaelvallat/pingouin | pingouin/nonparametric.py | cochran | def cochran(dv=None, within=None, subject=None, data=None,
export_filename=None):
"""Cochran Q test. Special case of the Friedman test when the dependant
variable is binary.
Parameters
----------
dv : string
Name of column containing the binary dependant variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'Q' : The Cochran Q statistic
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Cochran Q Test is a non-parametric test for ANOVA with repeated
measures where the dependent variable is binary.
Data are expected to be in long-format. NaN are automatically removed
from the data.
The Q statistics is defined as:
.. math:: Q = \\frac{(r-1)(r\\sum_j^rx_j^2-N^2)}{rN-\\sum_i^nx_i^2}
where :math:`N` is the total sum of all observations, :math:`j=1,...,r`
where :math:`r` is the number of repeated measures, :math:`i=1,...,n` where
:math:`n` is the number of observations per condition.
The p-value is then approximated using a chi-square distribution with
:math:`r-1` degrees of freedom:
.. math:: Q \\sim \\chi^2(r-1)
References
----------
.. [1] Cochran, W.G., 1950. The comparison of percentages in matched
samples. Biometrika 37, 256–266.
https://doi.org/10.1093/biomet/37.3-4.256
Examples
--------
Compute the Cochran Q test for repeated measurements.
>>> from pingouin import cochran, read_dataset
>>> df = read_dataset('cochran')
>>> cochran(dv='Energetic', within='Time', subject='Subject', data=df)
Source dof Q p-unc
cochran Time 2 6.706 0.034981
"""
from scipy.stats import chi2
# Check data
_check_dataframe(dv=dv, within=within, data=data, subject=subject,
effects='within')
# Remove NaN
if data[dv].isnull().any():
data = remove_rm_na(dv=dv, within=within, subject=subject,
data=data[[subject, within, dv]])
# Groupby and extract size
grp = data.groupby(within)[dv]
grp_s = data.groupby(subject)[dv]
k = data[within].nunique()
dof = k - 1
# n = grp.count().unique()[0]
# Q statistic and p-value
q = (dof * (k * np.sum(grp.sum()**2) - grp.sum().sum()**2)) / \
(k * grp.sum().sum() - np.sum(grp_s.sum()**2))
p_unc = chi2.sf(q, dof)
# Create output dataframe
stats = pd.DataFrame({'Source': within,
'dof': dof,
'Q': np.round(q, 3),
'p-unc': p_unc,
}, index=['cochran'])
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename)
return stats | python | def cochran(dv=None, within=None, subject=None, data=None,
export_filename=None):
"""Cochran Q test. Special case of the Friedman test when the dependant
variable is binary.
Parameters
----------
dv : string
Name of column containing the binary dependant variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'Q' : The Cochran Q statistic
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Cochran Q Test is a non-parametric test for ANOVA with repeated
measures where the dependent variable is binary.
Data are expected to be in long-format. NaN are automatically removed
from the data.
The Q statistics is defined as:
.. math:: Q = \\frac{(r-1)(r\\sum_j^rx_j^2-N^2)}{rN-\\sum_i^nx_i^2}
where :math:`N` is the total sum of all observations, :math:`j=1,...,r`
where :math:`r` is the number of repeated measures, :math:`i=1,...,n` where
:math:`n` is the number of observations per condition.
The p-value is then approximated using a chi-square distribution with
:math:`r-1` degrees of freedom:
.. math:: Q \\sim \\chi^2(r-1)
References
----------
.. [1] Cochran, W.G., 1950. The comparison of percentages in matched
samples. Biometrika 37, 256–266.
https://doi.org/10.1093/biomet/37.3-4.256
Examples
--------
Compute the Cochran Q test for repeated measurements.
>>> from pingouin import cochran, read_dataset
>>> df = read_dataset('cochran')
>>> cochran(dv='Energetic', within='Time', subject='Subject', data=df)
Source dof Q p-unc
cochran Time 2 6.706 0.034981
"""
from scipy.stats import chi2
# Check data
_check_dataframe(dv=dv, within=within, data=data, subject=subject,
effects='within')
# Remove NaN
if data[dv].isnull().any():
data = remove_rm_na(dv=dv, within=within, subject=subject,
data=data[[subject, within, dv]])
# Groupby and extract size
grp = data.groupby(within)[dv]
grp_s = data.groupby(subject)[dv]
k = data[within].nunique()
dof = k - 1
# n = grp.count().unique()[0]
# Q statistic and p-value
q = (dof * (k * np.sum(grp.sum()**2) - grp.sum().sum()**2)) / \
(k * grp.sum().sum() - np.sum(grp_s.sum()**2))
p_unc = chi2.sf(q, dof)
# Create output dataframe
stats = pd.DataFrame({'Source': within,
'dof': dof,
'Q': np.round(q, 3),
'p-unc': p_unc,
}, index=['cochran'])
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename)
return stats | [
"def",
"cochran",
"(",
"dv",
"=",
"None",
",",
"within",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"data",
"=",
"None",
",",
"export_filename",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"chi2",
"# Check data",
"_check_dataframe... | Cochran Q test. Special case of the Friedman test when the dependant
variable is binary.
Parameters
----------
dv : string
Name of column containing the binary dependant variable.
within : string
Name of column containing the within-subject factor.
subject : string
Name of column containing the subject identifier.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'Q' : The Cochran Q statistic
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Cochran Q Test is a non-parametric test for ANOVA with repeated
measures where the dependent variable is binary.
Data are expected to be in long-format. NaN are automatically removed
from the data.
The Q statistics is defined as:
.. math:: Q = \\frac{(r-1)(r\\sum_j^rx_j^2-N^2)}{rN-\\sum_i^nx_i^2}
where :math:`N` is the total sum of all observations, :math:`j=1,...,r`
where :math:`r` is the number of repeated measures, :math:`i=1,...,n` where
:math:`n` is the number of observations per condition.
The p-value is then approximated using a chi-square distribution with
:math:`r-1` degrees of freedom:
.. math:: Q \\sim \\chi^2(r-1)
References
----------
.. [1] Cochran, W.G., 1950. The comparison of percentages in matched
samples. Biometrika 37, 256–266.
https://doi.org/10.1093/biomet/37.3-4.256
Examples
--------
Compute the Cochran Q test for repeated measurements.
>>> from pingouin import cochran, read_dataset
>>> df = read_dataset('cochran')
>>> cochran(dv='Energetic', within='Time', subject='Subject', data=df)
Source dof Q p-unc
cochran Time 2 6.706 0.034981 | [
"Cochran",
"Q",
"test",
".",
"Special",
"case",
"of",
"the",
"Friedman",
"test",
"when",
"the",
"dependant",
"variable",
"is",
"binary",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/nonparametric.py#L493-L594 | train | 206,647 |
raphaelvallat/pingouin | pingouin/external/tabulate.py | _multiline_width | def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s))) | python | def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s))) | [
"def",
"_multiline_width",
"(",
"multiline_s",
",",
"line_width_fn",
"=",
"len",
")",
":",
"return",
"max",
"(",
"map",
"(",
"line_width_fn",
",",
"re",
".",
"split",
"(",
"\"[\\r\\n]\"",
",",
"multiline_s",
")",
")",
")"
] | Visible width of a potentially multiline content. | [
"Visible",
"width",
"of",
"a",
"potentially",
"multiline",
"content",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/tabulate.py#L601-L603 | train | 206,648 |
raphaelvallat/pingouin | pingouin/external/tabulate.py | _choose_width_fn | def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
def width_fn(s): return _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn | python | def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
def width_fn(s): return _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn | [
"def",
"_choose_width_fn",
"(",
"has_invisible",
",",
"enable_widechars",
",",
"is_multiline",
")",
":",
"if",
"has_invisible",
":",
"line_width_fn",
"=",
"_visible_width",
"elif",
"enable_widechars",
":",
"# optional wide-character support if available",
"line_width_fn",
"... | Return a function to calculate visible cell width. | [
"Return",
"a",
"function",
"to",
"calculate",
"visible",
"cell",
"width",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/tabulate.py#L606-L618 | train | 206,649 |
raphaelvallat/pingouin | pingouin/external/tabulate.py | _align_header | def _align_header(header, alignment, width, visible_width, is_multiline=False,
width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h))
for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header) | python | def _align_header(header, alignment, width, visible_width, is_multiline=False,
width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h))
for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header) | [
"def",
"_align_header",
"(",
"header",
",",
"alignment",
",",
"width",
",",
"visible_width",
",",
"is_multiline",
"=",
"False",
",",
"width_fn",
"=",
"None",
")",
":",
"if",
"is_multiline",
":",
"header_lines",
"=",
"re",
".",
"split",
"(",
"_multiline_codes... | Pad string header to width chars given known visible_width of the header. | [
"Pad",
"string",
"header",
"to",
"width",
"chars",
"given",
"known",
"visible_width",
"of",
"the",
"header",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/tabulate.py#L751-L769 | train | 206,650 |
raphaelvallat/pingouin | pingouin/external/tabulate.py | _prepend_row_index | def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v] + list(row) for v, row in zip(index, rows)]
return rows | python | def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v] + list(row) for v, row in zip(index, rows)]
return rows | [
"def",
"_prepend_row_index",
"(",
"rows",
",",
"index",
")",
":",
"if",
"index",
"is",
"None",
"or",
"index",
"is",
"False",
":",
"return",
"rows",
"if",
"len",
"(",
"index",
")",
"!=",
"len",
"(",
"rows",
")",
":",
"print",
"(",
"'index='",
",",
"... | Add a left-most index column. | [
"Add",
"a",
"left",
"-",
"most",
"index",
"column",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/tabulate.py#L772-L781 | train | 206,651 |
raphaelvallat/pingouin | pingouin/external/tabulate.py | _expand_numparse | def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are False,
and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count | python | def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are False,
and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count | [
"def",
"_expand_numparse",
"(",
"disable_numparse",
",",
"column_count",
")",
":",
"if",
"isinstance",
"(",
"disable_numparse",
",",
"Iterable",
")",
":",
"numparses",
"=",
"[",
"True",
"]",
"*",
"column_count",
"for",
"index",
"in",
"disable_numparse",
":",
"... | Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are False,
and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same. | [
"Return",
"a",
"list",
"of",
"bools",
"of",
"length",
"column_count",
"which",
"indicates",
"whether",
"number",
"parsing",
"should",
"be",
"used",
"on",
"each",
"column",
".",
"If",
"disable_numparse",
"is",
"a",
"list",
"of",
"indices",
"each",
"of",
"thos... | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/tabulate.py#L1038-L1052 | train | 206,652 |
raphaelvallat/pingouin | pingouin/pairwise.py | pairwise_tukey | def pairwise_tukey(dv=None, between=None, data=None, alpha=.05,
tail='two-sided', effsize='hedges'):
'''Pairwise Tukey-HSD post-hoc test.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between: string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
alpha : float
Significance level
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
effsize : string or None
Effect size type. Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
Returns
-------
stats : DataFrame
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'mean(A)' : Mean of first measurement
'mean(B)' : Mean of second measurement
'diff' : Mean difference
'SE' : Standard error
'tail' : indicate whether the p-values are one-sided or two-sided
'T' : T-values
'p-tukey' : Tukey-HSD corrected p-values
'efsize' : effect sizes
'eftype' : type of effect size
Notes
-----
Tukey HSD post-hoc is best for balanced one-way ANOVA.
It has been proven to be conservative for one-way ANOVA with unequal
sample sizes. However, it is not robust if the groups have unequal
variances, in which case the Games-Howell test is more adequate.
Tukey HSD is not valid for repeated measures ANOVA.
Note that when the sample sizes are unequal, this function actually
performs the Tukey-Kramer test (which allows for unequal sample sizes).
The T-values are defined as:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}
{\\sqrt{2 \\cdot MS_w / n}}
where :math:`\\overline{x}_i` and :math:`\\overline{x}_j` are the means of
the first and second group, respectively, :math:`MS_w` the mean squares of
the error (computed using ANOVA) and :math:`n` the sample size.
If the sample sizes are unequal, the Tukey-Kramer procedure is
automatically used:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}{\\sqrt{\\frac{MS_w}{n_i}
+ \\frac{MS_w}{n_j}}}
where :math:`n_i` and :math:`n_j` are the sample sizes of the first and
second group, respectively.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\\sqrt2*|t_i|, r, N - r)` where :math:`r` is the total number of
groups and :math:`N` is the total sample size.
Note that the p-values might be slightly different than those obtained
using R or Matlab since the studentized range approximation is done using
the Gleason (1999) algorithm, which is more efficient and accurate than
the algorithms used in Matlab or R.
References
----------
.. [1] Tukey, John W. "Comparing individual means in the analysis of
variance." Biometrics (1949): 99-114.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Tukey post-hocs on the pain threshold dataset.
>>> from pingouin import pairwise_tukey, read_dataset
>>> df = read_dataset('anova')
>>> pt = pairwise_tukey(dv='Pain threshold', between='Hair color', data=df)
'''
from pingouin.external.qsturng import psturng
# First compute the ANOVA
aov = anova(dv=dv, data=data, between=between, detailed=True)
df = aov.loc[1, 'DF']
ng = aov.loc[0, 'DF'] + 1
grp = data.groupby(between)[dv]
n = grp.count().values
gmeans = grp.mean().values
gvar = aov.loc[1, 'MS'] / n
# Pairwise combinations
g1, g2 = np.array(list(combinations(np.arange(ng), 2))).T
mn = gmeans[g1] - gmeans[g2]
se = np.sqrt(gvar[g1] + gvar[g2])
tval = mn / se
# Critical values and p-values
# from pingouin.external.qsturng import qsturng
# crit = qsturng(1 - alpha, ng, df) / np.sqrt(2)
pval = psturng(np.sqrt(2) * np.abs(tval), ng, df)
pval *= 0.5 if tail == 'one-sided' else 1
# Uncorrected p-values
# from scipy.stats import t
# punc = t.sf(np.abs(tval), n[g1].size + n[g2].size - 2) * 2
# Effect size
d = tval * np.sqrt(1 / n[g1] + 1 / n[g2])
ef = convert_effsize(d, 'cohen', effsize, n[g1], n[g2])
# Create dataframe
# Careful: pd.unique does NOT sort whereas numpy does
stats = pd.DataFrame({
'A': np.unique(data[between])[g1],
'B': np.unique(data[between])[g2],
'mean(A)': gmeans[g1],
'mean(B)': gmeans[g2],
'diff': mn,
'SE': np.round(se, 3),
'tail': tail,
'T': np.round(tval, 3),
# 'alpha': alpha,
# 'crit': np.round(crit, 3),
'p-tukey': pval,
'efsize': np.round(ef, 3),
'eftype': effsize,
})
return stats | python | def pairwise_tukey(dv=None, between=None, data=None, alpha=.05,
tail='two-sided', effsize='hedges'):
'''Pairwise Tukey-HSD post-hoc test.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between: string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
alpha : float
Significance level
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
effsize : string or None
Effect size type. Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
Returns
-------
stats : DataFrame
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'mean(A)' : Mean of first measurement
'mean(B)' : Mean of second measurement
'diff' : Mean difference
'SE' : Standard error
'tail' : indicate whether the p-values are one-sided or two-sided
'T' : T-values
'p-tukey' : Tukey-HSD corrected p-values
'efsize' : effect sizes
'eftype' : type of effect size
Notes
-----
Tukey HSD post-hoc is best for balanced one-way ANOVA.
It has been proven to be conservative for one-way ANOVA with unequal
sample sizes. However, it is not robust if the groups have unequal
variances, in which case the Games-Howell test is more adequate.
Tukey HSD is not valid for repeated measures ANOVA.
Note that when the sample sizes are unequal, this function actually
performs the Tukey-Kramer test (which allows for unequal sample sizes).
The T-values are defined as:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}
{\\sqrt{2 \\cdot MS_w / n}}
where :math:`\\overline{x}_i` and :math:`\\overline{x}_j` are the means of
the first and second group, respectively, :math:`MS_w` the mean squares of
the error (computed using ANOVA) and :math:`n` the sample size.
If the sample sizes are unequal, the Tukey-Kramer procedure is
automatically used:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}{\\sqrt{\\frac{MS_w}{n_i}
+ \\frac{MS_w}{n_j}}}
where :math:`n_i` and :math:`n_j` are the sample sizes of the first and
second group, respectively.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\\sqrt2*|t_i|, r, N - r)` where :math:`r` is the total number of
groups and :math:`N` is the total sample size.
Note that the p-values might be slightly different than those obtained
using R or Matlab since the studentized range approximation is done using
the Gleason (1999) algorithm, which is more efficient and accurate than
the algorithms used in Matlab or R.
References
----------
.. [1] Tukey, John W. "Comparing individual means in the analysis of
variance." Biometrics (1949): 99-114.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Tukey post-hocs on the pain threshold dataset.
>>> from pingouin import pairwise_tukey, read_dataset
>>> df = read_dataset('anova')
>>> pt = pairwise_tukey(dv='Pain threshold', between='Hair color', data=df)
'''
from pingouin.external.qsturng import psturng
# First compute the ANOVA
aov = anova(dv=dv, data=data, between=between, detailed=True)
df = aov.loc[1, 'DF']
ng = aov.loc[0, 'DF'] + 1
grp = data.groupby(between)[dv]
n = grp.count().values
gmeans = grp.mean().values
gvar = aov.loc[1, 'MS'] / n
# Pairwise combinations
g1, g2 = np.array(list(combinations(np.arange(ng), 2))).T
mn = gmeans[g1] - gmeans[g2]
se = np.sqrt(gvar[g1] + gvar[g2])
tval = mn / se
# Critical values and p-values
# from pingouin.external.qsturng import qsturng
# crit = qsturng(1 - alpha, ng, df) / np.sqrt(2)
pval = psturng(np.sqrt(2) * np.abs(tval), ng, df)
pval *= 0.5 if tail == 'one-sided' else 1
# Uncorrected p-values
# from scipy.stats import t
# punc = t.sf(np.abs(tval), n[g1].size + n[g2].size - 2) * 2
# Effect size
d = tval * np.sqrt(1 / n[g1] + 1 / n[g2])
ef = convert_effsize(d, 'cohen', effsize, n[g1], n[g2])
# Create dataframe
# Careful: pd.unique does NOT sort whereas numpy does
stats = pd.DataFrame({
'A': np.unique(data[between])[g1],
'B': np.unique(data[between])[g2],
'mean(A)': gmeans[g1],
'mean(B)': gmeans[g2],
'diff': mn,
'SE': np.round(se, 3),
'tail': tail,
'T': np.round(tval, 3),
# 'alpha': alpha,
# 'crit': np.round(crit, 3),
'p-tukey': pval,
'efsize': np.round(ef, 3),
'eftype': effsize,
})
return stats | [
"def",
"pairwise_tukey",
"(",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"data",
"=",
"None",
",",
"alpha",
"=",
".05",
",",
"tail",
"=",
"'two-sided'",
",",
"effsize",
"=",
"'hedges'",
")",
":",
"from",
"pingouin",
".",
"external",
".",
"q... | Pairwise Tukey-HSD post-hoc test.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between: string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
alpha : float
Significance level
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
effsize : string or None
Effect size type. Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
Returns
-------
stats : DataFrame
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'mean(A)' : Mean of first measurement
'mean(B)' : Mean of second measurement
'diff' : Mean difference
'SE' : Standard error
'tail' : indicate whether the p-values are one-sided or two-sided
'T' : T-values
'p-tukey' : Tukey-HSD corrected p-values
'efsize' : effect sizes
'eftype' : type of effect size
Notes
-----
Tukey HSD post-hoc is best for balanced one-way ANOVA.
It has been proven to be conservative for one-way ANOVA with unequal
sample sizes. However, it is not robust if the groups have unequal
variances, in which case the Games-Howell test is more adequate.
Tukey HSD is not valid for repeated measures ANOVA.
Note that when the sample sizes are unequal, this function actually
performs the Tukey-Kramer test (which allows for unequal sample sizes).
The T-values are defined as:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}
{\\sqrt{2 \\cdot MS_w / n}}
where :math:`\\overline{x}_i` and :math:`\\overline{x}_j` are the means of
the first and second group, respectively, :math:`MS_w` the mean squares of
the error (computed using ANOVA) and :math:`n` the sample size.
If the sample sizes are unequal, the Tukey-Kramer procedure is
automatically used:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}{\\sqrt{\\frac{MS_w}{n_i}
+ \\frac{MS_w}{n_j}}}
where :math:`n_i` and :math:`n_j` are the sample sizes of the first and
second group, respectively.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\\sqrt2*|t_i|, r, N - r)` where :math:`r` is the total number of
groups and :math:`N` is the total sample size.
Note that the p-values might be slightly different than those obtained
using R or Matlab since the studentized range approximation is done using
the Gleason (1999) algorithm, which is more efficient and accurate than
the algorithms used in Matlab or R.
References
----------
.. [1] Tukey, John W. "Comparing individual means in the analysis of
variance." Biometrics (1949): 99-114.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Tukey post-hocs on the pain threshold dataset.
>>> from pingouin import pairwise_tukey, read_dataset
>>> df = read_dataset('anova')
>>> pt = pairwise_tukey(dv='Pain threshold', between='Hair color', data=df) | [
"Pairwise",
"Tukey",
"-",
"HSD",
"post",
"-",
"hoc",
"test",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pairwise.py#L411-L562 | train | 206,653 |
raphaelvallat/pingouin | pingouin/pairwise.py | pairwise_gameshowell | def pairwise_gameshowell(dv=None, between=None, data=None, alpha=.05,
tail='two-sided', effsize='hedges'):
'''Pairwise Games-Howell post-hoc test.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between: string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
alpha : float
Significance level
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
effsize : string or None
Effect size type. Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
Returns
-------
stats : DataFrame
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'mean(A)' : Mean of first measurement
'mean(B)' : Mean of second measurement
'diff' : Mean difference
'SE' : Standard error
'tail' : indicate whether the p-values are one-sided or two-sided
'T' : T-values
'df' : adjusted degrees of freedom
'pval' : Games-Howell corrected p-values
'efsize' : effect sizes
'eftype' : type of effect size
Notes
-----
Games-Howell is very similar to the Tukey HSD post-hoc test but is much
more robust to heterogeneity of variances. While the
Tukey-HSD post-hoc is optimal after a classic one-way ANOVA, the
Games-Howell is optimal after a Welch ANOVA.
Games-Howell is not valid for repeated measures ANOVA.
Compared to the Tukey-HSD test, the Games-Howell test uses different pooled
variances for each pair of variables instead of the same pooled variance.
The T-values are defined as:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}
{\\sqrt{(\\frac{s_i^2}{n_i} + \\frac{s_j^2}{n_j})}}
and the corrected degrees of freedom are:
.. math::
v = \\frac{(\\frac{s_i^2}{n_i} + \\frac{s_j^2}{n_j})^2}
{\\frac{(\\frac{s_i^2}{n_i})^2}{n_i-1} +
\\frac{(\\frac{s_j^2}{n_j})^2}{n_j-1}}
where :math:`\\overline{x}_i`, :math:`s_i^2`, and :math:`n_i`
are the mean, variance and sample size of the first group and
:math:`\\overline{x}_j`, :math:`s_j^2`, and :math:`n_j` the mean, variance
and sample size of the second group.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\\sqrt2*|t_i|, r, v_i)`.
Note that the p-values might be slightly different than those obtained
using R or Matlab since the studentized range approximation is done using
the Gleason (1999) algorithm, which is more efficient and accurate than
the algorithms used in Matlab or R.
References
----------
.. [1] Games, Paul A., and John F. Howell. "Pairwise multiple comparison
procedures with unequal n’s and/or variances: a Monte Carlo study."
Journal of Educational Statistics 1.2 (1976): 113-125.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Games-Howell post-hocs on the pain threshold dataset.
>>> from pingouin import pairwise_gameshowell, read_dataset
>>> df = read_dataset('anova')
>>> pairwise_gameshowell(dv='Pain threshold', between='Hair color',
... data=df) # doctest: +SKIP
'''
from pingouin.external.qsturng import psturng
# Check the dataframe
_check_dataframe(dv=dv, between=between, effects='between', data=data)
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Extract infos
ng = data[between].nunique()
grp = data.groupby(between)[dv]
n = grp.count().values
gmeans = grp.mean().values
gvars = grp.var().values
# Pairwise combinations
g1, g2 = np.array(list(combinations(np.arange(ng), 2))).T
mn = gmeans[g1] - gmeans[g2]
se = np.sqrt(0.5 * (gvars[g1] / n[g1] + gvars[g2] / n[g2]))
tval = mn / np.sqrt(gvars[g1] / n[g1] + gvars[g2] / n[g2])
df = (gvars[g1] / n[g1] + gvars[g2] / n[g2])**2 / \
((((gvars[g1] / n[g1])**2) / (n[g1] - 1)) +
(((gvars[g2] / n[g2])**2) / (n[g2] - 1)))
# Compute corrected p-values
pval = psturng(np.sqrt(2) * np.abs(tval), ng, df)
pval *= 0.5 if tail == 'one-sided' else 1
# Uncorrected p-values
# from scipy.stats import t
# punc = t.sf(np.abs(tval), n[g1].size + n[g2].size - 2) * 2
# Effect size
d = tval * np.sqrt(1 / n[g1] + 1 / n[g2])
ef = convert_effsize(d, 'cohen', effsize, n[g1], n[g2])
# Create dataframe
# Careful: pd.unique does NOT sort whereas numpy does
stats = pd.DataFrame({
'A': np.unique(data[between])[g1],
'B': np.unique(data[between])[g2],
'mean(A)': gmeans[g1],
'mean(B)': gmeans[g2],
'diff': mn,
'SE': se,
'tail': tail,
'T': tval,
'df': df,
'pval': pval,
'efsize': ef,
'eftype': effsize,
})
col_round = ['mean(A)', 'mean(B)', 'diff', 'SE', 'T', 'df', 'efsize']
stats[col_round] = stats[col_round].round(3)
return stats | python | def pairwise_gameshowell(dv=None, between=None, data=None, alpha=.05,
tail='two-sided', effsize='hedges'):
'''Pairwise Games-Howell post-hoc test.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between: string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
alpha : float
Significance level
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
effsize : string or None
Effect size type. Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
Returns
-------
stats : DataFrame
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'mean(A)' : Mean of first measurement
'mean(B)' : Mean of second measurement
'diff' : Mean difference
'SE' : Standard error
'tail' : indicate whether the p-values are one-sided or two-sided
'T' : T-values
'df' : adjusted degrees of freedom
'pval' : Games-Howell corrected p-values
'efsize' : effect sizes
'eftype' : type of effect size
Notes
-----
Games-Howell is very similar to the Tukey HSD post-hoc test but is much
more robust to heterogeneity of variances. While the
Tukey-HSD post-hoc is optimal after a classic one-way ANOVA, the
Games-Howell is optimal after a Welch ANOVA.
Games-Howell is not valid for repeated measures ANOVA.
Compared to the Tukey-HSD test, the Games-Howell test uses different pooled
variances for each pair of variables instead of the same pooled variance.
The T-values are defined as:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}
{\\sqrt{(\\frac{s_i^2}{n_i} + \\frac{s_j^2}{n_j})}}
and the corrected degrees of freedom are:
.. math::
v = \\frac{(\\frac{s_i^2}{n_i} + \\frac{s_j^2}{n_j})^2}
{\\frac{(\\frac{s_i^2}{n_i})^2}{n_i-1} +
\\frac{(\\frac{s_j^2}{n_j})^2}{n_j-1}}
where :math:`\\overline{x}_i`, :math:`s_i^2`, and :math:`n_i`
are the mean, variance and sample size of the first group and
:math:`\\overline{x}_j`, :math:`s_j^2`, and :math:`n_j` the mean, variance
and sample size of the second group.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\\sqrt2*|t_i|, r, v_i)`.
Note that the p-values might be slightly different than those obtained
using R or Matlab since the studentized range approximation is done using
the Gleason (1999) algorithm, which is more efficient and accurate than
the algorithms used in Matlab or R.
References
----------
.. [1] Games, Paul A., and John F. Howell. "Pairwise multiple comparison
procedures with unequal n’s and/or variances: a Monte Carlo study."
Journal of Educational Statistics 1.2 (1976): 113-125.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Games-Howell post-hocs on the pain threshold dataset.
>>> from pingouin import pairwise_gameshowell, read_dataset
>>> df = read_dataset('anova')
>>> pairwise_gameshowell(dv='Pain threshold', between='Hair color',
... data=df) # doctest: +SKIP
'''
from pingouin.external.qsturng import psturng
# Check the dataframe
_check_dataframe(dv=dv, between=between, effects='between', data=data)
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Extract infos
ng = data[between].nunique()
grp = data.groupby(between)[dv]
n = grp.count().values
gmeans = grp.mean().values
gvars = grp.var().values
# Pairwise combinations
g1, g2 = np.array(list(combinations(np.arange(ng), 2))).T
mn = gmeans[g1] - gmeans[g2]
se = np.sqrt(0.5 * (gvars[g1] / n[g1] + gvars[g2] / n[g2]))
tval = mn / np.sqrt(gvars[g1] / n[g1] + gvars[g2] / n[g2])
df = (gvars[g1] / n[g1] + gvars[g2] / n[g2])**2 / \
((((gvars[g1] / n[g1])**2) / (n[g1] - 1)) +
(((gvars[g2] / n[g2])**2) / (n[g2] - 1)))
# Compute corrected p-values
pval = psturng(np.sqrt(2) * np.abs(tval), ng, df)
pval *= 0.5 if tail == 'one-sided' else 1
# Uncorrected p-values
# from scipy.stats import t
# punc = t.sf(np.abs(tval), n[g1].size + n[g2].size - 2) * 2
# Effect size
d = tval * np.sqrt(1 / n[g1] + 1 / n[g2])
ef = convert_effsize(d, 'cohen', effsize, n[g1], n[g2])
# Create dataframe
# Careful: pd.unique does NOT sort whereas numpy does
stats = pd.DataFrame({
'A': np.unique(data[between])[g1],
'B': np.unique(data[between])[g2],
'mean(A)': gmeans[g1],
'mean(B)': gmeans[g2],
'diff': mn,
'SE': se,
'tail': tail,
'T': tval,
'df': df,
'pval': pval,
'efsize': ef,
'eftype': effsize,
})
col_round = ['mean(A)', 'mean(B)', 'diff', 'SE', 'T', 'df', 'efsize']
stats[col_round] = stats[col_round].round(3)
return stats | [
"def",
"pairwise_gameshowell",
"(",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"data",
"=",
"None",
",",
"alpha",
"=",
".05",
",",
"tail",
"=",
"'two-sided'",
",",
"effsize",
"=",
"'hedges'",
")",
":",
"from",
"pingouin",
".",
"external",
"."... | Pairwise Games-Howell post-hoc test.
Parameters
----------
dv : string
Name of column containing the dependant variable.
between: string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
alpha : float
Significance level
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
effsize : string or None
Effect size type. Available methods are ::
'none' : no effect size
'cohen' : Unbiased Cohen d
'hedges' : Hedges g
'glass': Glass delta
'eta-square' : Eta-square
'odds-ratio' : Odds ratio
'AUC' : Area Under the Curve
Returns
-------
stats : DataFrame
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'mean(A)' : Mean of first measurement
'mean(B)' : Mean of second measurement
'diff' : Mean difference
'SE' : Standard error
'tail' : indicate whether the p-values are one-sided or two-sided
'T' : T-values
'df' : adjusted degrees of freedom
'pval' : Games-Howell corrected p-values
'efsize' : effect sizes
'eftype' : type of effect size
Notes
-----
Games-Howell is very similar to the Tukey HSD post-hoc test but is much
more robust to heterogeneity of variances. While the
Tukey-HSD post-hoc is optimal after a classic one-way ANOVA, the
Games-Howell is optimal after a Welch ANOVA.
Games-Howell is not valid for repeated measures ANOVA.
Compared to the Tukey-HSD test, the Games-Howell test uses different pooled
variances for each pair of variables instead of the same pooled variance.
The T-values are defined as:
.. math::
t = \\frac{\\overline{x}_i - \\overline{x}_j}
{\\sqrt{(\\frac{s_i^2}{n_i} + \\frac{s_j^2}{n_j})}}
and the corrected degrees of freedom are:
.. math::
v = \\frac{(\\frac{s_i^2}{n_i} + \\frac{s_j^2}{n_j})^2}
{\\frac{(\\frac{s_i^2}{n_i})^2}{n_i-1} +
\\frac{(\\frac{s_j^2}{n_j})^2}{n_j-1}}
where :math:`\\overline{x}_i`, :math:`s_i^2`, and :math:`n_i`
are the mean, variance and sample size of the first group and
:math:`\\overline{x}_j`, :math:`s_j^2`, and :math:`n_j` the mean, variance
and sample size of the second group.
The p-values are then approximated using the Studentized range distribution
:math:`Q(\\sqrt2*|t_i|, r, v_i)`.
Note that the p-values might be slightly different than those obtained
using R or Matlab since the studentized range approximation is done using
the Gleason (1999) algorithm, which is more efficient and accurate than
the algorithms used in Matlab or R.
References
----------
.. [1] Games, Paul A., and John F. Howell. "Pairwise multiple comparison
procedures with unequal n’s and/or variances: a Monte Carlo study."
Journal of Educational Statistics 1.2 (1976): 113-125.
.. [2] Gleason, John R. "An accurate, non-iterative approximation for
studentized range quantiles." Computational statistics & data
analysis 31.2 (1999): 147-158.
Examples
--------
Pairwise Games-Howell post-hocs on the pain threshold dataset.
>>> from pingouin import pairwise_gameshowell, read_dataset
>>> df = read_dataset('anova')
>>> pairwise_gameshowell(dv='Pain threshold', between='Hair color',
... data=df) # doctest: +SKIP | [
"Pairwise",
"Games",
"-",
"Howell",
"post",
"-",
"hoc",
"test",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pairwise.py#L565-L722 | train | 206,654 |
raphaelvallat/pingouin | pingouin/circular.py | circ_axial | def circ_axial(alpha, n):
"""Transforms n-axial data to a common scale.
Parameters
----------
alpha : array
Sample of angles in radians
n : int
Number of modes
Returns
-------
alpha : float
Transformed angles
Notes
-----
Tranform data with multiple modes (known as axial data) to a unimodal
sample, for the purpose of certain analysis such as computation of a
mean resultant vector (see Berens 2009).
Examples
--------
Transform degrees to unimodal radians in the Berens 2009 neuro dataset.
>>> import numpy as np
>>> from pingouin import read_dataset
>>> from pingouin.circular import circ_axial
>>> df = read_dataset('circular')
>>> alpha = df['Orientation'].values
>>> alpha = circ_axial(np.deg2rad(alpha), 2)
"""
alpha = np.array(alpha)
return np.remainder(alpha * n, 2 * np.pi) | python | def circ_axial(alpha, n):
"""Transforms n-axial data to a common scale.
Parameters
----------
alpha : array
Sample of angles in radians
n : int
Number of modes
Returns
-------
alpha : float
Transformed angles
Notes
-----
Tranform data with multiple modes (known as axial data) to a unimodal
sample, for the purpose of certain analysis such as computation of a
mean resultant vector (see Berens 2009).
Examples
--------
Transform degrees to unimodal radians in the Berens 2009 neuro dataset.
>>> import numpy as np
>>> from pingouin import read_dataset
>>> from pingouin.circular import circ_axial
>>> df = read_dataset('circular')
>>> alpha = df['Orientation'].values
>>> alpha = circ_axial(np.deg2rad(alpha), 2)
"""
alpha = np.array(alpha)
return np.remainder(alpha * n, 2 * np.pi) | [
"def",
"circ_axial",
"(",
"alpha",
",",
"n",
")",
":",
"alpha",
"=",
"np",
".",
"array",
"(",
"alpha",
")",
"return",
"np",
".",
"remainder",
"(",
"alpha",
"*",
"n",
",",
"2",
"*",
"np",
".",
"pi",
")"
] | Transforms n-axial data to a common scale.
Parameters
----------
alpha : array
Sample of angles in radians
n : int
Number of modes
Returns
-------
alpha : float
Transformed angles
Notes
-----
Tranform data with multiple modes (known as axial data) to a unimodal
sample, for the purpose of certain analysis such as computation of a
mean resultant vector (see Berens 2009).
Examples
--------
Transform degrees to unimodal radians in the Berens 2009 neuro dataset.
>>> import numpy as np
>>> from pingouin import read_dataset
>>> from pingouin.circular import circ_axial
>>> df = read_dataset('circular')
>>> alpha = df['Orientation'].values
>>> alpha = circ_axial(np.deg2rad(alpha), 2) | [
"Transforms",
"n",
"-",
"axial",
"data",
"to",
"a",
"common",
"scale",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/circular.py#L16-L49 | train | 206,655 |
raphaelvallat/pingouin | pingouin/circular.py | circ_corrcc | def circ_corrcc(x, y, tail='two-sided'):
"""Correlation coefficient between two circular variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second circular variable (expressed in radians)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Adapted from the CircStats MATLAB toolbox (Berens 2009).
Use the np.deg2rad function to convert angles from degrees to radians.
Please note that NaN are automatically removed.
Examples
--------
Compute the r and p-value of two circular variables
>>> from pingouin import circ_corrcc
>>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
>>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
>>> r, pval = circ_corrcc(x, y)
>>> print(r, pval)
0.942 0.06579836070349088
"""
from scipy.stats import norm
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
n = x.size
# Compute correlation coefficient
x_sin = np.sin(x - circmean(x))
y_sin = np.sin(y - circmean(y))
# Similar to np.corrcoef(x_sin, y_sin)[0][1]
r = np.sum(x_sin * y_sin) / np.sqrt(np.sum(x_sin**2) * np.sum(y_sin**2))
# Compute T- and p-values
tval = np.sqrt((n * (x_sin**2).mean() * (y_sin**2).mean())
/ np.mean(x_sin**2 * y_sin**2)) * r
# Approximately distributed as a standard normal
pval = 2 * norm.sf(abs(tval))
pval = pval / 2 if tail == 'one-sided' else pval
return np.round(r, 3), pval | python | def circ_corrcc(x, y, tail='two-sided'):
"""Correlation coefficient between two circular variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second circular variable (expressed in radians)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Adapted from the CircStats MATLAB toolbox (Berens 2009).
Use the np.deg2rad function to convert angles from degrees to radians.
Please note that NaN are automatically removed.
Examples
--------
Compute the r and p-value of two circular variables
>>> from pingouin import circ_corrcc
>>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
>>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
>>> r, pval = circ_corrcc(x, y)
>>> print(r, pval)
0.942 0.06579836070349088
"""
from scipy.stats import norm
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
n = x.size
# Compute correlation coefficient
x_sin = np.sin(x - circmean(x))
y_sin = np.sin(y - circmean(y))
# Similar to np.corrcoef(x_sin, y_sin)[0][1]
r = np.sum(x_sin * y_sin) / np.sqrt(np.sum(x_sin**2) * np.sum(y_sin**2))
# Compute T- and p-values
tval = np.sqrt((n * (x_sin**2).mean() * (y_sin**2).mean())
/ np.mean(x_sin**2 * y_sin**2)) * r
# Approximately distributed as a standard normal
pval = 2 * norm.sf(abs(tval))
pval = pval / 2 if tail == 'one-sided' else pval
return np.round(r, 3), pval | [
"def",
"circ_corrcc",
"(",
"x",
",",
"y",
",",
"tail",
"=",
"'two-sided'",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"norm",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"y",
"=",
"np",
".",
"asarray",
"(",
"y",
")",
"# Check size",
"i... | Correlation coefficient between two circular variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second circular variable (expressed in radians)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Adapted from the CircStats MATLAB toolbox (Berens 2009).
Use the np.deg2rad function to convert angles from degrees to radians.
Please note that NaN are automatically removed.
Examples
--------
Compute the r and p-value of two circular variables
>>> from pingouin import circ_corrcc
>>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
>>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
>>> r, pval = circ_corrcc(x, y)
>>> print(r, pval)
0.942 0.06579836070349088 | [
"Correlation",
"coefficient",
"between",
"two",
"circular",
"variables",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/circular.py#L52-L115 | train | 206,656 |
raphaelvallat/pingouin | pingouin/circular.py | circ_corrcl | def circ_corrcl(x, y, tail='two-sided'):
"""Correlation coefficient between one circular and one linear variable
random variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second circular variable (linear)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Python code borrowed from brainpipe (based on the MATLAB toolbox CircStats)
Please note that NaN are automatically removed from datasets.
Examples
--------
Compute the r and p-value between one circular and one linear variables.
>>> from pingouin import circ_corrcl
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> y = [1.593, 1.291, -0.248, -2.892, 0.102]
>>> r, pval = circ_corrcl(x, y)
>>> print(r, pval)
0.109 0.9708899750629236
"""
from scipy.stats import pearsonr, chi2
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
n = x.size
# Compute correlation coefficent for sin and cos independently
rxs = pearsonr(y, np.sin(x))[0]
rxc = pearsonr(y, np.cos(x))[0]
rcs = pearsonr(np.sin(x), np.cos(x))[0]
# Compute angular-linear correlation (equ. 27.47)
r = np.sqrt((rxc**2 + rxs**2 - 2 * rxc * rxs * rcs) / (1 - rcs**2))
# Compute p-value
pval = chi2.sf(n * r**2, 2)
pval = pval / 2 if tail == 'one-sided' else pval
return np.round(r, 3), pval | python | def circ_corrcl(x, y, tail='two-sided'):
"""Correlation coefficient between one circular and one linear variable
random variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second circular variable (linear)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Python code borrowed from brainpipe (based on the MATLAB toolbox CircStats)
Please note that NaN are automatically removed from datasets.
Examples
--------
Compute the r and p-value between one circular and one linear variables.
>>> from pingouin import circ_corrcl
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> y = [1.593, 1.291, -0.248, -2.892, 0.102]
>>> r, pval = circ_corrcl(x, y)
>>> print(r, pval)
0.109 0.9708899750629236
"""
from scipy.stats import pearsonr, chi2
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
n = x.size
# Compute correlation coefficent for sin and cos independently
rxs = pearsonr(y, np.sin(x))[0]
rxc = pearsonr(y, np.cos(x))[0]
rcs = pearsonr(np.sin(x), np.cos(x))[0]
# Compute angular-linear correlation (equ. 27.47)
r = np.sqrt((rxc**2 + rxs**2 - 2 * rxc * rxs * rcs) / (1 - rcs**2))
# Compute p-value
pval = chi2.sf(n * r**2, 2)
pval = pval / 2 if tail == 'one-sided' else pval
return np.round(r, 3), pval | [
"def",
"circ_corrcl",
"(",
"x",
",",
"y",
",",
"tail",
"=",
"'two-sided'",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"pearsonr",
",",
"chi2",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"y",
"=",
"np",
".",
"asarray",
"(",
"y",
")",
... | Correlation coefficient between one circular and one linear variable
random variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second circular variable (linear)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Python code borrowed from brainpipe (based on the MATLAB toolbox CircStats)
Please note that NaN are automatically removed from datasets.
Examples
--------
Compute the r and p-value between one circular and one linear variables.
>>> from pingouin import circ_corrcl
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> y = [1.593, 1.291, -0.248, -2.892, 0.102]
>>> r, pval = circ_corrcl(x, y)
>>> print(r, pval)
0.109 0.9708899750629236 | [
"Correlation",
"coefficient",
"between",
"one",
"circular",
"and",
"one",
"linear",
"variable",
"random",
"variables",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/circular.py#L118-L178 | train | 206,657 |
raphaelvallat/pingouin | pingouin/circular.py | circ_mean | def circ_mean(alpha, w=None, axis=0):
"""Mean direction for circular data.
Parameters
----------
alpha : array
Sample of angles in radians
w : array
Number of incidences in case of binned angle data
axis : int
Compute along this dimension
Returns
-------
mu : float
Mean direction
Examples
--------
Mean resultant vector of circular data
>>> from pingouin import circ_mean
>>> alpha = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> circ_mean(alpha)
1.012962445838065
"""
alpha = np.array(alpha)
if isinstance(w, (list, np.ndarray)):
w = np.array(w)
if alpha.shape != w.shape:
raise ValueError("w must have the same shape as alpha.")
else:
w = np.ones_like(alpha)
return np.angle(np.multiply(w, np.exp(1j * alpha)).sum(axis=axis)) | python | def circ_mean(alpha, w=None, axis=0):
"""Mean direction for circular data.
Parameters
----------
alpha : array
Sample of angles in radians
w : array
Number of incidences in case of binned angle data
axis : int
Compute along this dimension
Returns
-------
mu : float
Mean direction
Examples
--------
Mean resultant vector of circular data
>>> from pingouin import circ_mean
>>> alpha = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> circ_mean(alpha)
1.012962445838065
"""
alpha = np.array(alpha)
if isinstance(w, (list, np.ndarray)):
w = np.array(w)
if alpha.shape != w.shape:
raise ValueError("w must have the same shape as alpha.")
else:
w = np.ones_like(alpha)
return np.angle(np.multiply(w, np.exp(1j * alpha)).sum(axis=axis)) | [
"def",
"circ_mean",
"(",
"alpha",
",",
"w",
"=",
"None",
",",
"axis",
"=",
"0",
")",
":",
"alpha",
"=",
"np",
".",
"array",
"(",
"alpha",
")",
"if",
"isinstance",
"(",
"w",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"w",
"=",... | Mean direction for circular data.
Parameters
----------
alpha : array
Sample of angles in radians
w : array
Number of incidences in case of binned angle data
axis : int
Compute along this dimension
Returns
-------
mu : float
Mean direction
Examples
--------
Mean resultant vector of circular data
>>> from pingouin import circ_mean
>>> alpha = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> circ_mean(alpha)
1.012962445838065 | [
"Mean",
"direction",
"for",
"circular",
"data",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/circular.py#L181-L214 | train | 206,658 |
raphaelvallat/pingouin | pingouin/circular.py | circ_r | def circ_r(alpha, w=None, d=None, axis=0):
"""Mean resultant vector length for circular data.
Parameters
----------
alpha : array
Sample of angles in radians
w : array
Number of incidences in case of binned angle data
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
axis : int
Compute along this dimension
Returns
-------
r : float
Mean resultant length
Notes
-----
The length of the mean resultant vector is a crucial quantity for the
measurement of circular spread or hypothesis testing in directional
statistics. The closer it is to one, the more concentrated the data
sample is around the mean direction (Berens 2009).
Examples
--------
Mean resultant vector length of circular data
>>> from pingouin import circ_r
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> circ_r(x)
0.49723034495605356
"""
alpha = np.array(alpha)
w = np.array(w) if w is not None else np.ones(alpha.shape)
if alpha.size is not w.size:
raise ValueError("Input dimensions do not match")
# Compute weighted sum of cos and sin of angles:
r = np.multiply(w, np.exp(1j * alpha)).sum(axis=axis)
# Obtain length:
r = np.abs(r) / w.sum(axis=axis)
# For data with known spacing, apply correction factor
if d is not None:
c = d / 2 / np.sin(d / 2)
r = c * r
return r | python | def circ_r(alpha, w=None, d=None, axis=0):
"""Mean resultant vector length for circular data.
Parameters
----------
alpha : array
Sample of angles in radians
w : array
Number of incidences in case of binned angle data
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
axis : int
Compute along this dimension
Returns
-------
r : float
Mean resultant length
Notes
-----
The length of the mean resultant vector is a crucial quantity for the
measurement of circular spread or hypothesis testing in directional
statistics. The closer it is to one, the more concentrated the data
sample is around the mean direction (Berens 2009).
Examples
--------
Mean resultant vector length of circular data
>>> from pingouin import circ_r
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> circ_r(x)
0.49723034495605356
"""
alpha = np.array(alpha)
w = np.array(w) if w is not None else np.ones(alpha.shape)
if alpha.size is not w.size:
raise ValueError("Input dimensions do not match")
# Compute weighted sum of cos and sin of angles:
r = np.multiply(w, np.exp(1j * alpha)).sum(axis=axis)
# Obtain length:
r = np.abs(r) / w.sum(axis=axis)
# For data with known spacing, apply correction factor
if d is not None:
c = d / 2 / np.sin(d / 2)
r = c * r
return r | [
"def",
"circ_r",
"(",
"alpha",
",",
"w",
"=",
"None",
",",
"d",
"=",
"None",
",",
"axis",
"=",
"0",
")",
":",
"alpha",
"=",
"np",
".",
"array",
"(",
"alpha",
")",
"w",
"=",
"np",
".",
"array",
"(",
"w",
")",
"if",
"w",
"is",
"not",
"None",
... | Mean resultant vector length for circular data.
Parameters
----------
alpha : array
Sample of angles in radians
w : array
Number of incidences in case of binned angle data
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
axis : int
Compute along this dimension
Returns
-------
r : float
Mean resultant length
Notes
-----
The length of the mean resultant vector is a crucial quantity for the
measurement of circular spread or hypothesis testing in directional
statistics. The closer it is to one, the more concentrated the data
sample is around the mean direction (Berens 2009).
Examples
--------
Mean resultant vector length of circular data
>>> from pingouin import circ_r
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> circ_r(x)
0.49723034495605356 | [
"Mean",
"resultant",
"vector",
"length",
"for",
"circular",
"data",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/circular.py#L217-L270 | train | 206,659 |
raphaelvallat/pingouin | pingouin/circular.py | circ_rayleigh | def circ_rayleigh(alpha, w=None, d=None):
"""Rayleigh test for non-uniformity of circular data.
Parameters
----------
alpha : np.array
Sample of angles in radians.
w : np.array
Number of incidences in case of binned angle data.
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
Returns
-------
z : float
Z-statistic
pval : float
P-value
Notes
-----
The Rayleigh test asks how large the resultant vector length R must be
to indicate a non-uniform distribution (Fisher 1995).
H0: the population is uniformly distributed around the circle
HA: the populatoin is not distributed uniformly around the circle
The assumptions for the Rayleigh test are that (1) the distribution has
only one mode and (2) the data is sampled from a von Mises distribution.
Examples
--------
1. Simple Rayleigh test for non-uniformity of circular data.
>>> from pingouin import circ_rayleigh
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> z, pval = circ_rayleigh(x)
>>> print(z, pval)
1.236 0.3048435876500138
2. Specifying w and d
>>> circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2)
(0.278, 0.8069972000769801)
"""
alpha = np.array(alpha)
if w is None:
r = circ_r(alpha)
n = len(alpha)
else:
if len(alpha) is not len(w):
raise ValueError("Input dimensions do not match")
r = circ_r(alpha, w, d)
n = np.sum(w)
# Compute Rayleigh's statistic
R = n * r
z = (R**2) / n
# Compute p value using approxation in Zar (1999), p. 617
pval = np.exp(np.sqrt(1 + 4 * n + 4 * (n**2 - R**2)) - (1 + 2 * n))
return np.round(z, 3), pval | python | def circ_rayleigh(alpha, w=None, d=None):
"""Rayleigh test for non-uniformity of circular data.
Parameters
----------
alpha : np.array
Sample of angles in radians.
w : np.array
Number of incidences in case of binned angle data.
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
Returns
-------
z : float
Z-statistic
pval : float
P-value
Notes
-----
The Rayleigh test asks how large the resultant vector length R must be
to indicate a non-uniform distribution (Fisher 1995).
H0: the population is uniformly distributed around the circle
HA: the populatoin is not distributed uniformly around the circle
The assumptions for the Rayleigh test are that (1) the distribution has
only one mode and (2) the data is sampled from a von Mises distribution.
Examples
--------
1. Simple Rayleigh test for non-uniformity of circular data.
>>> from pingouin import circ_rayleigh
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> z, pval = circ_rayleigh(x)
>>> print(z, pval)
1.236 0.3048435876500138
2. Specifying w and d
>>> circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2)
(0.278, 0.8069972000769801)
"""
alpha = np.array(alpha)
if w is None:
r = circ_r(alpha)
n = len(alpha)
else:
if len(alpha) is not len(w):
raise ValueError("Input dimensions do not match")
r = circ_r(alpha, w, d)
n = np.sum(w)
# Compute Rayleigh's statistic
R = n * r
z = (R**2) / n
# Compute p value using approxation in Zar (1999), p. 617
pval = np.exp(np.sqrt(1 + 4 * n + 4 * (n**2 - R**2)) - (1 + 2 * n))
return np.round(z, 3), pval | [
"def",
"circ_rayleigh",
"(",
"alpha",
",",
"w",
"=",
"None",
",",
"d",
"=",
"None",
")",
":",
"alpha",
"=",
"np",
".",
"array",
"(",
"alpha",
")",
"if",
"w",
"is",
"None",
":",
"r",
"=",
"circ_r",
"(",
"alpha",
")",
"n",
"=",
"len",
"(",
"alp... | Rayleigh test for non-uniformity of circular data.
Parameters
----------
alpha : np.array
Sample of angles in radians.
w : np.array
Number of incidences in case of binned angle data.
d : float
Spacing (in radians) of bin centers for binned data. If supplied,
a correction factor is used to correct for bias in the estimation
of r.
Returns
-------
z : float
Z-statistic
pval : float
P-value
Notes
-----
The Rayleigh test asks how large the resultant vector length R must be
to indicate a non-uniform distribution (Fisher 1995).
H0: the population is uniformly distributed around the circle
HA: the populatoin is not distributed uniformly around the circle
The assumptions for the Rayleigh test are that (1) the distribution has
only one mode and (2) the data is sampled from a von Mises distribution.
Examples
--------
1. Simple Rayleigh test for non-uniformity of circular data.
>>> from pingouin import circ_rayleigh
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> z, pval = circ_rayleigh(x)
>>> print(z, pval)
1.236 0.3048435876500138
2. Specifying w and d
>>> circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2)
(0.278, 0.8069972000769801) | [
"Rayleigh",
"test",
"for",
"non",
"-",
"uniformity",
"of",
"circular",
"data",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/circular.py#L273-L337 | train | 206,660 |
raphaelvallat/pingouin | pingouin/multicomp.py | bonf | def bonf(pvals, alpha=0.05):
"""P-values correction with Bonferroni method.
Parameters
----------
pvals : array_like
Array of p-values of the individual tests.
alpha : float
Error rate (= alpha level).
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pval_corrected : array
P-values adjusted for multiple hypothesis testing using the Bonferroni
procedure (= multiplied by the number of tests).
See also
--------
holm : Holm-Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
Notes
-----
From Wikipedia:
Statistical hypothesis testing is based on rejecting the null hypothesis
if the likelihood of the observed data under the null hypotheses is low.
If multiple hypotheses are tested, the chance of a rare event increases,
and therefore, the likelihood of incorrectly rejecting a null hypothesis
(i.e., making a Type I error) increases.
The Bonferroni correction compensates for that increase by testing each
individual hypothesis :math:`p_i` at a significance level of
:math:`p_i = \\alpha / n` where :math:`\\alpha` is the desired overall
alpha level and :math:`n` is the number of hypotheses. For example, if a
trial is testing :math:`n=20` hypotheses with a desired
:math:`\\alpha=0.05`, then the Bonferroni correction would test each
individual hypothesis at :math:`\\alpha=0.05/20=0.0025``.
The Bonferroni adjusted p-values are defined as:
.. math::
\\widetilde {p}_{{(i)}}= n \\cdot p_{{(i)}}
The Bonferroni correction tends to be a bit too conservative.
Note that NaN values are not taken into account in the p-values correction.
References
----------
- Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
- https://en.wikipedia.org/wiki/Bonferroni_correction
Examples
--------
>>> from pingouin import bonf
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = bonf(pvals, alpha=.05)
>>> print(reject, pvals_corr)
[False True False False True] [1. 0.015 1. 0.27 0.0015]
"""
pvals = np.asarray(pvals)
num_nan = np.isnan(pvals).sum()
pvals_corrected = pvals * (float(pvals.size) - num_nan)
pvals_corrected = np.clip(pvals_corrected, None, 1)
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
return reject, pvals_corrected | python | def bonf(pvals, alpha=0.05):
"""P-values correction with Bonferroni method.
Parameters
----------
pvals : array_like
Array of p-values of the individual tests.
alpha : float
Error rate (= alpha level).
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pval_corrected : array
P-values adjusted for multiple hypothesis testing using the Bonferroni
procedure (= multiplied by the number of tests).
See also
--------
holm : Holm-Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
Notes
-----
From Wikipedia:
Statistical hypothesis testing is based on rejecting the null hypothesis
if the likelihood of the observed data under the null hypotheses is low.
If multiple hypotheses are tested, the chance of a rare event increases,
and therefore, the likelihood of incorrectly rejecting a null hypothesis
(i.e., making a Type I error) increases.
The Bonferroni correction compensates for that increase by testing each
individual hypothesis :math:`p_i` at a significance level of
:math:`p_i = \\alpha / n` where :math:`\\alpha` is the desired overall
alpha level and :math:`n` is the number of hypotheses. For example, if a
trial is testing :math:`n=20` hypotheses with a desired
:math:`\\alpha=0.05`, then the Bonferroni correction would test each
individual hypothesis at :math:`\\alpha=0.05/20=0.0025``.
The Bonferroni adjusted p-values are defined as:
.. math::
\\widetilde {p}_{{(i)}}= n \\cdot p_{{(i)}}
The Bonferroni correction tends to be a bit too conservative.
Note that NaN values are not taken into account in the p-values correction.
References
----------
- Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
- https://en.wikipedia.org/wiki/Bonferroni_correction
Examples
--------
>>> from pingouin import bonf
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = bonf(pvals, alpha=.05)
>>> print(reject, pvals_corr)
[False True False False True] [1. 0.015 1. 0.27 0.0015]
"""
pvals = np.asarray(pvals)
num_nan = np.isnan(pvals).sum()
pvals_corrected = pvals * (float(pvals.size) - num_nan)
pvals_corrected = np.clip(pvals_corrected, None, 1)
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
return reject, pvals_corrected | [
"def",
"bonf",
"(",
"pvals",
",",
"alpha",
"=",
"0.05",
")",
":",
"pvals",
"=",
"np",
".",
"asarray",
"(",
"pvals",
")",
"num_nan",
"=",
"np",
".",
"isnan",
"(",
"pvals",
")",
".",
"sum",
"(",
")",
"pvals_corrected",
"=",
"pvals",
"*",
"(",
"floa... | P-values correction with Bonferroni method.
Parameters
----------
pvals : array_like
Array of p-values of the individual tests.
alpha : float
Error rate (= alpha level).
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pval_corrected : array
P-values adjusted for multiple hypothesis testing using the Bonferroni
procedure (= multiplied by the number of tests).
See also
--------
holm : Holm-Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
Notes
-----
From Wikipedia:
Statistical hypothesis testing is based on rejecting the null hypothesis
if the likelihood of the observed data under the null hypotheses is low.
If multiple hypotheses are tested, the chance of a rare event increases,
and therefore, the likelihood of incorrectly rejecting a null hypothesis
(i.e., making a Type I error) increases.
The Bonferroni correction compensates for that increase by testing each
individual hypothesis :math:`p_i` at a significance level of
:math:`p_i = \\alpha / n` where :math:`\\alpha` is the desired overall
alpha level and :math:`n` is the number of hypotheses. For example, if a
trial is testing :math:`n=20` hypotheses with a desired
:math:`\\alpha=0.05`, then the Bonferroni correction would test each
individual hypothesis at :math:`\\alpha=0.05/20=0.0025``.
The Bonferroni adjusted p-values are defined as:
.. math::
\\widetilde {p}_{{(i)}}= n \\cdot p_{{(i)}}
The Bonferroni correction tends to be a bit too conservative.
Note that NaN values are not taken into account in the p-values correction.
References
----------
- Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
- https://en.wikipedia.org/wiki/Bonferroni_correction
Examples
--------
>>> from pingouin import bonf
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = bonf(pvals, alpha=.05)
>>> print(reject, pvals_corr)
[False True False False True] [1. 0.015 1. 0.27 0.0015] | [
"P",
"-",
"values",
"correction",
"with",
"Bonferroni",
"method",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/multicomp.py#L119-L190 | train | 206,661 |
raphaelvallat/pingouin | pingouin/multicomp.py | holm | def holm(pvals, alpha=.05):
"""P-values correction with Holm method.
Parameters
----------
pvals : array_like
Array of p-values of the individual tests.
alpha : float
Error rate (= alpha level).
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pvals_corrected : array
P-values adjusted for multiple hypothesis testing using the Holm
procedure.
See also
--------
bonf : Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
Notes
-----
From Wikipedia:
In statistics, the Holm–Bonferroni method (also called the Holm method) is
used to counteract the problem of multiple comparisons. It is intended to
control the family-wise error rate and offers a simple test uniformly more
powerful than the Bonferroni correction.
The Holm adjusted p-values are the running maximum of the sorted p-values
divided by the corresponding increasing alpha level:
.. math::
\\frac{\\alpha}{n}, \\frac{\\alpha}{n-1}, ..., \\frac{\\alpha}{1}
where :math:`n` is the number of test.
The full mathematical formula is:
.. math::
\\widetilde {p}_{{(i)}}=\\max _{{j\\leq i}}\\left\\{(n-j+1)p_{{(j)}}
\\right\\}_{{1}}
Note that NaN values are not taken into account in the p-values correction.
References
----------
- Holm, S. (1979). A simple sequentially rejective multiple test procedure.
Scandinavian journal of statistics, 65-70.
- https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method
Examples
--------
>>> from pingouin import holm
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = holm(pvals, alpha=.05)
>>> print(reject, pvals_corr)
[False True False False True] [0.64 0.012 0.64 0.162 0.0015]
"""
# Convert to array and save original shape
pvals = np.asarray(pvals)
shape_init = pvals.shape
pvals = pvals.ravel()
num_nan = np.isnan(pvals).sum()
# Sort the (flattened) p-values
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
ntests = pvals.size - num_nan
# Now we adjust the p-values
pvals_corr = np.diag(pvals_sorted * np.arange(ntests, 0, -1)[..., None])
pvals_corr = np.maximum.accumulate(pvals_corr)
pvals_corr = np.clip(pvals_corr, None, 1)
# And revert to the original shape and order
pvals_corr = np.append(pvals_corr, np.full(num_nan, np.nan))
pvals_corrected = pvals_corr[sortrevind].reshape(shape_init)
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
return reject, pvals_corrected | python | def holm(pvals, alpha=.05):
"""P-values correction with Holm method.
Parameters
----------
pvals : array_like
Array of p-values of the individual tests.
alpha : float
Error rate (= alpha level).
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pvals_corrected : array
P-values adjusted for multiple hypothesis testing using the Holm
procedure.
See also
--------
bonf : Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
Notes
-----
From Wikipedia:
In statistics, the Holm–Bonferroni method (also called the Holm method) is
used to counteract the problem of multiple comparisons. It is intended to
control the family-wise error rate and offers a simple test uniformly more
powerful than the Bonferroni correction.
The Holm adjusted p-values are the running maximum of the sorted p-values
divided by the corresponding increasing alpha level:
.. math::
\\frac{\\alpha}{n}, \\frac{\\alpha}{n-1}, ..., \\frac{\\alpha}{1}
where :math:`n` is the number of test.
The full mathematical formula is:
.. math::
\\widetilde {p}_{{(i)}}=\\max _{{j\\leq i}}\\left\\{(n-j+1)p_{{(j)}}
\\right\\}_{{1}}
Note that NaN values are not taken into account in the p-values correction.
References
----------
- Holm, S. (1979). A simple sequentially rejective multiple test procedure.
Scandinavian journal of statistics, 65-70.
- https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method
Examples
--------
>>> from pingouin import holm
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = holm(pvals, alpha=.05)
>>> print(reject, pvals_corr)
[False True False False True] [0.64 0.012 0.64 0.162 0.0015]
"""
# Convert to array and save original shape
pvals = np.asarray(pvals)
shape_init = pvals.shape
pvals = pvals.ravel()
num_nan = np.isnan(pvals).sum()
# Sort the (flattened) p-values
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
ntests = pvals.size - num_nan
# Now we adjust the p-values
pvals_corr = np.diag(pvals_sorted * np.arange(ntests, 0, -1)[..., None])
pvals_corr = np.maximum.accumulate(pvals_corr)
pvals_corr = np.clip(pvals_corr, None, 1)
# And revert to the original shape and order
pvals_corr = np.append(pvals_corr, np.full(num_nan, np.nan))
pvals_corrected = pvals_corr[sortrevind].reshape(shape_init)
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
return reject, pvals_corrected | [
"def",
"holm",
"(",
"pvals",
",",
"alpha",
"=",
".05",
")",
":",
"# Convert to array and save original shape",
"pvals",
"=",
"np",
".",
"asarray",
"(",
"pvals",
")",
"shape_init",
"=",
"pvals",
".",
"shape",
"pvals",
"=",
"pvals",
".",
"ravel",
"(",
")",
... | P-values correction with Holm method.
Parameters
----------
pvals : array_like
Array of p-values of the individual tests.
alpha : float
Error rate (= alpha level).
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pvals_corrected : array
P-values adjusted for multiple hypothesis testing using the Holm
procedure.
See also
--------
bonf : Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
Notes
-----
From Wikipedia:
In statistics, the Holm–Bonferroni method (also called the Holm method) is
used to counteract the problem of multiple comparisons. It is intended to
control the family-wise error rate and offers a simple test uniformly more
powerful than the Bonferroni correction.
The Holm adjusted p-values are the running maximum of the sorted p-values
divided by the corresponding increasing alpha level:
.. math::
\\frac{\\alpha}{n}, \\frac{\\alpha}{n-1}, ..., \\frac{\\alpha}{1}
where :math:`n` is the number of test.
The full mathematical formula is:
.. math::
\\widetilde {p}_{{(i)}}=\\max _{{j\\leq i}}\\left\\{(n-j+1)p_{{(j)}}
\\right\\}_{{1}}
Note that NaN values are not taken into account in the p-values correction.
References
----------
- Holm, S. (1979). A simple sequentially rejective multiple test procedure.
Scandinavian journal of statistics, 65-70.
- https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method
Examples
--------
>>> from pingouin import holm
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = holm(pvals, alpha=.05)
>>> print(reject, pvals_corr)
[False True False False True] [0.64 0.012 0.64 0.162 0.0015] | [
"P",
"-",
"values",
"correction",
"with",
"Holm",
"method",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/multicomp.py#L193-L279 | train | 206,662 |
raphaelvallat/pingouin | pingouin/multicomp.py | multicomp | def multicomp(pvals, alpha=0.05, method='holm'):
"""P-values correction for multiple comparisons.
Parameters
----------
pvals : array_like
uncorrected p-values.
alpha : float
Significance level.
method : string
Method used for testing and adjustment of pvalues. Can be either the
full name or initial letters. Available methods are ::
'bonf' : one-step Bonferroni correction
'holm' : step-down method using Bonferroni adjustments
'fdr_bh' : Benjamini/Hochberg FDR correction
'fdr_by' : Benjamini/Yekutieli FDR correction
'none' : pass-through option (no correction applied)
Returns
-------
reject : array, boolean
True for hypothesis that can be rejected for given alpha.
pvals_corrected : array
P-values corrected for multiple testing.
See Also
--------
bonf : Bonferroni correction
holm : Holm-Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
pairwise_ttests : Pairwise post-hocs T-tests
Notes
-----
This function is similar to the `p.adjust` R function.
The correction methods include the Bonferroni correction ("bonf")
in which the p-values are multiplied by the number of comparisons.
Less conservative methods are also included such as Holm (1979) ("holm"),
Benjamini & Hochberg (1995) ("fdr_bh"), and Benjamini
& Yekutieli (2001) ("fdr_by"), respectively.
The first two methods are designed to give strong control of the
family-wise error rate. Note that the Holm's method is usually preferred
over the Bonferroni correction.
The "fdr_bh" and "fdr_by" methods control the false discovery rate, i.e.
the expected proportion of false discoveries amongst the rejected
hypotheses. The false discovery rate is a less stringent condition than
the family-wise error rate, so these methods are more powerful than the
others.
References
----------
- Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
- Holm, S. (1979). A simple sequentially rejective multiple test procedure.
Scandinavian Journal of Statistics, 6, 65–70.
- Benjamini, Y., and Hochberg, Y. (1995). Controlling the false discovery
rate: a practical and powerful approach to multiple testing. Journal of
the Royal Statistical Society Series B, 57, 289–300.
- Benjamini, Y., and Yekutieli, D. (2001). The control of the false
discovery rate in multiple testing under dependency. Annals of
Statistics, 29, 1165–1188.
Examples
--------
FDR correction of an array of p-values
>>> from pingouin import multicomp
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = multicomp(pvals, method='fdr_bh')
>>> print(reject, pvals_corr)
[False True False False True] [0.5 0.0075 0.4 0.09 0.0015]
"""
if not isinstance(pvals, (list, np.ndarray)):
err = "pvals must be a list or a np.ndarray"
raise ValueError(err)
if method.lower() in ['b', 'bonf', 'bonferroni']:
reject, pvals_corrected = bonf(pvals, alpha=alpha)
elif method.lower() in ['h', 'holm']:
reject, pvals_corrected = holm(pvals, alpha=alpha)
elif method.lower() in ['fdr', 'fdr_bh']:
reject, pvals_corrected = fdr(pvals, alpha=alpha, method='fdr_bh')
elif method.lower() in ['fdr_by']:
reject, pvals_corrected = fdr(pvals, alpha=alpha, method='fdr_by')
elif method.lower() == 'none':
pvals_corrected = pvals
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
else:
raise ValueError('Multiple comparison method not recognized')
return reject, pvals_corrected | python | def multicomp(pvals, alpha=0.05, method='holm'):
"""P-values correction for multiple comparisons.
Parameters
----------
pvals : array_like
uncorrected p-values.
alpha : float
Significance level.
method : string
Method used for testing and adjustment of pvalues. Can be either the
full name or initial letters. Available methods are ::
'bonf' : one-step Bonferroni correction
'holm' : step-down method using Bonferroni adjustments
'fdr_bh' : Benjamini/Hochberg FDR correction
'fdr_by' : Benjamini/Yekutieli FDR correction
'none' : pass-through option (no correction applied)
Returns
-------
reject : array, boolean
True for hypothesis that can be rejected for given alpha.
pvals_corrected : array
P-values corrected for multiple testing.
See Also
--------
bonf : Bonferroni correction
holm : Holm-Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
pairwise_ttests : Pairwise post-hocs T-tests
Notes
-----
This function is similar to the `p.adjust` R function.
The correction methods include the Bonferroni correction ("bonf")
in which the p-values are multiplied by the number of comparisons.
Less conservative methods are also included such as Holm (1979) ("holm"),
Benjamini & Hochberg (1995) ("fdr_bh"), and Benjamini
& Yekutieli (2001) ("fdr_by"), respectively.
The first two methods are designed to give strong control of the
family-wise error rate. Note that the Holm's method is usually preferred
over the Bonferroni correction.
The "fdr_bh" and "fdr_by" methods control the false discovery rate, i.e.
the expected proportion of false discoveries amongst the rejected
hypotheses. The false discovery rate is a less stringent condition than
the family-wise error rate, so these methods are more powerful than the
others.
References
----------
- Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
- Holm, S. (1979). A simple sequentially rejective multiple test procedure.
Scandinavian Journal of Statistics, 6, 65–70.
- Benjamini, Y., and Hochberg, Y. (1995). Controlling the false discovery
rate: a practical and powerful approach to multiple testing. Journal of
the Royal Statistical Society Series B, 57, 289–300.
- Benjamini, Y., and Yekutieli, D. (2001). The control of the false
discovery rate in multiple testing under dependency. Annals of
Statistics, 29, 1165–1188.
Examples
--------
FDR correction of an array of p-values
>>> from pingouin import multicomp
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = multicomp(pvals, method='fdr_bh')
>>> print(reject, pvals_corr)
[False True False False True] [0.5 0.0075 0.4 0.09 0.0015]
"""
if not isinstance(pvals, (list, np.ndarray)):
err = "pvals must be a list or a np.ndarray"
raise ValueError(err)
if method.lower() in ['b', 'bonf', 'bonferroni']:
reject, pvals_corrected = bonf(pvals, alpha=alpha)
elif method.lower() in ['h', 'holm']:
reject, pvals_corrected = holm(pvals, alpha=alpha)
elif method.lower() in ['fdr', 'fdr_bh']:
reject, pvals_corrected = fdr(pvals, alpha=alpha, method='fdr_bh')
elif method.lower() in ['fdr_by']:
reject, pvals_corrected = fdr(pvals, alpha=alpha, method='fdr_by')
elif method.lower() == 'none':
pvals_corrected = pvals
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
else:
raise ValueError('Multiple comparison method not recognized')
return reject, pvals_corrected | [
"def",
"multicomp",
"(",
"pvals",
",",
"alpha",
"=",
"0.05",
",",
"method",
"=",
"'holm'",
")",
":",
"if",
"not",
"isinstance",
"(",
"pvals",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"err",
"=",
"\"pvals must be a list or a np.ndarray\... | P-values correction for multiple comparisons.
Parameters
----------
pvals : array_like
uncorrected p-values.
alpha : float
Significance level.
method : string
Method used for testing and adjustment of pvalues. Can be either the
full name or initial letters. Available methods are ::
'bonf' : one-step Bonferroni correction
'holm' : step-down method using Bonferroni adjustments
'fdr_bh' : Benjamini/Hochberg FDR correction
'fdr_by' : Benjamini/Yekutieli FDR correction
'none' : pass-through option (no correction applied)
Returns
-------
reject : array, boolean
True for hypothesis that can be rejected for given alpha.
pvals_corrected : array
P-values corrected for multiple testing.
See Also
--------
bonf : Bonferroni correction
holm : Holm-Bonferroni correction
fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction
pairwise_ttests : Pairwise post-hocs T-tests
Notes
-----
This function is similar to the `p.adjust` R function.
The correction methods include the Bonferroni correction ("bonf")
in which the p-values are multiplied by the number of comparisons.
Less conservative methods are also included such as Holm (1979) ("holm"),
Benjamini & Hochberg (1995) ("fdr_bh"), and Benjamini
& Yekutieli (2001) ("fdr_by"), respectively.
The first two methods are designed to give strong control of the
family-wise error rate. Note that the Holm's method is usually preferred
over the Bonferroni correction.
The "fdr_bh" and "fdr_by" methods control the false discovery rate, i.e.
the expected proportion of false discoveries amongst the rejected
hypotheses. The false discovery rate is a less stringent condition than
the family-wise error rate, so these methods are more powerful than the
others.
References
----------
- Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi
di teste. Studi in onore del professore salvatore ortu carboni, 13-60.
- Holm, S. (1979). A simple sequentially rejective multiple test procedure.
Scandinavian Journal of Statistics, 6, 65–70.
- Benjamini, Y., and Hochberg, Y. (1995). Controlling the false discovery
rate: a practical and powerful approach to multiple testing. Journal of
the Royal Statistical Society Series B, 57, 289–300.
- Benjamini, Y., and Yekutieli, D. (2001). The control of the false
discovery rate in multiple testing under dependency. Annals of
Statistics, 29, 1165–1188.
Examples
--------
FDR correction of an array of p-values
>>> from pingouin import multicomp
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = multicomp(pvals, method='fdr_bh')
>>> print(reject, pvals_corr)
[False True False False True] [0.5 0.0075 0.4 0.09 0.0015] | [
"P",
"-",
"values",
"correction",
"for",
"multiple",
"comparisons",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/multicomp.py#L282-L378 | train | 206,663 |
raphaelvallat/pingouin | pingouin/reliability.py | cronbach_alpha | def cronbach_alpha(data=None, items=None, scores=None, subject=None,
remove_na=False, ci=.95):
"""Cronbach's alpha reliability measure.
Parameters
----------
data : pandas dataframe
Wide or long-format dataframe.
items : str
Column in ``data`` with the items names (long-format only).
scores : str
Column in ``data`` with the scores (long-format only).
subject : str
Column in ``data`` with the subject identifier (long-format only).
remove_na : bool
If True, remove the entire rows that contain missing values
(= listwise deletion). If False, only pairwise missing values are
removed when computing the covariance matrix. For more details, please
refer to the :py:meth:`pandas.DataFrame.cov` method.
ci : float
Confidence interval (.95 = 95%)
Returns
-------
alpha : float
Cronbach's alpha
Notes
-----
This function works with both wide and long format dataframe. If you pass a
long-format dataframe, you must also pass the ``items``, ``scores`` and
``subj`` columns (in which case the data will be converted into wide
format using the :py:meth:`pandas.DataFrame.pivot` method).
Internal consistency is usually measured with Cronbach's alpha, a statistic
calculated from the pairwise correlations between items.
Internal consistency ranges between negative infinity and one.
Coefficient alpha will be negative whenever there is greater
within-subject variability than between-subject variability.
Cronbach's :math:`\\alpha` is defined as
.. math::
\\alpha ={k \\over k-1}\\left(1-{\\sum_{{i=1}}^{k}\\sigma_{{y_{i}}}^{2}
\\over\\sigma_{x}^{2}}\\right)
where :math:`k` refers to the number of items, :math:`\\sigma_{x}^{2}`
is the variance of the observed total scores, and
:math:`\\sigma_{{y_{i}}}^{2}` the variance of component :math:`i` for
the current sample of subjects.
Another formula for Cronbach's :math:`\\alpha` is
.. math::
\\alpha = \\frac{k \\times \\bar c}{\\bar v + (k - 1) \\times \\bar c}
where :math:`\\bar c` refers to the average of all covariances between
items and :math:`\\bar v` to the average variance of each item.
95% confidence intervals are calculated using Feldt's method:
.. math::
c_L = 1 - (1 - \\alpha) \\cdot F_{(0.025, n-1, (n-1)(k-1))}
c_U = 1 - (1 - \\alpha) \\cdot F_{(0.975, n-1, (n-1)(k-1))}
where :math:`n` is the number of subjects and :math:`k` the number of
items.
Results have been tested against the R package psych.
References
----------
.. [1] https://en.wikipedia.org/wiki/Cronbach%27s_alpha
.. [2] http://www.real-statistics.com/reliability/cronbachs-alpha/
.. [3] https://cran.r-project.org/web/packages/psych/psych.pdf
.. [4] Feldt, Leonard S., Woodruff, David J., & Salih, Fathi A. (1987).
Statistical inference for coefficient alpha. Applied Psychological
Measurement, 11(1):93-103.
Examples
--------
Binary wide-format dataframe (with missing values)
>>> import pingouin as pg
>>> data = pg.read_dataset('cronbach_wide_missing')
>>> # In R: psych:alpha(data, use="pairwise")
>>> pg.cronbach_alpha(data=data)
(0.732661, array([0.435, 0.909]))
After listwise deletion of missing values (remove the entire rows)
>>> # In R: psych:alpha(data, use="complete.obs")
>>> pg.cronbach_alpha(data=data, remove_na=True)
(0.801695, array([0.581, 0.933]))
After imputing the missing values with the median of each column
>>> pg.cronbach_alpha(data=data.fillna(data.median()))
(0.738019, array([0.447, 0.911]))
Likert-type long-format dataframe
>>> data = pg.read_dataset('cronbach_alpha')
>>> pg.cronbach_alpha(data=data, items='Items', scores='Scores',
... subject='Subj')
(0.591719, array([0.195, 0.84 ]))
"""
# Safety check
assert isinstance(data, pd.DataFrame), 'data must be a dataframe.'
if all([v is not None for v in [items, scores, subject]]):
# Data in long-format: we first convert to a wide format
data = data.pivot(index=subject, values=scores, columns=items)
# From now we assume that data is in wide format
n, k = data.shape
assert k >= 2, 'At least two items are required.'
assert n >= 2, 'At least two raters/subjects are required.'
err = 'All columns must be numeric.'
assert all([data[c].dtype.kind in 'bfi' for c in data.columns]), err
if data.isna().any().any() and remove_na:
# In R = psych:alpha(data, use="complete.obs")
data = data.dropna(axis=0, how='any')
# Compute covariance matrix and Cronbach's alpha
C = data.cov()
cronbach = (k / (k - 1)) * (1 - np.trace(C) / C.sum().sum())
# which is equivalent to
# v = np.diag(C).mean()
# c = C.values[np.tril_indices_from(C, k=-1)].mean()
# cronbach = (k * c) / (v + (k - 1) * c)
# Confidence intervals
alpha = 1 - ci
df1 = n - 1
df2 = df1 * (k - 1)
lower = 1 - (1 - cronbach) * f.isf(alpha / 2, df1, df2)
upper = 1 - (1 - cronbach) * f.isf(1 - alpha / 2, df1, df2)
return round(cronbach, 6), np.round([lower, upper], 3) | python | def cronbach_alpha(data=None, items=None, scores=None, subject=None,
remove_na=False, ci=.95):
"""Cronbach's alpha reliability measure.
Parameters
----------
data : pandas dataframe
Wide or long-format dataframe.
items : str
Column in ``data`` with the items names (long-format only).
scores : str
Column in ``data`` with the scores (long-format only).
subject : str
Column in ``data`` with the subject identifier (long-format only).
remove_na : bool
If True, remove the entire rows that contain missing values
(= listwise deletion). If False, only pairwise missing values are
removed when computing the covariance matrix. For more details, please
refer to the :py:meth:`pandas.DataFrame.cov` method.
ci : float
Confidence interval (.95 = 95%)
Returns
-------
alpha : float
Cronbach's alpha
Notes
-----
This function works with both wide and long format dataframe. If you pass a
long-format dataframe, you must also pass the ``items``, ``scores`` and
``subj`` columns (in which case the data will be converted into wide
format using the :py:meth:`pandas.DataFrame.pivot` method).
Internal consistency is usually measured with Cronbach's alpha, a statistic
calculated from the pairwise correlations between items.
Internal consistency ranges between negative infinity and one.
Coefficient alpha will be negative whenever there is greater
within-subject variability than between-subject variability.
Cronbach's :math:`\\alpha` is defined as
.. math::
\\alpha ={k \\over k-1}\\left(1-{\\sum_{{i=1}}^{k}\\sigma_{{y_{i}}}^{2}
\\over\\sigma_{x}^{2}}\\right)
where :math:`k` refers to the number of items, :math:`\\sigma_{x}^{2}`
is the variance of the observed total scores, and
:math:`\\sigma_{{y_{i}}}^{2}` the variance of component :math:`i` for
the current sample of subjects.
Another formula for Cronbach's :math:`\\alpha` is
.. math::
\\alpha = \\frac{k \\times \\bar c}{\\bar v + (k - 1) \\times \\bar c}
where :math:`\\bar c` refers to the average of all covariances between
items and :math:`\\bar v` to the average variance of each item.
95% confidence intervals are calculated using Feldt's method:
.. math::
c_L = 1 - (1 - \\alpha) \\cdot F_{(0.025, n-1, (n-1)(k-1))}
c_U = 1 - (1 - \\alpha) \\cdot F_{(0.975, n-1, (n-1)(k-1))}
where :math:`n` is the number of subjects and :math:`k` the number of
items.
Results have been tested against the R package psych.
References
----------
.. [1] https://en.wikipedia.org/wiki/Cronbach%27s_alpha
.. [2] http://www.real-statistics.com/reliability/cronbachs-alpha/
.. [3] https://cran.r-project.org/web/packages/psych/psych.pdf
.. [4] Feldt, Leonard S., Woodruff, David J., & Salih, Fathi A. (1987).
Statistical inference for coefficient alpha. Applied Psychological
Measurement, 11(1):93-103.
Examples
--------
Binary wide-format dataframe (with missing values)
>>> import pingouin as pg
>>> data = pg.read_dataset('cronbach_wide_missing')
>>> # In R: psych:alpha(data, use="pairwise")
>>> pg.cronbach_alpha(data=data)
(0.732661, array([0.435, 0.909]))
After listwise deletion of missing values (remove the entire rows)
>>> # In R: psych:alpha(data, use="complete.obs")
>>> pg.cronbach_alpha(data=data, remove_na=True)
(0.801695, array([0.581, 0.933]))
After imputing the missing values with the median of each column
>>> pg.cronbach_alpha(data=data.fillna(data.median()))
(0.738019, array([0.447, 0.911]))
Likert-type long-format dataframe
>>> data = pg.read_dataset('cronbach_alpha')
>>> pg.cronbach_alpha(data=data, items='Items', scores='Scores',
... subject='Subj')
(0.591719, array([0.195, 0.84 ]))
"""
# Safety check
assert isinstance(data, pd.DataFrame), 'data must be a dataframe.'
if all([v is not None for v in [items, scores, subject]]):
# Data in long-format: we first convert to a wide format
data = data.pivot(index=subject, values=scores, columns=items)
# From now we assume that data is in wide format
n, k = data.shape
assert k >= 2, 'At least two items are required.'
assert n >= 2, 'At least two raters/subjects are required.'
err = 'All columns must be numeric.'
assert all([data[c].dtype.kind in 'bfi' for c in data.columns]), err
if data.isna().any().any() and remove_na:
# In R = psych:alpha(data, use="complete.obs")
data = data.dropna(axis=0, how='any')
# Compute covariance matrix and Cronbach's alpha
C = data.cov()
cronbach = (k / (k - 1)) * (1 - np.trace(C) / C.sum().sum())
# which is equivalent to
# v = np.diag(C).mean()
# c = C.values[np.tril_indices_from(C, k=-1)].mean()
# cronbach = (k * c) / (v + (k - 1) * c)
# Confidence intervals
alpha = 1 - ci
df1 = n - 1
df2 = df1 * (k - 1)
lower = 1 - (1 - cronbach) * f.isf(alpha / 2, df1, df2)
upper = 1 - (1 - cronbach) * f.isf(1 - alpha / 2, df1, df2)
return round(cronbach, 6), np.round([lower, upper], 3) | [
"def",
"cronbach_alpha",
"(",
"data",
"=",
"None",
",",
"items",
"=",
"None",
",",
"scores",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"remove_na",
"=",
"False",
",",
"ci",
"=",
".95",
")",
":",
"# Safety check",
"assert",
"isinstance",
"(",
"data... | Cronbach's alpha reliability measure.
Parameters
----------
data : pandas dataframe
Wide or long-format dataframe.
items : str
Column in ``data`` with the items names (long-format only).
scores : str
Column in ``data`` with the scores (long-format only).
subject : str
Column in ``data`` with the subject identifier (long-format only).
remove_na : bool
If True, remove the entire rows that contain missing values
(= listwise deletion). If False, only pairwise missing values are
removed when computing the covariance matrix. For more details, please
refer to the :py:meth:`pandas.DataFrame.cov` method.
ci : float
Confidence interval (.95 = 95%)
Returns
-------
alpha : float
Cronbach's alpha
Notes
-----
This function works with both wide and long format dataframe. If you pass a
long-format dataframe, you must also pass the ``items``, ``scores`` and
``subj`` columns (in which case the data will be converted into wide
format using the :py:meth:`pandas.DataFrame.pivot` method).
Internal consistency is usually measured with Cronbach's alpha, a statistic
calculated from the pairwise correlations between items.
Internal consistency ranges between negative infinity and one.
Coefficient alpha will be negative whenever there is greater
within-subject variability than between-subject variability.
Cronbach's :math:`\\alpha` is defined as
.. math::
\\alpha ={k \\over k-1}\\left(1-{\\sum_{{i=1}}^{k}\\sigma_{{y_{i}}}^{2}
\\over\\sigma_{x}^{2}}\\right)
where :math:`k` refers to the number of items, :math:`\\sigma_{x}^{2}`
is the variance of the observed total scores, and
:math:`\\sigma_{{y_{i}}}^{2}` the variance of component :math:`i` for
the current sample of subjects.
Another formula for Cronbach's :math:`\\alpha` is
.. math::
\\alpha = \\frac{k \\times \\bar c}{\\bar v + (k - 1) \\times \\bar c}
where :math:`\\bar c` refers to the average of all covariances between
items and :math:`\\bar v` to the average variance of each item.
95% confidence intervals are calculated using Feldt's method:
.. math::
c_L = 1 - (1 - \\alpha) \\cdot F_{(0.025, n-1, (n-1)(k-1))}
c_U = 1 - (1 - \\alpha) \\cdot F_{(0.975, n-1, (n-1)(k-1))}
where :math:`n` is the number of subjects and :math:`k` the number of
items.
Results have been tested against the R package psych.
References
----------
.. [1] https://en.wikipedia.org/wiki/Cronbach%27s_alpha
.. [2] http://www.real-statistics.com/reliability/cronbachs-alpha/
.. [3] https://cran.r-project.org/web/packages/psych/psych.pdf
.. [4] Feldt, Leonard S., Woodruff, David J., & Salih, Fathi A. (1987).
Statistical inference for coefficient alpha. Applied Psychological
Measurement, 11(1):93-103.
Examples
--------
Binary wide-format dataframe (with missing values)
>>> import pingouin as pg
>>> data = pg.read_dataset('cronbach_wide_missing')
>>> # In R: psych:alpha(data, use="pairwise")
>>> pg.cronbach_alpha(data=data)
(0.732661, array([0.435, 0.909]))
After listwise deletion of missing values (remove the entire rows)
>>> # In R: psych:alpha(data, use="complete.obs")
>>> pg.cronbach_alpha(data=data, remove_na=True)
(0.801695, array([0.581, 0.933]))
After imputing the missing values with the median of each column
>>> pg.cronbach_alpha(data=data.fillna(data.median()))
(0.738019, array([0.447, 0.911]))
Likert-type long-format dataframe
>>> data = pg.read_dataset('cronbach_alpha')
>>> pg.cronbach_alpha(data=data, items='Items', scores='Scores',
... subject='Subj')
(0.591719, array([0.195, 0.84 ])) | [
"Cronbach",
"s",
"alpha",
"reliability",
"measure",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/reliability.py#L8-L153 | train | 206,664 |
raphaelvallat/pingouin | pingouin/reliability.py | intraclass_corr | def intraclass_corr(data=None, groups=None, raters=None, scores=None, ci=.95):
"""Intra-class correlation coefficient.
Parameters
----------
data : pd.DataFrame
Dataframe containing the variables
groups : string
Name of column in data containing the groups.
raters : string
Name of column in data containing the raters (scorers).
scores : string
Name of column in data containing the scores (ratings).
ci : float
Confidence interval
Returns
-------
icc : float
Intraclass correlation coefficient
ci : list
Lower and upper confidence intervals
Notes
-----
The intraclass correlation (ICC) assesses the reliability of ratings by
comparing the variability of different ratings of the same subject to the
total variation across all ratings and all subjects. The ratings are
quantitative (e.g. Likert scale).
Inspired from:
http://www.real-statistics.com/reliability/intraclass-correlation/
Examples
--------
ICC of wine quality assessed by 4 judges.
>>> import pingouin as pg
>>> data = pg.read_dataset('icc')
>>> pg.intraclass_corr(data=data, groups='Wine', raters='Judge',
... scores='Scores', ci=.95)
(0.727526, array([0.434, 0.927]))
"""
from pingouin import anova
# Check dataframe
if any(v is None for v in [data, groups, raters, scores]):
raise ValueError('Data, groups, raters and scores must be specified')
assert isinstance(data, pd.DataFrame), 'Data must be a pandas dataframe.'
# Check that scores is a numeric variable
assert data[scores].dtype.kind in 'fi', 'Scores must be numeric.'
# Check that data are fully balanced
if data.groupby(raters)[scores].count().nunique() > 1:
raise ValueError('Data must be balanced.')
# Extract sizes
k = data[raters].nunique()
# n = data[groups].nunique()
# ANOVA and ICC
aov = anova(dv=scores, data=data, between=groups, detailed=True)
icc = (aov.loc[0, 'MS'] - aov.loc[1, 'MS']) / \
(aov.loc[0, 'MS'] + (k - 1) * aov.loc[1, 'MS'])
# Confidence interval
alpha = 1 - ci
df_num, df_den = aov.loc[0, 'DF'], aov.loc[1, 'DF']
f_lower = aov.loc[0, 'F'] / f.isf(alpha / 2, df_num, df_den)
f_upper = aov.loc[0, 'F'] * f.isf(alpha / 2, df_den, df_num)
lower = (f_lower - 1) / (f_lower + k - 1)
upper = (f_upper - 1) / (f_upper + k - 1)
return round(icc, 6), np.round([lower, upper], 3) | python | def intraclass_corr(data=None, groups=None, raters=None, scores=None, ci=.95):
"""Intra-class correlation coefficient.
Parameters
----------
data : pd.DataFrame
Dataframe containing the variables
groups : string
Name of column in data containing the groups.
raters : string
Name of column in data containing the raters (scorers).
scores : string
Name of column in data containing the scores (ratings).
ci : float
Confidence interval
Returns
-------
icc : float
Intraclass correlation coefficient
ci : list
Lower and upper confidence intervals
Notes
-----
The intraclass correlation (ICC) assesses the reliability of ratings by
comparing the variability of different ratings of the same subject to the
total variation across all ratings and all subjects. The ratings are
quantitative (e.g. Likert scale).
Inspired from:
http://www.real-statistics.com/reliability/intraclass-correlation/
Examples
--------
ICC of wine quality assessed by 4 judges.
>>> import pingouin as pg
>>> data = pg.read_dataset('icc')
>>> pg.intraclass_corr(data=data, groups='Wine', raters='Judge',
... scores='Scores', ci=.95)
(0.727526, array([0.434, 0.927]))
"""
from pingouin import anova
# Check dataframe
if any(v is None for v in [data, groups, raters, scores]):
raise ValueError('Data, groups, raters and scores must be specified')
assert isinstance(data, pd.DataFrame), 'Data must be a pandas dataframe.'
# Check that scores is a numeric variable
assert data[scores].dtype.kind in 'fi', 'Scores must be numeric.'
# Check that data are fully balanced
if data.groupby(raters)[scores].count().nunique() > 1:
raise ValueError('Data must be balanced.')
# Extract sizes
k = data[raters].nunique()
# n = data[groups].nunique()
# ANOVA and ICC
aov = anova(dv=scores, data=data, between=groups, detailed=True)
icc = (aov.loc[0, 'MS'] - aov.loc[1, 'MS']) / \
(aov.loc[0, 'MS'] + (k - 1) * aov.loc[1, 'MS'])
# Confidence interval
alpha = 1 - ci
df_num, df_den = aov.loc[0, 'DF'], aov.loc[1, 'DF']
f_lower = aov.loc[0, 'F'] / f.isf(alpha / 2, df_num, df_den)
f_upper = aov.loc[0, 'F'] * f.isf(alpha / 2, df_den, df_num)
lower = (f_lower - 1) / (f_lower + k - 1)
upper = (f_upper - 1) / (f_upper + k - 1)
return round(icc, 6), np.round([lower, upper], 3) | [
"def",
"intraclass_corr",
"(",
"data",
"=",
"None",
",",
"groups",
"=",
"None",
",",
"raters",
"=",
"None",
",",
"scores",
"=",
"None",
",",
"ci",
"=",
".95",
")",
":",
"from",
"pingouin",
"import",
"anova",
"# Check dataframe",
"if",
"any",
"(",
"v",
... | Intra-class correlation coefficient.
Parameters
----------
data : pd.DataFrame
Dataframe containing the variables
groups : string
Name of column in data containing the groups.
raters : string
Name of column in data containing the raters (scorers).
scores : string
Name of column in data containing the scores (ratings).
ci : float
Confidence interval
Returns
-------
icc : float
Intraclass correlation coefficient
ci : list
Lower and upper confidence intervals
Notes
-----
The intraclass correlation (ICC) assesses the reliability of ratings by
comparing the variability of different ratings of the same subject to the
total variation across all ratings and all subjects. The ratings are
quantitative (e.g. Likert scale).
Inspired from:
http://www.real-statistics.com/reliability/intraclass-correlation/
Examples
--------
ICC of wine quality assessed by 4 judges.
>>> import pingouin as pg
>>> data = pg.read_dataset('icc')
>>> pg.intraclass_corr(data=data, groups='Wine', raters='Judge',
... scores='Scores', ci=.95)
(0.727526, array([0.434, 0.927])) | [
"Intra",
"-",
"class",
"correlation",
"coefficient",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/reliability.py#L156-L228 | train | 206,665 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | _func | def _func(a, p, r, v):
"""
calculates f-hat for the coefficients in a, probability p,
sample mean difference r, and degrees of freedom v.
"""
# eq. 2.3
f = a[0]*math.log(r-1.) + \
a[1]*math.log(r-1.)**2 + \
a[2]*math.log(r-1.)**3 + \
a[3]*math.log(r-1.)**4
# eq. 2.7 and 2.8 corrections
if r == 3:
f += -0.002 / (1. + 12. * _phi(p)**2)
if v <= 4.364:
f += 1./517. - 1./(312.*(v,1e38)[np.isinf(v)])
else:
f += 1./(191.*(v,1e38)[np.isinf(v)])
return -f | python | def _func(a, p, r, v):
"""
calculates f-hat for the coefficients in a, probability p,
sample mean difference r, and degrees of freedom v.
"""
# eq. 2.3
f = a[0]*math.log(r-1.) + \
a[1]*math.log(r-1.)**2 + \
a[2]*math.log(r-1.)**3 + \
a[3]*math.log(r-1.)**4
# eq. 2.7 and 2.8 corrections
if r == 3:
f += -0.002 / (1. + 12. * _phi(p)**2)
if v <= 4.364:
f += 1./517. - 1./(312.*(v,1e38)[np.isinf(v)])
else:
f += 1./(191.*(v,1e38)[np.isinf(v)])
return -f | [
"def",
"_func",
"(",
"a",
",",
"p",
",",
"r",
",",
"v",
")",
":",
"# eq. 2.3",
"f",
"=",
"a",
"[",
"0",
"]",
"*",
"math",
".",
"log",
"(",
"r",
"-",
"1.",
")",
"+",
"a",
"[",
"1",
"]",
"*",
"math",
".",
"log",
"(",
"r",
"-",
"1.",
")"... | calculates f-hat for the coefficients in a, probability p,
sample mean difference r, and degrees of freedom v. | [
"calculates",
"f",
"-",
"hat",
"for",
"the",
"coefficients",
"in",
"a",
"probability",
"p",
"sample",
"mean",
"difference",
"r",
"and",
"degrees",
"of",
"freedom",
"v",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L460-L480 | train | 206,666 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | _select_ps | def _select_ps(p):
# There are more generic ways of doing this but profiling
# revealed that selecting these points is one of the slow
# things that is easy to change. This is about 11 times
# faster than the generic algorithm it is replacing.
#
# it is possible that different break points could yield
# better estimates, but the function this is refactoring
# just used linear distance.
"""returns the points to use for interpolating p"""
if p >= .99:
return .990, .995, .999
elif p >= .975:
return .975, .990, .995
elif p >= .95:
return .950, .975, .990
elif p >= .9125:
return .900, .950, .975
elif p >= .875:
return .850, .900, .950
elif p >= .825:
return .800, .850, .900
elif p >= .7625:
return .750, .800, .850
elif p >= .675:
return .675, .750, .800
elif p >= .500:
return .500, .675, .750
else:
return .100, .500, .675 | python | def _select_ps(p):
# There are more generic ways of doing this but profiling
# revealed that selecting these points is one of the slow
# things that is easy to change. This is about 11 times
# faster than the generic algorithm it is replacing.
#
# it is possible that different break points could yield
# better estimates, but the function this is refactoring
# just used linear distance.
"""returns the points to use for interpolating p"""
if p >= .99:
return .990, .995, .999
elif p >= .975:
return .975, .990, .995
elif p >= .95:
return .950, .975, .990
elif p >= .9125:
return .900, .950, .975
elif p >= .875:
return .850, .900, .950
elif p >= .825:
return .800, .850, .900
elif p >= .7625:
return .750, .800, .850
elif p >= .675:
return .675, .750, .800
elif p >= .500:
return .500, .675, .750
else:
return .100, .500, .675 | [
"def",
"_select_ps",
"(",
"p",
")",
":",
"# There are more generic ways of doing this but profiling",
"# revealed that selecting these points is one of the slow",
"# things that is easy to change. This is about 11 times",
"# faster than the generic algorithm it is replacing.",
"#",
"# it is po... | returns the points to use for interpolating p | [
"returns",
"the",
"points",
"to",
"use",
"for",
"interpolating",
"p"
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L482-L511 | train | 206,667 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | _select_vs | def _select_vs(v, p):
# This one is is about 30 times faster than
# the generic algorithm it is replacing.
"""returns the points to use for interpolating v"""
if v >= 120.:
return 60, 120, inf
elif v >= 60.:
return 40, 60, 120
elif v >= 40.:
return 30, 40, 60
elif v >= 30.:
return 24, 30, 40
elif v >= 24.:
return 20, 24, 30
elif v >= 19.5:
return 19, 20, 24
if p >= .9:
if v < 2.5:
return 1, 2, 3
else:
if v < 3.5:
return 2, 3, 4
vi = int(round(v))
return vi - 1, vi, vi + 1 | python | def _select_vs(v, p):
# This one is is about 30 times faster than
# the generic algorithm it is replacing.
"""returns the points to use for interpolating v"""
if v >= 120.:
return 60, 120, inf
elif v >= 60.:
return 40, 60, 120
elif v >= 40.:
return 30, 40, 60
elif v >= 30.:
return 24, 30, 40
elif v >= 24.:
return 20, 24, 30
elif v >= 19.5:
return 19, 20, 24
if p >= .9:
if v < 2.5:
return 1, 2, 3
else:
if v < 3.5:
return 2, 3, 4
vi = int(round(v))
return vi - 1, vi, vi + 1 | [
"def",
"_select_vs",
"(",
"v",
",",
"p",
")",
":",
"# This one is is about 30 times faster than",
"# the generic algorithm it is replacing.",
"if",
"v",
">=",
"120.",
":",
"return",
"60",
",",
"120",
",",
"inf",
"elif",
"v",
">=",
"60.",
":",
"return",
"40",
"... | returns the points to use for interpolating v | [
"returns",
"the",
"points",
"to",
"use",
"for",
"interpolating",
"v"
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L596-L622 | train | 206,668 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | _interpolate_v | def _interpolate_v(p, r, v):
"""
interpolates v based on the values in the A table for the
scalar value of r and th
"""
# interpolate v (p should be in table)
# ordinate: y**2
# abcissa: 1./v
# find the 3 closest v values
# only p >= .9 have table values for 1 degree of freedom.
# The boolean is used to index the tuple and append 1 when
# p >= .9
v0, v1, v2 = _select_vs(v, p)
# y = f - 1.
y0_sq = (_func(A[(p,v0)], p, r, v0) + 1.)**2.
y1_sq = (_func(A[(p,v1)], p, r, v1) + 1.)**2.
y2_sq = (_func(A[(p,v2)], p, r, v2) + 1.)**2.
# if v2 is inf set to a big number so interpolation
# calculations will work
if v2 > 1e38: v2 = 1e38
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((y2_sq-y1_sq)/(v2_-v1_) - \
(y0_sq-y1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (y2_sq-y1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (y1_sq-y0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = y1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
return y | python | def _interpolate_v(p, r, v):
"""
interpolates v based on the values in the A table for the
scalar value of r and th
"""
# interpolate v (p should be in table)
# ordinate: y**2
# abcissa: 1./v
# find the 3 closest v values
# only p >= .9 have table values for 1 degree of freedom.
# The boolean is used to index the tuple and append 1 when
# p >= .9
v0, v1, v2 = _select_vs(v, p)
# y = f - 1.
y0_sq = (_func(A[(p,v0)], p, r, v0) + 1.)**2.
y1_sq = (_func(A[(p,v1)], p, r, v1) + 1.)**2.
y2_sq = (_func(A[(p,v2)], p, r, v2) + 1.)**2.
# if v2 is inf set to a big number so interpolation
# calculations will work
if v2 > 1e38: v2 = 1e38
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((y2_sq-y1_sq)/(v2_-v1_) - \
(y0_sq-y1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (y2_sq-y1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (y1_sq-y0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = y1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
return y | [
"def",
"_interpolate_v",
"(",
"p",
",",
"r",
",",
"v",
")",
":",
"# interpolate v (p should be in table)",
"# ordinate: y**2",
"# abcissa: 1./v",
"# find the 3 closest v values",
"# only p >= .9 have table values for 1 degree of freedom.",
"# The boolean is used to index the tuple and... | interpolates v based on the values in the A table for the
scalar value of r and th | [
"interpolates",
"v",
"based",
"on",
"the",
"values",
"in",
"the",
"A",
"table",
"for",
"the",
"scalar",
"value",
"of",
"r",
"and",
"th"
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L624-L664 | train | 206,669 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | _qsturng | def _qsturng(p, r, v):
"""scalar version of qsturng"""
## print 'q',p
# r is interpolated through the q to y here we only need to
# account for when p and/or v are not found in the table.
global A, p_keys, v_keys
if p < .1 or p > .999:
raise ValueError('p must be between .1 and .999')
if p < .9:
if v < 2:
raise ValueError('v must be > 2 when p < .9')
else:
if v < 1:
raise ValueError('v must be > 1 when p >= .9')
# The easy case. A tabled value is requested.
#numpy 1.4.1: TypeError: unhashable type: 'numpy.ndarray' :
p = float(p)
if isinstance(v, np.ndarray):
v = v.item()
if (p,v) in A:
y = _func(A[(p,v)], p, r, v) + 1.
elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]:
# find the 3 closest v values
v0, v1, v2 = _select_vs(v, p)
# find the 3 closest p values
p0, p1, p2 = _select_ps(p)
# calculate r0, r1, and r2
r0_sq = _interpolate_p(p, r, v0)**2
r1_sq = _interpolate_p(p, r, v1)**2
r2_sq = _interpolate_p(p, r, v2)**2
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((r2_sq-r1_sq)/(v2_-v1_) - \
(r0_sq-r1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (r2_sq-r1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (r1_sq-r0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = r1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
elif v not in v_keys+([],[1])[p>=.90]:
y = _interpolate_v(p, r, v)
elif p not in p_keys:
y = _interpolate_p(p, r, v)
return math.sqrt(2) * -y * \
scipy.stats.t.isf((1. + p) / 2., max(v, 1e38)) | python | def _qsturng(p, r, v):
"""scalar version of qsturng"""
## print 'q',p
# r is interpolated through the q to y here we only need to
# account for when p and/or v are not found in the table.
global A, p_keys, v_keys
if p < .1 or p > .999:
raise ValueError('p must be between .1 and .999')
if p < .9:
if v < 2:
raise ValueError('v must be > 2 when p < .9')
else:
if v < 1:
raise ValueError('v must be > 1 when p >= .9')
# The easy case. A tabled value is requested.
#numpy 1.4.1: TypeError: unhashable type: 'numpy.ndarray' :
p = float(p)
if isinstance(v, np.ndarray):
v = v.item()
if (p,v) in A:
y = _func(A[(p,v)], p, r, v) + 1.
elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]:
# find the 3 closest v values
v0, v1, v2 = _select_vs(v, p)
# find the 3 closest p values
p0, p1, p2 = _select_ps(p)
# calculate r0, r1, and r2
r0_sq = _interpolate_p(p, r, v0)**2
r1_sq = _interpolate_p(p, r, v1)**2
r2_sq = _interpolate_p(p, r, v2)**2
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((r2_sq-r1_sq)/(v2_-v1_) - \
(r0_sq-r1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (r2_sq-r1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (r1_sq-r0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = r1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
elif v not in v_keys+([],[1])[p>=.90]:
y = _interpolate_v(p, r, v)
elif p not in p_keys:
y = _interpolate_p(p, r, v)
return math.sqrt(2) * -y * \
scipy.stats.t.isf((1. + p) / 2., max(v, 1e38)) | [
"def",
"_qsturng",
"(",
"p",
",",
"r",
",",
"v",
")",
":",
"## print 'q',p",
"# r is interpolated through the q to y here we only need to",
"# account for when p and/or v are not found in the table.",
"global",
"A",
",",
"p_keys",
",",
"v_keys",
"if",
"p",
"<",
".1",
... | scalar version of qsturng | [
"scalar",
"version",
"of",
"qsturng"
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L666-L725 | train | 206,670 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | qsturng | def qsturng(p, r, v):
"""Approximates the quantile p for a studentized range
distribution having v degrees of freedom and r samples
for probability p.
Parameters
----------
p : (scalar, array_like)
The cumulative probability value
p >= .1 and p <=.999
(values under .5 are not recommended)
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >=1 and v >= inf
else:
v >=2 and v >= inf
Returns
-------
q : (scalar, array_like)
approximation of the Studentized Range
"""
if all(map(_isfloat, [p, r, v])):
return _qsturng(p, r, v)
return _vqsturng(p, r, v) | python | def qsturng(p, r, v):
"""Approximates the quantile p for a studentized range
distribution having v degrees of freedom and r samples
for probability p.
Parameters
----------
p : (scalar, array_like)
The cumulative probability value
p >= .1 and p <=.999
(values under .5 are not recommended)
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >=1 and v >= inf
else:
v >=2 and v >= inf
Returns
-------
q : (scalar, array_like)
approximation of the Studentized Range
"""
if all(map(_isfloat, [p, r, v])):
return _qsturng(p, r, v)
return _vqsturng(p, r, v) | [
"def",
"qsturng",
"(",
"p",
",",
"r",
",",
"v",
")",
":",
"if",
"all",
"(",
"map",
"(",
"_isfloat",
",",
"[",
"p",
",",
"r",
",",
"v",
"]",
")",
")",
":",
"return",
"_qsturng",
"(",
"p",
",",
"r",
",",
"v",
")",
"return",
"_vqsturng",
"(",
... | Approximates the quantile p for a studentized range
distribution having v degrees of freedom and r samples
for probability p.
Parameters
----------
p : (scalar, array_like)
The cumulative probability value
p >= .1 and p <=.999
(values under .5 are not recommended)
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >=1 and v >= inf
else:
v >=2 and v >= inf
Returns
-------
q : (scalar, array_like)
approximation of the Studentized Range | [
"Approximates",
"the",
"quantile",
"p",
"for",
"a",
"studentized",
"range",
"distribution",
"having",
"v",
"degrees",
"of",
"freedom",
"and",
"r",
"samples",
"for",
"probability",
"p",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L731-L762 | train | 206,671 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | _psturng | def _psturng(q, r, v):
"""scalar version of psturng"""
if q < 0.:
raise ValueError('q should be >= 0')
opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)
if v == 1:
if q < _qsturng(.9, r, 1):
return .1
elif q > _qsturng(.999, r, 1):
return .001
return 1. - fminbound(opt_func, .9, .999, args=(r,v))
else:
if q < _qsturng(.1, r, v):
return .9
elif q > _qsturng(.999, r, v):
return .001
return 1. - fminbound(opt_func, .1, .999, args=(r,v)) | python | def _psturng(q, r, v):
"""scalar version of psturng"""
if q < 0.:
raise ValueError('q should be >= 0')
opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)
if v == 1:
if q < _qsturng(.9, r, 1):
return .1
elif q > _qsturng(.999, r, 1):
return .001
return 1. - fminbound(opt_func, .9, .999, args=(r,v))
else:
if q < _qsturng(.1, r, v):
return .9
elif q > _qsturng(.999, r, v):
return .001
return 1. - fminbound(opt_func, .1, .999, args=(r,v)) | [
"def",
"_psturng",
"(",
"q",
",",
"r",
",",
"v",
")",
":",
"if",
"q",
"<",
"0.",
":",
"raise",
"ValueError",
"(",
"'q should be >= 0'",
")",
"opt_func",
"=",
"lambda",
"p",
",",
"r",
",",
"v",
":",
"abs",
"(",
"_qsturng",
"(",
"p",
",",
"r",
",... | scalar version of psturng | [
"scalar",
"version",
"of",
"psturng"
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L764-L782 | train | 206,672 |
raphaelvallat/pingouin | pingouin/external/qsturng.py | psturng | def psturng(q, r, v):
"""Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.
Parameters
----------
q : (scalar, array_like)
quantile value of Studentized Range
q >= 0.
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >=1 and v >= inf
else:
v >=2 and v >= inf
Returns
-------
p : (scalar, array_like)
1. - area from zero to q under the Studentized Range
distribution. When v == 1, p is bound between .001
and .1, when v > 1, p is bound between .001 and .9.
Values between .5 and .9 are 1st order appoximations.
"""
if all(map(_isfloat, [q, r, v])):
return _psturng(q, r, v)
return _vpsturng(q, r, v) | python | def psturng(q, r, v):
"""Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.
Parameters
----------
q : (scalar, array_like)
quantile value of Studentized Range
q >= 0.
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >=1 and v >= inf
else:
v >=2 and v >= inf
Returns
-------
p : (scalar, array_like)
1. - area from zero to q under the Studentized Range
distribution. When v == 1, p is bound between .001
and .1, when v > 1, p is bound between .001 and .9.
Values between .5 and .9 are 1st order appoximations.
"""
if all(map(_isfloat, [q, r, v])):
return _psturng(q, r, v)
return _vpsturng(q, r, v) | [
"def",
"psturng",
"(",
"q",
",",
"r",
",",
"v",
")",
":",
"if",
"all",
"(",
"map",
"(",
"_isfloat",
",",
"[",
"q",
",",
"r",
",",
"v",
"]",
")",
")",
":",
"return",
"_psturng",
"(",
"q",
",",
"r",
",",
"v",
")",
"return",
"_vpsturng",
"(",
... | Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.
Parameters
----------
q : (scalar, array_like)
quantile value of Studentized Range
q >= 0.
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >=1 and v >= inf
else:
v >=2 and v >= inf
Returns
-------
p : (scalar, array_like)
1. - area from zero to q under the Studentized Range
distribution. When v == 1, p is bound between .001
and .1, when v > 1, p is bound between .001 and .9.
Values between .5 and .9 are 1st order appoximations. | [
"Evaluates",
"the",
"probability",
"from",
"0",
"to",
"q",
"for",
"a",
"studentized",
"range",
"having",
"v",
"degrees",
"of",
"freedom",
"and",
"r",
"samples",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L787-L818 | train | 206,673 |
raphaelvallat/pingouin | pingouin/power.py | power_anova | def power_anova(eta=None, k=None, n=None, power=None, alpha=0.05):
"""
Evaluate power, sample size, effect size or
significance level of a one-way balanced ANOVA.
Parameters
----------
eta : float
ANOVA effect size (eta-square == :math:`\\eta^2`).
k : int
Number of groups
n : int
Sample size per group. Groups are assumed to be balanced
(i.e. same sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
Notes
-----
Exactly ONE of the parameters ``eta``, ``k``, ``n``, ``power`` and
``alpha`` must be passed as None, and that parameter is determined from
the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
This function is a mere Python translation of the original `pwr.anova.test`
function implemented in the `pwr` package. All credit goes to the author,
Stephane Champely.
Statistical power is the likelihood that a study will
detect an effect when there is an effect there to be detected.
A high statistical power means that there is a low probability of
concluding that there is no effect when there is one.
Statistical power is mainly affected by the effect size and the sample
size.
For one-way ANOVA, eta-square is the same as partial eta-square. It can be
evaluated from the f-value and degrees of freedom of the ANOVA using
the following formula:
.. math::
\\eta^2 = \\frac{v_1 F^*}{v_1 F^* + v_2}
Using :math:`\\eta^2` and the total sample size :math:`N`, the
non-centrality parameter is defined by:
.. math:: \\delta = N * \\frac{\\eta^2}{1 - \\eta^2}
Then the critical value of the non-central F-distribution is computed using
the percentile point function of the F-distribution with:
.. math:: q = 1 - alpha
.. math:: v_1 = k - 1
.. math:: v_2 = N - k
where :math:`k` is the number of groups.
Finally, the power of the ANOVA is calculated using the survival function
of the non-central F-distribution using the previously computed critical
value, non-centrality parameter, and degrees of freedom.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
Results have been tested against GPower and the R pwr package.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power
>>> from pingouin import power_anova
>>> print('power: %.4f' % power_anova(eta=0.1, k=3, n=20))
power: 0.6082
2. Compute required number of groups
>>> print('k: %.4f' % power_anova(eta=0.1, n=20, power=0.80))
k: 6.0944
3. Compute required sample size
>>> print('n: %.4f' % power_anova(eta=0.1, k=3, power=0.80))
n: 29.9255
4. Compute achieved effect size
>>> print('eta: %.4f' % power_anova(n=20, k=4, power=0.80, alpha=0.05))
eta: 0.1255
5. Compute achieved alpha (significance)
>>> print('alpha: %.4f' % power_anova(eta=0.1, n=20, k=4, power=0.80,
... alpha=None))
alpha: 0.1085
"""
# Check the number of arguments that are None
n_none = sum([v is None for v in [eta, k, n, power, alpha]])
if n_none != 1:
err = 'Exactly one of eta, k, n, power, and alpha must be None.'
raise ValueError(err)
# Safety checks
if eta is not None:
eta = abs(eta)
f_sq = eta / (1 - eta)
if alpha is not None:
assert 0 < alpha <= 1
if power is not None:
assert 0 < power <= 1
def func(f_sq, k, n, power, alpha):
nc = (n * k) * f_sq
dof1 = k - 1
dof2 = (n * k) - k
fcrit = stats.f.ppf(1 - alpha, dof1, dof2)
return stats.ncf.sf(fcrit, dof1, dof2, nc)
# Evaluate missing variable
if power is None:
# Compute achieved power
return func(f_sq, k, n, power, alpha)
elif k is None:
# Compute required number of groups
def _eval_k(k, eta, n, power, alpha):
return func(f_sq, k, n, power, alpha) - power
try:
return brenth(_eval_k, 2, 100, args=(f_sq, n, power, alpha))
except ValueError: # pragma: no cover
return np.nan
elif n is None:
# Compute required sample size
def _eval_n(n, f_sq, k, power, alpha):
return func(f_sq, k, n, power, alpha) - power
try:
return brenth(_eval_n, 2, 1e+07, args=(f_sq, k, power, alpha))
except ValueError: # pragma: no cover
return np.nan
elif eta is None:
# Compute achieved eta
def _eval_eta(f_sq, k, n, power, alpha):
return func(f_sq, k, n, power, alpha) - power
try:
f_sq = brenth(_eval_eta, 1e-10, 1 - 1e-10, args=(k, n, power,
alpha))
return f_sq / (f_sq + 1) # Return eta-square
except ValueError: # pragma: no cover
return np.nan
else:
# Compute achieved alpha
def _eval_alpha(alpha, f_sq, k, n, power):
return func(f_sq, k, n, power, alpha) - power
try:
return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(f_sq, k, n,
power))
except ValueError: # pragma: no cover
return np.nan | python | def power_anova(eta=None, k=None, n=None, power=None, alpha=0.05):
"""
Evaluate power, sample size, effect size or
significance level of a one-way balanced ANOVA.
Parameters
----------
eta : float
ANOVA effect size (eta-square == :math:`\\eta^2`).
k : int
Number of groups
n : int
Sample size per group. Groups are assumed to be balanced
(i.e. same sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
Notes
-----
Exactly ONE of the parameters ``eta``, ``k``, ``n``, ``power`` and
``alpha`` must be passed as None, and that parameter is determined from
the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
This function is a mere Python translation of the original `pwr.anova.test`
function implemented in the `pwr` package. All credit goes to the author,
Stephane Champely.
Statistical power is the likelihood that a study will
detect an effect when there is an effect there to be detected.
A high statistical power means that there is a low probability of
concluding that there is no effect when there is one.
Statistical power is mainly affected by the effect size and the sample
size.
For one-way ANOVA, eta-square is the same as partial eta-square. It can be
evaluated from the f-value and degrees of freedom of the ANOVA using
the following formula:
.. math::
\\eta^2 = \\frac{v_1 F^*}{v_1 F^* + v_2}
Using :math:`\\eta^2` and the total sample size :math:`N`, the
non-centrality parameter is defined by:
.. math:: \\delta = N * \\frac{\\eta^2}{1 - \\eta^2}
Then the critical value of the non-central F-distribution is computed using
the percentile point function of the F-distribution with:
.. math:: q = 1 - alpha
.. math:: v_1 = k - 1
.. math:: v_2 = N - k
where :math:`k` is the number of groups.
Finally, the power of the ANOVA is calculated using the survival function
of the non-central F-distribution using the previously computed critical
value, non-centrality parameter, and degrees of freedom.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
Results have been tested against GPower and the R pwr package.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power
>>> from pingouin import power_anova
>>> print('power: %.4f' % power_anova(eta=0.1, k=3, n=20))
power: 0.6082
2. Compute required number of groups
>>> print('k: %.4f' % power_anova(eta=0.1, n=20, power=0.80))
k: 6.0944
3. Compute required sample size
>>> print('n: %.4f' % power_anova(eta=0.1, k=3, power=0.80))
n: 29.9255
4. Compute achieved effect size
>>> print('eta: %.4f' % power_anova(n=20, k=4, power=0.80, alpha=0.05))
eta: 0.1255
5. Compute achieved alpha (significance)
>>> print('alpha: %.4f' % power_anova(eta=0.1, n=20, k=4, power=0.80,
... alpha=None))
alpha: 0.1085
"""
# Check the number of arguments that are None
n_none = sum([v is None for v in [eta, k, n, power, alpha]])
if n_none != 1:
err = 'Exactly one of eta, k, n, power, and alpha must be None.'
raise ValueError(err)
# Safety checks
if eta is not None:
eta = abs(eta)
f_sq = eta / (1 - eta)
if alpha is not None:
assert 0 < alpha <= 1
if power is not None:
assert 0 < power <= 1
def func(f_sq, k, n, power, alpha):
nc = (n * k) * f_sq
dof1 = k - 1
dof2 = (n * k) - k
fcrit = stats.f.ppf(1 - alpha, dof1, dof2)
return stats.ncf.sf(fcrit, dof1, dof2, nc)
# Evaluate missing variable
if power is None:
# Compute achieved power
return func(f_sq, k, n, power, alpha)
elif k is None:
# Compute required number of groups
def _eval_k(k, eta, n, power, alpha):
return func(f_sq, k, n, power, alpha) - power
try:
return brenth(_eval_k, 2, 100, args=(f_sq, n, power, alpha))
except ValueError: # pragma: no cover
return np.nan
elif n is None:
# Compute required sample size
def _eval_n(n, f_sq, k, power, alpha):
return func(f_sq, k, n, power, alpha) - power
try:
return brenth(_eval_n, 2, 1e+07, args=(f_sq, k, power, alpha))
except ValueError: # pragma: no cover
return np.nan
elif eta is None:
# Compute achieved eta
def _eval_eta(f_sq, k, n, power, alpha):
return func(f_sq, k, n, power, alpha) - power
try:
f_sq = brenth(_eval_eta, 1e-10, 1 - 1e-10, args=(k, n, power,
alpha))
return f_sq / (f_sq + 1) # Return eta-square
except ValueError: # pragma: no cover
return np.nan
else:
# Compute achieved alpha
def _eval_alpha(alpha, f_sq, k, n, power):
return func(f_sq, k, n, power, alpha) - power
try:
return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(f_sq, k, n,
power))
except ValueError: # pragma: no cover
return np.nan | [
"def",
"power_anova",
"(",
"eta",
"=",
"None",
",",
"k",
"=",
"None",
",",
"n",
"=",
"None",
",",
"power",
"=",
"None",
",",
"alpha",
"=",
"0.05",
")",
":",
"# Check the number of arguments that are None",
"n_none",
"=",
"sum",
"(",
"[",
"v",
"is",
"No... | Evaluate power, sample size, effect size or
significance level of a one-way balanced ANOVA.
Parameters
----------
eta : float
ANOVA effect size (eta-square == :math:`\\eta^2`).
k : int
Number of groups
n : int
Sample size per group. Groups are assumed to be balanced
(i.e. same sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
Notes
-----
Exactly ONE of the parameters ``eta``, ``k``, ``n``, ``power`` and
``alpha`` must be passed as None, and that parameter is determined from
the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
This function is a mere Python translation of the original `pwr.anova.test`
function implemented in the `pwr` package. All credit goes to the author,
Stephane Champely.
Statistical power is the likelihood that a study will
detect an effect when there is an effect there to be detected.
A high statistical power means that there is a low probability of
concluding that there is no effect when there is one.
Statistical power is mainly affected by the effect size and the sample
size.
For one-way ANOVA, eta-square is the same as partial eta-square. It can be
evaluated from the f-value and degrees of freedom of the ANOVA using
the following formula:
.. math::
\\eta^2 = \\frac{v_1 F^*}{v_1 F^* + v_2}
Using :math:`\\eta^2` and the total sample size :math:`N`, the
non-centrality parameter is defined by:
.. math:: \\delta = N * \\frac{\\eta^2}{1 - \\eta^2}
Then the critical value of the non-central F-distribution is computed using
the percentile point function of the F-distribution with:
.. math:: q = 1 - alpha
.. math:: v_1 = k - 1
.. math:: v_2 = N - k
where :math:`k` is the number of groups.
Finally, the power of the ANOVA is calculated using the survival function
of the non-central F-distribution using the previously computed critical
value, non-centrality parameter, and degrees of freedom.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
Results have been tested against GPower and the R pwr package.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power
>>> from pingouin import power_anova
>>> print('power: %.4f' % power_anova(eta=0.1, k=3, n=20))
power: 0.6082
2. Compute required number of groups
>>> print('k: %.4f' % power_anova(eta=0.1, n=20, power=0.80))
k: 6.0944
3. Compute required sample size
>>> print('n: %.4f' % power_anova(eta=0.1, k=3, power=0.80))
n: 29.9255
4. Compute achieved effect size
>>> print('eta: %.4f' % power_anova(n=20, k=4, power=0.80, alpha=0.05))
eta: 0.1255
5. Compute achieved alpha (significance)
>>> print('alpha: %.4f' % power_anova(eta=0.1, n=20, k=4, power=0.80,
... alpha=None))
alpha: 0.1085 | [
"Evaluate",
"power",
"sample",
"size",
"effect",
"size",
"or",
"significance",
"level",
"of",
"a",
"one",
"-",
"way",
"balanced",
"ANOVA",
"."
] | 58b19fa4fffbfe09d58b456e3926a148249e4d9b | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/power.py#L340-L520 | train | 206,674 |
prawn-cake/vk-requests | vk_requests/streaming.py | Stream.consume | def consume(self, timeout=None, loop=None):
"""Start consuming the stream
:param timeout: int: if it's given then it stops consumer after given
number of seconds
"""
if self._consumer_fn is None:
raise ValueError('Consumer function is not defined yet')
logger.info('Start consuming the stream')
@asyncio.coroutine
def worker(conn_url):
extra_headers = {
'Connection': 'upgrade',
'Upgrade': 'websocket',
'Sec-Websocket-Version': 13,
}
ws = yield from websockets.connect(
conn_url, extra_headers=extra_headers)
if ws is None:
raise RuntimeError("Couldn't connect to the '%s'" % conn_url)
try:
while True:
message = yield from ws.recv()
yield from self._consumer_fn(message)
finally:
yield from ws.close()
if loop is None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
task = worker(conn_url=self._conn_url)
if timeout:
logger.info('Running task with timeout %s sec', timeout)
loop.run_until_complete(
asyncio.wait_for(task, timeout=timeout))
else:
loop.run_until_complete(task)
except asyncio.TimeoutError:
logger.info('Timeout is reached. Closing the loop')
loop.close()
except KeyboardInterrupt:
logger.info('Closing the loop')
loop.close() | python | def consume(self, timeout=None, loop=None):
"""Start consuming the stream
:param timeout: int: if it's given then it stops consumer after given
number of seconds
"""
if self._consumer_fn is None:
raise ValueError('Consumer function is not defined yet')
logger.info('Start consuming the stream')
@asyncio.coroutine
def worker(conn_url):
extra_headers = {
'Connection': 'upgrade',
'Upgrade': 'websocket',
'Sec-Websocket-Version': 13,
}
ws = yield from websockets.connect(
conn_url, extra_headers=extra_headers)
if ws is None:
raise RuntimeError("Couldn't connect to the '%s'" % conn_url)
try:
while True:
message = yield from ws.recv()
yield from self._consumer_fn(message)
finally:
yield from ws.close()
if loop is None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
task = worker(conn_url=self._conn_url)
if timeout:
logger.info('Running task with timeout %s sec', timeout)
loop.run_until_complete(
asyncio.wait_for(task, timeout=timeout))
else:
loop.run_until_complete(task)
except asyncio.TimeoutError:
logger.info('Timeout is reached. Closing the loop')
loop.close()
except KeyboardInterrupt:
logger.info('Closing the loop')
loop.close() | [
"def",
"consume",
"(",
"self",
",",
"timeout",
"=",
"None",
",",
"loop",
"=",
"None",
")",
":",
"if",
"self",
".",
"_consumer_fn",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Consumer function is not defined yet'",
")",
"logger",
".",
"info",
"(",
"'S... | Start consuming the stream
:param timeout: int: if it's given then it stops consumer after given
number of seconds | [
"Start",
"consuming",
"the",
"stream"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L49-L98 | train | 206,675 |
prawn-cake/vk-requests | vk_requests/streaming.py | StreamingAPI.add_rule | def add_rule(self, value, tag):
"""Add a new rule
:param value: str
:param tag: str
:return: dict of a json response
"""
resp = requests.post(url=self.REQUEST_URL.format(**self._params),
json={'rule': {'value': value, 'tag': tag}})
return resp.json() | python | def add_rule(self, value, tag):
"""Add a new rule
:param value: str
:param tag: str
:return: dict of a json response
"""
resp = requests.post(url=self.REQUEST_URL.format(**self._params),
json={'rule': {'value': value, 'tag': tag}})
return resp.json() | [
"def",
"add_rule",
"(",
"self",
",",
"value",
",",
"tag",
")",
":",
"resp",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"self",
".",
"REQUEST_URL",
".",
"format",
"(",
"*",
"*",
"self",
".",
"_params",
")",
",",
"json",
"=",
"{",
"'rule'",
":"... | Add a new rule
:param value: str
:param tag: str
:return: dict of a json response | [
"Add",
"a",
"new",
"rule"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L117-L126 | train | 206,676 |
prawn-cake/vk-requests | vk_requests/streaming.py | StreamingAPI.remove_rule | def remove_rule(self, tag):
"""Remove a rule by tag
"""
resp = requests.delete(url=self.REQUEST_URL.format(**self._params),
json={'tag': tag})
return resp.json() | python | def remove_rule(self, tag):
"""Remove a rule by tag
"""
resp = requests.delete(url=self.REQUEST_URL.format(**self._params),
json={'tag': tag})
return resp.json() | [
"def",
"remove_rule",
"(",
"self",
",",
"tag",
")",
":",
"resp",
"=",
"requests",
".",
"delete",
"(",
"url",
"=",
"self",
".",
"REQUEST_URL",
".",
"format",
"(",
"*",
"*",
"self",
".",
"_params",
")",
",",
"json",
"=",
"{",
"'tag'",
":",
"tag",
"... | Remove a rule by tag | [
"Remove",
"a",
"rule",
"by",
"tag"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L132-L138 | train | 206,677 |
prawn-cake/vk-requests | vk_requests/utils.py | stringify_values | def stringify_values(data):
"""Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict
"""
if not isinstance(data, dict):
raise ValueError('Data must be dict. %r is passed' % data)
values_dict = {}
for key, value in data.items():
items = []
if isinstance(value, six.string_types):
items.append(value)
elif isinstance(value, Iterable):
for v in value:
# Convert to str int values
if isinstance(v, int):
v = str(v)
try:
item = six.u(v)
except TypeError:
item = v
items.append(item)
value = ','.join(items)
values_dict[key] = value
return values_dict | python | def stringify_values(data):
"""Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict
"""
if not isinstance(data, dict):
raise ValueError('Data must be dict. %r is passed' % data)
values_dict = {}
for key, value in data.items():
items = []
if isinstance(value, six.string_types):
items.append(value)
elif isinstance(value, Iterable):
for v in value:
# Convert to str int values
if isinstance(v, int):
v = str(v)
try:
item = six.u(v)
except TypeError:
item = v
items.append(item)
value = ','.join(items)
values_dict[key] = value
return values_dict | [
"def",
"stringify_values",
"(",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'Data must be dict. %r is passed'",
"%",
"data",
")",
"values_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"i... | Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict | [
"Coerce",
"iterable",
"values",
"to",
"val1",
"val2",
"valN"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L22-L52 | train | 206,678 |
prawn-cake/vk-requests | vk_requests/utils.py | parse_url_query_params | def parse_url_query_params(url, fragment=True):
"""Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict
"""
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query | python | def parse_url_query_params(url, fragment=True):
"""Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict
"""
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query | [
"def",
"parse_url_query_params",
"(",
"url",
",",
"fragment",
"=",
"True",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"url",
")",
"if",
"fragment",
":",
"url_query",
"=",
"parse_qsl",
"(",
"parsed_url",
".",
"fragment",
")",
"else",
":",
"url_query",
"=... | Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict | [
"Parse",
"url",
"query",
"params"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L55-L69 | train | 206,679 |
prawn-cake/vk-requests | vk_requests/utils.py | parse_masked_phone_number | def parse_masked_phone_number(html, parser=None):
"""Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
fields = parser.find_all('span', {'class': 'field_prefix'})
if not fields:
raise VkParseError(
'No <span class="field_prefix">...</span> in the \n%s' % html)
result = []
for f in fields:
value = f.get_text().replace(six.u('\xa0'), '')
result.append(value)
return tuple(result) | python | def parse_masked_phone_number(html, parser=None):
"""Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
fields = parser.find_all('span', {'class': 'field_prefix'})
if not fields:
raise VkParseError(
'No <span class="field_prefix">...</span> in the \n%s' % html)
result = []
for f in fields:
value = f.get_text().replace(six.u('\xa0'), '')
result.append(value)
return tuple(result) | [
"def",
"parse_masked_phone_number",
"(",
"html",
",",
"parser",
"=",
"None",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"bs4",
".",
"BeautifulSoup",
"(",
"html",
",",
"'html.parser'",
")",
"fields",
"=",
"parser",
".",
"find_all",
"(",
"... | Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple | [
"Get",
"masked",
"phone",
"number",
"from",
"security",
"check",
"html"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L99-L119 | train | 206,680 |
prawn-cake/vk-requests | vk_requests/utils.py | check_html_warnings | def check_html_warnings(html, parser=None):
"""Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True | python | def check_html_warnings(html, parser=None):
"""Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True | [
"def",
"check_html_warnings",
"(",
"html",
",",
"parser",
"=",
"None",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"bs4",
".",
"BeautifulSoup",
"(",
"html",
",",
"'html.parser'",
")",
"# Check warnings",
"warnings",
"=",
"parser",
".",
"fin... | Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings | [
"Check",
"html",
"warnings"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L122-L136 | train | 206,681 |
prawn-cake/vk-requests | vk_requests/session.py | VKSession.http_session | def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session | python | def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session | [
"def",
"http_session",
"(",
"self",
")",
":",
"if",
"self",
".",
"_http_session",
"is",
"None",
":",
"session",
"=",
"VerboseHTTPSession",
"(",
")",
"session",
".",
"headers",
".",
"update",
"(",
"self",
".",
"DEFAULT_HTTP_HEADERS",
")",
"self",
".",
"_htt... | HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance | [
"HTTP",
"Session",
"property"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L63-L72 | train | 206,682 |
prawn-cake/vk-requests | vk_requests/session.py | VKSession.do_login | def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message) | python | def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message) | [
"def",
"do_login",
"(",
"self",
",",
"http_session",
")",
":",
"response",
"=",
"http_session",
".",
"get",
"(",
"self",
".",
"LOGIN_URL",
")",
"action_url",
"=",
"parse_form_action_url",
"(",
"response",
".",
"text",
")",
"# Stop login it action url is not found"... | Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session | [
"Do",
"vk",
"login"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L85-L134 | train | 206,683 |
prawn-cake/vk-requests | vk_requests/session.py | VKSession.require_auth_captcha | def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response | python | def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response | [
"def",
"require_auth_captcha",
"(",
"self",
",",
"response",
",",
"query_params",
",",
"login_form_data",
",",
"http_session",
")",
":",
"logger",
".",
"info",
"(",
"'Captcha is needed. Query params: %s'",
",",
"query_params",
")",
"form_text",
"=",
"response",
".",... | Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError: | [
"Resolve",
"auth",
"captcha",
"case"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L266-L294 | train | 206,684 |
prawn-cake/vk-requests | vk_requests/session.py | VKSession.get_captcha_key | def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually') | python | def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually') | [
"def",
"get_captcha_key",
"(",
"self",
",",
"captcha_image_url",
")",
":",
"if",
"self",
".",
"interactive",
":",
"print",
"(",
"'Open CAPTCHA image url in your browser and enter it below: '",
",",
"captcha_image_url",
")",
"captcha_key",
"=",
"raw_input",
"(",
"'Enter ... | Read CAPTCHA key from user input | [
"Read",
"CAPTCHA",
"key",
"from",
"user",
"input"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L379-L390 | train | 206,685 |
prawn-cake/vk-requests | vk_requests/session.py | VKSession.make_request | def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response'] | python | def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response'] | [
"def",
"make_request",
"(",
"self",
",",
"request",
",",
"captcha_response",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'Prepare API Method request %r'",
",",
"request",
")",
"response",
"=",
"self",
".",
"_send_api_request",
"(",
"request",
"=",
"re... | Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response | [
"Make",
"api",
"request",
"helper",
"function"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L398-L442 | train | 206,686 |
prawn-cake/vk-requests | vk_requests/session.py | VKSession._send_api_request | def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response | python | def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response | [
"def",
"_send_api_request",
"(",
"self",
",",
"request",
",",
"captcha_response",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"API_URL",
"+",
"request",
".",
"method_name",
"# Prepare request arguments",
"method_kwargs",
"=",
"{",
"'v'",
":",
"self",
".",
... | Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response | [
"Prepare",
"and",
"send",
"HTTP",
"API",
"request"
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L444-L473 | train | 206,687 |
prawn-cake/vk-requests | vk_requests/__init__.py | create_api | def create_api(app_id=None, login=None, password=None, phone_number=None,
scope='offline', api_version='5.92', http_params=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""Factory method to explicitly create API with app_id, login, password
and phone_number parameters.
If the app_id, login, password are not passed, then token-free session
will be created automatically
:param app_id: int: vk application id, more info: https://vk.com/dev/main
:param login: str: vk login
:param password: str: vk password
:param phone_number: str: phone number with country code (+71234568990)
:param scope: str or list of str: vk session scope
:param api_version: str: vk api version, check https://vk.com/dev/versions
:param interactive: bool: flag which indicates to use InteractiveVKSession
:param service_token: str: new way of querying vk api, instead of getting
oauth token
:param http_params: dict: requests http parameters passed along
:param client_secret: str: secure application key for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_supported: bool: enable two-factor authentication for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_force_sms: bool: force SMS two-factor authentication for Direct Authorization
if two_fa_supported is True, more info: https://vk.com/dev/auth_direct
:return: api instance
:rtype : vk_requests.api.API
"""
session = VKSession(app_id=app_id,
user_login=login,
user_password=password,
phone_number=phone_number,
scope=scope,
service_token=service_token,
api_version=api_version,
interactive=interactive,
client_secret=client_secret,
two_fa_supported = two_fa_supported,
two_fa_force_sms=two_fa_force_sms)
return API(session=session, http_params=http_params) | python | def create_api(app_id=None, login=None, password=None, phone_number=None,
scope='offline', api_version='5.92', http_params=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""Factory method to explicitly create API with app_id, login, password
and phone_number parameters.
If the app_id, login, password are not passed, then token-free session
will be created automatically
:param app_id: int: vk application id, more info: https://vk.com/dev/main
:param login: str: vk login
:param password: str: vk password
:param phone_number: str: phone number with country code (+71234568990)
:param scope: str or list of str: vk session scope
:param api_version: str: vk api version, check https://vk.com/dev/versions
:param interactive: bool: flag which indicates to use InteractiveVKSession
:param service_token: str: new way of querying vk api, instead of getting
oauth token
:param http_params: dict: requests http parameters passed along
:param client_secret: str: secure application key for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_supported: bool: enable two-factor authentication for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_force_sms: bool: force SMS two-factor authentication for Direct Authorization
if two_fa_supported is True, more info: https://vk.com/dev/auth_direct
:return: api instance
:rtype : vk_requests.api.API
"""
session = VKSession(app_id=app_id,
user_login=login,
user_password=password,
phone_number=phone_number,
scope=scope,
service_token=service_token,
api_version=api_version,
interactive=interactive,
client_secret=client_secret,
two_fa_supported = two_fa_supported,
two_fa_force_sms=two_fa_force_sms)
return API(session=session, http_params=http_params) | [
"def",
"create_api",
"(",
"app_id",
"=",
"None",
",",
"login",
"=",
"None",
",",
"password",
"=",
"None",
",",
"phone_number",
"=",
"None",
",",
"scope",
"=",
"'offline'",
",",
"api_version",
"=",
"'5.92'",
",",
"http_params",
"=",
"None",
",",
"interact... | Factory method to explicitly create API with app_id, login, password
and phone_number parameters.
If the app_id, login, password are not passed, then token-free session
will be created automatically
:param app_id: int: vk application id, more info: https://vk.com/dev/main
:param login: str: vk login
:param password: str: vk password
:param phone_number: str: phone number with country code (+71234568990)
:param scope: str or list of str: vk session scope
:param api_version: str: vk api version, check https://vk.com/dev/versions
:param interactive: bool: flag which indicates to use InteractiveVKSession
:param service_token: str: new way of querying vk api, instead of getting
oauth token
:param http_params: dict: requests http parameters passed along
:param client_secret: str: secure application key for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_supported: bool: enable two-factor authentication for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_force_sms: bool: force SMS two-factor authentication for Direct Authorization
if two_fa_supported is True, more info: https://vk.com/dev/auth_direct
:return: api instance
:rtype : vk_requests.api.API | [
"Factory",
"method",
"to",
"explicitly",
"create",
"API",
"with",
"app_id",
"login",
"password",
"and",
"phone_number",
"parameters",
"."
] | dde01c1ed06f13de912506163a35d8c7e06a8f62 | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/__init__.py#L21-L61 | train | 206,688 |
ggravlingen/pytradfri | pytradfri/command.py | Command.result | def result(self, value):
"""The result of the command."""
if self._process_result:
self._result = self._process_result(value)
self._raw_result = value | python | def result(self, value):
"""The result of the command."""
if self._process_result:
self._result = self._process_result(value)
self._raw_result = value | [
"def",
"result",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_process_result",
":",
"self",
".",
"_result",
"=",
"self",
".",
"_process_result",
"(",
"value",
")",
"self",
".",
"_raw_result",
"=",
"value"
] | The result of the command. | [
"The",
"result",
"of",
"the",
"command",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/command.py#L65-L70 | train | 206,689 |
ggravlingen/pytradfri | pytradfri/command.py | Command.url | def url(self, host):
"""Generate url for coap client."""
path = '/'.join(str(v) for v in self._path)
return 'coaps://{}:5684/{}'.format(host, path) | python | def url(self, host):
"""Generate url for coap client."""
path = '/'.join(str(v) for v in self._path)
return 'coaps://{}:5684/{}'.format(host, path) | [
"def",
"url",
"(",
"self",
",",
"host",
")",
":",
"path",
"=",
"'/'",
".",
"join",
"(",
"str",
"(",
"v",
")",
"for",
"v",
"in",
"self",
".",
"_path",
")",
"return",
"'coaps://{}:5684/{}'",
".",
"format",
"(",
"host",
",",
"path",
")"
] | Generate url for coap client. | [
"Generate",
"url",
"for",
"coap",
"client",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/command.py#L72-L75 | train | 206,690 |
ggravlingen/pytradfri | pytradfri/command.py | Command._merge | def _merge(self, a, b):
"""Merges a into b."""
for k, v in a.items():
if isinstance(v, dict):
item = b.setdefault(k, {})
self._merge(v, item)
elif isinstance(v, list):
item = b.setdefault(k, [{}])
if len(v) == 1 and isinstance(v[0], dict):
self._merge(v[0], item[0])
else:
b[k] = v
else:
b[k] = v
return b | python | def _merge(self, a, b):
"""Merges a into b."""
for k, v in a.items():
if isinstance(v, dict):
item = b.setdefault(k, {})
self._merge(v, item)
elif isinstance(v, list):
item = b.setdefault(k, [{}])
if len(v) == 1 and isinstance(v[0], dict):
self._merge(v[0], item[0])
else:
b[k] = v
else:
b[k] = v
return b | [
"def",
"_merge",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"for",
"k",
",",
"v",
"in",
"a",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"item",
"=",
"b",
".",
"setdefault",
"(",
"k",
",",
"{",
"}",
"... | Merges a into b. | [
"Merges",
"a",
"into",
"b",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/command.py#L77-L91 | train | 206,691 |
ggravlingen/pytradfri | pytradfri/command.py | Command.combine_data | def combine_data(self, command2):
"""Combines the data for this command with another."""
if command2 is None:
return
self._data = self._merge(command2._data, self._data) | python | def combine_data(self, command2):
"""Combines the data for this command with another."""
if command2 is None:
return
self._data = self._merge(command2._data, self._data) | [
"def",
"combine_data",
"(",
"self",
",",
"command2",
")",
":",
"if",
"command2",
"is",
"None",
":",
"return",
"self",
".",
"_data",
"=",
"self",
".",
"_merge",
"(",
"command2",
".",
"_data",
",",
"self",
".",
"_data",
")"
] | Combines the data for this command with another. | [
"Combines",
"the",
"data",
"for",
"this",
"command",
"with",
"another",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/command.py#L93-L97 | train | 206,692 |
ggravlingen/pytradfri | pytradfri/util.py | load_json | def load_json(filename: str) -> Union[List, Dict]:
"""Load JSON data from a file and return as dict or list.
Defaults to returning empty dict if file is not found.
"""
try:
with open(filename, encoding='utf-8') as fdesc:
return json.loads(fdesc.read())
except FileNotFoundError:
# This is not a fatal error
_LOGGER.debug('JSON file not found: %s', filename)
except ValueError as error:
_LOGGER.exception('Could not parse JSON content: %s', filename)
raise PytradfriError(error)
except OSError as error:
_LOGGER.exception('JSON file reading failed: %s', filename)
raise PytradfriError(error)
return {} | python | def load_json(filename: str) -> Union[List, Dict]:
"""Load JSON data from a file and return as dict or list.
Defaults to returning empty dict if file is not found.
"""
try:
with open(filename, encoding='utf-8') as fdesc:
return json.loads(fdesc.read())
except FileNotFoundError:
# This is not a fatal error
_LOGGER.debug('JSON file not found: %s', filename)
except ValueError as error:
_LOGGER.exception('Could not parse JSON content: %s', filename)
raise PytradfriError(error)
except OSError as error:
_LOGGER.exception('JSON file reading failed: %s', filename)
raise PytradfriError(error)
return {} | [
"def",
"load_json",
"(",
"filename",
":",
"str",
")",
"->",
"Union",
"[",
"List",
",",
"Dict",
"]",
":",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fdesc",
":",
"return",
"json",
".",
"loads",
"(",
"fd... | Load JSON data from a file and return as dict or list.
Defaults to returning empty dict if file is not found. | [
"Load",
"JSON",
"data",
"from",
"a",
"file",
"and",
"return",
"as",
"dict",
"or",
"list",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/util.py#L12-L29 | train | 206,693 |
ggravlingen/pytradfri | pytradfri/util.py | save_json | def save_json(filename: str, config: Union[List, Dict]):
"""Save JSON data to a file.
Returns True on success.
"""
try:
data = json.dumps(config, sort_keys=True, indent=4)
with open(filename, 'w', encoding='utf-8') as fdesc:
fdesc.write(data)
return True
except TypeError as error:
_LOGGER.exception('Failed to serialize to JSON: %s',
filename)
raise PytradfriError(error)
except OSError as error:
_LOGGER.exception('Saving JSON file failed: %s',
filename)
raise PytradfriError(error) | python | def save_json(filename: str, config: Union[List, Dict]):
"""Save JSON data to a file.
Returns True on success.
"""
try:
data = json.dumps(config, sort_keys=True, indent=4)
with open(filename, 'w', encoding='utf-8') as fdesc:
fdesc.write(data)
return True
except TypeError as error:
_LOGGER.exception('Failed to serialize to JSON: %s',
filename)
raise PytradfriError(error)
except OSError as error:
_LOGGER.exception('Saving JSON file failed: %s',
filename)
raise PytradfriError(error) | [
"def",
"save_json",
"(",
"filename",
":",
"str",
",",
"config",
":",
"Union",
"[",
"List",
",",
"Dict",
"]",
")",
":",
"try",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"config",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"wit... | Save JSON data to a file.
Returns True on success. | [
"Save",
"JSON",
"data",
"to",
"a",
"file",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/util.py#L32-L49 | train | 206,694 |
ggravlingen/pytradfri | pytradfri/util.py | BitChoices.get_selected_keys | def get_selected_keys(self, selection):
"""Return a list of keys for the given selection."""
return [k for k, b in self._lookup.items() if b & selection] | python | def get_selected_keys(self, selection):
"""Return a list of keys for the given selection."""
return [k for k, b in self._lookup.items() if b & selection] | [
"def",
"get_selected_keys",
"(",
"self",
",",
"selection",
")",
":",
"return",
"[",
"k",
"for",
"k",
",",
"b",
"in",
"self",
".",
"_lookup",
".",
"items",
"(",
")",
"if",
"b",
"&",
"selection",
"]"
] | Return a list of keys for the given selection. | [
"Return",
"a",
"list",
"of",
"keys",
"for",
"the",
"given",
"selection",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/util.py#L82-L84 | train | 206,695 |
ggravlingen/pytradfri | pytradfri/util.py | BitChoices.get_selected_values | def get_selected_values(self, selection):
"""Return a list of values for the given selection."""
return [v for b, v in self._choices if b & selection] | python | def get_selected_values(self, selection):
"""Return a list of values for the given selection."""
return [v for b, v in self._choices if b & selection] | [
"def",
"get_selected_values",
"(",
"self",
",",
"selection",
")",
":",
"return",
"[",
"v",
"for",
"b",
",",
"v",
"in",
"self",
".",
"_choices",
"if",
"b",
"&",
"selection",
"]"
] | Return a list of values for the given selection. | [
"Return",
"a",
"list",
"of",
"values",
"for",
"the",
"given",
"selection",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/util.py#L86-L88 | train | 206,696 |
ggravlingen/pytradfri | pytradfri/api/libcoap_api.py | retry_timeout | def retry_timeout(api, retries=3):
"""Retry API call when a timeout occurs."""
@wraps(api)
def retry_api(*args, **kwargs):
"""Retrying API."""
for i in range(1, retries + 1):
try:
return api(*args, **kwargs)
except RequestTimeout:
if i == retries:
raise
return retry_api | python | def retry_timeout(api, retries=3):
"""Retry API call when a timeout occurs."""
@wraps(api)
def retry_api(*args, **kwargs):
"""Retrying API."""
for i in range(1, retries + 1):
try:
return api(*args, **kwargs)
except RequestTimeout:
if i == retries:
raise
return retry_api | [
"def",
"retry_timeout",
"(",
"api",
",",
"retries",
"=",
"3",
")",
":",
"@",
"wraps",
"(",
"api",
")",
"def",
"retry_api",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Retrying API.\"\"\"",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"... | Retry API call when a timeout occurs. | [
"Retry",
"API",
"call",
"when",
"a",
"timeout",
"occurs",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/api/libcoap_api.py#L198-L210 | train | 206,697 |
ggravlingen/pytradfri | pytradfri/api/libcoap_api.py | APIFactory.request | def request(self, api_commands, *, timeout=None):
"""Make a request. Timeout is in seconds."""
if not isinstance(api_commands, list):
return self._execute(api_commands, timeout=timeout)
command_results = []
for api_command in api_commands:
result = self._execute(api_command, timeout=timeout)
command_results.append(result)
return command_results | python | def request(self, api_commands, *, timeout=None):
"""Make a request. Timeout is in seconds."""
if not isinstance(api_commands, list):
return self._execute(api_commands, timeout=timeout)
command_results = []
for api_command in api_commands:
result = self._execute(api_command, timeout=timeout)
command_results.append(result)
return command_results | [
"def",
"request",
"(",
"self",
",",
"api_commands",
",",
"*",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"api_commands",
",",
"list",
")",
":",
"return",
"self",
".",
"_execute",
"(",
"api_commands",
",",
"timeout",
"=",
"ti... | Make a request. Timeout is in seconds. | [
"Make",
"a",
"request",
".",
"Timeout",
"is",
"in",
"seconds",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/api/libcoap_api.py#L93-L104 | train | 206,698 |
ggravlingen/pytradfri | pytradfri/smart_task.py | SmartTask.task_start_time | def task_start_time(self):
"""Return the time the task starts.
Time is set according to iso8601.
"""
return datetime.time(
self.task_start_parameters[
ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR],
self.task_start_parameters[
ATTR_SMART_TASK_TRIGGER_TIME_START_MIN]) | python | def task_start_time(self):
"""Return the time the task starts.
Time is set according to iso8601.
"""
return datetime.time(
self.task_start_parameters[
ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR],
self.task_start_parameters[
ATTR_SMART_TASK_TRIGGER_TIME_START_MIN]) | [
"def",
"task_start_time",
"(",
"self",
")",
":",
"return",
"datetime",
".",
"time",
"(",
"self",
".",
"task_start_parameters",
"[",
"ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR",
"]",
",",
"self",
".",
"task_start_parameters",
"[",
"ATTR_SMART_TASK_TRIGGER_TIME_START_MIN",
... | Return the time the task starts.
Time is set according to iso8601. | [
"Return",
"the",
"time",
"the",
"task",
"starts",
"."
] | 63750fa8fb27158c013d24865cdaa7fb82b3ab53 | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/smart_task.py#L118-L127 | train | 206,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.