repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
data-8/datascience | datascience/tables.py | Table._with_columns | def _with_columns(self, columns):
"""Create a table from a sequence of columns, copying column labels."""
table = type(self)()
for label, column in zip(self.labels, columns):
self._add_column_and_format(table, label, column)
return table | python | def _with_columns(self, columns):
"""Create a table from a sequence of columns, copying column labels."""
table = type(self)()
for label, column in zip(self.labels, columns):
self._add_column_and_format(table, label, column)
return table | [
"def",
"_with_columns",
"(",
"self",
",",
"columns",
")",
":",
"table",
"=",
"type",
"(",
"self",
")",
"(",
")",
"for",
"label",
",",
"column",
"in",
"zip",
"(",
"self",
".",
"labels",
",",
"columns",
")",
":",
"self",
".",
"_add_column_and_format",
... | Create a table from a sequence of columns, copying column labels. | [
"Create",
"a",
"table",
"from",
"a",
"sequence",
"of",
"columns",
"copying",
"column",
"labels",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L135-L140 | train | 204,600 |
data-8/datascience | datascience/tables.py | Table._add_column_and_format | def _add_column_and_format(self, table, label, column):
"""Add a column to table, copying the formatter from self."""
label = self._as_label(label)
table[label] = column
if label in self._formats:
table._formats[label] = self._formats[label] | python | def _add_column_and_format(self, table, label, column):
"""Add a column to table, copying the formatter from self."""
label = self._as_label(label)
table[label] = column
if label in self._formats:
table._formats[label] = self._formats[label] | [
"def",
"_add_column_and_format",
"(",
"self",
",",
"table",
",",
"label",
",",
"column",
")",
":",
"label",
"=",
"self",
".",
"_as_label",
"(",
"label",
")",
"table",
"[",
"label",
"]",
"=",
"column",
"if",
"label",
"in",
"self",
".",
"_formats",
":",
... | Add a column to table, copying the formatter from self. | [
"Add",
"a",
"column",
"to",
"table",
"copying",
"the",
"formatter",
"from",
"self",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L142-L147 | train | 204,601 |
data-8/datascience | datascience/tables.py | Table.from_df | def from_df(cls, df):
"""Convert a Pandas DataFrame into a Table."""
t = cls()
labels = df.columns
for label in df.columns:
t.append_column(label, df[label])
return t | python | def from_df(cls, df):
"""Convert a Pandas DataFrame into a Table."""
t = cls()
labels = df.columns
for label in df.columns:
t.append_column(label, df[label])
return t | [
"def",
"from_df",
"(",
"cls",
",",
"df",
")",
":",
"t",
"=",
"cls",
"(",
")",
"labels",
"=",
"df",
".",
"columns",
"for",
"label",
"in",
"df",
".",
"columns",
":",
"t",
".",
"append_column",
"(",
"label",
",",
"df",
"[",
"label",
"]",
")",
"ret... | Convert a Pandas DataFrame into a Table. | [
"Convert",
"a",
"Pandas",
"DataFrame",
"into",
"a",
"Table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L150-L156 | train | 204,602 |
data-8/datascience | datascience/tables.py | Table.from_array | def from_array(cls, arr):
"""Convert a structured NumPy array into a Table."""
return cls().with_columns([(f, arr[f]) for f in arr.dtype.names]) | python | def from_array(cls, arr):
"""Convert a structured NumPy array into a Table."""
return cls().with_columns([(f, arr[f]) for f in arr.dtype.names]) | [
"def",
"from_array",
"(",
"cls",
",",
"arr",
")",
":",
"return",
"cls",
"(",
")",
".",
"with_columns",
"(",
"[",
"(",
"f",
",",
"arr",
"[",
"f",
"]",
")",
"for",
"f",
"in",
"arr",
".",
"dtype",
".",
"names",
"]",
")"
] | Convert a structured NumPy array into a Table. | [
"Convert",
"a",
"structured",
"NumPy",
"array",
"into",
"a",
"Table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L159-L161 | train | 204,603 |
data-8/datascience | datascience/tables.py | Table.column | def column(self, index_or_label):
"""Return the values of a column as an array.
table.column(label) is equivalent to table[label].
>>> tiles = Table().with_columns(
... 'letter', make_array('c', 'd'),
... 'count', make_array(2, 4),
... )
>>> list(tiles.column('letter'))
['c', 'd']
>>> tiles.column(1)
array([2, 4])
Args:
label (int or str): The index or label of a column
Returns:
An instance of ``numpy.array``.
Raises:
``ValueError``: When the ``index_or_label`` is not in the table.
"""
if (isinstance(index_or_label, str)
and index_or_label not in self.labels):
raise ValueError(
'The column "{}" is not in the table. The table contains '
'these columns: {}'
.format(index_or_label, ', '.join(self.labels))
)
if (isinstance(index_or_label, int)
and not 0 <= index_or_label < len(self.labels)):
raise ValueError(
'The index {} is not in the table. Only indices between '
'0 and {} are valid'
.format(index_or_label, len(self.labels) - 1)
)
return self._columns[self._as_label(index_or_label)] | python | def column(self, index_or_label):
"""Return the values of a column as an array.
table.column(label) is equivalent to table[label].
>>> tiles = Table().with_columns(
... 'letter', make_array('c', 'd'),
... 'count', make_array(2, 4),
... )
>>> list(tiles.column('letter'))
['c', 'd']
>>> tiles.column(1)
array([2, 4])
Args:
label (int or str): The index or label of a column
Returns:
An instance of ``numpy.array``.
Raises:
``ValueError``: When the ``index_or_label`` is not in the table.
"""
if (isinstance(index_or_label, str)
and index_or_label not in self.labels):
raise ValueError(
'The column "{}" is not in the table. The table contains '
'these columns: {}'
.format(index_or_label, ', '.join(self.labels))
)
if (isinstance(index_or_label, int)
and not 0 <= index_or_label < len(self.labels)):
raise ValueError(
'The index {} is not in the table. Only indices between '
'0 and {} are valid'
.format(index_or_label, len(self.labels) - 1)
)
return self._columns[self._as_label(index_or_label)] | [
"def",
"column",
"(",
"self",
",",
"index_or_label",
")",
":",
"if",
"(",
"isinstance",
"(",
"index_or_label",
",",
"str",
")",
"and",
"index_or_label",
"not",
"in",
"self",
".",
"labels",
")",
":",
"raise",
"ValueError",
"(",
"'The column \"{}\" is not in the... | Return the values of a column as an array.
table.column(label) is equivalent to table[label].
>>> tiles = Table().with_columns(
... 'letter', make_array('c', 'd'),
... 'count', make_array(2, 4),
... )
>>> list(tiles.column('letter'))
['c', 'd']
>>> tiles.column(1)
array([2, 4])
Args:
label (int or str): The index or label of a column
Returns:
An instance of ``numpy.array``.
Raises:
``ValueError``: When the ``index_or_label`` is not in the table. | [
"Return",
"the",
"values",
"of",
"a",
"column",
"as",
"an",
"array",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L246-L285 | train | 204,604 |
data-8/datascience | datascience/tables.py | Table.values | def values(self):
"""Return data in `self` as a numpy array.
If all columns are the same dtype, the resulting array
will have this dtype. If there are >1 dtypes in columns,
then the resulting array will have dtype `object`.
"""
dtypes = [col.dtype for col in self.columns]
if len(set(dtypes)) > 1:
dtype = object
else:
dtype = None
return np.array(self.columns, dtype=dtype).T | python | def values(self):
"""Return data in `self` as a numpy array.
If all columns are the same dtype, the resulting array
will have this dtype. If there are >1 dtypes in columns,
then the resulting array will have dtype `object`.
"""
dtypes = [col.dtype for col in self.columns]
if len(set(dtypes)) > 1:
dtype = object
else:
dtype = None
return np.array(self.columns, dtype=dtype).T | [
"def",
"values",
"(",
"self",
")",
":",
"dtypes",
"=",
"[",
"col",
".",
"dtype",
"for",
"col",
"in",
"self",
".",
"columns",
"]",
"if",
"len",
"(",
"set",
"(",
"dtypes",
")",
")",
">",
"1",
":",
"dtype",
"=",
"object",
"else",
":",
"dtype",
"="... | Return data in `self` as a numpy array.
If all columns are the same dtype, the resulting array
will have this dtype. If there are >1 dtypes in columns,
then the resulting array will have dtype `object`. | [
"Return",
"data",
"in",
"self",
"as",
"a",
"numpy",
"array",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L288-L300 | train | 204,605 |
data-8/datascience | datascience/tables.py | Table.apply | def apply(self, fn, *column_or_columns):
"""Apply ``fn`` to each element or elements of ``column_or_columns``.
If no ``column_or_columns`` provided, `fn`` is applied to each row.
Args:
``fn`` (function) -- The function to apply.
``column_or_columns``: Columns containing the arguments to ``fn``
as either column labels (``str``) or column indices (``int``).
The number of columns must match the number of arguments
that ``fn`` expects.
Raises:
``ValueError`` -- if ``column_label`` is not an existing
column in the table.
``TypeError`` -- if insufficent number of ``column_label`` passed
to ``fn``.
Returns:
An array consisting of results of applying ``fn`` to elements
specified by ``column_label`` in each row.
>>> t = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> t.apply(lambda x: x - 1, 'points')
array([0, 1, 1, 9])
>>> t.apply(lambda x, y: x * y, 'count', 'points')
array([ 9, 6, 6, 10])
>>> t.apply(lambda x: x - 1, 'count', 'points')
Traceback (most recent call last):
...
TypeError: <lambda>() takes 1 positional argument but 2 were given
>>> t.apply(lambda x: x - 1, 'counts')
Traceback (most recent call last):
...
ValueError: The column "counts" is not in the table. The table contains these columns: letter, count, points
Whole rows are passed to the function if no columns are specified.
>>> t.apply(lambda row: row[1] * 2)
array([18, 6, 6, 2])
"""
if not column_or_columns:
return np.array([fn(row) for row in self.rows])
else:
if len(column_or_columns) == 1 and \
_is_non_string_iterable(column_or_columns[0]):
warnings.warn(
"column lists are deprecated; pass each as an argument", FutureWarning)
column_or_columns = column_or_columns[0]
rows = zip(*self.select(*column_or_columns).columns)
return np.array([fn(*row) for row in rows]) | python | def apply(self, fn, *column_or_columns):
"""Apply ``fn`` to each element or elements of ``column_or_columns``.
If no ``column_or_columns`` provided, `fn`` is applied to each row.
Args:
``fn`` (function) -- The function to apply.
``column_or_columns``: Columns containing the arguments to ``fn``
as either column labels (``str``) or column indices (``int``).
The number of columns must match the number of arguments
that ``fn`` expects.
Raises:
``ValueError`` -- if ``column_label`` is not an existing
column in the table.
``TypeError`` -- if insufficent number of ``column_label`` passed
to ``fn``.
Returns:
An array consisting of results of applying ``fn`` to elements
specified by ``column_label`` in each row.
>>> t = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> t.apply(lambda x: x - 1, 'points')
array([0, 1, 1, 9])
>>> t.apply(lambda x, y: x * y, 'count', 'points')
array([ 9, 6, 6, 10])
>>> t.apply(lambda x: x - 1, 'count', 'points')
Traceback (most recent call last):
...
TypeError: <lambda>() takes 1 positional argument but 2 were given
>>> t.apply(lambda x: x - 1, 'counts')
Traceback (most recent call last):
...
ValueError: The column "counts" is not in the table. The table contains these columns: letter, count, points
Whole rows are passed to the function if no columns are specified.
>>> t.apply(lambda row: row[1] * 2)
array([18, 6, 6, 2])
"""
if not column_or_columns:
return np.array([fn(row) for row in self.rows])
else:
if len(column_or_columns) == 1 and \
_is_non_string_iterable(column_or_columns[0]):
warnings.warn(
"column lists are deprecated; pass each as an argument", FutureWarning)
column_or_columns = column_or_columns[0]
rows = zip(*self.select(*column_or_columns).columns)
return np.array([fn(*row) for row in rows]) | [
"def",
"apply",
"(",
"self",
",",
"fn",
",",
"*",
"column_or_columns",
")",
":",
"if",
"not",
"column_or_columns",
":",
"return",
"np",
".",
"array",
"(",
"[",
"fn",
"(",
"row",
")",
"for",
"row",
"in",
"self",
".",
"rows",
"]",
")",
"else",
":",
... | Apply ``fn`` to each element or elements of ``column_or_columns``.
If no ``column_or_columns`` provided, `fn`` is applied to each row.
Args:
``fn`` (function) -- The function to apply.
``column_or_columns``: Columns containing the arguments to ``fn``
as either column labels (``str``) or column indices (``int``).
The number of columns must match the number of arguments
that ``fn`` expects.
Raises:
``ValueError`` -- if ``column_label`` is not an existing
column in the table.
``TypeError`` -- if insufficent number of ``column_label`` passed
to ``fn``.
Returns:
An array consisting of results of applying ``fn`` to elements
specified by ``column_label`` in each row.
>>> t = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> t.apply(lambda x: x - 1, 'points')
array([0, 1, 1, 9])
>>> t.apply(lambda x, y: x * y, 'count', 'points')
array([ 9, 6, 6, 10])
>>> t.apply(lambda x: x - 1, 'count', 'points')
Traceback (most recent call last):
...
TypeError: <lambda>() takes 1 positional argument but 2 were given
>>> t.apply(lambda x: x - 1, 'counts')
Traceback (most recent call last):
...
ValueError: The column "counts" is not in the table. The table contains these columns: letter, count, points
Whole rows are passed to the function if no columns are specified.
>>> t.apply(lambda row: row[1] * 2)
array([18, 6, 6, 2]) | [
"Apply",
"fn",
"to",
"each",
"element",
"or",
"elements",
"of",
"column_or_columns",
".",
"If",
"no",
"column_or_columns",
"provided",
"fn",
"is",
"applied",
"to",
"each",
"row",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L306-L364 | train | 204,606 |
data-8/datascience | datascience/tables.py | Table.set_format | def set_format(self, column_or_columns, formatter):
"""Set the format of a column."""
if inspect.isclass(formatter):
formatter = formatter()
if callable(formatter) and not hasattr(formatter, 'format_column'):
formatter = _formats.FunctionFormatter(formatter)
if not hasattr(formatter, 'format_column'):
raise Exception('Expected Formatter or function: ' + str(formatter))
for label in self._as_labels(column_or_columns):
if formatter.converts_values:
self[label] = formatter.convert_column(self[label])
self._formats[label] = formatter
return self | python | def set_format(self, column_or_columns, formatter):
"""Set the format of a column."""
if inspect.isclass(formatter):
formatter = formatter()
if callable(formatter) and not hasattr(formatter, 'format_column'):
formatter = _formats.FunctionFormatter(formatter)
if not hasattr(formatter, 'format_column'):
raise Exception('Expected Formatter or function: ' + str(formatter))
for label in self._as_labels(column_or_columns):
if formatter.converts_values:
self[label] = formatter.convert_column(self[label])
self._formats[label] = formatter
return self | [
"def",
"set_format",
"(",
"self",
",",
"column_or_columns",
",",
"formatter",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"formatter",
")",
":",
"formatter",
"=",
"formatter",
"(",
")",
"if",
"callable",
"(",
"formatter",
")",
"and",
"not",
"hasattr",
... | Set the format of a column. | [
"Set",
"the",
"format",
"of",
"a",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L370-L382 | train | 204,607 |
data-8/datascience | datascience/tables.py | Table.move_to_start | def move_to_start(self, column_label):
"""Move a column to the first in order."""
self._columns.move_to_end(column_label, last=False)
return self | python | def move_to_start(self, column_label):
"""Move a column to the first in order."""
self._columns.move_to_end(column_label, last=False)
return self | [
"def",
"move_to_start",
"(",
"self",
",",
"column_label",
")",
":",
"self",
".",
"_columns",
".",
"move_to_end",
"(",
"column_label",
",",
"last",
"=",
"False",
")",
"return",
"self"
] | Move a column to the first in order. | [
"Move",
"a",
"column",
"to",
"the",
"first",
"in",
"order",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L384-L387 | train | 204,608 |
data-8/datascience | datascience/tables.py | Table.append | def append(self, row_or_table):
"""Append a row or all rows of a table. An appended table must have all
columns of self."""
if not row_or_table:
return
if isinstance(row_or_table, Table):
t = row_or_table
columns = list(t.select(self.labels)._columns.values())
n = t.num_rows
else:
if (len(list(row_or_table)) != self.num_columns):
raise Exception('Row should have '+ str(self.num_columns) + " columns")
columns, n = [[value] for value in row_or_table], 1
for i, column in enumerate(self._columns):
if self.num_rows:
self._columns[column] = np.append(self[column], columns[i])
else:
self._columns[column] = np.array(columns[i])
self._num_rows += n
return self | python | def append(self, row_or_table):
"""Append a row or all rows of a table. An appended table must have all
columns of self."""
if not row_or_table:
return
if isinstance(row_or_table, Table):
t = row_or_table
columns = list(t.select(self.labels)._columns.values())
n = t.num_rows
else:
if (len(list(row_or_table)) != self.num_columns):
raise Exception('Row should have '+ str(self.num_columns) + " columns")
columns, n = [[value] for value in row_or_table], 1
for i, column in enumerate(self._columns):
if self.num_rows:
self._columns[column] = np.append(self[column], columns[i])
else:
self._columns[column] = np.array(columns[i])
self._num_rows += n
return self | [
"def",
"append",
"(",
"self",
",",
"row_or_table",
")",
":",
"if",
"not",
"row_or_table",
":",
"return",
"if",
"isinstance",
"(",
"row_or_table",
",",
"Table",
")",
":",
"t",
"=",
"row_or_table",
"columns",
"=",
"list",
"(",
"t",
".",
"select",
"(",
"s... | Append a row or all rows of a table. An appended table must have all
columns of self. | [
"Append",
"a",
"row",
"or",
"all",
"rows",
"of",
"a",
"table",
".",
"An",
"appended",
"table",
"must",
"have",
"all",
"columns",
"of",
"self",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L394-L413 | train | 204,609 |
data-8/datascience | datascience/tables.py | Table.append_column | def append_column(self, label, values):
"""Appends a column to the table or replaces a column.
``__setitem__`` is aliased to this method:
``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to
``table['new_col'] = make_array(1, 2, 3)``.
Args:
``label`` (str): The label of the new column.
``values`` (single value or list/array): If a single value, every
value in the new column is ``values``.
If a list or array, the new column contains the values in
``values``, which must be the same length as the table.
Returns:
Original table with new or replaced column
Raises:
``ValueError``: If
- ``label`` is not a string.
- ``values`` is a list/array and does not have the same length
as the number of rows in the table.
>>> table = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> table.append_column('new_col1', make_array(10, 20, 30, 40))
>>> table
letter | count | points | new_col1
a | 9 | 1 | 10
b | 3 | 2 | 20
c | 3 | 2 | 30
z | 1 | 10 | 40
>>> table.append_column('new_col2', 'hello')
>>> table
letter | count | points | new_col1 | new_col2
a | 9 | 1 | 10 | hello
b | 3 | 2 | 20 | hello
c | 3 | 2 | 30 | hello
z | 1 | 10 | 40 | hello
>>> table.append_column(123, make_array(1, 2, 3, 4))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> table.append_column('bad_col', [1, 2])
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
# TODO(sam): Allow append_column to take in a another table, copying
# over formatter as needed.
if not isinstance(label, str):
raise ValueError('The column label must be a string, but a '
'{} was given'.format(label.__class__.__name__))
if not isinstance(values, np.ndarray):
# Coerce a single value to a sequence
if not _is_non_string_iterable(values):
values = [values] * max(self.num_rows, 1)
values = np.array(tuple(values))
if self.num_rows != 0 and len(values) != self.num_rows:
raise ValueError('Column length mismatch. New column does not have '
'the same number of rows as table.')
else:
self._num_rows = len(values)
self._columns[label] = values | python | def append_column(self, label, values):
"""Appends a column to the table or replaces a column.
``__setitem__`` is aliased to this method:
``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to
``table['new_col'] = make_array(1, 2, 3)``.
Args:
``label`` (str): The label of the new column.
``values`` (single value or list/array): If a single value, every
value in the new column is ``values``.
If a list or array, the new column contains the values in
``values``, which must be the same length as the table.
Returns:
Original table with new or replaced column
Raises:
``ValueError``: If
- ``label`` is not a string.
- ``values`` is a list/array and does not have the same length
as the number of rows in the table.
>>> table = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> table.append_column('new_col1', make_array(10, 20, 30, 40))
>>> table
letter | count | points | new_col1
a | 9 | 1 | 10
b | 3 | 2 | 20
c | 3 | 2 | 30
z | 1 | 10 | 40
>>> table.append_column('new_col2', 'hello')
>>> table
letter | count | points | new_col1 | new_col2
a | 9 | 1 | 10 | hello
b | 3 | 2 | 20 | hello
c | 3 | 2 | 30 | hello
z | 1 | 10 | 40 | hello
>>> table.append_column(123, make_array(1, 2, 3, 4))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> table.append_column('bad_col', [1, 2])
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
# TODO(sam): Allow append_column to take in a another table, copying
# over formatter as needed.
if not isinstance(label, str):
raise ValueError('The column label must be a string, but a '
'{} was given'.format(label.__class__.__name__))
if not isinstance(values, np.ndarray):
# Coerce a single value to a sequence
if not _is_non_string_iterable(values):
values = [values] * max(self.num_rows, 1)
values = np.array(tuple(values))
if self.num_rows != 0 and len(values) != self.num_rows:
raise ValueError('Column length mismatch. New column does not have '
'the same number of rows as table.')
else:
self._num_rows = len(values)
self._columns[label] = values | [
"def",
"append_column",
"(",
"self",
",",
"label",
",",
"values",
")",
":",
"# TODO(sam): Allow append_column to take in a another table, copying",
"# over formatter as needed.",
"if",
"not",
"isinstance",
"(",
"label",
",",
"str",
")",
":",
"raise",
"ValueError",
"(",
... | Appends a column to the table or replaces a column.
``__setitem__`` is aliased to this method:
``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to
``table['new_col'] = make_array(1, 2, 3)``.
Args:
``label`` (str): The label of the new column.
``values`` (single value or list/array): If a single value, every
value in the new column is ``values``.
If a list or array, the new column contains the values in
``values``, which must be the same length as the table.
Returns:
Original table with new or replaced column
Raises:
``ValueError``: If
- ``label`` is not a string.
- ``values`` is a list/array and does not have the same length
as the number of rows in the table.
>>> table = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> table.append_column('new_col1', make_array(10, 20, 30, 40))
>>> table
letter | count | points | new_col1
a | 9 | 1 | 10
b | 3 | 2 | 20
c | 3 | 2 | 30
z | 1 | 10 | 40
>>> table.append_column('new_col2', 'hello')
>>> table
letter | count | points | new_col1 | new_col2
a | 9 | 1 | 10 | hello
b | 3 | 2 | 20 | hello
c | 3 | 2 | 30 | hello
z | 1 | 10 | 40 | hello
>>> table.append_column(123, make_array(1, 2, 3, 4))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> table.append_column('bad_col', [1, 2])
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table. | [
"Appends",
"a",
"column",
"to",
"the",
"table",
"or",
"replaces",
"a",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L415-L491 | train | 204,610 |
data-8/datascience | datascience/tables.py | Table.remove | def remove(self, row_or_row_indices):
"""Removes a row or multiple rows of a table in place."""
if not row_or_row_indices:
return
if isinstance(row_or_row_indices, int):
rows_remove = [row_or_row_indices]
else:
rows_remove = row_or_row_indices
for col in self._columns:
self._columns[col] = [elem for i, elem in enumerate(self[col]) if i not in rows_remove]
return self | python | def remove(self, row_or_row_indices):
"""Removes a row or multiple rows of a table in place."""
if not row_or_row_indices:
return
if isinstance(row_or_row_indices, int):
rows_remove = [row_or_row_indices]
else:
rows_remove = row_or_row_indices
for col in self._columns:
self._columns[col] = [elem for i, elem in enumerate(self[col]) if i not in rows_remove]
return self | [
"def",
"remove",
"(",
"self",
",",
"row_or_row_indices",
")",
":",
"if",
"not",
"row_or_row_indices",
":",
"return",
"if",
"isinstance",
"(",
"row_or_row_indices",
",",
"int",
")",
":",
"rows_remove",
"=",
"[",
"row_or_row_indices",
"]",
"else",
":",
"rows_rem... | Removes a row or multiple rows of a table in place. | [
"Removes",
"a",
"row",
"or",
"multiple",
"rows",
"of",
"a",
"table",
"in",
"place",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L555-L565 | train | 204,611 |
data-8/datascience | datascience/tables.py | Table.copy | def copy(self, *, shallow=False):
"""Return a copy of a table."""
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table | python | def copy(self, *, shallow=False):
"""Return a copy of a table."""
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table | [
"def",
"copy",
"(",
"self",
",",
"*",
",",
"shallow",
"=",
"False",
")",
":",
"table",
"=",
"type",
"(",
"self",
")",
"(",
")",
"for",
"label",
"in",
"self",
".",
"labels",
":",
"if",
"shallow",
":",
"column",
"=",
"self",
"[",
"label",
"]",
"e... | Return a copy of a table. | [
"Return",
"a",
"copy",
"of",
"a",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L572-L581 | train | 204,612 |
data-8/datascience | datascience/tables.py | Table.select | def select(self, *column_or_columns):
"""Return a table with only the columns in ``column_or_columns``.
Args:
``column_or_columns``: Columns to select from the ``Table`` as
either column labels (``str``) or column indices (``int``).
Returns:
A new instance of ``Table`` containing only selected columns.
The columns of the new ``Table`` are in the order given in
``column_or_columns``.
Raises:
``KeyError`` if any of ``column_or_columns`` are not in the table.
>>> flowers = Table().with_columns(
... 'Number of petals', make_array(8, 34, 5),
... 'Name', make_array('lotus', 'sunflower', 'rose'),
... 'Weight', make_array(10, 5, 6)
... )
>>> flowers
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select('Number of petals', 'Weight')
Number of petals | Weight
8 | 10
34 | 5
5 | 6
>>> flowers # original table unchanged
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select(0, 2)
Number of petals | Weight
8 | 10
34 | 5
5 | 6
"""
labels = self._varargs_as_labels(column_or_columns)
table = type(self)()
for label in labels:
self._add_column_and_format(table, label, np.copy(self[label]))
return table | python | def select(self, *column_or_columns):
"""Return a table with only the columns in ``column_or_columns``.
Args:
``column_or_columns``: Columns to select from the ``Table`` as
either column labels (``str``) or column indices (``int``).
Returns:
A new instance of ``Table`` containing only selected columns.
The columns of the new ``Table`` are in the order given in
``column_or_columns``.
Raises:
``KeyError`` if any of ``column_or_columns`` are not in the table.
>>> flowers = Table().with_columns(
... 'Number of petals', make_array(8, 34, 5),
... 'Name', make_array('lotus', 'sunflower', 'rose'),
... 'Weight', make_array(10, 5, 6)
... )
>>> flowers
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select('Number of petals', 'Weight')
Number of petals | Weight
8 | 10
34 | 5
5 | 6
>>> flowers # original table unchanged
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select(0, 2)
Number of petals | Weight
8 | 10
34 | 5
5 | 6
"""
labels = self._varargs_as_labels(column_or_columns)
table = type(self)()
for label in labels:
self._add_column_and_format(table, label, np.copy(self[label]))
return table | [
"def",
"select",
"(",
"self",
",",
"*",
"column_or_columns",
")",
":",
"labels",
"=",
"self",
".",
"_varargs_as_labels",
"(",
"column_or_columns",
")",
"table",
"=",
"type",
"(",
"self",
")",
"(",
")",
"for",
"label",
"in",
"labels",
":",
"self",
".",
... | Return a table with only the columns in ``column_or_columns``.
Args:
``column_or_columns``: Columns to select from the ``Table`` as
either column labels (``str``) or column indices (``int``).
Returns:
A new instance of ``Table`` containing only selected columns.
The columns of the new ``Table`` are in the order given in
``column_or_columns``.
Raises:
``KeyError`` if any of ``column_or_columns`` are not in the table.
>>> flowers = Table().with_columns(
... 'Number of petals', make_array(8, 34, 5),
... 'Name', make_array('lotus', 'sunflower', 'rose'),
... 'Weight', make_array(10, 5, 6)
... )
>>> flowers
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select('Number of petals', 'Weight')
Number of petals | Weight
8 | 10
34 | 5
5 | 6
>>> flowers # original table unchanged
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select(0, 2)
Number of petals | Weight
8 | 10
34 | 5
5 | 6 | [
"Return",
"a",
"table",
"with",
"only",
"the",
"columns",
"in",
"column_or_columns",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L583-L632 | train | 204,613 |
data-8/datascience | datascience/tables.py | Table.drop | def drop(self, *column_or_columns):
"""Return a Table with only columns other than selected label or
labels.
Args:
``column_or_columns`` (string or list of strings): The header
names or indices of the columns to be dropped.
``column_or_columns`` must be an existing header name, or a
valid column index.
Returns:
An instance of ``Table`` with given columns removed.
>>> t = Table().with_columns(
... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),
... 'prices', make_array(6, 5, 5),
... 'calories', make_array(743, 651, 582))
>>> t
burgers | prices | calories
cheeseburger | 6 | 743
hamburger | 5 | 651
veggie burger | 5 | 582
>>> t.drop('prices')
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
>>> t.drop(['burgers', 'calories'])
prices
6
5
5
>>> t.drop('burgers', 'calories')
prices
6
5
5
>>> t.drop([0, 2])
prices
6
5
5
>>> t.drop(0, 2)
prices
6
5
5
>>> t.drop(1)
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
"""
exclude = _varargs_labels_as_list(column_or_columns)
return self.select([c for (i, c) in enumerate(self.labels)
if i not in exclude and c not in exclude]) | python | def drop(self, *column_or_columns):
"""Return a Table with only columns other than selected label or
labels.
Args:
``column_or_columns`` (string or list of strings): The header
names or indices of the columns to be dropped.
``column_or_columns`` must be an existing header name, or a
valid column index.
Returns:
An instance of ``Table`` with given columns removed.
>>> t = Table().with_columns(
... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),
... 'prices', make_array(6, 5, 5),
... 'calories', make_array(743, 651, 582))
>>> t
burgers | prices | calories
cheeseburger | 6 | 743
hamburger | 5 | 651
veggie burger | 5 | 582
>>> t.drop('prices')
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
>>> t.drop(['burgers', 'calories'])
prices
6
5
5
>>> t.drop('burgers', 'calories')
prices
6
5
5
>>> t.drop([0, 2])
prices
6
5
5
>>> t.drop(0, 2)
prices
6
5
5
>>> t.drop(1)
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
"""
exclude = _varargs_labels_as_list(column_or_columns)
return self.select([c for (i, c) in enumerate(self.labels)
if i not in exclude and c not in exclude]) | [
"def",
"drop",
"(",
"self",
",",
"*",
"column_or_columns",
")",
":",
"exclude",
"=",
"_varargs_labels_as_list",
"(",
"column_or_columns",
")",
"return",
"self",
".",
"select",
"(",
"[",
"c",
"for",
"(",
"i",
",",
"c",
")",
"in",
"enumerate",
"(",
"self",... | Return a Table with only columns other than selected label or
labels.
Args:
``column_or_columns`` (string or list of strings): The header
names or indices of the columns to be dropped.
``column_or_columns`` must be an existing header name, or a
valid column index.
Returns:
An instance of ``Table`` with given columns removed.
>>> t = Table().with_columns(
... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),
... 'prices', make_array(6, 5, 5),
... 'calories', make_array(743, 651, 582))
>>> t
burgers | prices | calories
cheeseburger | 6 | 743
hamburger | 5 | 651
veggie burger | 5 | 582
>>> t.drop('prices')
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
>>> t.drop(['burgers', 'calories'])
prices
6
5
5
>>> t.drop('burgers', 'calories')
prices
6
5
5
>>> t.drop([0, 2])
prices
6
5
5
>>> t.drop(0, 2)
prices
6
5
5
>>> t.drop(1)
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582 | [
"Return",
"a",
"Table",
"with",
"only",
"columns",
"other",
"than",
"selected",
"label",
"or",
"labels",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L643-L699 | train | 204,614 |
data-8/datascience | datascience/tables.py | Table.where | def where(self, column_or_label, value_or_predicate=None, other=None):
"""
Return a new ``Table`` containing rows where ``value_or_predicate``
returns True for values in ``column_or_label``.
Args:
``column_or_label``: A column of the ``Table`` either as a label
(``str``) or an index (``int``). Can also be an array of booleans;
only the rows where the array value is ``True`` are kept.
``value_or_predicate``: If a function, it is applied to every value
in ``column_or_label``. Only the rows where ``value_or_predicate``
returns True are kept. If a single value, only the rows where the
values in ``column_or_label`` are equal to ``value_or_predicate``
are kept.
``other``: Optional additional column label for
``value_or_predicate`` to make pairwise comparisons. See the
examples below for usage. When ``other`` is supplied,
``value_or_predicate`` must be a callable function.
Returns:
If ``value_or_predicate`` is a function, returns a new ``Table``
containing only the rows where ``value_or_predicate(val)`` is True
for the ``val``s in ``column_or_label``.
If ``value_or_predicate`` is a value, returns a new ``Table``
containing only the rows where the values in ``column_or_label``
are equal to ``value_or_predicate``.
If ``column_or_label`` is an array of booleans, returns a new
``Table`` containing only the rows where ``column_or_label`` is
``True``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue",
... "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular",
... "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.2
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 0
Green | Round | 2 | 3
Use a value to select matching rows
>>> marbles.where("Price", 1.3)
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
In general, a higher order predicate function such as the functions in
``datascience.predicates.are`` can be used.
>>> from datascience.predicates import are
>>> # equivalent to previous example
>>> marbles.where("Price", are.equal_to(1.3))
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
>>> marbles.where("Price", are.above(1.5))
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Round | 2 | 3
Use the optional argument ``other`` to apply predicates to compare
columns.
>>> marbles.where("Price", are.above, "Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 3
>>> marbles.where("Price", are.equal_to, "Amount") # empty table
Color | Shape | Amount | Price
"""
column = self._get_column(column_or_label)
if other is not None:
assert callable(value_or_predicate), "Predicate required for 3-arg where"
predicate = value_or_predicate
other = self._get_column(other)
column = [predicate(y)(x) for x, y in zip(column, other)]
elif value_or_predicate is not None:
if not callable(value_or_predicate):
predicate = _predicates.are.equal_to(value_or_predicate)
else:
predicate = value_or_predicate
column = [predicate(x) for x in column]
return self.take(np.nonzero(column)[0]) | python | def where(self, column_or_label, value_or_predicate=None, other=None):
"""
Return a new ``Table`` containing rows where ``value_or_predicate``
returns True for values in ``column_or_label``.
Args:
``column_or_label``: A column of the ``Table`` either as a label
(``str``) or an index (``int``). Can also be an array of booleans;
only the rows where the array value is ``True`` are kept.
``value_or_predicate``: If a function, it is applied to every value
in ``column_or_label``. Only the rows where ``value_or_predicate``
returns True are kept. If a single value, only the rows where the
values in ``column_or_label`` are equal to ``value_or_predicate``
are kept.
``other``: Optional additional column label for
``value_or_predicate`` to make pairwise comparisons. See the
examples below for usage. When ``other`` is supplied,
``value_or_predicate`` must be a callable function.
Returns:
If ``value_or_predicate`` is a function, returns a new ``Table``
containing only the rows where ``value_or_predicate(val)`` is True
for the ``val``s in ``column_or_label``.
If ``value_or_predicate`` is a value, returns a new ``Table``
containing only the rows where the values in ``column_or_label``
are equal to ``value_or_predicate``.
If ``column_or_label`` is an array of booleans, returns a new
``Table`` containing only the rows where ``column_or_label`` is
``True``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue",
... "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular",
... "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.2
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 0
Green | Round | 2 | 3
Use a value to select matching rows
>>> marbles.where("Price", 1.3)
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
In general, a higher order predicate function such as the functions in
``datascience.predicates.are`` can be used.
>>> from datascience.predicates import are
>>> # equivalent to previous example
>>> marbles.where("Price", are.equal_to(1.3))
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
>>> marbles.where("Price", are.above(1.5))
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Round | 2 | 3
Use the optional argument ``other`` to apply predicates to compare
columns.
>>> marbles.where("Price", are.above, "Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 3
>>> marbles.where("Price", are.equal_to, "Amount") # empty table
Color | Shape | Amount | Price
"""
column = self._get_column(column_or_label)
if other is not None:
assert callable(value_or_predicate), "Predicate required for 3-arg where"
predicate = value_or_predicate
other = self._get_column(other)
column = [predicate(y)(x) for x, y in zip(column, other)]
elif value_or_predicate is not None:
if not callable(value_or_predicate):
predicate = _predicates.are.equal_to(value_or_predicate)
else:
predicate = value_or_predicate
column = [predicate(x) for x in column]
return self.take(np.nonzero(column)[0]) | [
"def",
"where",
"(",
"self",
",",
"column_or_label",
",",
"value_or_predicate",
"=",
"None",
",",
"other",
"=",
"None",
")",
":",
"column",
"=",
"self",
".",
"_get_column",
"(",
"column_or_label",
")",
"if",
"other",
"is",
"not",
"None",
":",
"assert",
"... | Return a new ``Table`` containing rows where ``value_or_predicate``
returns True for values in ``column_or_label``.
Args:
``column_or_label``: A column of the ``Table`` either as a label
(``str``) or an index (``int``). Can also be an array of booleans;
only the rows where the array value is ``True`` are kept.
``value_or_predicate``: If a function, it is applied to every value
in ``column_or_label``. Only the rows where ``value_or_predicate``
returns True are kept. If a single value, only the rows where the
values in ``column_or_label`` are equal to ``value_or_predicate``
are kept.
``other``: Optional additional column label for
``value_or_predicate`` to make pairwise comparisons. See the
examples below for usage. When ``other`` is supplied,
``value_or_predicate`` must be a callable function.
Returns:
If ``value_or_predicate`` is a function, returns a new ``Table``
containing only the rows where ``value_or_predicate(val)`` is True
for the ``val``s in ``column_or_label``.
If ``value_or_predicate`` is a value, returns a new ``Table``
containing only the rows where the values in ``column_or_label``
are equal to ``value_or_predicate``.
If ``column_or_label`` is an array of booleans, returns a new
``Table`` containing only the rows where ``column_or_label`` is
``True``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue",
... "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular",
... "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.2
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 0
Green | Round | 2 | 3
Use a value to select matching rows
>>> marbles.where("Price", 1.3)
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
In general, a higher order predicate function such as the functions in
``datascience.predicates.are`` can be used.
>>> from datascience.predicates import are
>>> # equivalent to previous example
>>> marbles.where("Price", are.equal_to(1.3))
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
>>> marbles.where("Price", are.above(1.5))
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Round | 2 | 3
Use the optional argument ``other`` to apply predicates to compare
columns.
>>> marbles.where("Price", are.above, "Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 3
>>> marbles.where("Price", are.equal_to, "Amount") # empty table
Color | Shape | Amount | Price | [
"Return",
"a",
"new",
"Table",
"containing",
"rows",
"where",
"value_or_predicate",
"returns",
"True",
"for",
"values",
"in",
"column_or_label",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L701-L795 | train | 204,615 |
data-8/datascience | datascience/tables.py | Table.sort | def sort(self, column_or_label, descending=False, distinct=False):
"""Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
"""
column = self._get_column(column_or_label)
if distinct:
_, row_numbers = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::-1])
return self.take(row_numbers) | python | def sort(self, column_or_label, descending=False, distinct=False):
"""Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
"""
column = self._get_column(column_or_label)
if distinct:
_, row_numbers = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::-1])
return self.take(row_numbers) | [
"def",
"sort",
"(",
"self",
",",
"column_or_label",
",",
"descending",
"=",
"False",
",",
"distinct",
"=",
"False",
")",
":",
"column",
"=",
"self",
".",
"_get_column",
"(",
"column_or_label",
")",
"if",
"distinct",
":",
"_",
",",
"row_numbers",
"=",
"np... | Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2 | [
"Return",
"a",
"Table",
"of",
"rows",
"sorted",
"according",
"to",
"the",
"values",
"in",
"a",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L797-L866 | train | 204,616 |
data-8/datascience | datascience/tables.py | Table.group | def group(self, column_or_label, collect=None):
"""Group rows by unique values in a column; count or aggregate others.
Args:
``column_or_label``: values to group (column label or index, or array)
``collect``: a function applied to values in other columns for each group
Returns:
A Table with each row corresponding to a unique value in ``column_or_label``,
where the first column contains the unique values from ``column_or_label``, and the
second contains counts for each of the unique values. If ``collect`` is
provided, a Table is returned with all original columns, each containing values
calculated by first grouping rows according to ``column_or_label``, then applying
``collect`` to each set of grouped values in the other columns.
Note:
The grouped column will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.group("Color") # just gives counts
Color | count
Blue | 1
Green | 3
Red | 2
>>> marbles.group("Color", max) # takes the max of each grouping, in each column
Color | Shape max | Amount max | Price max
Blue | Rectangular | 12 | 2
Green | Round | 9 | 1.4
Red | Round | 7 | 1.75
>>> marbles.group("Shape", sum) # sum doesn't make sense for strings
Shape | Color sum | Amount sum | Price sum
Rectangular | | 27 | 4.7
Round | | 13 | 4.05
"""
# Assume that a call to group with a list of labels is a call to groups
if _is_non_string_iterable(column_or_label) and \
len(column_or_label) != self._num_rows:
return self.groups(column_or_label, collect)
self = self.copy(shallow=True)
collect = _zero_on_type_error(collect)
# Remove column used for grouping
column = self._get_column(column_or_label)
if isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral):
column_label = self._as_label(column_or_label)
del self[column_label]
else:
column_label = self._unused_label('group')
# Group by column
groups = self.index_by(column)
keys = sorted(groups.keys())
# Generate grouped columns
if collect is None:
labels = [column_label, 'count' if column_label != 'count' else self._unused_label('count')]
columns = [keys, [len(groups[k]) for k in keys]]
else:
columns, labels = [], []
for i, label in enumerate(self.labels):
labels.append(_collected_label(collect, label))
c = [collect(np.array([row[i] for row in groups[k]])) for k in keys]
columns.append(c)
grouped = type(self)().with_columns(zip(labels, columns))
assert column_label == self._unused_label(column_label)
grouped[column_label] = keys
grouped.move_to_start(column_label)
return grouped | python | def group(self, column_or_label, collect=None):
"""Group rows by unique values in a column; count or aggregate others.
Args:
``column_or_label``: values to group (column label or index, or array)
``collect``: a function applied to values in other columns for each group
Returns:
A Table with each row corresponding to a unique value in ``column_or_label``,
where the first column contains the unique values from ``column_or_label``, and the
second contains counts for each of the unique values. If ``collect`` is
provided, a Table is returned with all original columns, each containing values
calculated by first grouping rows according to ``column_or_label``, then applying
``collect`` to each set of grouped values in the other columns.
Note:
The grouped column will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.group("Color") # just gives counts
Color | count
Blue | 1
Green | 3
Red | 2
>>> marbles.group("Color", max) # takes the max of each grouping, in each column
Color | Shape max | Amount max | Price max
Blue | Rectangular | 12 | 2
Green | Round | 9 | 1.4
Red | Round | 7 | 1.75
>>> marbles.group("Shape", sum) # sum doesn't make sense for strings
Shape | Color sum | Amount sum | Price sum
Rectangular | | 27 | 4.7
Round | | 13 | 4.05
"""
# Assume that a call to group with a list of labels is a call to groups
if _is_non_string_iterable(column_or_label) and \
len(column_or_label) != self._num_rows:
return self.groups(column_or_label, collect)
self = self.copy(shallow=True)
collect = _zero_on_type_error(collect)
# Remove column used for grouping
column = self._get_column(column_or_label)
if isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral):
column_label = self._as_label(column_or_label)
del self[column_label]
else:
column_label = self._unused_label('group')
# Group by column
groups = self.index_by(column)
keys = sorted(groups.keys())
# Generate grouped columns
if collect is None:
labels = [column_label, 'count' if column_label != 'count' else self._unused_label('count')]
columns = [keys, [len(groups[k]) for k in keys]]
else:
columns, labels = [], []
for i, label in enumerate(self.labels):
labels.append(_collected_label(collect, label))
c = [collect(np.array([row[i] for row in groups[k]])) for k in keys]
columns.append(c)
grouped = type(self)().with_columns(zip(labels, columns))
assert column_label == self._unused_label(column_label)
grouped[column_label] = keys
grouped.move_to_start(column_label)
return grouped | [
"def",
"group",
"(",
"self",
",",
"column_or_label",
",",
"collect",
"=",
"None",
")",
":",
"# Assume that a call to group with a list of labels is a call to groups",
"if",
"_is_non_string_iterable",
"(",
"column_or_label",
")",
"and",
"len",
"(",
"column_or_label",
")",
... | Group rows by unique values in a column; count or aggregate others.
Args:
``column_or_label``: values to group (column label or index, or array)
``collect``: a function applied to values in other columns for each group
Returns:
A Table with each row corresponding to a unique value in ``column_or_label``,
where the first column contains the unique values from ``column_or_label``, and the
second contains counts for each of the unique values. If ``collect`` is
provided, a Table is returned with all original columns, each containing values
calculated by first grouping rows according to ``column_or_label``, then applying
``collect`` to each set of grouped values in the other columns.
Note:
The grouped column will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.group("Color") # just gives counts
Color | count
Blue | 1
Green | 3
Red | 2
>>> marbles.group("Color", max) # takes the max of each grouping, in each column
Color | Shape max | Amount max | Price max
Blue | Rectangular | 12 | 2
Green | Round | 9 | 1.4
Red | Round | 7 | 1.75
>>> marbles.group("Shape", sum) # sum doesn't make sense for strings
Shape | Color sum | Amount sum | Price sum
Rectangular | | 27 | 4.7
Round | | 13 | 4.05 | [
"Group",
"rows",
"by",
"unique",
"values",
"in",
"a",
"column",
";",
"count",
"or",
"aggregate",
"others",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L868-L952 | train | 204,617 |
data-8/datascience | datascience/tables.py | Table.groups | def groups(self, labels, collect=None):
"""Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
to values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05
"""
# Assume that a call to groups with one label is a call to group
if not _is_non_string_iterable(labels):
return self.group(labels, collect=collect)
collect = _zero_on_type_error(collect)
columns = []
labels = self._as_labels(labels)
for label in labels:
if label not in self.labels:
raise ValueError("All labels must exist in the table")
columns.append(self._get_column(label))
grouped = self.group(list(zip(*columns)), lambda s: s)
grouped._columns.popitem(last=False) # Discard the column of tuples
# Flatten grouping values and move them to front
counts = [len(v) for v in grouped[0]]
for label in labels[::-1]:
grouped[label] = grouped.apply(_assert_same, label)
grouped.move_to_start(label)
# Aggregate other values
if collect is None:
count = 'count' if 'count' not in labels else self._unused_label('count')
return grouped.select(labels).with_column(count, counts)
else:
for label in grouped.labels:
if label in labels:
continue
column = [collect(v) for v in grouped[label]]
del grouped[label]
grouped[_collected_label(collect, label)] = column
return grouped | python | def groups(self, labels, collect=None):
"""Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
to values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05
"""
# Assume that a call to groups with one label is a call to group
if not _is_non_string_iterable(labels):
return self.group(labels, collect=collect)
collect = _zero_on_type_error(collect)
columns = []
labels = self._as_labels(labels)
for label in labels:
if label not in self.labels:
raise ValueError("All labels must exist in the table")
columns.append(self._get_column(label))
grouped = self.group(list(zip(*columns)), lambda s: s)
grouped._columns.popitem(last=False) # Discard the column of tuples
# Flatten grouping values and move them to front
counts = [len(v) for v in grouped[0]]
for label in labels[::-1]:
grouped[label] = grouped.apply(_assert_same, label)
grouped.move_to_start(label)
# Aggregate other values
if collect is None:
count = 'count' if 'count' not in labels else self._unused_label('count')
return grouped.select(labels).with_column(count, counts)
else:
for label in grouped.labels:
if label in labels:
continue
column = [collect(v) for v in grouped[label]]
del grouped[label]
grouped[_collected_label(collect, label)] = column
return grouped | [
"def",
"groups",
"(",
"self",
",",
"labels",
",",
"collect",
"=",
"None",
")",
":",
"# Assume that a call to groups with one label is a call to group",
"if",
"not",
"_is_non_string_iterable",
"(",
"labels",
")",
":",
"return",
"self",
".",
"group",
"(",
"labels",
... | Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
to values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05 | [
"Group",
"rows",
"by",
"multiple",
"columns",
"count",
"or",
"aggregate",
"others",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L954-L1032 | train | 204,618 |
data-8/datascience | datascience/tables.py | Table.pivot_bin | def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs) :
"""Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1.
"""
pivot_columns = _as_labels(pivot_columns)
selected = self.select(pivot_columns + [value_column])
grouped = selected.groups(pivot_columns, collect=lambda x:x)
# refine bins by taking a histogram over all the data
if bins is not None:
vargs['bins'] = bins
_, rbins = np.histogram(self[value_column],**vargs)
# create a table with these bins a first column and counts for each group
vargs['bins'] = rbins
binned = type(self)().with_column('bin',rbins)
for group in grouped.rows:
col_label = "-".join(map(str,group[0:-1]))
col_vals = group[-1]
counts,_ = np.histogram(col_vals,**vargs)
binned[col_label] = np.append(counts,0)
return binned | python | def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs) :
"""Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1.
"""
pivot_columns = _as_labels(pivot_columns)
selected = self.select(pivot_columns + [value_column])
grouped = selected.groups(pivot_columns, collect=lambda x:x)
# refine bins by taking a histogram over all the data
if bins is not None:
vargs['bins'] = bins
_, rbins = np.histogram(self[value_column],**vargs)
# create a table with these bins a first column and counts for each group
vargs['bins'] = rbins
binned = type(self)().with_column('bin',rbins)
for group in grouped.rows:
col_label = "-".join(map(str,group[0:-1]))
col_vals = group[-1]
counts,_ = np.histogram(col_vals,**vargs)
binned[col_label] = np.append(counts,0)
return binned | [
"def",
"pivot_bin",
"(",
"self",
",",
"pivot_columns",
",",
"value_column",
",",
"bins",
"=",
"None",
",",
"*",
"*",
"vargs",
")",
":",
"pivot_columns",
"=",
"_as_labels",
"(",
"pivot_columns",
")",
"selected",
"=",
"self",
".",
"select",
"(",
"pivot_colum... | Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1. | [
"Form",
"a",
"table",
"with",
"columns",
"formed",
"by",
"the",
"unique",
"tuples",
"in",
"pivot_columns",
"containing",
"counts",
"per",
"bin",
"of",
"the",
"values",
"associated",
"with",
"each",
"tuple",
"in",
"the",
"value_column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1126-L1165 | train | 204,619 |
data-8/datascience | datascience/tables.py | Table.stack | def stack(self, key, labels=None):
"""Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data.
"""
rows, labels = [], labels or self.labels
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for k, v in row.asdict().items()
if k != key and k in labels]
return type(self)([key, 'column', 'value']).with_rows(rows) | python | def stack(self, key, labels=None):
"""Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data.
"""
rows, labels = [], labels or self.labels
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for k, v in row.asdict().items()
if k != key and k in labels]
return type(self)([key, 'column', 'value']).with_rows(rows) | [
"def",
"stack",
"(",
"self",
",",
"key",
",",
"labels",
"=",
"None",
")",
":",
"rows",
",",
"labels",
"=",
"[",
"]",
",",
"labels",
"or",
"self",
".",
"labels",
"for",
"row",
"in",
"self",
".",
"rows",
":",
"[",
"rows",
".",
"append",
"(",
"(",... | Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data. | [
"Takes",
"k",
"original",
"columns",
"and",
"returns",
"two",
"columns",
"with",
"col",
".",
"1",
"of",
"all",
"column",
"names",
"and",
"col",
".",
"2",
"of",
"all",
"associated",
"data",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1167-L1175 | train | 204,620 |
data-8/datascience | datascience/tables.py | Table.join | def join(self, column_label, other, other_label=None):
"""Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label`` (``str``): label of column in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label`` (``str``): default None, assumes ``column_label``.
Otherwise in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
"""
if self.num_rows == 0 or other.num_rows == 0:
return None
if not other_label:
other_label = column_label
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
# Gather joined rows from self_rows that have join values in other_rows
joined_rows = []
for v, rows in self_rows.items():
if v in other_rows:
joined_rows += [row + o for row in rows for o in other_rows[v]]
if not joined_rows:
return None
# Build joined table
self_labels = list(self.labels)
other_labels = [self._unused_label(s) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)(self_labels + other_labels).with_rows(joined_rows)
# Copy formats from both tables
joined._formats.update(self._formats)
for label in other._formats:
joined._formats[other_labels_map[label]] = other._formats[label]
# Remove redundant column, but perhaps save its formatting
del joined[other_labels_map[other_label]]
if column_label not in self._formats and other_label in other._formats:
joined._formats[column_label] = other._formats[other_label]
return joined.move_to_start(column_label).sort(column_label) | python | def join(self, column_label, other, other_label=None):
"""Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label`` (``str``): label of column in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label`` (``str``): default None, assumes ``column_label``.
Otherwise in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
"""
if self.num_rows == 0 or other.num_rows == 0:
return None
if not other_label:
other_label = column_label
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
# Gather joined rows from self_rows that have join values in other_rows
joined_rows = []
for v, rows in self_rows.items():
if v in other_rows:
joined_rows += [row + o for row in rows for o in other_rows[v]]
if not joined_rows:
return None
# Build joined table
self_labels = list(self.labels)
other_labels = [self._unused_label(s) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)(self_labels + other_labels).with_rows(joined_rows)
# Copy formats from both tables
joined._formats.update(self._formats)
for label in other._formats:
joined._formats[other_labels_map[label]] = other._formats[label]
# Remove redundant column, but perhaps save its formatting
del joined[other_labels_map[other_label]]
if column_label not in self._formats and other_label in other._formats:
joined._formats[column_label] = other._formats[other_label]
return joined.move_to_start(column_label).sort(column_label) | [
"def",
"join",
"(",
"self",
",",
"column_label",
",",
"other",
",",
"other_label",
"=",
"None",
")",
":",
"if",
"self",
".",
"num_rows",
"==",
"0",
"or",
"other",
".",
"num_rows",
"==",
"0",
":",
"return",
"None",
"if",
"not",
"other_label",
":",
"ot... | Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label`` (``str``): label of column in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label`` (``str``): default None, assumes ``column_label``.
Otherwise in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
1 | 10 | 6 | 2 | 5
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6 | [
"Creates",
"a",
"new",
"table",
"with",
"the",
"columns",
"of",
"self",
"and",
"other",
"containing",
"rows",
"for",
"all",
"values",
"of",
"a",
"column",
"that",
"appear",
"in",
"both",
"tables",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1177-L1274 | train | 204,621 |
data-8/datascience | datascience/tables.py | Table.stats | def stats(self, ops=(min, max, np.median, sum)):
"""Compute statistics for each column and place them in a table."""
names = [op.__name__ for op in ops]
ops = [_zero_on_type_error(op) for op in ops]
columns = [[op(column) for op in ops] for column in self.columns]
table = type(self)().with_columns(zip(self.labels, columns))
stats = table._unused_label('statistic')
table[stats] = names
table.move_to_start(stats)
return table | python | def stats(self, ops=(min, max, np.median, sum)):
"""Compute statistics for each column and place them in a table."""
names = [op.__name__ for op in ops]
ops = [_zero_on_type_error(op) for op in ops]
columns = [[op(column) for op in ops] for column in self.columns]
table = type(self)().with_columns(zip(self.labels, columns))
stats = table._unused_label('statistic')
table[stats] = names
table.move_to_start(stats)
return table | [
"def",
"stats",
"(",
"self",
",",
"ops",
"=",
"(",
"min",
",",
"max",
",",
"np",
".",
"median",
",",
"sum",
")",
")",
":",
"names",
"=",
"[",
"op",
".",
"__name__",
"for",
"op",
"in",
"ops",
"]",
"ops",
"=",
"[",
"_zero_on_type_error",
"(",
"op... | Compute statistics for each column and place them in a table. | [
"Compute",
"statistics",
"for",
"each",
"column",
"and",
"place",
"them",
"in",
"a",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1277-L1286 | train | 204,622 |
data-8/datascience | datascience/tables.py | Table._as_label | def _as_label(self, index_or_label):
"""Convert index to label."""
if isinstance(index_or_label, str):
return index_or_label
if isinstance(index_or_label, numbers.Integral):
return self.labels[index_or_label]
else:
raise ValueError(str(index_or_label) + ' is not a label or index') | python | def _as_label(self, index_or_label):
"""Convert index to label."""
if isinstance(index_or_label, str):
return index_or_label
if isinstance(index_or_label, numbers.Integral):
return self.labels[index_or_label]
else:
raise ValueError(str(index_or_label) + ' is not a label or index') | [
"def",
"_as_label",
"(",
"self",
",",
"index_or_label",
")",
":",
"if",
"isinstance",
"(",
"index_or_label",
",",
"str",
")",
":",
"return",
"index_or_label",
"if",
"isinstance",
"(",
"index_or_label",
",",
"numbers",
".",
"Integral",
")",
":",
"return",
"se... | Convert index to label. | [
"Convert",
"index",
"to",
"label",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1288-L1295 | train | 204,623 |
data-8/datascience | datascience/tables.py | Table._unused_label | def _unused_label(self, label):
"""Generate an unused label."""
original = label
existing = self.labels
i = 2
while label in existing:
label = '{}_{}'.format(original, i)
i += 1
return label | python | def _unused_label(self, label):
"""Generate an unused label."""
original = label
existing = self.labels
i = 2
while label in existing:
label = '{}_{}'.format(original, i)
i += 1
return label | [
"def",
"_unused_label",
"(",
"self",
",",
"label",
")",
":",
"original",
"=",
"label",
"existing",
"=",
"self",
".",
"labels",
"i",
"=",
"2",
"while",
"label",
"in",
"existing",
":",
"label",
"=",
"'{}_{}'",
".",
"format",
"(",
"original",
",",
"i",
... | Generate an unused label. | [
"Generate",
"an",
"unused",
"label",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1306-L1314 | train | 204,624 |
data-8/datascience | datascience/tables.py | Table._get_column | def _get_column(self, column_or_label):
"""Convert label to column and check column length."""
c = column_or_label
if isinstance(c, collections.Hashable) and c in self.labels:
return self[c]
elif isinstance(c, numbers.Integral):
return self[c]
elif isinstance(c, str):
raise ValueError('label "{}" not in labels {}'.format(c, self.labels))
else:
assert len(c) == self.num_rows, 'column length mismatch'
return c | python | def _get_column(self, column_or_label):
"""Convert label to column and check column length."""
c = column_or_label
if isinstance(c, collections.Hashable) and c in self.labels:
return self[c]
elif isinstance(c, numbers.Integral):
return self[c]
elif isinstance(c, str):
raise ValueError('label "{}" not in labels {}'.format(c, self.labels))
else:
assert len(c) == self.num_rows, 'column length mismatch'
return c | [
"def",
"_get_column",
"(",
"self",
",",
"column_or_label",
")",
":",
"c",
"=",
"column_or_label",
"if",
"isinstance",
"(",
"c",
",",
"collections",
".",
"Hashable",
")",
"and",
"c",
"in",
"self",
".",
"labels",
":",
"return",
"self",
"[",
"c",
"]",
"el... | Convert label to column and check column length. | [
"Convert",
"label",
"to",
"column",
"and",
"check",
"column",
"length",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1316-L1327 | train | 204,625 |
data-8/datascience | datascience/tables.py | Table.percentile | def percentile(self, p):
"""Return a new table with one row containing the pth percentile for
each column.
Assumes that each column only contains one type of value.
Returns a new table with one row and the same column labels.
The row contains the pth percentile of the original column, where the
pth percentile of a column is the smallest value that at at least as
large as the p% of numbers in the column.
>>> table = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> table.percentile(80)
count | points
9 | 10
"""
percentiles = [[_util.percentile(p, column)] for column in self.columns]
return self._with_columns(percentiles) | python | def percentile(self, p):
"""Return a new table with one row containing the pth percentile for
each column.
Assumes that each column only contains one type of value.
Returns a new table with one row and the same column labels.
The row contains the pth percentile of the original column, where the
pth percentile of a column is the smallest value that at at least as
large as the p% of numbers in the column.
>>> table = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> table.percentile(80)
count | points
9 | 10
"""
percentiles = [[_util.percentile(p, column)] for column in self.columns]
return self._with_columns(percentiles) | [
"def",
"percentile",
"(",
"self",
",",
"p",
")",
":",
"percentiles",
"=",
"[",
"[",
"_util",
".",
"percentile",
"(",
"p",
",",
"column",
")",
"]",
"for",
"column",
"in",
"self",
".",
"columns",
"]",
"return",
"self",
".",
"_with_columns",
"(",
"perce... | Return a new table with one row containing the pth percentile for
each column.
Assumes that each column only contains one type of value.
Returns a new table with one row and the same column labels.
The row contains the pth percentile of the original column, where the
pth percentile of a column is the smallest value that at at least as
large as the p% of numbers in the column.
>>> table = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> table.percentile(80)
count | points
9 | 10 | [
"Return",
"a",
"new",
"table",
"with",
"one",
"row",
"containing",
"the",
"pth",
"percentile",
"for",
"each",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1329-L1354 | train | 204,626 |
data-8/datascience | datascience/tables.py | Table.sample | def sample(self, k=None, with_replacement=True, weights=None):
"""Return a new table where k rows are randomly sampled from the
original table.
Args:
``k`` -- specifies the number of rows (``int``) to be sampled from
the table. Default is k equal to number of rows in the table.
``with_replacement`` -- (``bool``) By default True;
Samples ``k`` rows with replacement from table, else samples
``k`` rows without replacement.
``weights`` -- Array specifying probability the ith row of the
table is sampled. Defaults to None, which samples each row
with equal probability. ``weights`` must be a valid probability
distribution -- i.e. an array the length of the number of rows,
summing to 1.
Raises:
ValueError -- if ``weights`` is not length equal to number of rows
in the table; or, if ``weights`` does not sum to 1.
Returns:
A new instance of ``Table`` with ``k`` rows resampled.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.sample() # doctest: +SKIP
job | wage
b | 20
b | 20
a | 10
d | 8
>>> jobs.sample(with_replacement=True) # doctest: +SKIP
job | wage
d | 8
b | 20
c | 15
a | 10
>>> jobs.sample(k = 2) # doctest: +SKIP
job | wage
b | 20
c | 15
>>> ws = make_array(0.5, 0.5, 0, 0)
>>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP
job | wage
a | 10
a | 10
>>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))
Traceback (most recent call last):
...
ValueError: probabilities do not sum to 1
# Weights must be length of table.
>>> jobs.sample(k=2, weights=make_array(1, 0, 0))
Traceback (most recent call last):
...
ValueError: a and p must have same size
"""
n = self.num_rows
if k is None:
k = n
index = np.random.choice(n, k, replace=with_replacement, p=weights)
columns = [[c[i] for i in index] for c in self.columns]
sample = self._with_columns(columns)
return sample | python | def sample(self, k=None, with_replacement=True, weights=None):
"""Return a new table where k rows are randomly sampled from the
original table.
Args:
``k`` -- specifies the number of rows (``int``) to be sampled from
the table. Default is k equal to number of rows in the table.
``with_replacement`` -- (``bool``) By default True;
Samples ``k`` rows with replacement from table, else samples
``k`` rows without replacement.
``weights`` -- Array specifying probability the ith row of the
table is sampled. Defaults to None, which samples each row
with equal probability. ``weights`` must be a valid probability
distribution -- i.e. an array the length of the number of rows,
summing to 1.
Raises:
ValueError -- if ``weights`` is not length equal to number of rows
in the table; or, if ``weights`` does not sum to 1.
Returns:
A new instance of ``Table`` with ``k`` rows resampled.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.sample() # doctest: +SKIP
job | wage
b | 20
b | 20
a | 10
d | 8
>>> jobs.sample(with_replacement=True) # doctest: +SKIP
job | wage
d | 8
b | 20
c | 15
a | 10
>>> jobs.sample(k = 2) # doctest: +SKIP
job | wage
b | 20
c | 15
>>> ws = make_array(0.5, 0.5, 0, 0)
>>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP
job | wage
a | 10
a | 10
>>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))
Traceback (most recent call last):
...
ValueError: probabilities do not sum to 1
# Weights must be length of table.
>>> jobs.sample(k=2, weights=make_array(1, 0, 0))
Traceback (most recent call last):
...
ValueError: a and p must have same size
"""
n = self.num_rows
if k is None:
k = n
index = np.random.choice(n, k, replace=with_replacement, p=weights)
columns = [[c[i] for i in index] for c in self.columns]
sample = self._with_columns(columns)
return sample | [
"def",
"sample",
"(",
"self",
",",
"k",
"=",
"None",
",",
"with_replacement",
"=",
"True",
",",
"weights",
"=",
"None",
")",
":",
"n",
"=",
"self",
".",
"num_rows",
"if",
"k",
"is",
"None",
":",
"k",
"=",
"n",
"index",
"=",
"np",
".",
"random",
... | Return a new table where k rows are randomly sampled from the
original table.
Args:
``k`` -- specifies the number of rows (``int``) to be sampled from
the table. Default is k equal to number of rows in the table.
``with_replacement`` -- (``bool``) By default True;
Samples ``k`` rows with replacement from table, else samples
``k`` rows without replacement.
``weights`` -- Array specifying probability the ith row of the
table is sampled. Defaults to None, which samples each row
with equal probability. ``weights`` must be a valid probability
distribution -- i.e. an array the length of the number of rows,
summing to 1.
Raises:
ValueError -- if ``weights`` is not length equal to number of rows
in the table; or, if ``weights`` does not sum to 1.
Returns:
A new instance of ``Table`` with ``k`` rows resampled.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.sample() # doctest: +SKIP
job | wage
b | 20
b | 20
a | 10
d | 8
>>> jobs.sample(with_replacement=True) # doctest: +SKIP
job | wage
d | 8
b | 20
c | 15
a | 10
>>> jobs.sample(k = 2) # doctest: +SKIP
job | wage
b | 20
c | 15
>>> ws = make_array(0.5, 0.5, 0, 0)
>>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP
job | wage
a | 10
a | 10
>>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))
Traceback (most recent call last):
...
ValueError: probabilities do not sum to 1
# Weights must be length of table.
>>> jobs.sample(k=2, weights=make_array(1, 0, 0))
Traceback (most recent call last):
...
ValueError: a and p must have same size | [
"Return",
"a",
"new",
"table",
"where",
"k",
"rows",
"are",
"randomly",
"sampled",
"from",
"the",
"original",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1356-L1428 | train | 204,627 |
data-8/datascience | datascience/tables.py | Table.split | def split(self, k):
"""Return a tuple of two tables where the first table contains
``k`` rows randomly sampled and the second contains the remaining rows.
Args:
``k`` (int): The number of rows randomly sampled into the first
table. ``k`` must be between 1 and ``num_rows - 1``.
Raises:
``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.
Returns:
A tuple containing two instances of ``Table``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> sample, rest = jobs.split(3)
>>> sample # doctest: +SKIP
job | wage
c | 15
a | 10
b | 20
>>> rest # doctest: +SKIP
job | wage
d | 8
"""
if not 1 <= k <= self.num_rows - 1:
raise ValueError("Invalid value of k. k must be between 1 and the"
"number of rows - 1")
rows = np.random.permutation(self.num_rows)
first = self.take(rows[:k])
rest = self.take(rows[k:])
for column_label in self._formats:
first._formats[column_label] = self._formats[column_label]
rest._formats[column_label] = self._formats[column_label]
return first, rest | python | def split(self, k):
"""Return a tuple of two tables where the first table contains
``k`` rows randomly sampled and the second contains the remaining rows.
Args:
``k`` (int): The number of rows randomly sampled into the first
table. ``k`` must be between 1 and ``num_rows - 1``.
Raises:
``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.
Returns:
A tuple containing two instances of ``Table``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> sample, rest = jobs.split(3)
>>> sample # doctest: +SKIP
job | wage
c | 15
a | 10
b | 20
>>> rest # doctest: +SKIP
job | wage
d | 8
"""
if not 1 <= k <= self.num_rows - 1:
raise ValueError("Invalid value of k. k must be between 1 and the"
"number of rows - 1")
rows = np.random.permutation(self.num_rows)
first = self.take(rows[:k])
rest = self.take(rows[k:])
for column_label in self._formats:
first._formats[column_label] = self._formats[column_label]
rest._formats[column_label] = self._formats[column_label]
return first, rest | [
"def",
"split",
"(",
"self",
",",
"k",
")",
":",
"if",
"not",
"1",
"<=",
"k",
"<=",
"self",
".",
"num_rows",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Invalid value of k. k must be between 1 and the\"",
"\"number of rows - 1\"",
")",
"rows",
"=",
"np",
"... | Return a tuple of two tables where the first table contains
``k`` rows randomly sampled and the second contains the remaining rows.
Args:
``k`` (int): The number of rows randomly sampled into the first
table. ``k`` must be between 1 and ``num_rows - 1``.
Raises:
``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.
Returns:
A tuple containing two instances of ``Table``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> sample, rest = jobs.split(3)
>>> sample # doctest: +SKIP
job | wage
c | 15
a | 10
b | 20
>>> rest # doctest: +SKIP
job | wage
d | 8 | [
"Return",
"a",
"tuple",
"of",
"two",
"tables",
"where",
"the",
"first",
"table",
"contains",
"k",
"rows",
"randomly",
"sampled",
"and",
"the",
"second",
"contains",
"the",
"remaining",
"rows",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1461-L1505 | train | 204,628 |
data-8/datascience | datascience/tables.py | Table.with_row | def with_row(self, row):
"""Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2
"""
self = self.copy()
self.append(row)
return self | python | def with_row(self, row):
"""Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2
"""
self = self.copy()
self.append(row)
return self | [
"def",
"with_row",
"(",
"self",
",",
"row",
")",
":",
"self",
"=",
"self",
".",
"copy",
"(",
")",
"self",
".",
"append",
"(",
"row",
")",
"return",
"self"
] | Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2 | [
"Return",
"a",
"table",
"with",
"an",
"additional",
"row",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1508-L1525 | train | 204,629 |
data-8/datascience | datascience/tables.py | Table.with_rows | def with_rows(self, rows):
"""Return a table with additional rows.
Args:
``rows`` (sequence of sequences): Each row has a value per column.
If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.
Raises:
``ValueError``: If a row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_rows(make_array(make_array('c', 2, 3),
... make_array('d', 4, 2)))
letter | count | points
c | 2 | 3
d | 4 | 2
"""
self = self.copy()
self.append(self._with_columns(zip(*rows)))
return self | python | def with_rows(self, rows):
"""Return a table with additional rows.
Args:
``rows`` (sequence of sequences): Each row has a value per column.
If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.
Raises:
``ValueError``: If a row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_rows(make_array(make_array('c', 2, 3),
... make_array('d', 4, 2)))
letter | count | points
c | 2 | 3
d | 4 | 2
"""
self = self.copy()
self.append(self._with_columns(zip(*rows)))
return self | [
"def",
"with_rows",
"(",
"self",
",",
"rows",
")",
":",
"self",
"=",
"self",
".",
"copy",
"(",
")",
"self",
".",
"append",
"(",
"self",
".",
"_with_columns",
"(",
"zip",
"(",
"*",
"rows",
")",
")",
")",
"return",
"self"
] | Return a table with additional rows.
Args:
``rows`` (sequence of sequences): Each row has a value per column.
If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.
Raises:
``ValueError``: If a row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_rows(make_array(make_array('c', 2, 3),
... make_array('d', 4, 2)))
letter | count | points
c | 2 | 3
d | 4 | 2 | [
"Return",
"a",
"table",
"with",
"additional",
"rows",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1527-L1547 | train | 204,630 |
data-8/datascience | datascience/tables.py | Table.with_column | def with_column(self, label, values, *rest):
"""Return a new table with an additional or replaced column.
Args:
``label`` (str): The column label. If an existing label is used,
the existing column will be replaced in the new table.
``values`` (single value or sequence): If a single value, every
value in the new column is ``values``. If sequence of values,
new column takes on values in ``values``.
``rest``: An alternating list of labels and values describing
additional columns. See with_columns for a full description.
Raises:
``ValueError``: If
- ``label`` is not a valid column name
- if ``label`` is not of type (str)
- ``values`` is a list/array that does not have the same
length as the number of rows in the table.
Returns:
copy of original table with new or replaced column
>>> alphabet = Table().with_column('letter', make_array('c','d'))
>>> alphabet = alphabet.with_column('count', make_array(2, 4))
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('permutes', make_array('a', 'g'))
letter | count | permutes
c | 2 | a
d | 4 | g
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('count', 1)
letter | count
c | 1
d | 1
>>> alphabet.with_column(1, make_array(1, 2))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> alphabet.with_column('bad_col', make_array(1))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
# Ensure that if with_column is called instead of with_columns;
# no error is raised.
if rest:
return self.with_columns(label, values, *rest)
new_table = self.copy()
new_table.append_column(label, values)
return new_table | python | def with_column(self, label, values, *rest):
"""Return a new table with an additional or replaced column.
Args:
``label`` (str): The column label. If an existing label is used,
the existing column will be replaced in the new table.
``values`` (single value or sequence): If a single value, every
value in the new column is ``values``. If sequence of values,
new column takes on values in ``values``.
``rest``: An alternating list of labels and values describing
additional columns. See with_columns for a full description.
Raises:
``ValueError``: If
- ``label`` is not a valid column name
- if ``label`` is not of type (str)
- ``values`` is a list/array that does not have the same
length as the number of rows in the table.
Returns:
copy of original table with new or replaced column
>>> alphabet = Table().with_column('letter', make_array('c','d'))
>>> alphabet = alphabet.with_column('count', make_array(2, 4))
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('permutes', make_array('a', 'g'))
letter | count | permutes
c | 2 | a
d | 4 | g
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('count', 1)
letter | count
c | 1
d | 1
>>> alphabet.with_column(1, make_array(1, 2))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> alphabet.with_column('bad_col', make_array(1))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
# Ensure that if with_column is called instead of with_columns;
# no error is raised.
if rest:
return self.with_columns(label, values, *rest)
new_table = self.copy()
new_table.append_column(label, values)
return new_table | [
"def",
"with_column",
"(",
"self",
",",
"label",
",",
"values",
",",
"*",
"rest",
")",
":",
"# Ensure that if with_column is called instead of with_columns;",
"# no error is raised.",
"if",
"rest",
":",
"return",
"self",
".",
"with_columns",
"(",
"label",
",",
"valu... | Return a new table with an additional or replaced column.
Args:
``label`` (str): The column label. If an existing label is used,
the existing column will be replaced in the new table.
``values`` (single value or sequence): If a single value, every
value in the new column is ``values``. If sequence of values,
new column takes on values in ``values``.
``rest``: An alternating list of labels and values describing
additional columns. See with_columns for a full description.
Raises:
``ValueError``: If
- ``label`` is not a valid column name
- if ``label`` is not of type (str)
- ``values`` is a list/array that does not have the same
length as the number of rows in the table.
Returns:
copy of original table with new or replaced column
>>> alphabet = Table().with_column('letter', make_array('c','d'))
>>> alphabet = alphabet.with_column('count', make_array(2, 4))
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('permutes', make_array('a', 'g'))
letter | count | permutes
c | 2 | a
d | 4 | g
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('count', 1)
letter | count
c | 1
d | 1
>>> alphabet.with_column(1, make_array(1, 2))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> alphabet.with_column('bad_col', make_array(1))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table. | [
"Return",
"a",
"new",
"table",
"with",
"an",
"additional",
"or",
"replaced",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1549-L1607 | train | 204,631 |
data-8/datascience | datascience/tables.py | Table.with_columns | def with_columns(self, *labels_and_values):
"""Return a table with additional or replaced columns.
Args:
``labels_and_values``: An alternating list of labels and values or
a list of label-value pairs. If one of the labels is in
existing table, then every value in the corresponding column is
set to that value. If label has only a single value (``int``),
every row of corresponding column takes on that value.
Raises:
``ValueError``: If
- any label in ``labels_and_values`` is not a valid column
name, i.e if label is not of type (str).
- if any value in ``labels_and_values`` is a list/array and
does not have the same length as the number of rows in the
table.
``AssertionError``:
- 'incorrect columns format', if passed more than one sequence
(iterables) for ``labels_and_values``.
- 'even length sequence required' if missing a pair in
label-value pairs.
Returns:
Copy of original table with new or replaced columns. Columns added
in order of labels. Equivalent to ``with_column(label, value)``
when passed only one label-value pair.
>>> players = Table().with_columns('player_id',
... make_array(110234, 110235), 'wOBA', make_array(.354, .236))
>>> players
player_id | wOBA
110,234 | 0.354
110,235 | 0.236
>>> players = players.with_columns('salaries', 'N/A', 'season', 2016)
>>> players
player_id | wOBA | salaries | season
110,234 | 0.354 | N/A | 2,016
110,235 | 0.236 | N/A | 2,016
>>> salaries = Table().with_column('salary',
... make_array('$500,000', '$15,500,000'))
>>> players.with_columns('salaries', salaries.column('salary'),
... 'years', make_array(6, 1))
player_id | wOBA | salaries | season | years
110,234 | 0.354 | $500,000 | 2,016 | 6
110,235 | 0.236 | $15,500,000 | 2,016 | 1
>>> players.with_columns(2, make_array('$600,000', '$20,000,000'))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> players.with_columns('salaries', make_array('$600,000'))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
if len(labels_and_values) == 1:
labels_and_values = labels_and_values[0]
if isinstance(labels_and_values, collections.abc.Mapping):
labels_and_values = list(labels_and_values.items())
if not isinstance(labels_and_values, collections.abc.Sequence):
labels_and_values = list(labels_and_values)
if not labels_and_values:
return self
first = labels_and_values[0]
if not isinstance(first, str) and hasattr(first, '__iter__'):
for pair in labels_and_values:
assert len(pair) == 2, 'incorrect columns format'
labels_and_values = [x for pair in labels_and_values for x in pair]
assert len(labels_and_values) % 2 == 0, 'Even length sequence required'
for i in range(0, len(labels_and_values), 2):
label, values = labels_and_values[i], labels_and_values[i+1]
self = self.with_column(label, values)
return self | python | def with_columns(self, *labels_and_values):
"""Return a table with additional or replaced columns.
Args:
``labels_and_values``: An alternating list of labels and values or
a list of label-value pairs. If one of the labels is in
existing table, then every value in the corresponding column is
set to that value. If label has only a single value (``int``),
every row of corresponding column takes on that value.
Raises:
``ValueError``: If
- any label in ``labels_and_values`` is not a valid column
name, i.e if label is not of type (str).
- if any value in ``labels_and_values`` is a list/array and
does not have the same length as the number of rows in the
table.
``AssertionError``:
- 'incorrect columns format', if passed more than one sequence
(iterables) for ``labels_and_values``.
- 'even length sequence required' if missing a pair in
label-value pairs.
Returns:
Copy of original table with new or replaced columns. Columns added
in order of labels. Equivalent to ``with_column(label, value)``
when passed only one label-value pair.
>>> players = Table().with_columns('player_id',
... make_array(110234, 110235), 'wOBA', make_array(.354, .236))
>>> players
player_id | wOBA
110,234 | 0.354
110,235 | 0.236
>>> players = players.with_columns('salaries', 'N/A', 'season', 2016)
>>> players
player_id | wOBA | salaries | season
110,234 | 0.354 | N/A | 2,016
110,235 | 0.236 | N/A | 2,016
>>> salaries = Table().with_column('salary',
... make_array('$500,000', '$15,500,000'))
>>> players.with_columns('salaries', salaries.column('salary'),
... 'years', make_array(6, 1))
player_id | wOBA | salaries | season | years
110,234 | 0.354 | $500,000 | 2,016 | 6
110,235 | 0.236 | $15,500,000 | 2,016 | 1
>>> players.with_columns(2, make_array('$600,000', '$20,000,000'))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> players.with_columns('salaries', make_array('$600,000'))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.
"""
if len(labels_and_values) == 1:
labels_and_values = labels_and_values[0]
if isinstance(labels_and_values, collections.abc.Mapping):
labels_and_values = list(labels_and_values.items())
if not isinstance(labels_and_values, collections.abc.Sequence):
labels_and_values = list(labels_and_values)
if not labels_and_values:
return self
first = labels_and_values[0]
if not isinstance(first, str) and hasattr(first, '__iter__'):
for pair in labels_and_values:
assert len(pair) == 2, 'incorrect columns format'
labels_and_values = [x for pair in labels_and_values for x in pair]
assert len(labels_and_values) % 2 == 0, 'Even length sequence required'
for i in range(0, len(labels_and_values), 2):
label, values = labels_and_values[i], labels_and_values[i+1]
self = self.with_column(label, values)
return self | [
"def",
"with_columns",
"(",
"self",
",",
"*",
"labels_and_values",
")",
":",
"if",
"len",
"(",
"labels_and_values",
")",
"==",
"1",
":",
"labels_and_values",
"=",
"labels_and_values",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"labels_and_values",
",",
"collectio... | Return a table with additional or replaced columns.
Args:
``labels_and_values``: An alternating list of labels and values or
a list of label-value pairs. If one of the labels is in
existing table, then every value in the corresponding column is
set to that value. If label has only a single value (``int``),
every row of corresponding column takes on that value.
Raises:
``ValueError``: If
- any label in ``labels_and_values`` is not a valid column
name, i.e if label is not of type (str).
- if any value in ``labels_and_values`` is a list/array and
does not have the same length as the number of rows in the
table.
``AssertionError``:
- 'incorrect columns format', if passed more than one sequence
(iterables) for ``labels_and_values``.
- 'even length sequence required' if missing a pair in
label-value pairs.
Returns:
Copy of original table with new or replaced columns. Columns added
in order of labels. Equivalent to ``with_column(label, value)``
when passed only one label-value pair.
>>> players = Table().with_columns('player_id',
... make_array(110234, 110235), 'wOBA', make_array(.354, .236))
>>> players
player_id | wOBA
110,234 | 0.354
110,235 | 0.236
>>> players = players.with_columns('salaries', 'N/A', 'season', 2016)
>>> players
player_id | wOBA | salaries | season
110,234 | 0.354 | N/A | 2,016
110,235 | 0.236 | N/A | 2,016
>>> salaries = Table().with_column('salary',
... make_array('$500,000', '$15,500,000'))
>>> players.with_columns('salaries', salaries.column('salary'),
... 'years', make_array(6, 1))
player_id | wOBA | salaries | season | years
110,234 | 0.354 | $500,000 | 2,016 | 6
110,235 | 0.236 | $15,500,000 | 2,016 | 1
>>> players.with_columns(2, make_array('$600,000', '$20,000,000'))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> players.with_columns('salaries', make_array('$600,000'))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table. | [
"Return",
"a",
"table",
"with",
"additional",
"or",
"replaced",
"columns",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1609-L1684 | train | 204,632 |
data-8/datascience | datascience/tables.py | Table.bin | def bin(self, *columns, **vargs):
"""Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function.
"""
if columns:
self = self.select(*columns)
if 'normed' in vargs:
vargs.setdefault('density', vargs.pop('normed'))
density = vargs.get('density', False)
tag = 'density' if density else 'count'
cols = list(self._columns.values())
_, bins = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
counts, _ = np.histogram(self[label], bins=bins, density=density)
binned[label + ' ' + tag] = np.append(counts, 0)
return binned | python | def bin(self, *columns, **vargs):
"""Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function.
"""
if columns:
self = self.select(*columns)
if 'normed' in vargs:
vargs.setdefault('density', vargs.pop('normed'))
density = vargs.get('density', False)
tag = 'density' if density else 'count'
cols = list(self._columns.values())
_, bins = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
counts, _ = np.histogram(self[label], bins=bins, density=density)
binned[label + ' ' + tag] = np.append(counts, 0)
return binned | [
"def",
"bin",
"(",
"self",
",",
"*",
"columns",
",",
"*",
"*",
"vargs",
")",
":",
"if",
"columns",
":",
"self",
"=",
"self",
".",
"select",
"(",
"*",
"columns",
")",
"if",
"'normed'",
"in",
"vargs",
":",
"vargs",
".",
"setdefault",
"(",
"'density'"... | Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function. | [
"Group",
"values",
"by",
"bin",
"and",
"compute",
"counts",
"per",
"bin",
"by",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1740-L1785 | train | 204,633 |
data-8/datascience | datascience/tables.py | Table._use_html_if_available | def _use_html_if_available(format_fn):
"""Use the value's HTML rendering if available, overriding format_fn."""
def format_using_as_html(v, label=False):
if not label and hasattr(v, 'as_html'):
return v.as_html()
else:
return format_fn(v, label)
return format_using_as_html | python | def _use_html_if_available(format_fn):
"""Use the value's HTML rendering if available, overriding format_fn."""
def format_using_as_html(v, label=False):
if not label and hasattr(v, 'as_html'):
return v.as_html()
else:
return format_fn(v, label)
return format_using_as_html | [
"def",
"_use_html_if_available",
"(",
"format_fn",
")",
":",
"def",
"format_using_as_html",
"(",
"v",
",",
"label",
"=",
"False",
")",
":",
"if",
"not",
"label",
"and",
"hasattr",
"(",
"v",
",",
"'as_html'",
")",
":",
"return",
"v",
".",
"as_html",
"(",
... | Use the value's HTML rendering if available, overriding format_fn. | [
"Use",
"the",
"value",
"s",
"HTML",
"rendering",
"if",
"available",
"overriding",
"format_fn",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1806-L1813 | train | 204,634 |
data-8/datascience | datascience/tables.py | Table._get_column_formatters | def _get_column_formatters(self, max_rows, as_html):
"""Return one value formatting function per column.
Each function has the signature f(value, label=False) -> str
"""
formats = {s: self._formats.get(s, self.formatter) for s in self.labels}
cols = self._columns.items()
fmts = [formats[k].format_column(k, v[:max_rows]) for k, v in cols]
if as_html:
fmts = list(map(type(self)._use_html_if_available, fmts))
return fmts | python | def _get_column_formatters(self, max_rows, as_html):
"""Return one value formatting function per column.
Each function has the signature f(value, label=False) -> str
"""
formats = {s: self._formats.get(s, self.formatter) for s in self.labels}
cols = self._columns.items()
fmts = [formats[k].format_column(k, v[:max_rows]) for k, v in cols]
if as_html:
fmts = list(map(type(self)._use_html_if_available, fmts))
return fmts | [
"def",
"_get_column_formatters",
"(",
"self",
",",
"max_rows",
",",
"as_html",
")",
":",
"formats",
"=",
"{",
"s",
":",
"self",
".",
"_formats",
".",
"get",
"(",
"s",
",",
"self",
".",
"formatter",
")",
"for",
"s",
"in",
"self",
".",
"labels",
"}",
... | Return one value formatting function per column.
Each function has the signature f(value, label=False) -> str | [
"Return",
"one",
"value",
"formatting",
"function",
"per",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1815-L1825 | train | 204,635 |
data-8/datascience | datascience/tables.py | Table.as_text | def as_text(self, max_rows=0, sep=" | "):
"""Format table as text."""
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self._columns.keys()
fmts = self._get_column_formatters(max_rows, False)
rows = [[fmt(label, label=True) for fmt, label in zip(fmts, labels)]]
for row in itertools.islice(self.rows, max_rows):
rows.append([f(v, label=False) for v, f in zip(row, fmts)])
lines = [sep.join(row) for row in rows]
if omitted:
lines.append('... ({} rows omitted)'.format(omitted))
return '\n'.join([line.rstrip() for line in lines]) | python | def as_text(self, max_rows=0, sep=" | "):
"""Format table as text."""
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self._columns.keys()
fmts = self._get_column_formatters(max_rows, False)
rows = [[fmt(label, label=True) for fmt, label in zip(fmts, labels)]]
for row in itertools.islice(self.rows, max_rows):
rows.append([f(v, label=False) for v, f in zip(row, fmts)])
lines = [sep.join(row) for row in rows]
if omitted:
lines.append('... ({} rows omitted)'.format(omitted))
return '\n'.join([line.rstrip() for line in lines]) | [
"def",
"as_text",
"(",
"self",
",",
"max_rows",
"=",
"0",
",",
"sep",
"=",
"\" | \"",
")",
":",
"if",
"not",
"max_rows",
"or",
"max_rows",
">",
"self",
".",
"num_rows",
":",
"max_rows",
"=",
"self",
".",
"num_rows",
"omitted",
"=",
"max",
"(",
"0",
... | Format table as text. | [
"Format",
"table",
"as",
"text",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1827-L1840 | train | 204,636 |
data-8/datascience | datascience/tables.py | Table.as_html | def as_html(self, max_rows=0):
"""Format table as HTML."""
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self.labels
lines = [
(0, '<table border="1" class="dataframe">'),
(1, '<thead>'),
(2, '<tr>'),
(3, ' '.join('<th>' + label + '</th>' for label in labels)),
(2, '</tr>'),
(1, '</thead>'),
(1, '<tbody>'),
]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [
(2, '<tr>'),
(3, ' '.join('<td>' + fmt(v, label=False) + '</td>' for
v, fmt in zip(row, fmts))),
(2, '</tr>'),
]
lines.append((1, '</tbody>'))
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p>'.format(omitted)))
return '\n'.join(4 * indent * ' ' + text for indent, text in lines) | python | def as_html(self, max_rows=0):
"""Format table as HTML."""
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self.labels
lines = [
(0, '<table border="1" class="dataframe">'),
(1, '<thead>'),
(2, '<tr>'),
(3, ' '.join('<th>' + label + '</th>' for label in labels)),
(2, '</tr>'),
(1, '</thead>'),
(1, '<tbody>'),
]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [
(2, '<tr>'),
(3, ' '.join('<td>' + fmt(v, label=False) + '</td>' for
v, fmt in zip(row, fmts))),
(2, '</tr>'),
]
lines.append((1, '</tbody>'))
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p>'.format(omitted)))
return '\n'.join(4 * indent * ' ' + text for indent, text in lines) | [
"def",
"as_html",
"(",
"self",
",",
"max_rows",
"=",
"0",
")",
":",
"if",
"not",
"max_rows",
"or",
"max_rows",
">",
"self",
".",
"num_rows",
":",
"max_rows",
"=",
"self",
".",
"num_rows",
"omitted",
"=",
"max",
"(",
"0",
",",
"self",
".",
"num_rows",... | Format table as HTML. | [
"Format",
"table",
"as",
"HTML",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1842-L1869 | train | 204,637 |
data-8/datascience | datascience/tables.py | Table.index_by | def index_by(self, column_or_label):
"""Return a dict keyed by values in a column that contains lists of
rows corresponding to each value.
"""
column = self._get_column(column_or_label)
index = {}
for key, row in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index | python | def index_by(self, column_or_label):
"""Return a dict keyed by values in a column that contains lists of
rows corresponding to each value.
"""
column = self._get_column(column_or_label)
index = {}
for key, row in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index | [
"def",
"index_by",
"(",
"self",
",",
"column_or_label",
")",
":",
"column",
"=",
"self",
".",
"_get_column",
"(",
"column_or_label",
")",
"index",
"=",
"{",
"}",
"for",
"key",
",",
"row",
"in",
"zip",
"(",
"column",
",",
"self",
".",
"rows",
")",
":"... | Return a dict keyed by values in a column that contains lists of
rows corresponding to each value. | [
"Return",
"a",
"dict",
"keyed",
"by",
"values",
"in",
"a",
"column",
"that",
"contains",
"lists",
"of",
"rows",
"corresponding",
"to",
"each",
"value",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1871-L1879 | train | 204,638 |
data-8/datascience | datascience/tables.py | Table.to_array | def to_array(self):
"""Convert the table to a structured NumPy array."""
dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
arr = np.empty_like(self.columns[0], dt)
for label in self.labels:
arr[label] = self[label]
return arr | python | def to_array(self):
"""Convert the table to a structured NumPy array."""
dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
arr = np.empty_like(self.columns[0], dt)
for label in self.labels:
arr[label] = self[label]
return arr | [
"def",
"to_array",
"(",
"self",
")",
":",
"dt",
"=",
"np",
".",
"dtype",
"(",
"list",
"(",
"zip",
"(",
"self",
".",
"labels",
",",
"(",
"c",
".",
"dtype",
"for",
"c",
"in",
"self",
".",
"columns",
")",
")",
")",
")",
"arr",
"=",
"np",
".",
... | Convert the table to a structured NumPy array. | [
"Convert",
"the",
"table",
"to",
"a",
"structured",
"NumPy",
"array",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1913-L1919 | train | 204,639 |
data-8/datascience | datascience/tables.py | Table.plot | def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
"""Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
Returns a line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price>
"""
options = self.default_options.copy()
options.update(vargs)
if column_for_xticks is not None:
x_data, y_labels = self._split_column_and_labels(column_for_xticks)
x_label = self._as_label(column_for_xticks)
else:
x_data, y_labels = None, self.labels
x_label = None
if select is not None:
y_labels = self._as_labels(select)
if x_data is not None:
self = self.sort(x_data)
x_data = np.sort(x_data)
def draw(axis, label, color):
if x_data is None:
axis.plot(self[label], color=color, **options)
else:
axis.plot(x_data, self[label], color=color, **options)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height) | python | def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
"""Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
Returns a line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price>
"""
options = self.default_options.copy()
options.update(vargs)
if column_for_xticks is not None:
x_data, y_labels = self._split_column_and_labels(column_for_xticks)
x_label = self._as_label(column_for_xticks)
else:
x_data, y_labels = None, self.labels
x_label = None
if select is not None:
y_labels = self._as_labels(select)
if x_data is not None:
self = self.sort(x_data)
x_data = np.sort(x_data)
def draw(axis, label, color):
if x_data is None:
axis.plot(self[label], color=color, **options)
else:
axis.plot(x_data, self[label], color=color, **options)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height) | [
"def",
"plot",
"(",
"self",
",",
"column_for_xticks",
"=",
"None",
",",
"select",
"=",
"None",
",",
"overlay",
"=",
"True",
",",
"width",
"=",
"6",
",",
"height",
"=",
"4",
",",
"*",
"*",
"vargs",
")",
":",
"options",
"=",
"self",
".",
"default_opt... | Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
Returns a line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price> | [
"Plot",
"line",
"charts",
"for",
"the",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1941-L2003 | train | 204,640 |
data-8/datascience | datascience/tables.py | Table.bar | def bar(self, column_for_categories=None, select=None, overlay=True, width=6, height=4, **vargs):
"""Plot bar charts for the table.
Each plot is labeled using the values in `column_for_categories` and
one plot is produced for every other column (or for the columns
designated by `select`).
Every selected column except `column_for_categories` must be numerical.
Args:
column_for_categories (str): A column containing x-axis categories
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
"""
options = self.default_options.copy()
# Matplotlib tries to center the labels, but we already handle that
# TODO consider changing the custom centering code and using matplotlib's default
vargs['align'] = 'edge'
options.update(vargs)
xticks, labels = self._split_column_and_labels(column_for_categories)
if select is not None:
labels = self._as_labels(select)
index = np.arange(self.num_rows)
def draw(axis, label, color):
axis.bar(index-0.5, self[label], 1.0, color=color, **options)
def annotate(axis, ticks):
if (ticks is not None) :
tick_labels = [ticks[int(l)] if 0<=l<len(ticks) else '' for l in axis.get_xticks()]
axis.set_xticklabels(tick_labels, stretch='ultra-condensed')
self._visualize(column_for_categories, labels, xticks, overlay, draw, annotate, width=width, height=height) | python | def bar(self, column_for_categories=None, select=None, overlay=True, width=6, height=4, **vargs):
"""Plot bar charts for the table.
Each plot is labeled using the values in `column_for_categories` and
one plot is produced for every other column (or for the columns
designated by `select`).
Every selected column except `column_for_categories` must be numerical.
Args:
column_for_categories (str): A column containing x-axis categories
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
"""
options = self.default_options.copy()
# Matplotlib tries to center the labels, but we already handle that
# TODO consider changing the custom centering code and using matplotlib's default
vargs['align'] = 'edge'
options.update(vargs)
xticks, labels = self._split_column_and_labels(column_for_categories)
if select is not None:
labels = self._as_labels(select)
index = np.arange(self.num_rows)
def draw(axis, label, color):
axis.bar(index-0.5, self[label], 1.0, color=color, **options)
def annotate(axis, ticks):
if (ticks is not None) :
tick_labels = [ticks[int(l)] if 0<=l<len(ticks) else '' for l in axis.get_xticks()]
axis.set_xticklabels(tick_labels, stretch='ultra-condensed')
self._visualize(column_for_categories, labels, xticks, overlay, draw, annotate, width=width, height=height) | [
"def",
"bar",
"(",
"self",
",",
"column_for_categories",
"=",
"None",
",",
"select",
"=",
"None",
",",
"overlay",
"=",
"True",
",",
"width",
"=",
"6",
",",
"height",
"=",
"4",
",",
"*",
"*",
"vargs",
")",
":",
"options",
"=",
"self",
".",
"default_... | Plot bar charts for the table.
Each plot is labeled using the values in `column_for_categories` and
one plot is produced for every other column (or for the columns
designated by `select`).
Every selected column except `column_for_categories` must be numerical.
Args:
column_for_categories (str): A column containing x-axis categories
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs. | [
"Plot",
"bar",
"charts",
"for",
"the",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2005-L2046 | train | 204,641 |
data-8/datascience | datascience/tables.py | Table.group_bar | def group_bar(self, column_label, **vargs):
"""Plot a bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``bar`` in that there is no need to specify
bar heights; the height of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``bar`` behaves more like ``plot`` or
``scatter`` (which require the height of each point to be specified).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
"""
self.group(column_label).bar(column_label, **vargs) | python | def group_bar(self, column_label, **vargs):
"""Plot a bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``bar`` in that there is no need to specify
bar heights; the height of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``bar`` behaves more like ``plot`` or
``scatter`` (which require the height of each point to be specified).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.
"""
self.group(column_label).bar(column_label, **vargs) | [
"def",
"group_bar",
"(",
"self",
",",
"column_label",
",",
"*",
"*",
"vargs",
")",
":",
"self",
".",
"group",
"(",
"column_label",
")",
".",
"bar",
"(",
"column_label",
",",
"*",
"*",
"vargs",
")"
] | Plot a bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``bar`` in that there is no need to specify
bar heights; the height of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``bar`` behaves more like ``plot`` or
``scatter`` (which require the height of each point to be specified).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs. | [
"Plot",
"a",
"bar",
"chart",
"for",
"the",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2049-L2075 | train | 204,642 |
data-8/datascience | datascience/tables.py | Table.barh | def barh(self, column_for_categories=None, select=None, overlay=True, width=6, **vargs):
"""Plot horizontal bar charts for the table.
Args:
``column_for_categories`` (``str``): A column containing y-axis categories
used to create buckets for bar chart.
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.barh`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected except column for ``column_for_categories``
must be numerical.
Returns:
Horizontal bar graph with buckets specified by ``column_for_categories``.
Each plot is labeled using the values in ``column_for_categories``
and one plot is produced for every other column (or for the columns
designated by ``select``).
>>> t = Table().with_columns(
... 'Furniture', make_array('chairs', 'tables', 'desks'),
... 'Count', make_array(6, 1, 2),
... 'Price', make_array(10, 20, 30)
... )
>>> t
Furniture | Count | Price
chairs | 6 | 10
tables | 1 | 20
desks | 2 | 30
>>> furniture_table.barh('Furniture') # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
>>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP
<bar graph with furniture as categories and bars for price>
>>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
"""
options = self.default_options.copy()
# Matplotlib tries to center the labels, but we already handle that
# TODO consider changing the custom centering code and using matplotlib's default
vargs['align'] = 'edge'
options.update(vargs)
yticks, labels = self._split_column_and_labels(column_for_categories)
if select is not None:
labels = self._as_labels(select)
n = len(labels)
index = np.arange(self.num_rows)
margin = 0.1
bwidth = 1 - 2 * margin
if overlay:
bwidth /= len(labels)
if 'height' in options:
height = options.pop('height')
else:
height = max(4, len(index)/2)
def draw(axis, label, color):
if overlay:
ypos = index + margin + (1-2*margin)*(n - 1 - labels.index(label))/n
else:
ypos = index
# barh plots entries in reverse order from bottom to top
axis.barh(ypos, self[label][::-1], bwidth, color=color, **options)
ylabel = self._as_label(column_for_categories)
def annotate(axis, ticks):
axis.set_yticks(index+0.5) # Center labels on bars
# barh plots entries in reverse order from bottom to top
axis.set_yticklabels(ticks[::-1], stretch='ultra-condensed')
axis.set_xlabel(axis.get_ylabel())
axis.set_ylabel(ylabel)
self._visualize('', labels, yticks, overlay, draw, annotate, width=width, height=height) | python | def barh(self, column_for_categories=None, select=None, overlay=True, width=6, **vargs):
"""Plot horizontal bar charts for the table.
Args:
``column_for_categories`` (``str``): A column containing y-axis categories
used to create buckets for bar chart.
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.barh`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected except column for ``column_for_categories``
must be numerical.
Returns:
Horizontal bar graph with buckets specified by ``column_for_categories``.
Each plot is labeled using the values in ``column_for_categories``
and one plot is produced for every other column (or for the columns
designated by ``select``).
>>> t = Table().with_columns(
... 'Furniture', make_array('chairs', 'tables', 'desks'),
... 'Count', make_array(6, 1, 2),
... 'Price', make_array(10, 20, 30)
... )
>>> t
Furniture | Count | Price
chairs | 6 | 10
tables | 1 | 20
desks | 2 | 30
>>> furniture_table.barh('Furniture') # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
>>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP
<bar graph with furniture as categories and bars for price>
>>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
"""
options = self.default_options.copy()
# Matplotlib tries to center the labels, but we already handle that
# TODO consider changing the custom centering code and using matplotlib's default
vargs['align'] = 'edge'
options.update(vargs)
yticks, labels = self._split_column_and_labels(column_for_categories)
if select is not None:
labels = self._as_labels(select)
n = len(labels)
index = np.arange(self.num_rows)
margin = 0.1
bwidth = 1 - 2 * margin
if overlay:
bwidth /= len(labels)
if 'height' in options:
height = options.pop('height')
else:
height = max(4, len(index)/2)
def draw(axis, label, color):
if overlay:
ypos = index + margin + (1-2*margin)*(n - 1 - labels.index(label))/n
else:
ypos = index
# barh plots entries in reverse order from bottom to top
axis.barh(ypos, self[label][::-1], bwidth, color=color, **options)
ylabel = self._as_label(column_for_categories)
def annotate(axis, ticks):
axis.set_yticks(index+0.5) # Center labels on bars
# barh plots entries in reverse order from bottom to top
axis.set_yticklabels(ticks[::-1], stretch='ultra-condensed')
axis.set_xlabel(axis.get_ylabel())
axis.set_ylabel(ylabel)
self._visualize('', labels, yticks, overlay, draw, annotate, width=width, height=height) | [
"def",
"barh",
"(",
"self",
",",
"column_for_categories",
"=",
"None",
",",
"select",
"=",
"None",
",",
"overlay",
"=",
"True",
",",
"width",
"=",
"6",
",",
"*",
"*",
"vargs",
")",
":",
"options",
"=",
"self",
".",
"default_options",
".",
"copy",
"("... | Plot horizontal bar charts for the table.
Args:
``column_for_categories`` (``str``): A column containing y-axis categories
used to create buckets for bar chart.
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.barh`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected except column for ``column_for_categories``
must be numerical.
Returns:
Horizontal bar graph with buckets specified by ``column_for_categories``.
Each plot is labeled using the values in ``column_for_categories``
and one plot is produced for every other column (or for the columns
designated by ``select``).
>>> t = Table().with_columns(
... 'Furniture', make_array('chairs', 'tables', 'desks'),
... 'Count', make_array(6, 1, 2),
... 'Price', make_array(10, 20, 30)
... )
>>> t
Furniture | Count | Price
chairs | 6 | 10
tables | 1 | 20
desks | 2 | 30
>>> furniture_table.barh('Furniture') # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
>>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP
<bar graph with furniture as categories and bars for price>
>>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price> | [
"Plot",
"horizontal",
"bar",
"charts",
"for",
"the",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2077-L2158 | train | 204,643 |
def group_barh(self, column_label, **vargs):
    """Plot a horizontal bar chart for the table.

    The values of the specified column are grouped and counted, and one
    bar is produced for each group.

    Note: This differs from ``barh`` in that there is no need to specify
    bar heights; the size of a category's bar is the number of copies
    of that category in the given column. This method behaves more like
    ``hist`` in that regard, while ``barh`` behaves more like ``plot`` or
    ``scatter`` (which require the second coordinate of each point to be
    specified in another column).

    Args:
        ``column_label`` (str or int): The name or index of a column

    Kwargs:
        overlay (bool): create a chart with one color per data column;
            if False, each will be displayed separately.
        width (float): The width of the plot, in inches
        height (float): The height of the plot, in inches
        vargs: Additional arguments that get passed into `plt.bar`.
            See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
            for additional arguments that can be passed into vargs.
    """
    # Group and count the categories first, then delegate drawing to barh,
    # which receives the counts column produced by group().
    grouped = self.group(column_label)
    grouped.barh(column_label, **vargs)
"def",
"group_barh",
"(",
"self",
",",
"column_label",
",",
"*",
"*",
"vargs",
")",
":",
"self",
".",
"group",
"(",
"column_label",
")",
".",
"barh",
"(",
"column_label",
",",
"*",
"*",
"vargs",
")"
] | Plot a horizontal bar chart for the table.
The values of the specified column are grouped and counted, and one
bar is produced for each group.
Note: This differs from ``barh`` in that there is no need to specify
bar heights; the size of a category's bar is the number of copies
of that category in the given column. This method behaves more like
``hist`` in that regard, while ``barh`` behaves more like ``plot`` or
``scatter`` (which require the second coordinate of each point to be
specified in another column).
Args:
``column_label`` (str or int): The name or index of a column
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
width (float): The width of the plot, in inches
height (float): The height of the plot, in inches
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs. | [
"Plot",
"a",
"horizontal",
"bar",
"chart",
"for",
"the",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2161-L2188 | train | 204,644 |
def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):
    """Generic visualization that overlays or separates the draw function.

    Args:
        x_label (str or None): Label for the shared x-axis, if any.
        y_labels (sequence): Labels of the columns to draw; each of these
            columns must contain only numerical values.
        ticks: Tick values forwarded to ``annotate``, or None to skip
            axis annotation.
        overlay (bool): If True (and there is more than one label), draw
            every series on one set of axes with a legend; otherwise draw
            one vertically stacked subplot per series.
        draw (callable): ``draw(axis, label, color)`` renders one series.
        annotate (callable): ``annotate(axis, ticks)`` decorates an axis.
        width (float): Figure width, in inches.
        height (float): Height of each subplot, in inches.

    Raises:
        ValueError: The Table contains non-numerical values in columns
            other than `column_for_categories`
    """
    # BUGFIX: the `collections.Iterable` alias was removed in Python 3.10;
    # the ABC must be imported from collections.abc.
    from collections.abc import Iterable

    for label in y_labels:
        if not all(isinstance(x, numbers.Real) for x in self[label]):
            raise ValueError("The column '{0}' contains non-numerical "
                "values. A plot cannot be drawn for this column."
                .format(label))

    n = len(y_labels)
    colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
    if overlay and n > 1:
        # Single axes: one color per series, legend placed outside the plot.
        _, axis = plt.subplots(figsize=(width, height))
        if x_label is not None:
            axis.set_xlabel(x_label)
        for label, color in zip(y_labels, colors):
            draw(axis, label, color)
        if ticks is not None:
            annotate(axis, ticks)
        axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))
        type(self).plots.append(axis)
    else:
        fig, axes = plt.subplots(n, 1, figsize=(width, height * n))
        if not isinstance(axes, Iterable):
            # plt.subplots returns a bare Axes (not an array) when n == 1.
            axes = [axes]
        for axis, y_label, color in zip(axes, y_labels, colors):
            draw(axis, y_label, color)
            axis.set_ylabel(y_label, fontsize=16)
            if x_label is not None:
                axis.set_xlabel(x_label, fontsize=16)
            if ticks is not None:
                annotate(axis, ticks)
            type(self).plots.append(axis)
"def",
"_visualize",
"(",
"self",
",",
"x_label",
",",
"y_labels",
",",
"ticks",
",",
"overlay",
",",
"draw",
",",
"annotate",
",",
"width",
"=",
"6",
",",
"height",
"=",
"4",
")",
":",
"for",
"label",
"in",
"y_labels",
":",
"if",
"not",
"all",
"("... | Generic visualization that overlays or separates the draw function.
Raises:
ValueError: The Table contains non-numerical values in columns
other than `column_for_categories` | [
"Generic",
"visualization",
"that",
"overlays",
"or",
"separates",
"the",
"draw",
"function",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2297-L2333 | train | 204,645 |
def _split_column_and_labels(self, column_or_label):
    """Return the specified column and labels of other columns."""
    if column_or_label is None:
        column = None
    else:
        column = self._get_column(column_or_label)
    # Keep every label that does not match the requested column,
    # whether it was specified by index or by name.
    labels = [lbl for idx, lbl in enumerate(self.labels)
              if column_or_label not in (idx, lbl)]
    return column, labels
"def",
"_split_column_and_labels",
"(",
"self",
",",
"column_or_label",
")",
":",
"column",
"=",
"None",
"if",
"column_or_label",
"is",
"None",
"else",
"self",
".",
"_get_column",
"(",
"column_or_label",
")",
"labels",
"=",
"[",
"label",
"for",
"i",
",",
"la... | Return the specified column and labels of other columns. | [
"Return",
"the",
"specified",
"column",
"and",
"labels",
"of",
"other",
"columns",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2335-L2339 | train | 204,646 |
def pivot_hist(self, pivot_column_label, value_column_label, overlay=True, width=6, height=4, **vargs):
    """Draw histograms of each category in a column. (Deprecated.)

    Args:
        pivot_column_label (str): Column whose unique values define the
            categories; one histogram is drawn per category.
        value_column_label (str): Column whose values are binned.
        overlay (bool): If True, draw all histograms on one figure;
            otherwise draw one subplot per category.
        width (float): Figure width, in inches.
        height (float): Height of each subplot, in inches.
        vargs: Additional keyword arguments forwarded to ``plt.hist``.
    """
    warnings.warn("pivot_hist is deprecated; use "
                  "hist(value_column_label, group=pivot_column_label), or "
                  "with side_by_side=True if you really want side-by-side "
                  "bars.")
    pvt_labels = np.unique(self[pivot_column_label])
    pvt_columns = [self[value_column_label][np.where(self[pivot_column_label] == pivot)] for pivot in pvt_labels]
    n = len(pvt_labels)
    colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
    if overlay:
        plt.figure(figsize=(width, height))
        vals, bins, patches = plt.hist(pvt_columns, color=colors, **vargs)
        plt.legend(pvt_labels)
    else:
        _, axes = plt.subplots(n, 1, figsize=(width, height * n))
        vals = []
        bins = None
        for axis, label, column, color in zip(axes, pvt_labels, pvt_columns, colors):
            if isinstance(bins, np.ndarray):
                # Reuse the first subplot's bins so all subplots share a scale.
                avals, abins, patches = axis.hist(column, color=color, bins=bins, **vargs)
            else:
                avals, abins, patches = axis.hist(column, color=color, **vargs)
            axis.set_xlabel(label, fontsize=16)
            vals.append(avals)
            if not isinstance(bins, np.ndarray):
                bins = abins
            else:
                # BUGFIX: the original compared `bins.all() == abins.all()`,
                # which only compares two scalar booleans and could never
                # detect mismatched bins; compare the arrays element-wise.
                assert np.array_equal(bins, abins), "Inconsistent bins in hist"

    # Build a table of bin edges and per-category counts.
    # NOTE(review): this table is constructed but never returned or
    # displayed; preserved as-is for backward compatibility.
    t = type(self)()
    t['start'] = bins[0:-1]
    t['end'] = bins[1:]
    for label, column in zip(pvt_labels, vals):
        t[label] = column
"def",
"pivot_hist",
"(",
"self",
",",
"pivot_column_label",
",",
"value_column_label",
",",
"overlay",
"=",
"True",
",",
"width",
"=",
"6",
",",
"height",
"=",
"4",
",",
"*",
"*",
"vargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"pivot_hist is deprecate... | Draw histograms of each category in a column. | [
"Draw",
"histograms",
"of",
"each",
"category",
"in",
"a",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2342-L2375 | train | 204,647 |
def hist_of_counts(self, *columns, overlay=True, bins=None, bin_column=None,
                   group=None, side_by_side=False, width=6, height=4, **vargs):
    """
    Plots one count-based histogram for each column in columns. The
    heights of each bar will represent the counts, and all the bins
    must be of equal size.

    If no column is specified, plot all columns.

    Kwargs:
        overlay (bool): If True, plots 1 chart with all the histograms
            overlaid on top of each other (instead of the default behavior
            of one histogram for each column in the table). Also adds a
            legend that matches each bar color to its column. Note that
            if the histograms are not overlaid, they are not forced to the
            same scale.
        bins (array or int): Lower bound for each bin in the
            histogram or number of bins. If None, bins will
            be chosen automatically.
        bin_column (column name or index): A column of bin lower bounds.
            All other columns are treated as counts of these bins.
            If None, each value in each row is assigned a count of 1.
        group (column name or index): A column of categories. The rows are
            grouped by the values in this column, and a separate histogram is
            generated for each group. The histograms are overlaid or plotted
            separately depending on the overlay argument. If None, no such
            grouping is done.
        side_by_side (bool): Whether histogram bins should be plotted side by
            side (instead of directly overlaid). Makes sense only when
            plotting multiple histograms, either by passing several columns
            or by using the group option.
        vargs: Additional arguments that get passed into :func:plt.hist.
            See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
            for additional arguments that can be passed into vargs. These
            include: `range`, `cumulative`, and
            `orientation`, to name a few.

    >>> t = Table().with_columns(
    ...     'count',  make_array(9, 3, 3, 1),
    ...     'points', make_array(1, 2, 2, 10))
    >>> t.hist_of_counts() # doctest: +SKIP
    <histogram of values in count with counts on y-axis>
    <histogram of values in points with counts on y-axis>

    >>> t = Table().with_columns(
    ...     'value', make_array(101, 102, 103),
    ...     'count', make_array(5, 10, 5))
    >>> t.hist_of_counts(bin_column='value') # doctest: +SKIP
    <histogram of values weighted by corresponding counts>
    """
    if bin_column is not None and bins is None:
        # Derive one bin per distinct value of the bin column.
        bins = np.unique(self.column(bin_column))

    # TODO ensure counts are integers even when `columns` is empty
    for column in columns:
        if not _is_array_integer(self.column(column)):
            raise ValueError('The column {0} contains non-integer values '
                             'When using hist_of_counts with bin_columns, '
                             'all columns should contain counts.'
                             .format(column))

    if vargs.get('normed', False) or vargs.get('density', False):
        raise ValueError("hist_of_counts is for displaying counts only, "
                         "and should not be used with the normed or "
                         "density keyword arguments")
    # Force raw counts on the y-axis.
    vargs['density'] = False

    # BUGFIX: ``bins`` may be an integer bin count (see docstring); the
    # original unconditionally called len()/np.diff on it and raised
    # TypeError for ints. The equal-width validation only applies when
    # explicit bin edges are supplied.
    if bins is not None and not isinstance(bins, numbers.Integral):
        if len(bins) < 2:
            raise ValueError("bins must have at least two items")
        diffs = np.diff(sorted(bins))
        # Diffs should all be equal (up to floating point error)
        normalized_diff_deviances = np.abs((diffs - diffs[0])/diffs[0])
        if np.any(normalized_diff_deviances > 1e-11):
            raise ValueError("Bins of unequal size should not be used "
                             "with hist_of_counts. Please use hist() and "
                             "make sure to set normed=True")

    return self.hist(*columns, overlay=overlay, bins=bins,
                     bin_column=bin_column, group=group,
                     side_by_side=side_by_side, width=width, height=height,
                     **vargs)
"def",
"hist_of_counts",
"(",
"self",
",",
"*",
"columns",
",",
"overlay",
"=",
"True",
",",
"bins",
"=",
"None",
",",
"bin_column",
"=",
"None",
",",
"group",
"=",
"None",
",",
"side_by_side",
"=",
"False",
",",
"width",
"=",
"6",
",",
"height",
"="... | Plots one count-based histogram for each column in columns. The
heights of each bar will represent the counts, and all the bins
must be of equal size.
If no column is specified, plot all columns.
Kwargs:
overlay (bool): If True, plots 1 chart with all the histograms
overlaid on top of each other (instead of the default behavior
of one histogram for each column in the table). Also adds a
legend that matches each bar color to its column. Note that
if the histograms are not overlaid, they are not forced to the
same scale.
bins (array or int): Lower bound for each bin in the
histogram or number of bins. If None, bins will
be chosen automatically.
bin_column (column name or index): A column of bin lower bounds.
All other columns are treated as counts of these bins.
If None, each value in each row is assigned a count of 1.
group (column name or index): A column of categories. The rows are
grouped by the values in this column, and a separate histogram is
generated for each group. The histograms are overlaid or plotted
separately depending on the overlay argument. If None, no such
grouping is done.
side_by_side (bool): Whether histogram bins should be plotted side by
side (instead of directly overlaid). Makes sense only when
plotting multiple histograms, either by passing several columns
or by using the group option.
vargs: Additional arguments that get passed into :func:plt.hist.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
for additional arguments that can be passed into vargs. These
include: `range`, `cumulative`, and
`orientation`, to name a few.
>>> t = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> t.hist_of_counts() # doctest: +SKIP
<histogram of values in count with counts on y-axis>
<histogram of values in points with counts on y-axis>
>>> t = Table().with_columns(
... 'value', make_array(101, 102, 103),
... 'count', make_array(5, 10, 5))
>>> t.hist_of_counts(bin_column='value') # doctest: +SKIP
<histogram of values weighted by corresponding counts>
>>> t = Table().with_columns(
... 'value', make_array(1, 2, 3, 2, 5 ),
... 'category', make_array('a', 'a', 'a', 'b', 'b'))
>>> t.hist('value', group='category') # doctest: +SKIP
<two overlaid histograms of the data [1, 2, 3] and [2, 5]> | [
"Plots",
"one",
"count",
"-",
"based",
"histogram",
"for",
"each",
"column",
"in",
"columns",
".",
"The",
"heights",
"of",
"each",
"bar",
"will",
"represent",
"the",
"counts",
"and",
"all",
"the",
"bins",
"must",
"be",
"of",
"equal",
"size",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2572-L2666 | train | 204,648 |
def boxplot(self, **vargs):
    """Plots a boxplot for the table.

    Every column must be numerical.

    Kwargs:
        vargs: Additional arguments that get passed into `plt.boxplot`.
            See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot
            for additional arguments that can be passed into vargs. These include
            `vert` and `showmeans`.

    Returns:
        None

    Raises:
        ValueError: The Table contains columns with non-numerical values.

    >>> table = Table().with_columns(
    ...     'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),
    ...     'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))
    >>> table.boxplot() # doctest: +SKIP
    <boxplot of test1 and boxplot of test2 side-by-side on the same figure>
    """
    # Reject any column holding strings/bytes (numpy "flexible" dtypes).
    for col in self:
        has_non_numeric = any(isinstance(cell, np.flexible)
                              for cell in self[col])
        if has_non_numeric:
            raise ValueError("The column '{0}' contains non-numerical "
                "values. A histogram cannot be drawn for this table."
                .format(col))

    columns = self._columns.copy()
    vargs['labels'] = columns.keys()
    plt.boxplot(list(columns.values()), **vargs)
"def",
"boxplot",
"(",
"self",
",",
"*",
"*",
"vargs",
")",
":",
"# Check for non-numerical values and raise a ValueError if any found",
"for",
"col",
"in",
"self",
":",
"if",
"any",
"(",
"isinstance",
"(",
"cell",
",",
"np",
".",
"flexible",
")",
"for",
"cell... | Plots a boxplot for the table.
Every column must be numerical.
Kwargs:
vargs: Additional arguments that get passed into `plt.boxplot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot
for additional arguments that can be passed into vargs. These include
`vert` and `showmeans`.
Returns:
None
Raises:
ValueError: The Table contains columns with non-numerical values.
>>> table = Table().with_columns(
... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),
... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))
>>> table
test1 | test2
92.5 | 89
88 | 84
72 | 74
71 | 66
99 | 92
100 | 99
95 | 88
83 | 81
94 | 95
93 | 94
>>> table.boxplot() # doctest: +SKIP
<boxplot of test1 and boxplot of test2 side-by-side on the same figure> | [
"Plots",
"a",
"boxplot",
"for",
"the",
"table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2669-L2714 | train | 204,649 |
def plot_normal_cdf(rbound=None, lbound=None, mean=0, sd=1):
    """Plots a normal curve with specified parameters and area below curve shaded
    between ``lbound`` and ``rbound``.

    Args:
        ``rbound`` (numeric): right boundary of shaded region
        ``lbound`` (numeric): left boundary of shaded region; by default is negative infinity
        ``mean`` (numeric): mean/expectation of normal distribution
        ``sd`` (numeric): standard deviation of normal distribution
    """
    def density(xs):
        # Normal pdf with the requested mean and standard deviation.
        return stats.norm.pdf(xs, loc=mean, scale=sd)

    shade = rbound is not None or lbound is not None
    shade_left = rbound is not None and lbound is not None
    inf = 3.5 * sd
    step = 0.1
    rlabel, llabel = rbound, lbound
    if rbound is None:
        rbound, rlabel = inf + mean, "$\infty$"
    if lbound is None:
        lbound, llabel = -inf + mean, "-$\infty$"

    # The full curve, drawn over roughly +/- 3.5 standard deviations.
    curve_xs = np.arange(-inf + mean, inf + mean, step)
    plt.plot(curve_xs, density(curve_xs), color='k', lw=1)

    if shade:
        # Shade between the two bounds in gold.
        fill_xs = np.arange(lbound, rbound + step, step)
        plt.fill_between(fill_xs, density(fill_xs), color='gold')
    if shade_left:
        # Both bounds given: additionally shade everything left of lbound.
        left_xs = np.arange(-inf + mean, lbound + step, step)
        plt.fill_between(left_xs, density(left_xs), color='darkblue')

    plt.ylim(0, stats.norm.pdf(0, loc=0, scale=sd) * 1.25)
    plt.xlabel('z')
    plt.ylabel('$\phi$(z)', rotation=90)
    plt.title("Normal Curve ~ ($\mu$ = {0}, $\sigma$ = {1}) "
              "{2} < z < {3}".format(mean, sd, llabel, rlabel), fontsize=16)
    plt.show()
"def",
"plot_normal_cdf",
"(",
"rbound",
"=",
"None",
",",
"lbound",
"=",
"None",
",",
"mean",
"=",
"0",
",",
"sd",
"=",
"1",
")",
":",
"shade",
"=",
"rbound",
"is",
"not",
"None",
"or",
"lbound",
"is",
"not",
"None",
"shade_left",
"=",
"rbound",
"... | Plots a normal curve with specified parameters and area below curve shaded
between ``lbound`` and ``rbound``.
Args:
``rbound`` (numeric): right boundary of shaded region
``lbound`` (numeric): left boundary of shaded region; by default is negative infinity
``mean`` (numeric): mean/expectation of normal distribution
``sd`` (numeric): standard deviation of normal distribution | [
"Plots",
"a",
"normal",
"curve",
"with",
"specified",
"parameters",
"and",
"area",
"below",
"curve",
"shaded",
"between",
"lbound",
"and",
"rbound",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/util.py#L66-L104 | train | 204,650 |
def proportions_from_distribution(table, label, sample_size,
                                  column_name='Random Sample'):
    """
    Adds a column named ``column_name`` containing the proportions of a random
    draw using the distribution in ``label``.

    This method uses ``np.random.multinomial`` to draw ``sample_size`` samples
    from the distribution in ``table.column(label)``, then divides by
    ``sample_size`` to create the resulting column of proportions.

    Args:
        ``table``: An instance of ``Table``.

        ``label``: Label of column in ``table``. This column must contain a
            distribution (the values must sum to 1).

        ``sample_size``: The size of the sample to draw from the distribution.

        ``column_name``: The name of the new column that contains the sampled
            proportions. Defaults to ``'Random Sample'``.

    Returns:
        A copy of ``table`` with a column ``column_name`` containing the
        sampled proportions. The proportions will sum to 1.

    Throws:
        ``ValueError``: If the ``label`` is not in the table, or if
            ``table.column(label)`` does not sum to 1.
    """
    proportions = sample_proportions(sample_size, table.column(label))
    # BUGFIX: the new column was previously hard-coded to 'Random Sample',
    # silently ignoring the documented ``column_name`` argument. Callers
    # using the default are unaffected.
    return table.with_column(column_name, proportions)
"def",
"proportions_from_distribution",
"(",
"table",
",",
"label",
",",
"sample_size",
",",
"column_name",
"=",
"'Random Sample'",
")",
":",
"proportions",
"=",
"sample_proportions",
"(",
"sample_size",
",",
"table",
".",
"column",
"(",
"label",
")",
")",
"retu... | Adds a column named ``column_name`` containing the proportions of a random
draw using the distribution in ``label``.
This method uses ``np.random.multinomial`` to draw ``sample_size`` samples
from the distribution in ``table.column(label)``, then divides by
``sample_size`` to create the resulting column of proportions.
Args:
``table``: An instance of ``Table``.
``label``: Label of column in ``table``. This column must contain a
distribution (the values must sum to 1).
``sample_size``: The size of the sample to draw from the distribution.
``column_name``: The name of the new column that contains the sampled
proportions. Defaults to ``'Random Sample'``.
Returns:
A copy of ``table`` with a column ``column_name`` containing the
sampled proportions. The proportions will sum to 1.
Throws:
``ValueError``: If the ``label`` is not in the table, or if
``table.column(label)`` does not sum to 1. | [
"Adds",
"a",
"column",
"named",
"column_name",
"containing",
"the",
"proportions",
"of",
"a",
"random",
"draw",
"using",
"the",
"distribution",
"in",
"label",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/util.py#L128-L158 | train | 204,651 |
data-8/datascience | datascience/util.py | table_apply | def table_apply(table, func, subset=None):
"""Applies a function to each column and returns a Table.
Uses pandas `apply` under the hood, then converts back to a Table
Args:
table : instance of Table
The table to apply your function to
func : function
Any function that will work with DataFrame.apply
subset : list | None
A list of columns to apply the function to. If None, function
will be applied to all columns in table
Returns
-------
tab : instance of Table
A table with the given function applied. It will either be the
shape == shape(table), or shape (1, table.shape[1])
"""
from . import Table
df = table.to_df()
if subset is not None:
# Iterate through columns
subset = np.atleast_1d(subset)
if any([i not in df.columns for i in subset]):
err = np.where([i not in df.columns for i in subset])[0]
err = "Column mismatch: {0}".format(
[subset[i] for i in err])
raise ValueError(err)
for col in subset:
df[col] = df[col].apply(func)
else:
df = df.apply(func)
if isinstance(df, pd.Series):
# Reshape it so that we can easily convert back
df = pd.DataFrame(df).T
tab = Table.from_df(df)
return tab | python | def table_apply(table, func, subset=None):
"""Applies a function to each column and returns a Table.
Uses pandas `apply` under the hood, then converts back to a Table
Args:
table : instance of Table
The table to apply your function to
func : function
Any function that will work with DataFrame.apply
subset : list | None
A list of columns to apply the function to. If None, function
will be applied to all columns in table
Returns
-------
tab : instance of Table
A table with the given function applied. It will either be the
shape == shape(table), or shape (1, table.shape[1])
"""
from . import Table
df = table.to_df()
if subset is not None:
# Iterate through columns
subset = np.atleast_1d(subset)
if any([i not in df.columns for i in subset]):
err = np.where([i not in df.columns for i in subset])[0]
err = "Column mismatch: {0}".format(
[subset[i] for i in err])
raise ValueError(err)
for col in subset:
df[col] = df[col].apply(func)
else:
df = df.apply(func)
if isinstance(df, pd.Series):
# Reshape it so that we can easily convert back
df = pd.DataFrame(df).T
tab = Table.from_df(df)
return tab | [
"def",
"table_apply",
"(",
"table",
",",
"func",
",",
"subset",
"=",
"None",
")",
":",
"from",
".",
"import",
"Table",
"df",
"=",
"table",
".",
"to_df",
"(",
")",
"if",
"subset",
"is",
"not",
"None",
":",
"# Iterate through columns",
"subset",
"=",
"np... | Applies a function to each column and returns a Table.
Uses pandas `apply` under the hood, then converts back to a Table
Args:
table : instance of Table
The table to apply your function to
func : function
Any function that will work with DataFrame.apply
subset : list | None
A list of columns to apply the function to. If None, function
will be applied to all columns in table
Returns
-------
tab : instance of Table
A table with the given function applied. It will either be the
shape == shape(table), or shape (1, table.shape[1]) | [
"Applies",
"a",
"function",
"to",
"each",
"column",
"and",
"returns",
"a",
"Table",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/util.py#L161-L200 | train | 204,652 |
data-8/datascience | datascience/util.py | minimize | def minimize(f, start=None, smooth=False, log=None, array=False, **vargs):
"""Minimize a function f of one or more arguments.
Args:
f: A function that takes numbers and returns a number
start: A starting value or list of starting values
smooth: Whether to assume that f is smooth and use first-order info
log: Logging function called on the result of optimization (e.g. print)
vargs: Other named arguments passed to scipy.optimize.minimize
Returns either:
(a) the minimizing argument of a one-argument function
(b) an array of minimizing arguments of a multi-argument function
"""
if start is None:
assert not array, "Please pass starting values explicitly when array=True"
arg_count = f.__code__.co_argcount
assert arg_count > 0, "Please pass starting values explicitly for variadic functions"
start = [0] * arg_count
if not hasattr(start, '__len__'):
start = [start]
if array:
objective = f
else:
@functools.wraps(f)
def objective(args):
return f(*args)
if not smooth and 'method' not in vargs:
vargs['method'] = 'Powell'
result = optimize.minimize(objective, start, **vargs)
if log is not None:
log(result)
if len(start) == 1:
return result.x.item(0)
else:
return result.x | python | def minimize(f, start=None, smooth=False, log=None, array=False, **vargs):
"""Minimize a function f of one or more arguments.
Args:
f: A function that takes numbers and returns a number
start: A starting value or list of starting values
smooth: Whether to assume that f is smooth and use first-order info
log: Logging function called on the result of optimization (e.g. print)
vargs: Other named arguments passed to scipy.optimize.minimize
Returns either:
(a) the minimizing argument of a one-argument function
(b) an array of minimizing arguments of a multi-argument function
"""
if start is None:
assert not array, "Please pass starting values explicitly when array=True"
arg_count = f.__code__.co_argcount
assert arg_count > 0, "Please pass starting values explicitly for variadic functions"
start = [0] * arg_count
if not hasattr(start, '__len__'):
start = [start]
if array:
objective = f
else:
@functools.wraps(f)
def objective(args):
return f(*args)
if not smooth and 'method' not in vargs:
vargs['method'] = 'Powell'
result = optimize.minimize(objective, start, **vargs)
if log is not None:
log(result)
if len(start) == 1:
return result.x.item(0)
else:
return result.x | [
"def",
"minimize",
"(",
"f",
",",
"start",
"=",
"None",
",",
"smooth",
"=",
"False",
",",
"log",
"=",
"None",
",",
"array",
"=",
"False",
",",
"*",
"*",
"vargs",
")",
":",
"if",
"start",
"is",
"None",
":",
"assert",
"not",
"array",
",",
"\"Please... | Minimize a function f of one or more arguments.
Args:
f: A function that takes numbers and returns a number
start: A starting value or list of starting values
smooth: Whether to assume that f is smooth and use first-order info
log: Logging function called on the result of optimization (e.g. print)
vargs: Other named arguments passed to scipy.optimize.minimize
Returns either:
(a) the minimizing argument of a one-argument function
(b) an array of minimizing arguments of a multi-argument function | [
"Minimize",
"a",
"function",
"f",
"of",
"one",
"or",
"more",
"arguments",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/util.py#L203-L244 | train | 204,653 |
data-8/datascience | datascience/maps.py | _lat_lons_from_geojson | def _lat_lons_from_geojson(s):
"""Return a latitude-longitude pairs from nested GeoJSON coordinates.
GeoJSON coordinates are always stored in (longitude, latitude) order.
"""
if len(s) >= 2 and isinstance(s[0], _number) and isinstance(s[0], _number):
lat, lon = s[1], s[0]
return [(lat, lon)]
else:
return [lat_lon for sub in s for lat_lon in _lat_lons_from_geojson(sub)] | python | def _lat_lons_from_geojson(s):
"""Return a latitude-longitude pairs from nested GeoJSON coordinates.
GeoJSON coordinates are always stored in (longitude, latitude) order.
"""
if len(s) >= 2 and isinstance(s[0], _number) and isinstance(s[0], _number):
lat, lon = s[1], s[0]
return [(lat, lon)]
else:
return [lat_lon for sub in s for lat_lon in _lat_lons_from_geojson(sub)] | [
"def",
"_lat_lons_from_geojson",
"(",
"s",
")",
":",
"if",
"len",
"(",
"s",
")",
">=",
"2",
"and",
"isinstance",
"(",
"s",
"[",
"0",
"]",
",",
"_number",
")",
"and",
"isinstance",
"(",
"s",
"[",
"0",
"]",
",",
"_number",
")",
":",
"lat",
",",
"... | Return a latitude-longitude pairs from nested GeoJSON coordinates.
GeoJSON coordinates are always stored in (longitude, latitude) order. | [
"Return",
"a",
"latitude",
"-",
"longitude",
"pairs",
"from",
"nested",
"GeoJSON",
"coordinates",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L601-L610 | train | 204,654 |
data-8/datascience | datascience/maps.py | _FoliumWrapper.as_html | def as_html(self):
"""Generate HTML to display map."""
if not self._folium_map:
self.draw()
return self._inline_map(self._folium_map, self._width, self._height) | python | def as_html(self):
"""Generate HTML to display map."""
if not self._folium_map:
self.draw()
return self._inline_map(self._folium_map, self._width, self._height) | [
"def",
"as_html",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_folium_map",
":",
"self",
".",
"draw",
"(",
")",
"return",
"self",
".",
"_inline_map",
"(",
"self",
".",
"_folium_map",
",",
"self",
".",
"_width",
",",
"self",
".",
"_height",
")"
... | Generate HTML to display map. | [
"Generate",
"HTML",
"to",
"display",
"map",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L36-L40 | train | 204,655 |
data-8/datascience | datascience/maps.py | _FoliumWrapper.show | def show(self):
"""Publish HTML."""
IPython.display.display(IPython.display.HTML(self.as_html())) | python | def show(self):
"""Publish HTML."""
IPython.display.display(IPython.display.HTML(self.as_html())) | [
"def",
"show",
"(",
"self",
")",
":",
"IPython",
".",
"display",
".",
"display",
"(",
"IPython",
".",
"display",
".",
"HTML",
"(",
"self",
".",
"as_html",
"(",
")",
")",
")"
] | Publish HTML. | [
"Publish",
"HTML",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L42-L44 | train | 204,656 |
data-8/datascience | datascience/maps.py | Map.copy | def copy(self):
"""
Copies the current Map into a new one and returns it.
"""
m = Map(features=self._features, width=self._width,
height=self._height, **self._attrs)
m._folium_map = self._folium_map
return m | python | def copy(self):
"""
Copies the current Map into a new one and returns it.
"""
m = Map(features=self._features, width=self._width,
height=self._height, **self._attrs)
m._folium_map = self._folium_map
return m | [
"def",
"copy",
"(",
"self",
")",
":",
"m",
"=",
"Map",
"(",
"features",
"=",
"self",
".",
"_features",
",",
"width",
"=",
"self",
".",
"_width",
",",
"height",
"=",
"self",
".",
"_height",
",",
"*",
"*",
"self",
".",
"_attrs",
")",
"m",
".",
"_... | Copies the current Map into a new one and returns it. | [
"Copies",
"the",
"current",
"Map",
"into",
"a",
"new",
"one",
"and",
"returns",
"it",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L89-L97 | train | 204,657 |
data-8/datascience | datascience/maps.py | Map._autozoom | def _autozoom(self):
"""Calculate zoom and location."""
bounds = self._autobounds()
attrs = {}
midpoint = lambda a, b: (a + b)/2
attrs['location'] = (
midpoint(bounds['min_lat'], bounds['max_lat']),
midpoint(bounds['min_lon'], bounds['max_lon'])
)
# self._folium_map.fit_bounds(
# [bounds['min_long'], bounds['min_lat']],
# [bounds['max_long'], bounds['max_lat']]
# )
# remove the following with new Folium release
# rough approximation, assuming max_zoom is 18
import math
try:
lat_diff = bounds['max_lat'] - bounds['min_lat']
lon_diff = bounds['max_lon'] - bounds['min_lon']
area, max_area = lat_diff*lon_diff, 180*360
if area:
factor = 1 + max(0, 1 - self._width/1000)/2 + max(0, 1-area**0.5)/2
zoom = math.log(area/max_area)/-factor
else:
zoom = self._default_zoom
zoom = max(1, min(18, round(zoom)))
attrs['zoom_start'] = zoom
except ValueError as e:
raise Exception('Check that your locations are lat-lon pairs', e)
return attrs | python | def _autozoom(self):
"""Calculate zoom and location."""
bounds = self._autobounds()
attrs = {}
midpoint = lambda a, b: (a + b)/2
attrs['location'] = (
midpoint(bounds['min_lat'], bounds['max_lat']),
midpoint(bounds['min_lon'], bounds['max_lon'])
)
# self._folium_map.fit_bounds(
# [bounds['min_long'], bounds['min_lat']],
# [bounds['max_long'], bounds['max_lat']]
# )
# remove the following with new Folium release
# rough approximation, assuming max_zoom is 18
import math
try:
lat_diff = bounds['max_lat'] - bounds['min_lat']
lon_diff = bounds['max_lon'] - bounds['min_lon']
area, max_area = lat_diff*lon_diff, 180*360
if area:
factor = 1 + max(0, 1 - self._width/1000)/2 + max(0, 1-area**0.5)/2
zoom = math.log(area/max_area)/-factor
else:
zoom = self._default_zoom
zoom = max(1, min(18, round(zoom)))
attrs['zoom_start'] = zoom
except ValueError as e:
raise Exception('Check that your locations are lat-lon pairs', e)
return attrs | [
"def",
"_autozoom",
"(",
"self",
")",
":",
"bounds",
"=",
"self",
".",
"_autobounds",
"(",
")",
"attrs",
"=",
"{",
"}",
"midpoint",
"=",
"lambda",
"a",
",",
"b",
":",
"(",
"a",
"+",
"b",
")",
"/",
"2",
"attrs",
"[",
"'location'",
"]",
"=",
"(",... | Calculate zoom and location. | [
"Calculate",
"zoom",
"and",
"location",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L122-L155 | train | 204,658 |
data-8/datascience | datascience/maps.py | Map._autobounds | def _autobounds(self):
"""Simple calculation for bounds."""
bounds = {}
def check(prop, compare, extreme, val):
opp = min if compare is max else max
bounds.setdefault(prop, val)
bounds[prop] = opp(compare(bounds[prop], val), extreme)
def bound_check(lat_lon):
lat, lon = lat_lon
check('max_lat', max, 90, lat)
check('min_lat', min, -90, lat)
check('max_lon', max, 180, lon)
check('min_lon', min, -180, lon)
lat_lons = [lat_lon for feature in self._features.values() for
lat_lon in feature.lat_lons]
if not lat_lons:
lat_lons.append(self._default_lat_lon)
for lat_lon in lat_lons:
bound_check(lat_lon)
return bounds | python | def _autobounds(self):
"""Simple calculation for bounds."""
bounds = {}
def check(prop, compare, extreme, val):
opp = min if compare is max else max
bounds.setdefault(prop, val)
bounds[prop] = opp(compare(bounds[prop], val), extreme)
def bound_check(lat_lon):
lat, lon = lat_lon
check('max_lat', max, 90, lat)
check('min_lat', min, -90, lat)
check('max_lon', max, 180, lon)
check('min_lon', min, -180, lon)
lat_lons = [lat_lon for feature in self._features.values() for
lat_lon in feature.lat_lons]
if not lat_lons:
lat_lons.append(self._default_lat_lon)
for lat_lon in lat_lons:
bound_check(lat_lon)
return bounds | [
"def",
"_autobounds",
"(",
"self",
")",
":",
"bounds",
"=",
"{",
"}",
"def",
"check",
"(",
"prop",
",",
"compare",
",",
"extreme",
",",
"val",
")",
":",
"opp",
"=",
"min",
"if",
"compare",
"is",
"max",
"else",
"max",
"bounds",
".",
"setdefault",
"(... | Simple calculation for bounds. | [
"Simple",
"calculation",
"for",
"bounds",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L157-L180 | train | 204,659 |
data-8/datascience | datascience/maps.py | Map.geojson | def geojson(self):
"""Render features as a FeatureCollection."""
return {
"type": "FeatureCollection",
"features": [f.geojson(i) for i, f in self._features.items()]
} | python | def geojson(self):
"""Render features as a FeatureCollection."""
return {
"type": "FeatureCollection",
"features": [f.geojson(i) for i, f in self._features.items()]
} | [
"def",
"geojson",
"(",
"self",
")",
":",
"return",
"{",
"\"type\"",
":",
"\"FeatureCollection\"",
",",
"\"features\"",
":",
"[",
"f",
".",
"geojson",
"(",
"i",
")",
"for",
"i",
",",
"f",
"in",
"self",
".",
"_features",
".",
"items",
"(",
")",
"]",
... | Render features as a FeatureCollection. | [
"Render",
"features",
"as",
"a",
"FeatureCollection",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L198-L203 | train | 204,660 |
data-8/datascience | datascience/maps.py | Map.color | def color(self, values, ids=(), key_on='feature.id', palette='YlOrBr', **kwargs):
"""Color map features by binning values.
values -- a sequence of values or a table of keys and values
ids -- an ID for each value; if none are provided, indices are used
key_on -- attribute of each feature to match to ids
palette -- one of the following color brewer palettes:
'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',
'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.
Defaults from Folium:
threshold_scale: list, default None
Data range for D3 threshold scale. Defaults to the following range
of quantiles: [0, 0.5, 0.75, 0.85, 0.9], rounded to the nearest
order-of-magnitude integer. Ex: 270 rounds to 200, 5600 to 6000.
fill_opacity: float, default 0.6
Area fill opacity, range 0-1.
line_color: string, default 'black'
GeoJSON geopath line color.
line_weight: int, default 1
GeoJSON geopath line weight.
line_opacity: float, default 1
GeoJSON geopath line opacity, range 0-1.
legend_name: string, default None
Title for data legend. If not passed, defaults to columns[1].
"""
# Set values and ids to both be simple sequences by inspecting values
id_name, value_name = 'IDs', 'values'
if isinstance(values, collections.abc.Mapping):
assert not ids, 'IDs and a map cannot both be used together'
if hasattr(values, 'columns') and len(values.columns) == 2:
table = values
ids, values = table.columns
id_name, value_name = table.labels
else:
dictionary = values
ids, values = list(dictionary.keys()), list(dictionary.values())
if len(ids) != len(values):
assert len(ids) == 0
# Use indices as IDs
ids = list(range(len(values)))
m = self._create_map()
data = pandas.DataFrame({id_name: ids, value_name: values})
attrs = {
'geo_str': json.dumps(self.geojson()),
'data': data,
'columns': [id_name, value_name],
'key_on': key_on,
'fill_color': palette,
}
kwargs.update(attrs)
m.geo_json(**kwargs)
colored = self.format()
colored._folium_map = m
return colored | python | def color(self, values, ids=(), key_on='feature.id', palette='YlOrBr', **kwargs):
"""Color map features by binning values.
values -- a sequence of values or a table of keys and values
ids -- an ID for each value; if none are provided, indices are used
key_on -- attribute of each feature to match to ids
palette -- one of the following color brewer palettes:
'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',
'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.
Defaults from Folium:
threshold_scale: list, default None
Data range for D3 threshold scale. Defaults to the following range
of quantiles: [0, 0.5, 0.75, 0.85, 0.9], rounded to the nearest
order-of-magnitude integer. Ex: 270 rounds to 200, 5600 to 6000.
fill_opacity: float, default 0.6
Area fill opacity, range 0-1.
line_color: string, default 'black'
GeoJSON geopath line color.
line_weight: int, default 1
GeoJSON geopath line weight.
line_opacity: float, default 1
GeoJSON geopath line opacity, range 0-1.
legend_name: string, default None
Title for data legend. If not passed, defaults to columns[1].
"""
# Set values and ids to both be simple sequences by inspecting values
id_name, value_name = 'IDs', 'values'
if isinstance(values, collections.abc.Mapping):
assert not ids, 'IDs and a map cannot both be used together'
if hasattr(values, 'columns') and len(values.columns) == 2:
table = values
ids, values = table.columns
id_name, value_name = table.labels
else:
dictionary = values
ids, values = list(dictionary.keys()), list(dictionary.values())
if len(ids) != len(values):
assert len(ids) == 0
# Use indices as IDs
ids = list(range(len(values)))
m = self._create_map()
data = pandas.DataFrame({id_name: ids, value_name: values})
attrs = {
'geo_str': json.dumps(self.geojson()),
'data': data,
'columns': [id_name, value_name],
'key_on': key_on,
'fill_color': palette,
}
kwargs.update(attrs)
m.geo_json(**kwargs)
colored = self.format()
colored._folium_map = m
return colored | [
"def",
"color",
"(",
"self",
",",
"values",
",",
"ids",
"=",
"(",
")",
",",
"key_on",
"=",
"'feature.id'",
",",
"palette",
"=",
"'YlOrBr'",
",",
"*",
"*",
"kwargs",
")",
":",
"# Set values and ids to both be simple sequences by inspecting values",
"id_name",
","... | Color map features by binning values.
values -- a sequence of values or a table of keys and values
ids -- an ID for each value; if none are provided, indices are used
key_on -- attribute of each feature to match to ids
palette -- one of the following color brewer palettes:
'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',
'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.
Defaults from Folium:
threshold_scale: list, default None
Data range for D3 threshold scale. Defaults to the following range
of quantiles: [0, 0.5, 0.75, 0.85, 0.9], rounded to the nearest
order-of-magnitude integer. Ex: 270 rounds to 200, 5600 to 6000.
fill_opacity: float, default 0.6
Area fill opacity, range 0-1.
line_color: string, default 'black'
GeoJSON geopath line color.
line_weight: int, default 1
GeoJSON geopath line weight.
line_opacity: float, default 1
GeoJSON geopath line opacity, range 0-1.
legend_name: string, default None
Title for data legend. If not passed, defaults to columns[1]. | [
"Color",
"map",
"features",
"by",
"binning",
"values",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L205-L262 | train | 204,661 |
data-8/datascience | datascience/maps.py | Map.overlay | def overlay(self, feature, color='Blue', opacity=0.6):
"""
Overlays ``feature`` on the map. Returns a new Map.
Args:
``feature``: a ``Table`` of map features, a list of map features,
a Map, a Region, or a circle marker map table. The features will
be overlayed on the Map with specified ``color``.
``color`` (``str``): Color of feature. Defaults to 'Blue'
``opacity`` (``float``): Opacity of overlain feature. Defaults to
0.6.
Returns:
A new ``Map`` with the overlain ``feature``.
"""
result = self.copy()
if type(feature) == Table:
# if table of features e.g. Table.from_records(taz_map.features)
if 'feature' in feature:
feature = feature['feature']
# if marker table e.g. table with columns: latitudes,longitudes,popup,color,radius
else:
feature = Circle.map_table(feature)
if type(feature) in [list, np.ndarray]:
for f in feature:
f._attrs['fill_color'] = color
f._attrs['fill_opacity'] = opacity
f.draw_on(result._folium_map)
elif type(feature) == Map:
for i in range(len(feature._features)):
f = feature._features[i]
f._attrs['fill_color'] = color
f._attrs['fill_opacity'] = opacity
f.draw_on(result._folium_map)
elif type(feature) == Region:
feature._attrs['fill_color'] = color
feature._attrs['fill_opacity'] = opacity
feature.draw_on(result._folium_map)
return result | python | def overlay(self, feature, color='Blue', opacity=0.6):
"""
Overlays ``feature`` on the map. Returns a new Map.
Args:
``feature``: a ``Table`` of map features, a list of map features,
a Map, a Region, or a circle marker map table. The features will
be overlayed on the Map with specified ``color``.
``color`` (``str``): Color of feature. Defaults to 'Blue'
``opacity`` (``float``): Opacity of overlain feature. Defaults to
0.6.
Returns:
A new ``Map`` with the overlain ``feature``.
"""
result = self.copy()
if type(feature) == Table:
# if table of features e.g. Table.from_records(taz_map.features)
if 'feature' in feature:
feature = feature['feature']
# if marker table e.g. table with columns: latitudes,longitudes,popup,color,radius
else:
feature = Circle.map_table(feature)
if type(feature) in [list, np.ndarray]:
for f in feature:
f._attrs['fill_color'] = color
f._attrs['fill_opacity'] = opacity
f.draw_on(result._folium_map)
elif type(feature) == Map:
for i in range(len(feature._features)):
f = feature._features[i]
f._attrs['fill_color'] = color
f._attrs['fill_opacity'] = opacity
f.draw_on(result._folium_map)
elif type(feature) == Region:
feature._attrs['fill_color'] = color
feature._attrs['fill_opacity'] = opacity
feature.draw_on(result._folium_map)
return result | [
"def",
"overlay",
"(",
"self",
",",
"feature",
",",
"color",
"=",
"'Blue'",
",",
"opacity",
"=",
"0.6",
")",
":",
"result",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"type",
"(",
"feature",
")",
"==",
"Table",
":",
"# if table of features e.g. Table.from... | Overlays ``feature`` on the map. Returns a new Map.
Args:
``feature``: a ``Table`` of map features, a list of map features,
a Map, a Region, or a circle marker map table. The features will
be overlayed on the Map with specified ``color``.
``color`` (``str``): Color of feature. Defaults to 'Blue'
``opacity`` (``float``): Opacity of overlain feature. Defaults to
0.6.
Returns:
A new ``Map`` with the overlain ``feature``. | [
"Overlays",
"feature",
"on",
"the",
"map",
".",
"Returns",
"a",
"new",
"Map",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L264-L307 | train | 204,662 |
data-8/datascience | datascience/maps.py | Map.read_geojson | def read_geojson(cls, path_or_json_or_string):
"""Read a geoJSON string, object, or file. Return a dict of features keyed by ID."""
assert path_or_json_or_string
data = None
if isinstance(path_or_json_or_string, (dict, list)):
data = path_or_json_or_string
try:
data = json.loads(path_or_json_or_string)
except ValueError:
pass
try:
path = path_or_json_or_string
if path.endswith('.gz') or path.endswith('.gzip'):
import gzip
contents = gzip.open(path, 'r').read().decode('utf-8')
else:
contents = open(path, 'r').read()
data = json.loads(contents)
except FileNotFoundError:
pass
# TODO web address
assert data, 'MapData accepts a valid geoJSON object, geoJSON string, or path to a geoJSON file'
return cls(cls._read_geojson_features(data)) | python | def read_geojson(cls, path_or_json_or_string):
"""Read a geoJSON string, object, or file. Return a dict of features keyed by ID."""
assert path_or_json_or_string
data = None
if isinstance(path_or_json_or_string, (dict, list)):
data = path_or_json_or_string
try:
data = json.loads(path_or_json_or_string)
except ValueError:
pass
try:
path = path_or_json_or_string
if path.endswith('.gz') or path.endswith('.gzip'):
import gzip
contents = gzip.open(path, 'r').read().decode('utf-8')
else:
contents = open(path, 'r').read()
data = json.loads(contents)
except FileNotFoundError:
pass
# TODO web address
assert data, 'MapData accepts a valid geoJSON object, geoJSON string, or path to a geoJSON file'
return cls(cls._read_geojson_features(data)) | [
"def",
"read_geojson",
"(",
"cls",
",",
"path_or_json_or_string",
")",
":",
"assert",
"path_or_json_or_string",
"data",
"=",
"None",
"if",
"isinstance",
"(",
"path_or_json_or_string",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"data",
"=",
"path_or_json_or_st... | Read a geoJSON string, object, or file. Return a dict of features keyed by ID. | [
"Read",
"a",
"geoJSON",
"string",
"object",
"or",
"file",
".",
"Return",
"a",
"dict",
"of",
"features",
"keyed",
"by",
"ID",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L310-L332 | train | 204,663 |
data-8/datascience | datascience/maps.py | Map._read_geojson_features | def _read_geojson_features(data, features=None, prefix=""):
"""Return a dict of features keyed by ID."""
if features is None:
features = collections.OrderedDict()
for i, feature in enumerate(data['features']):
key = feature.get('id', prefix + str(i))
feature_type = feature['geometry']['type']
if feature_type == 'FeatureCollection':
_read_geojson_features(feature, features, prefix + '.' + key)
elif feature_type == 'Point':
value = Circle._convert_point(feature)
elif feature_type in ['Polygon', 'MultiPolygon']:
value = Region(feature)
else:
# TODO Support all http://geojson.org/geojson-spec.html#geometry-objects
value = None
features[key] = value
return features | python | def _read_geojson_features(data, features=None, prefix=""):
"""Return a dict of features keyed by ID."""
if features is None:
features = collections.OrderedDict()
for i, feature in enumerate(data['features']):
key = feature.get('id', prefix + str(i))
feature_type = feature['geometry']['type']
if feature_type == 'FeatureCollection':
_read_geojson_features(feature, features, prefix + '.' + key)
elif feature_type == 'Point':
value = Circle._convert_point(feature)
elif feature_type in ['Polygon', 'MultiPolygon']:
value = Region(feature)
else:
# TODO Support all http://geojson.org/geojson-spec.html#geometry-objects
value = None
features[key] = value
return features | [
"def",
"_read_geojson_features",
"(",
"data",
",",
"features",
"=",
"None",
",",
"prefix",
"=",
"\"\"",
")",
":",
"if",
"features",
"is",
"None",
":",
"features",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"i",
",",
"feature",
"in",
"enumer... | Return a dict of features keyed by ID. | [
"Return",
"a",
"dict",
"of",
"features",
"keyed",
"by",
"ID",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L335-L352 | train | 204,664 |
data-8/datascience | datascience/maps.py | _MapFeature.draw_on | def draw_on(self, folium_map):
"""Add feature to Folium map object."""
f = getattr(folium_map, self._map_method_name)
f(**self._folium_kwargs) | python | def draw_on(self, folium_map):
"""Add feature to Folium map object."""
f = getattr(folium_map, self._map_method_name)
f(**self._folium_kwargs) | [
"def",
"draw_on",
"(",
"self",
",",
"folium_map",
")",
":",
"f",
"=",
"getattr",
"(",
"folium_map",
",",
"self",
".",
"_map_method_name",
")",
"f",
"(",
"*",
"*",
"self",
".",
"_folium_kwargs",
")"
] | Add feature to Folium map object. | [
"Add",
"feature",
"to",
"Folium",
"map",
"object",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L365-L368 | train | 204,665 |
data-8/datascience | datascience/maps.py | _MapFeature._set_folium_map | def _set_folium_map(self):
"""A map containing only the feature."""
m = Map(features=[self], width=self._width, height=self._height)
self._folium_map = m.draw() | python | def _set_folium_map(self):
"""A map containing only the feature."""
m = Map(features=[self], width=self._width, height=self._height)
self._folium_map = m.draw() | [
"def",
"_set_folium_map",
"(",
"self",
")",
":",
"m",
"=",
"Map",
"(",
"features",
"=",
"[",
"self",
"]",
",",
"width",
"=",
"self",
".",
"_width",
",",
"height",
"=",
"self",
".",
"_height",
")",
"self",
".",
"_folium_map",
"=",
"m",
".",
"draw",
... | A map containing only the feature. | [
"A",
"map",
"containing",
"only",
"the",
"feature",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L370-L373 | train | 204,666 |
data-8/datascience | datascience/maps.py | Marker.geojson | def geojson(self, feature_id):
"""GeoJSON representation of the marker as a point."""
lat, lon = self.lat_lon
return {
'type': 'Feature',
'id': feature_id,
'geometry': {
'type': 'Point',
'coordinates': (lon, lat),
},
} | python | def geojson(self, feature_id):
"""GeoJSON representation of the marker as a point."""
lat, lon = self.lat_lon
return {
'type': 'Feature',
'id': feature_id,
'geometry': {
'type': 'Point',
'coordinates': (lon, lat),
},
} | [
"def",
"geojson",
"(",
"self",
",",
"feature_id",
")",
":",
"lat",
",",
"lon",
"=",
"self",
".",
"lat_lon",
"return",
"{",
"'type'",
":",
"'Feature'",
",",
"'id'",
":",
"feature_id",
",",
"'geometry'",
":",
"{",
"'type'",
":",
"'Point'",
",",
"'coordin... | GeoJSON representation of the marker as a point. | [
"GeoJSON",
"representation",
"of",
"the",
"marker",
"as",
"a",
"point",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L441-L451 | train | 204,667 |
data-8/datascience | datascience/maps.py | Marker._convert_point | def _convert_point(cls, feature):
"""Convert a GeoJSON point to a Marker."""
lon, lat = feature['geometry']['coordinates']
popup = feature['properties'].get('name', '')
return cls(lat, lon) | python | def _convert_point(cls, feature):
"""Convert a GeoJSON point to a Marker."""
lon, lat = feature['geometry']['coordinates']
popup = feature['properties'].get('name', '')
return cls(lat, lon) | [
"def",
"_convert_point",
"(",
"cls",
",",
"feature",
")",
":",
"lon",
",",
"lat",
"=",
"feature",
"[",
"'geometry'",
"]",
"[",
"'coordinates'",
"]",
"popup",
"=",
"feature",
"[",
"'properties'",
"]",
".",
"get",
"(",
"'name'",
",",
"''",
")",
"return",... | Convert a GeoJSON point to a Marker. | [
"Convert",
"a",
"GeoJSON",
"point",
"to",
"a",
"Marker",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L461-L465 | train | 204,668 |
data-8/datascience | datascience/maps.py | Marker.map | def map(cls, latitudes, longitudes, labels=None, colors=None, areas=None, **kwargs):
"""Return markers from columns of coordinates, labels, & colors.
The areas column is not applicable to markers, but sets circle areas.
"""
assert len(latitudes) == len(longitudes)
assert areas is None or hasattr(cls, '_has_radius'), "A " + cls.__name__ + " has no radius"
inputs = [latitudes, longitudes]
if labels is not None:
assert len(labels) == len(latitudes)
inputs.append(labels)
else:
inputs.append(("",) * len(latitudes))
if colors is not None:
assert len(colors) == len(latitudes)
inputs.append(colors)
if areas is not None:
assert len(areas) == len(latitudes)
inputs.append(np.array(areas) ** 0.5 / math.pi)
ms = [cls(*args, **kwargs) for args in zip(*inputs)]
return Map(ms) | python | def map(cls, latitudes, longitudes, labels=None, colors=None, areas=None, **kwargs):
"""Return markers from columns of coordinates, labels, & colors.
The areas column is not applicable to markers, but sets circle areas.
"""
assert len(latitudes) == len(longitudes)
assert areas is None or hasattr(cls, '_has_radius'), "A " + cls.__name__ + " has no radius"
inputs = [latitudes, longitudes]
if labels is not None:
assert len(labels) == len(latitudes)
inputs.append(labels)
else:
inputs.append(("",) * len(latitudes))
if colors is not None:
assert len(colors) == len(latitudes)
inputs.append(colors)
if areas is not None:
assert len(areas) == len(latitudes)
inputs.append(np.array(areas) ** 0.5 / math.pi)
ms = [cls(*args, **kwargs) for args in zip(*inputs)]
return Map(ms) | [
"def",
"map",
"(",
"cls",
",",
"latitudes",
",",
"longitudes",
",",
"labels",
"=",
"None",
",",
"colors",
"=",
"None",
",",
"areas",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"len",
"(",
"latitudes",
")",
"==",
"len",
"(",
"longitud... | Return markers from columns of coordinates, labels, & colors.
The areas column is not applicable to markers, but sets circle areas. | [
"Return",
"markers",
"from",
"columns",
"of",
"coordinates",
"labels",
"&",
"colors",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L468-L488 | train | 204,669 |
data-8/datascience | datascience/maps.py | Region.polygons | def polygons(self):
"""Return a list of polygons describing the region.
- Each polygon is a list of linear rings, where the first describes the
exterior and the rest describe interior holes.
- Each linear ring is a list of positions where the last is a repeat of
the first.
- Each position is a (lat, lon) pair.
"""
if self.type == 'Polygon':
polygons = [self._geojson['geometry']['coordinates']]
elif self.type == 'MultiPolygon':
polygons = self._geojson['geometry']['coordinates']
return [ [ [_lat_lons_from_geojson(s) for
s in ring ] for
ring in polygon] for
polygon in polygons] | python | def polygons(self):
"""Return a list of polygons describing the region.
- Each polygon is a list of linear rings, where the first describes the
exterior and the rest describe interior holes.
- Each linear ring is a list of positions where the last is a repeat of
the first.
- Each position is a (lat, lon) pair.
"""
if self.type == 'Polygon':
polygons = [self._geojson['geometry']['coordinates']]
elif self.type == 'MultiPolygon':
polygons = self._geojson['geometry']['coordinates']
return [ [ [_lat_lons_from_geojson(s) for
s in ring ] for
ring in polygon] for
polygon in polygons] | [
"def",
"polygons",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"==",
"'Polygon'",
":",
"polygons",
"=",
"[",
"self",
".",
"_geojson",
"[",
"'geometry'",
"]",
"[",
"'coordinates'",
"]",
"]",
"elif",
"self",
".",
"type",
"==",
"'MultiPolygon'",
":"... | Return a list of polygons describing the region.
- Each polygon is a list of linear rings, where the first describes the
exterior and the rest describe interior holes.
- Each linear ring is a list of positions where the last is a repeat of
the first.
- Each position is a (lat, lon) pair. | [
"Return",
"a",
"list",
"of",
"polygons",
"describing",
"the",
"region",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L553-L569 | train | 204,670 |
data-8/datascience | datascience/maps.py | Region.geojson | def geojson(self, feature_id):
"""Return GeoJSON with ID substituted."""
if self._geojson.get('id', feature_id) == feature_id:
return self._geojson
else:
geo = self._geojson.copy()
geo['id'] = feature_id
return geo | python | def geojson(self, feature_id):
"""Return GeoJSON with ID substituted."""
if self._geojson.get('id', feature_id) == feature_id:
return self._geojson
else:
geo = self._geojson.copy()
geo['id'] = feature_id
return geo | [
"def",
"geojson",
"(",
"self",
",",
"feature_id",
")",
":",
"if",
"self",
".",
"_geojson",
".",
"get",
"(",
"'id'",
",",
"feature_id",
")",
"==",
"feature_id",
":",
"return",
"self",
".",
"_geojson",
"else",
":",
"geo",
"=",
"self",
".",
"_geojson",
... | Return GeoJSON with ID substituted. | [
"Return",
"GeoJSON",
"with",
"ID",
"substituted",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L585-L592 | train | 204,671 |
data-8/datascience | datascience/predicates.py | are.between | def between(y, z):
"""Greater than or equal to y and less than z."""
return _combinable(lambda x: (y <= x < z) or _equal_or_float_equal(x, y)) | python | def between(y, z):
"""Greater than or equal to y and less than z."""
return _combinable(lambda x: (y <= x < z) or _equal_or_float_equal(x, y)) | [
"def",
"between",
"(",
"y",
",",
"z",
")",
":",
"return",
"_combinable",
"(",
"lambda",
"x",
":",
"(",
"y",
"<=",
"x",
"<",
"z",
")",
"or",
"_equal_or_float_equal",
"(",
"x",
",",
"y",
")",
")"
] | Greater than or equal to y and less than z. | [
"Greater",
"than",
"or",
"equal",
"to",
"y",
"and",
"less",
"than",
"z",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/predicates.py#L134-L136 | train | 204,672 |
data-8/datascience | datascience/predicates.py | are.between_or_equal_to | def between_or_equal_to(y, z):
"""Greater than or equal to y and less than or equal to z."""
return _combinable(lambda x: (y <= x <= z) or _equal_or_float_equal(x, y) or _equal_or_float_equal(x, z)) | python | def between_or_equal_to(y, z):
"""Greater than or equal to y and less than or equal to z."""
return _combinable(lambda x: (y <= x <= z) or _equal_or_float_equal(x, y) or _equal_or_float_equal(x, z)) | [
"def",
"between_or_equal_to",
"(",
"y",
",",
"z",
")",
":",
"return",
"_combinable",
"(",
"lambda",
"x",
":",
"(",
"y",
"<=",
"x",
"<=",
"z",
")",
"or",
"_equal_or_float_equal",
"(",
"x",
",",
"y",
")",
"or",
"_equal_or_float_equal",
"(",
"x",
",",
"... | Greater than or equal to y and less than or equal to z. | [
"Greater",
"than",
"or",
"equal",
"to",
"y",
"and",
"less",
"than",
"or",
"equal",
"to",
"z",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/predicates.py#L139-L141 | train | 204,673 |
data-8/datascience | datascience/formats.py | Formatter.format_column | def format_column(self, label, column):
"""Return a formatting function that pads & truncates values."""
if len(column) == 0:
val_width = 0
else:
val_width = max(len(self.format_value(v)) for v in column)
val_width = min(val_width, self.max_width)
width = max(val_width, len(str(label)), self.min_width, len(self.etc))
def pad(value, label=False):
if label:
raw = value
else:
raw = self.format_value(value)
if len(raw) > width:
prefix = raw[:width-len(self.etc)] + self.etc
else:
prefix = raw
return prefix.ljust(width)
return pad | python | def format_column(self, label, column):
"""Return a formatting function that pads & truncates values."""
if len(column) == 0:
val_width = 0
else:
val_width = max(len(self.format_value(v)) for v in column)
val_width = min(val_width, self.max_width)
width = max(val_width, len(str(label)), self.min_width, len(self.etc))
def pad(value, label=False):
if label:
raw = value
else:
raw = self.format_value(value)
if len(raw) > width:
prefix = raw[:width-len(self.etc)] + self.etc
else:
prefix = raw
return prefix.ljust(width)
return pad | [
"def",
"format_column",
"(",
"self",
",",
"label",
",",
"column",
")",
":",
"if",
"len",
"(",
"column",
")",
"==",
"0",
":",
"val_width",
"=",
"0",
"else",
":",
"val_width",
"=",
"max",
"(",
"len",
"(",
"self",
".",
"format_value",
"(",
"v",
")",
... | Return a formatting function that pads & truncates values. | [
"Return",
"a",
"formatting",
"function",
"that",
"pads",
"&",
"truncates",
"values",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/formats.py#L28-L46 | train | 204,674 |
data-8/datascience | datascience/formats.py | Formatter.format_value | def format_value(value):
"""Pretty-print an arbitrary value."""
if isinstance(value, (bool, np.bool_)):
return str(value)
elif isinstance(value, (int, np.integer)):
return '{:n}'.format(value)
elif isinstance(value, (float, np.floating)):
return '{:g}'.format(value)
else:
return str(value) | python | def format_value(value):
"""Pretty-print an arbitrary value."""
if isinstance(value, (bool, np.bool_)):
return str(value)
elif isinstance(value, (int, np.integer)):
return '{:n}'.format(value)
elif isinstance(value, (float, np.floating)):
return '{:g}'.format(value)
else:
return str(value) | [
"def",
"format_value",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"bool",
",",
"np",
".",
"bool_",
")",
")",
":",
"return",
"str",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"np",
".",
"i... | Pretty-print an arbitrary value. | [
"Pretty",
"-",
"print",
"an",
"arbitrary",
"value",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/formats.py#L49-L58 | train | 204,675 |
data-8/datascience | datascience/formats.py | Formatter.converts_values | def converts_values(self):
"""Whether this Formatter also converts values."""
return self.convert_value is not Formatter.convert_value or \
self.convert_column is not Formatter.convert_column | python | def converts_values(self):
"""Whether this Formatter also converts values."""
return self.convert_value is not Formatter.convert_value or \
self.convert_column is not Formatter.convert_column | [
"def",
"converts_values",
"(",
"self",
")",
":",
"return",
"self",
".",
"convert_value",
"is",
"not",
"Formatter",
".",
"convert_value",
"or",
"self",
".",
"convert_column",
"is",
"not",
"Formatter",
".",
"convert_column"
] | Whether this Formatter also converts values. | [
"Whether",
"this",
"Formatter",
"also",
"converts",
"values",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/formats.py#L70-L73 | train | 204,676 |
data-8/datascience | datascience/formats.py | NumberFormatter.convert_value | def convert_value(self, value):
"""Convert string 93,000.00 to float 93000.0."""
if isinstance(value, str):
value = value.replace(self.separator, '')
if self.decimal_point not in value:
return int(value)
else:
return float(value.replace(self.decimal_point, '.'))
elif self.int_to_float:
return float(value)
else:
return value | python | def convert_value(self, value):
"""Convert string 93,000.00 to float 93000.0."""
if isinstance(value, str):
value = value.replace(self.separator, '')
if self.decimal_point not in value:
return int(value)
else:
return float(value.replace(self.decimal_point, '.'))
elif self.int_to_float:
return float(value)
else:
return value | [
"def",
"convert_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"self",
".",
"separator",
",",
"''",
")",
"if",
"self",
".",
"decimal_point",
"not",
"in"... | Convert string 93,000.00 to float 93000.0. | [
"Convert",
"string",
"93",
"000",
".",
"00",
"to",
"float",
"93000",
".",
"0",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/formats.py#L95-L106 | train | 204,677 |
data-8/datascience | datascience/formats.py | CurrencyFormatter.convert_value | def convert_value(self, value):
"""Convert value to float. If value is a string, ensure that the first
character is the same as symbol ie. the value is in the currency this
formatter is representing.
"""
if isinstance(value, str):
assert value.startswith(self.symbol), "Currency does not start with " + self.symbol
value = value.lstrip(self.symbol)
return super().convert_value(value) | python | def convert_value(self, value):
"""Convert value to float. If value is a string, ensure that the first
character is the same as symbol ie. the value is in the currency this
formatter is representing.
"""
if isinstance(value, str):
assert value.startswith(self.symbol), "Currency does not start with " + self.symbol
value = value.lstrip(self.symbol)
return super().convert_value(value) | [
"def",
"convert_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"assert",
"value",
".",
"startswith",
"(",
"self",
".",
"symbol",
")",
",",
"\"Currency does not start with \"",
"+",
"self",
".",
"symbol"... | Convert value to float. If value is a string, ensure that the first
character is the same as symbol ie. the value is in the currency this
formatter is representing. | [
"Convert",
"value",
"to",
"float",
".",
"If",
"value",
"is",
"a",
"string",
"ensure",
"that",
"the",
"first",
"character",
"is",
"the",
"same",
"as",
"symbol",
"ie",
".",
"the",
"value",
"is",
"in",
"the",
"currency",
"this",
"formatter",
"is",
"represen... | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/formats.py#L123-L131 | train | 204,678 |
data-8/datascience | datascience/formats.py | DistributionFormatter.convert_column | def convert_column(self, values):
"""Normalize values."""
assert all(values >= 0), 'Cannot normalize a column with negatives'
total = sum(values)
if total > 0:
return values / total
else:
return values | python | def convert_column(self, values):
"""Normalize values."""
assert all(values >= 0), 'Cannot normalize a column with negatives'
total = sum(values)
if total > 0:
return values / total
else:
return values | [
"def",
"convert_column",
"(",
"self",
",",
"values",
")",
":",
"assert",
"all",
"(",
"values",
">=",
"0",
")",
",",
"'Cannot normalize a column with negatives'",
"total",
"=",
"sum",
"(",
"values",
")",
"if",
"total",
">",
"0",
":",
"return",
"values",
"/"... | Normalize values. | [
"Normalize",
"values",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/formats.py#L171-L178 | train | 204,679 |
NiklasRosenstein/myo-python | myo/macaddr.py | decode | def decode(bstr):
"""
Decodes an ASCII encoded binary MAC address tring into a number.
"""
bstr = bstr.replace(b':', b'')
if len(bstr) != 12:
raise ValueError('not a valid MAC address: {!r}'.format(bstr))
try:
return int(bstr, 16)
except ValueError:
raise ValueError('not a valid MAC address: {!r}'.format(bstr)) | python | def decode(bstr):
"""
Decodes an ASCII encoded binary MAC address tring into a number.
"""
bstr = bstr.replace(b':', b'')
if len(bstr) != 12:
raise ValueError('not a valid MAC address: {!r}'.format(bstr))
try:
return int(bstr, 16)
except ValueError:
raise ValueError('not a valid MAC address: {!r}'.format(bstr)) | [
"def",
"decode",
"(",
"bstr",
")",
":",
"bstr",
"=",
"bstr",
".",
"replace",
"(",
"b':'",
",",
"b''",
")",
"if",
"len",
"(",
"bstr",
")",
"!=",
"12",
":",
"raise",
"ValueError",
"(",
"'not a valid MAC address: {!r}'",
".",
"format",
"(",
"bstr",
")",
... | Decodes an ASCII encoded binary MAC address tring into a number. | [
"Decodes",
"an",
"ASCII",
"encoded",
"binary",
"MAC",
"address",
"tring",
"into",
"a",
"number",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/macaddr.py#L52-L64 | train | 204,680 |
NiklasRosenstein/myo-python | myo/_ffi.py | init | def init(lib_name=None, bin_path=None, sdk_path=None):
"""
Initialize the Myo SDK by loading the libmyo shared library. With no
arguments, libmyo must be on your `PATH` or `LD_LIBRARY_PATH`.
You can specify the exact path to libmyo with *lib_name*. Alternatively,
you can specify the binaries directory that contains libmyo with *bin_path*.
Finally, you can also pass the path to the Myo SDK root directory and it
will figure out the path to libmyo by itself.
"""
if sum(bool(x) for x in [lib_name, bin_path, sdk_path]) > 1:
raise ValueError('expected zero or one arguments')
if sdk_path:
if sys.platform.startswith('win32'):
bin_path = os.path.join(sdk_path, 'bin')
elif sys.platform.startswith('darwin'):
bin_path = os.path.join(sdk_path, 'myo.framework')
else:
raise RuntimeError('unsupported platform: {!r}'.format(sys.platform))
if bin_path:
lib_name = os.path.join(bin_path, _getdlname())
if not lib_name:
lib_name = _getdlname()
global libmyo
libmyo = ffi.dlopen(lib_name) | python | def init(lib_name=None, bin_path=None, sdk_path=None):
"""
Initialize the Myo SDK by loading the libmyo shared library. With no
arguments, libmyo must be on your `PATH` or `LD_LIBRARY_PATH`.
You can specify the exact path to libmyo with *lib_name*. Alternatively,
you can specify the binaries directory that contains libmyo with *bin_path*.
Finally, you can also pass the path to the Myo SDK root directory and it
will figure out the path to libmyo by itself.
"""
if sum(bool(x) for x in [lib_name, bin_path, sdk_path]) > 1:
raise ValueError('expected zero or one arguments')
if sdk_path:
if sys.platform.startswith('win32'):
bin_path = os.path.join(sdk_path, 'bin')
elif sys.platform.startswith('darwin'):
bin_path = os.path.join(sdk_path, 'myo.framework')
else:
raise RuntimeError('unsupported platform: {!r}'.format(sys.platform))
if bin_path:
lib_name = os.path.join(bin_path, _getdlname())
if not lib_name:
lib_name = _getdlname()
global libmyo
libmyo = ffi.dlopen(lib_name) | [
"def",
"init",
"(",
"lib_name",
"=",
"None",
",",
"bin_path",
"=",
"None",
",",
"sdk_path",
"=",
"None",
")",
":",
"if",
"sum",
"(",
"bool",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"lib_name",
",",
"bin_path",
",",
"sdk_path",
"]",
")",
">",
"1",
... | Initialize the Myo SDK by loading the libmyo shared library. With no
arguments, libmyo must be on your `PATH` or `LD_LIBRARY_PATH`.
You can specify the exact path to libmyo with *lib_name*. Alternatively,
you can specify the binaries directory that contains libmyo with *bin_path*.
Finally, you can also pass the path to the Myo SDK root directory and it
will figure out the path to libmyo by itself. | [
"Initialize",
"the",
"Myo",
"SDK",
"by",
"loading",
"the",
"libmyo",
"shared",
"library",
".",
"With",
"no",
"arguments",
"libmyo",
"must",
"be",
"on",
"your",
"PATH",
"or",
"LD_LIBRARY_PATH",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/_ffi.py#L209-L236 | train | 204,681 |
NiklasRosenstein/myo-python | myo/utils.py | TimeInterval.reset | def reset(self, value=None):
"""
Resets the start time of the interval to now or the specified value.
"""
if value is None:
value = time.clock()
self.start = value
if self.value_on_reset:
self.value = self.value_on_reset | python | def reset(self, value=None):
"""
Resets the start time of the interval to now or the specified value.
"""
if value is None:
value = time.clock()
self.start = value
if self.value_on_reset:
self.value = self.value_on_reset | [
"def",
"reset",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"time",
".",
"clock",
"(",
")",
"self",
".",
"start",
"=",
"value",
"if",
"self",
".",
"value_on_reset",
":",
"self",
".",
"value",
... | Resets the start time of the interval to now or the specified value. | [
"Resets",
"the",
"start",
"time",
"of",
"the",
"interval",
"to",
"now",
"or",
"the",
"specified",
"value",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/utils.py#L46-L55 | train | 204,682 |
NiklasRosenstein/myo-python | myo/math.py | Vector.normalized | def normalized(self):
"""
Returns a normalized copy of this vector.
"""
norm = self.magnitude()
return Vector(self.x / norm, self.y / norm, self.z / norm) | python | def normalized(self):
"""
Returns a normalized copy of this vector.
"""
norm = self.magnitude()
return Vector(self.x / norm, self.y / norm, self.z / norm) | [
"def",
"normalized",
"(",
"self",
")",
":",
"norm",
"=",
"self",
".",
"magnitude",
"(",
")",
"return",
"Vector",
"(",
"self",
".",
"x",
"/",
"norm",
",",
"self",
".",
"y",
"/",
"norm",
",",
"self",
".",
"z",
"/",
"norm",
")"
] | Returns a normalized copy of this vector. | [
"Returns",
"a",
"normalized",
"copy",
"of",
"this",
"vector",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L102-L108 | train | 204,683 |
NiklasRosenstein/myo-python | myo/math.py | Quaternion.magnitude | def magnitude(self):
"""
Returns the magnitude of the quaternion.
"""
return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2 + self.w ** 2) | python | def magnitude(self):
"""
Returns the magnitude of the quaternion.
"""
return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2 + self.w ** 2) | [
"def",
"magnitude",
"(",
"self",
")",
":",
"return",
"math",
".",
"sqrt",
"(",
"self",
".",
"x",
"**",
"2",
"+",
"self",
".",
"y",
"**",
"2",
"+",
"self",
".",
"z",
"**",
"2",
"+",
"self",
".",
"w",
"**",
"2",
")"
] | Returns the magnitude of the quaternion. | [
"Returns",
"the",
"magnitude",
"of",
"the",
"quaternion",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L194-L199 | train | 204,684 |
NiklasRosenstein/myo-python | myo/math.py | Quaternion.normalized | def normalized(self):
"""
Returns the unit quaternion corresponding to the same rotation
as this one.
"""
magnitude = self.magnitude()
return Quaternion(
self.x / magnitude, self.y / magnitude,
self.z / magnitude, self.w / magnitude) | python | def normalized(self):
"""
Returns the unit quaternion corresponding to the same rotation
as this one.
"""
magnitude = self.magnitude()
return Quaternion(
self.x / magnitude, self.y / magnitude,
self.z / magnitude, self.w / magnitude) | [
"def",
"normalized",
"(",
"self",
")",
":",
"magnitude",
"=",
"self",
".",
"magnitude",
"(",
")",
"return",
"Quaternion",
"(",
"self",
".",
"x",
"/",
"magnitude",
",",
"self",
".",
"y",
"/",
"magnitude",
",",
"self",
".",
"z",
"/",
"magnitude",
",",
... | Returns the unit quaternion corresponding to the same rotation
as this one. | [
"Returns",
"the",
"unit",
"quaternion",
"corresponding",
"to",
"the",
"same",
"rotation",
"as",
"this",
"one",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L201-L210 | train | 204,685 |
NiklasRosenstein/myo-python | myo/math.py | Quaternion.roll | def roll(self):
""" Calculates the Roll of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
return math.atan2(2*y*w - 2*x*z, 1 - 2*y*y - 2*z*z) | python | def roll(self):
""" Calculates the Roll of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
return math.atan2(2*y*w - 2*x*z, 1 - 2*y*y - 2*z*z) | [
"def",
"roll",
"(",
"self",
")",
":",
"x",
",",
"y",
",",
"z",
",",
"w",
"=",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"z",
",",
"self",
".",
"w",
"return",
"math",
".",
"atan2",
"(",
"2",
"*",
"y",
"*",
"w",
"-",
"2",... | Calculates the Roll of the Quaternion. | [
"Calculates",
"the",
"Roll",
"of",
"the",
"Quaternion",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L229-L233 | train | 204,686 |
NiklasRosenstein/myo-python | myo/math.py | Quaternion.pitch | def pitch(self):
""" Calculates the Pitch of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
return math.atan2(2*x*w - 2*y*z, 1 - 2*x*x - 2*z*z) | python | def pitch(self):
""" Calculates the Pitch of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
return math.atan2(2*x*w - 2*y*z, 1 - 2*x*x - 2*z*z) | [
"def",
"pitch",
"(",
"self",
")",
":",
"x",
",",
"y",
",",
"z",
",",
"w",
"=",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"z",
",",
"self",
".",
"w",
"return",
"math",
".",
"atan2",
"(",
"2",
"*",
"x",
"*",
"w",
"-",
"2"... | Calculates the Pitch of the Quaternion. | [
"Calculates",
"the",
"Pitch",
"of",
"the",
"Quaternion",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L236-L240 | train | 204,687 |
NiklasRosenstein/myo-python | myo/math.py | Quaternion.yaw | def yaw(self):
""" Calculates the Yaw of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
return math.asin(2*x*y + 2*z*w) | python | def yaw(self):
""" Calculates the Yaw of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
return math.asin(2*x*y + 2*z*w) | [
"def",
"yaw",
"(",
"self",
")",
":",
"x",
",",
"y",
",",
"z",
",",
"w",
"=",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"z",
",",
"self",
".",
"w",
"return",
"math",
".",
"asin",
"(",
"2",
"*",
"x",
"*",
"y",
"+",
"2",
... | Calculates the Yaw of the Quaternion. | [
"Calculates",
"the",
"Yaw",
"of",
"the",
"Quaternion",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L243-L247 | train | 204,688 |
NiklasRosenstein/myo-python | myo/math.py | Quaternion.rpy | def rpy(self):
""" Calculates the Roll, Pitch and Yaw of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
roll = math.atan2(2*y*w - 2*x*z, 1 - 2*y*y - 2*z*z)
pitch = math.atan2(2*x*w - 2*y*z, 1 - 2*x*x - 2*z*z)
yaw = math.asin(2*x*y + 2*z*w)
return (roll, pitch, yaw) | python | def rpy(self):
""" Calculates the Roll, Pitch and Yaw of the Quaternion. """
x, y, z, w = self.x, self.y, self.z, self.w
roll = math.atan2(2*y*w - 2*x*z, 1 - 2*y*y - 2*z*z)
pitch = math.atan2(2*x*w - 2*y*z, 1 - 2*x*x - 2*z*z)
yaw = math.asin(2*x*y + 2*z*w)
return (roll, pitch, yaw) | [
"def",
"rpy",
"(",
"self",
")",
":",
"x",
",",
"y",
",",
"z",
",",
"w",
"=",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"z",
",",
"self",
".",
"w",
"roll",
"=",
"math",
".",
"atan2",
"(",
"2",
"*",
"y",
"*",
"w",
"-",
... | Calculates the Roll, Pitch and Yaw of the Quaternion. | [
"Calculates",
"the",
"Roll",
"Pitch",
"and",
"Yaw",
"of",
"the",
"Quaternion",
"."
] | 89a7480f8058061da7a3dd98ccec57a6b134ddf3 | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L250-L257 | train | 204,689 |
vpelletier/python-libusb1 | usb1/libusb1.py | get_iso_packet_buffer_list | def get_iso_packet_buffer_list(transfer_p):
"""
Python-specific helper extracting a list of iso packet buffers.
"""
transfer = transfer_p.contents
offset = 0
result = []
append = result.append
for iso_transfer in _get_iso_packet_list(transfer):
length = iso_transfer.length
append(_get_iso_packet_buffer(transfer, offset, length))
offset += length
return result | python | def get_iso_packet_buffer_list(transfer_p):
"""
Python-specific helper extracting a list of iso packet buffers.
"""
transfer = transfer_p.contents
offset = 0
result = []
append = result.append
for iso_transfer in _get_iso_packet_list(transfer):
length = iso_transfer.length
append(_get_iso_packet_buffer(transfer, offset, length))
offset += length
return result | [
"def",
"get_iso_packet_buffer_list",
"(",
"transfer_p",
")",
":",
"transfer",
"=",
"transfer_p",
".",
"contents",
"offset",
"=",
"0",
"result",
"=",
"[",
"]",
"append",
"=",
"result",
".",
"append",
"for",
"iso_transfer",
"in",
"_get_iso_packet_list",
"(",
"tr... | Python-specific helper extracting a list of iso packet buffers. | [
"Python",
"-",
"specific",
"helper",
"extracting",
"a",
"list",
"of",
"iso",
"packet",
"buffers",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/libusb1.py#L1083-L1095 | train | 204,690 |
vpelletier/python-libusb1 | usb1/libusb1.py | get_extra | def get_extra(descriptor):
"""
Python-specific helper to access "extra" field of descriptors,
because it's not as straight-forward as in C.
Returns a list, where each entry is an individual extra descriptor.
"""
result = []
extra_length = descriptor.extra_length
if extra_length:
extra = buffer_at(descriptor.extra.value, extra_length)
append = result.append
while extra:
length = _string_item_to_int(extra[0])
if not 0 < length <= len(extra):
raise ValueError(
'Extra descriptor %i is incomplete/invalid' % (
len(result),
),
)
append(extra[:length])
extra = extra[length:]
return result | python | def get_extra(descriptor):
"""
Python-specific helper to access "extra" field of descriptors,
because it's not as straight-forward as in C.
Returns a list, where each entry is an individual extra descriptor.
"""
result = []
extra_length = descriptor.extra_length
if extra_length:
extra = buffer_at(descriptor.extra.value, extra_length)
append = result.append
while extra:
length = _string_item_to_int(extra[0])
if not 0 < length <= len(extra):
raise ValueError(
'Extra descriptor %i is incomplete/invalid' % (
len(result),
),
)
append(extra[:length])
extra = extra[length:]
return result | [
"def",
"get_extra",
"(",
"descriptor",
")",
":",
"result",
"=",
"[",
"]",
"extra_length",
"=",
"descriptor",
".",
"extra_length",
"if",
"extra_length",
":",
"extra",
"=",
"buffer_at",
"(",
"descriptor",
".",
"extra",
".",
"value",
",",
"extra_length",
")",
... | Python-specific helper to access "extra" field of descriptors,
because it's not as straight-forward as in C.
Returns a list, where each entry is an individual extra descriptor. | [
"Python",
"-",
"specific",
"helper",
"to",
"access",
"extra",
"field",
"of",
"descriptors",
"because",
"it",
"s",
"not",
"as",
"straight",
"-",
"forward",
"as",
"in",
"C",
".",
"Returns",
"a",
"list",
"where",
"each",
"entry",
"is",
"an",
"individual",
"... | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/libusb1.py#L1097-L1118 | train | 204,691 |
vpelletier/python-libusb1 | usb1/__init__.py | create_binary_buffer | def create_binary_buffer(init_or_size):
"""
ctypes.create_string_buffer variant which does not add a trailing null
when init_or_size is not a size.
"""
# As per ctypes.create_string_buffer, as of python 2.7.10 at least:
# - int or long is a length
# - str or unicode is an initialiser
# Testing the latter confuses 2to3, so test the former.
if isinstance(init_or_size, (int, long)):
init_or_size = bytearray(init_or_size)
return create_initialised_buffer(init_or_size) | python | def create_binary_buffer(init_or_size):
"""
ctypes.create_string_buffer variant which does not add a trailing null
when init_or_size is not a size.
"""
# As per ctypes.create_string_buffer, as of python 2.7.10 at least:
# - int or long is a length
# - str or unicode is an initialiser
# Testing the latter confuses 2to3, so test the former.
if isinstance(init_or_size, (int, long)):
init_or_size = bytearray(init_or_size)
return create_initialised_buffer(init_or_size) | [
"def",
"create_binary_buffer",
"(",
"init_or_size",
")",
":",
"# As per ctypes.create_string_buffer, as of python 2.7.10 at least:",
"# - int or long is a length",
"# - str or unicode is an initialiser",
"# Testing the latter confuses 2to3, so test the former.",
"if",
"isinstance",
"(",
"i... | ctypes.create_string_buffer variant which does not add a trailing null
when init_or_size is not a size. | [
"ctypes",
".",
"create_string_buffer",
"variant",
"which",
"does",
"not",
"add",
"a",
"trailing",
"null",
"when",
"init_or_size",
"is",
"not",
"a",
"size",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L264-L275 | train | 204,692 |
vpelletier/python-libusb1 | usb1/__init__.py | USBTransfer.close | def close(self):
"""
Break reference cycles to allow instance to be garbage-collected.
Raises if called on a submitted transfer.
"""
if self.__submitted:
raise ValueError('Cannot close a submitted transfer')
self.doom()
self.__initialized = False
# Break possible external reference cycles
self.__callback = None
self.__user_data = None
# Break libusb_transfer reference cycles
self.__ctypesCallbackWrapper = None
# For some reason, overwriting callback is not enough to remove this
# reference cycle - though sometimes it works:
# self -> self.__dict__ -> libusb_transfer -> dict[x] -> dict[x] ->
# CThunkObject -> __callbackWrapper -> self
# So free transfer altogether.
if self.__transfer is not None:
self.__libusb_free_transfer(self.__transfer)
self.__transfer = None
self.__transfer_buffer = None
# Break USBDeviceHandle reference cycle
self.__before_submit = None
self.__after_completion = None | python | def close(self):
"""
Break reference cycles to allow instance to be garbage-collected.
Raises if called on a submitted transfer.
"""
if self.__submitted:
raise ValueError('Cannot close a submitted transfer')
self.doom()
self.__initialized = False
# Break possible external reference cycles
self.__callback = None
self.__user_data = None
# Break libusb_transfer reference cycles
self.__ctypesCallbackWrapper = None
# For some reason, overwriting callback is not enough to remove this
# reference cycle - though sometimes it works:
# self -> self.__dict__ -> libusb_transfer -> dict[x] -> dict[x] ->
# CThunkObject -> __callbackWrapper -> self
# So free transfer altogether.
if self.__transfer is not None:
self.__libusb_free_transfer(self.__transfer)
self.__transfer = None
self.__transfer_buffer = None
# Break USBDeviceHandle reference cycle
self.__before_submit = None
self.__after_completion = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"__submitted",
":",
"raise",
"ValueError",
"(",
"'Cannot close a submitted transfer'",
")",
"self",
".",
"doom",
"(",
")",
"self",
".",
"__initialized",
"=",
"False",
"# Break possible external reference cy... | Break reference cycles to allow instance to be garbage-collected.
Raises if called on a submitted transfer. | [
"Break",
"reference",
"cycles",
"to",
"allow",
"instance",
"to",
"be",
"garbage",
"-",
"collected",
".",
"Raises",
"if",
"called",
"on",
"a",
"submitted",
"transfer",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L348-L373 | train | 204,693 |
vpelletier/python-libusb1 | usb1/__init__.py | USBTransfer.setControl | def setControl(
self, request_type, request, value, index, buffer_or_len,
callback=None, user_data=None, timeout=0):
"""
Setup transfer for control use.
request_type, request, value, index
See USBDeviceHandle.controlWrite.
request_type defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable.
"""
if self.__submitted:
raise ValueError('Cannot alter a submitted transfer')
if self.__doomed:
raise DoomedTransferError('Cannot reuse a doomed transfer')
if isinstance(buffer_or_len, (int, long)):
length = buffer_or_len
# pylint: disable=undefined-variable
string_buffer, transfer_py_buffer = create_binary_buffer(
length + CONTROL_SETUP_SIZE,
)
# pylint: enable=undefined-variable
else:
length = len(buffer_or_len)
string_buffer, transfer_py_buffer = create_binary_buffer(
CONTROL_SETUP + buffer_or_len,
)
self.__initialized = False
self.__transfer_buffer = string_buffer
# pylint: disable=undefined-variable
self.__transfer_py_buffer = integer_memoryview(
transfer_py_buffer,
)[CONTROL_SETUP_SIZE:]
# pylint: enable=undefined-variable
self.__user_data = user_data
libusb1.libusb_fill_control_setup(
string_buffer, request_type, request, value, index, length)
libusb1.libusb_fill_control_transfer(
self.__transfer, self.__handle, string_buffer,
self.__ctypesCallbackWrapper, None, timeout)
self.__callback = callback
self.__initialized = True | python | def setControl(
self, request_type, request, value, index, buffer_or_len,
callback=None, user_data=None, timeout=0):
"""
Setup transfer for control use.
request_type, request, value, index
See USBDeviceHandle.controlWrite.
request_type defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable.
"""
if self.__submitted:
raise ValueError('Cannot alter a submitted transfer')
if self.__doomed:
raise DoomedTransferError('Cannot reuse a doomed transfer')
if isinstance(buffer_or_len, (int, long)):
length = buffer_or_len
# pylint: disable=undefined-variable
string_buffer, transfer_py_buffer = create_binary_buffer(
length + CONTROL_SETUP_SIZE,
)
# pylint: enable=undefined-variable
else:
length = len(buffer_or_len)
string_buffer, transfer_py_buffer = create_binary_buffer(
CONTROL_SETUP + buffer_or_len,
)
self.__initialized = False
self.__transfer_buffer = string_buffer
# pylint: disable=undefined-variable
self.__transfer_py_buffer = integer_memoryview(
transfer_py_buffer,
)[CONTROL_SETUP_SIZE:]
# pylint: enable=undefined-variable
self.__user_data = user_data
libusb1.libusb_fill_control_setup(
string_buffer, request_type, request, value, index, length)
libusb1.libusb_fill_control_transfer(
self.__transfer, self.__handle, string_buffer,
self.__ctypesCallbackWrapper, None, timeout)
self.__callback = callback
self.__initialized = True | [
"def",
"setControl",
"(",
"self",
",",
"request_type",
",",
"request",
",",
"value",
",",
"index",
",",
"buffer_or_len",
",",
"callback",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"timeout",
"=",
"0",
")",
":",
"if",
"self",
".",
"__submitted",
... | Setup transfer for control use.
request_type, request, value, index
See USBDeviceHandle.controlWrite.
request_type defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable. | [
"Setup",
"transfer",
"for",
"control",
"use",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L425-L476 | train | 204,694 |
vpelletier/python-libusb1 | usb1/__init__.py | USBTransfer.setInterrupt | def setInterrupt(
self, endpoint, buffer_or_len, callback=None, user_data=None,
timeout=0):
"""
Setup transfer for interrupt use.
endpoint
Endpoint to submit transfer to. Defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data)
To avoid memory copies, use an object implementing the writeable
buffer interface (ex: bytearray).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable.
"""
if self.__submitted:
raise ValueError('Cannot alter a submitted transfer')
if self.__doomed:
raise DoomedTransferError('Cannot reuse a doomed transfer')
string_buffer, self.__transfer_py_buffer = create_binary_buffer(
buffer_or_len
)
self.__initialized = False
self.__transfer_buffer = string_buffer
self.__user_data = user_data
libusb1.libusb_fill_interrupt_transfer(
self.__transfer, self.__handle, endpoint, string_buffer,
sizeof(string_buffer), self.__ctypesCallbackWrapper, None, timeout)
self.__callback = callback
self.__initialized = True | python | def setInterrupt(
self, endpoint, buffer_or_len, callback=None, user_data=None,
timeout=0):
"""
Setup transfer for interrupt use.
endpoint
Endpoint to submit transfer to. Defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data)
To avoid memory copies, use an object implementing the writeable
buffer interface (ex: bytearray).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable.
"""
if self.__submitted:
raise ValueError('Cannot alter a submitted transfer')
if self.__doomed:
raise DoomedTransferError('Cannot reuse a doomed transfer')
string_buffer, self.__transfer_py_buffer = create_binary_buffer(
buffer_or_len
)
self.__initialized = False
self.__transfer_buffer = string_buffer
self.__user_data = user_data
libusb1.libusb_fill_interrupt_transfer(
self.__transfer, self.__handle, endpoint, string_buffer,
sizeof(string_buffer), self.__ctypesCallbackWrapper, None, timeout)
self.__callback = callback
self.__initialized = True | [
"def",
"setInterrupt",
"(",
"self",
",",
"endpoint",
",",
"buffer_or_len",
",",
"callback",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"timeout",
"=",
"0",
")",
":",
"if",
"self",
".",
"__submitted",
":",
"raise",
"ValueError",
"(",
"'Cannot alter a ... | Setup transfer for interrupt use.
endpoint
Endpoint to submit transfer to. Defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data)
To avoid memory copies, use an object implementing the writeable
buffer interface (ex: bytearray).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable. | [
"Setup",
"transfer",
"for",
"interrupt",
"use",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L516-L552 | train | 204,695 |
vpelletier/python-libusb1 | usb1/__init__.py | USBTransfer.setIsochronous | def setIsochronous(
self, endpoint, buffer_or_len, callback=None,
user_data=None, timeout=0, iso_transfer_length_list=None):
"""
Setup transfer for isochronous use.
endpoint
Endpoint to submit transfer to. Defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data)
To avoid memory copies, use an object implementing the writeable
buffer interface (ex: bytearray).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable.
iso_transfer_length_list
List of individual transfer sizes. If not provided, buffer_or_len
will be divided evenly among available transfers if possible, and
raise ValueError otherwise.
"""
if self.__submitted:
raise ValueError('Cannot alter a submitted transfer')
num_iso_packets = self.__num_iso_packets
if num_iso_packets == 0:
raise TypeError(
'This transfer canot be used for isochronous I/O. '
'You must get another one with a non-zero iso_packets '
'parameter.'
)
if self.__doomed:
raise DoomedTransferError('Cannot reuse a doomed transfer')
string_buffer, transfer_py_buffer = create_binary_buffer(buffer_or_len)
buffer_length = sizeof(string_buffer)
if iso_transfer_length_list is None:
iso_length, remainder = divmod(buffer_length, num_iso_packets)
if remainder:
raise ValueError(
'Buffer size %i cannot be evenly distributed among %i '
'transfers' % (
buffer_length,
num_iso_packets,
)
)
iso_transfer_length_list = [iso_length] * num_iso_packets
configured_iso_packets = len(iso_transfer_length_list)
if configured_iso_packets > num_iso_packets:
raise ValueError(
'Too many ISO transfer lengths (%i), there are '
'only %i ISO transfers available' % (
configured_iso_packets,
num_iso_packets,
)
)
if sum(iso_transfer_length_list) > buffer_length:
raise ValueError(
'ISO transfers too long (%i), there are only '
'%i bytes available' % (
sum(iso_transfer_length_list),
buffer_length,
)
)
transfer_p = self.__transfer
self.__initialized = False
self.__transfer_buffer = string_buffer
self.__transfer_py_buffer = transfer_py_buffer
self.__user_data = user_data
libusb1.libusb_fill_iso_transfer(
transfer_p, self.__handle, endpoint, string_buffer, buffer_length,
configured_iso_packets, self.__ctypesCallbackWrapper, None,
timeout)
for length, iso_packet_desc in zip(
iso_transfer_length_list,
libusb1.get_iso_packet_list(transfer_p)):
if length <= 0:
raise ValueError(
'Negative/null length transfers are not possible.'
)
iso_packet_desc.length = length
self.__callback = callback
self.__initialized = True | python | def setIsochronous(
self, endpoint, buffer_or_len, callback=None,
user_data=None, timeout=0, iso_transfer_length_list=None):
"""
Setup transfer for isochronous use.
endpoint
Endpoint to submit transfer to. Defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data)
To avoid memory copies, use an object implementing the writeable
buffer interface (ex: bytearray).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable.
iso_transfer_length_list
List of individual transfer sizes. If not provided, buffer_or_len
will be divided evenly among available transfers if possible, and
raise ValueError otherwise.
"""
if self.__submitted:
raise ValueError('Cannot alter a submitted transfer')
num_iso_packets = self.__num_iso_packets
if num_iso_packets == 0:
raise TypeError(
'This transfer canot be used for isochronous I/O. '
'You must get another one with a non-zero iso_packets '
'parameter.'
)
if self.__doomed:
raise DoomedTransferError('Cannot reuse a doomed transfer')
string_buffer, transfer_py_buffer = create_binary_buffer(buffer_or_len)
buffer_length = sizeof(string_buffer)
if iso_transfer_length_list is None:
iso_length, remainder = divmod(buffer_length, num_iso_packets)
if remainder:
raise ValueError(
'Buffer size %i cannot be evenly distributed among %i '
'transfers' % (
buffer_length,
num_iso_packets,
)
)
iso_transfer_length_list = [iso_length] * num_iso_packets
configured_iso_packets = len(iso_transfer_length_list)
if configured_iso_packets > num_iso_packets:
raise ValueError(
'Too many ISO transfer lengths (%i), there are '
'only %i ISO transfers available' % (
configured_iso_packets,
num_iso_packets,
)
)
if sum(iso_transfer_length_list) > buffer_length:
raise ValueError(
'ISO transfers too long (%i), there are only '
'%i bytes available' % (
sum(iso_transfer_length_list),
buffer_length,
)
)
transfer_p = self.__transfer
self.__initialized = False
self.__transfer_buffer = string_buffer
self.__transfer_py_buffer = transfer_py_buffer
self.__user_data = user_data
libusb1.libusb_fill_iso_transfer(
transfer_p, self.__handle, endpoint, string_buffer, buffer_length,
configured_iso_packets, self.__ctypesCallbackWrapper, None,
timeout)
for length, iso_packet_desc in zip(
iso_transfer_length_list,
libusb1.get_iso_packet_list(transfer_p)):
if length <= 0:
raise ValueError(
'Negative/null length transfers are not possible.'
)
iso_packet_desc.length = length
self.__callback = callback
self.__initialized = True | [
"def",
"setIsochronous",
"(",
"self",
",",
"endpoint",
",",
"buffer_or_len",
",",
"callback",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"timeout",
"=",
"0",
",",
"iso_transfer_length_list",
"=",
"None",
")",
":",
"if",
"self",
".",
"__submitted",
":... | Setup transfer for isochronous use.
endpoint
Endpoint to submit transfer to. Defines transfer direction (see
ENDPOINT_OUT and ENDPOINT_IN)).
buffer_or_len
Either a string (when sending data), or expected data length (when
receiving data)
To avoid memory copies, use an object implementing the writeable
buffer interface (ex: bytearray).
callback
Callback function to be invoked on transfer completion.
Called with transfer as parameter, return value ignored.
user_data
User data to pass to callback function.
timeout
Transfer timeout in milliseconds. 0 to disable.
iso_transfer_length_list
List of individual transfer sizes. If not provided, buffer_or_len
will be divided evenly among available transfers if possible, and
raise ValueError otherwise. | [
"Setup",
"transfer",
"for",
"isochronous",
"use",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L554-L639 | train | 204,696 |
vpelletier/python-libusb1 | usb1/__init__.py | USBTransfer.getISOBufferList | def getISOBufferList(self):
"""
Get individual ISO transfer's buffer.
Returns a list with one item per ISO transfer, with their
individually-configured sizes.
Returned list is consistent with getISOSetupList return value.
Should not be called on a submitted transfer.
See also iterISO.
"""
transfer_p = self.__transfer
transfer = transfer_p.contents
# pylint: disable=undefined-variable
if transfer.type != TRANSFER_TYPE_ISOCHRONOUS:
# pylint: enable=undefined-variable
raise TypeError(
'This method cannot be called on non-iso transfers.'
)
return libusb1.get_iso_packet_buffer_list(transfer_p) | python | def getISOBufferList(self):
"""
Get individual ISO transfer's buffer.
Returns a list with one item per ISO transfer, with their
individually-configured sizes.
Returned list is consistent with getISOSetupList return value.
Should not be called on a submitted transfer.
See also iterISO.
"""
transfer_p = self.__transfer
transfer = transfer_p.contents
# pylint: disable=undefined-variable
if transfer.type != TRANSFER_TYPE_ISOCHRONOUS:
# pylint: enable=undefined-variable
raise TypeError(
'This method cannot be called on non-iso transfers.'
)
return libusb1.get_iso_packet_buffer_list(transfer_p) | [
"def",
"getISOBufferList",
"(",
"self",
")",
":",
"transfer_p",
"=",
"self",
".",
"__transfer",
"transfer",
"=",
"transfer_p",
".",
"contents",
"# pylint: disable=undefined-variable",
"if",
"transfer",
".",
"type",
"!=",
"TRANSFER_TYPE_ISOCHRONOUS",
":",
"# pylint: en... | Get individual ISO transfer's buffer.
Returns a list with one item per ISO transfer, with their
individually-configured sizes.
Returned list is consistent with getISOSetupList return value.
Should not be called on a submitted transfer.
See also iterISO. | [
"Get",
"individual",
"ISO",
"transfer",
"s",
"buffer",
".",
"Returns",
"a",
"list",
"with",
"one",
"item",
"per",
"ISO",
"transfer",
"with",
"their",
"individually",
"-",
"configured",
"sizes",
".",
"Returned",
"list",
"is",
"consistent",
"with",
"getISOSetupL... | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L692-L710 | train | 204,697 |
vpelletier/python-libusb1 | usb1/__init__.py | USBTransfer.submit | def submit(self):
"""
Submit transfer for asynchronous handling.
"""
if self.__submitted:
raise ValueError('Cannot submit a submitted transfer')
if not self.__initialized:
raise ValueError(
'Cannot submit a transfer until it has been initialized'
)
if self.__doomed:
raise DoomedTransferError('Cannot submit doomed transfer')
self.__before_submit(self)
self.__submitted = True
result = libusb1.libusb_submit_transfer(self.__transfer)
if result:
self.__after_completion(self)
self.__submitted = False
raiseUSBError(result) | python | def submit(self):
"""
Submit transfer for asynchronous handling.
"""
if self.__submitted:
raise ValueError('Cannot submit a submitted transfer')
if not self.__initialized:
raise ValueError(
'Cannot submit a transfer until it has been initialized'
)
if self.__doomed:
raise DoomedTransferError('Cannot submit doomed transfer')
self.__before_submit(self)
self.__submitted = True
result = libusb1.libusb_submit_transfer(self.__transfer)
if result:
self.__after_completion(self)
self.__submitted = False
raiseUSBError(result) | [
"def",
"submit",
"(",
"self",
")",
":",
"if",
"self",
".",
"__submitted",
":",
"raise",
"ValueError",
"(",
"'Cannot submit a submitted transfer'",
")",
"if",
"not",
"self",
".",
"__initialized",
":",
"raise",
"ValueError",
"(",
"'Cannot submit a transfer until it ha... | Submit transfer for asynchronous handling. | [
"Submit",
"transfer",
"for",
"asynchronous",
"handling",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L803-L821 | train | 204,698 |
vpelletier/python-libusb1 | usb1/__init__.py | USBPoller.register | def register(self, fd, events):
"""
Register an USB-unrelated fd to poller.
Convenience method.
"""
if fd in self.__fd_set:
raise ValueError(
'This fd is a special USB event fd, it cannot be polled.'
)
self.__poller.register(fd, events) | python | def register(self, fd, events):
"""
Register an USB-unrelated fd to poller.
Convenience method.
"""
if fd in self.__fd_set:
raise ValueError(
'This fd is a special USB event fd, it cannot be polled.'
)
self.__poller.register(fd, events) | [
"def",
"register",
"(",
"self",
",",
"fd",
",",
"events",
")",
":",
"if",
"fd",
"in",
"self",
".",
"__fd_set",
":",
"raise",
"ValueError",
"(",
"'This fd is a special USB event fd, it cannot be polled.'",
")",
"self",
".",
"__poller",
".",
"register",
"(",
"fd... | Register an USB-unrelated fd to poller.
Convenience method. | [
"Register",
"an",
"USB",
"-",
"unrelated",
"fd",
"to",
"poller",
".",
"Convenience",
"method",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1107-L1116 | train | 204,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.