body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
85fcdd9b509adfa0609124d5513f878d335f376f9ba7c61fb167bffd34b35db9 | def append_column(self, label, values):
"Appends a column to the table or replaces a column.\n\n ``__setitem__`` is aliased to this method:\n ``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to\n ``table['new_col'] = make_array(1, 2, 3)``.\n\n Args:\n ``label`` (str): The label of the new column.\n\n ``values`` (single value or list/array): If a single value, every\n value in the new column is ``values``.\n\n If a list or array, the new column contains the values in\n ``values``, which must be the same length as the table.\n\n Returns:\n Original table with new or replaced column\n\n Raises:\n ``ValueError``: If\n - ``label`` is not a string.\n - ``values`` is a list/array and does not have the same length\n as the number of rows in the table.\n\n >>> table = Table().with_columns(\n ... 'letter', make_array('a', 'b', 'c', 'z'),\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> table\n letter | count | points\n a | 9 | 1\n b | 3 | 2\n c | 3 | 2\n z | 1 | 10\n >>> table.append_column('new_col1', make_array(10, 20, 30, 40))\n >>> table\n letter | count | points | new_col1\n a | 9 | 1 | 10\n b | 3 | 2 | 20\n c | 3 | 2 | 30\n z | 1 | 10 | 40\n >>> table.append_column('new_col2', 'hello')\n >>> table\n letter | count | points | new_col1 | new_col2\n a | 9 | 1 | 10 | hello\n b | 3 | 2 | 20 | hello\n c | 3 | 2 | 30 | hello\n z | 1 | 10 | 40 | hello\n >>> table.append_column(123, make_array(1, 2, 3, 4))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> table.append_column('bad_col', [1, 2])\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same\n number of rows as table.\n "
if (not isinstance(label, str)):
raise ValueError('The column label must be a string, but a {} was given'.format(label.__class__.__name__))
if (not isinstance(values, np.ndarray)):
if (not _is_non_string_iterable(values)):
values = ([values] * max(self.num_rows, 1))
values = np.array(tuple(values))
if ((self.num_rows != 0) and (len(values) != self.num_rows)):
raise ValueError('Column length mismatch. New column does not have the same number of rows as table.')
else:
self._num_rows = len(values)
self._columns[label] = values | Appends a column to the table or replaces a column.
``__setitem__`` is aliased to this method:
``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to
``table['new_col'] = make_array(1, 2, 3)``.
Args:
``label`` (str): The label of the new column.
``values`` (single value or list/array): If a single value, every
value in the new column is ``values``.
If a list or array, the new column contains the values in
``values``, which must be the same length as the table.
Returns:
Original table with new or replaced column
Raises:
``ValueError``: If
- ``label`` is not a string.
- ``values`` is a list/array and does not have the same length
as the number of rows in the table.
>>> table = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> table.append_column('new_col1', make_array(10, 20, 30, 40))
>>> table
letter | count | points | new_col1
a | 9 | 1 | 10
b | 3 | 2 | 20
c | 3 | 2 | 30
z | 1 | 10 | 40
>>> table.append_column('new_col2', 'hello')
>>> table
letter | count | points | new_col1 | new_col2
a | 9 | 1 | 10 | hello
b | 3 | 2 | 20 | hello
c | 3 | 2 | 30 | hello
z | 1 | 10 | 40 | hello
>>> table.append_column(123, make_array(1, 2, 3, 4))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> table.append_column('bad_col', [1, 2])
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same
number of rows as table. | digital-assyriology-review/datascience/tables.py | append_column | ds-modules/NESTUD-190A | 6 | python | def append_column(self, label, values):
"Appends a column to the table or replaces a column.\n\n ``__setitem__`` is aliased to this method:\n ``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to\n ``table['new_col'] = make_array(1, 2, 3)``.\n\n Args:\n ``label`` (str): The label of the new column.\n\n ``values`` (single value or list/array): If a single value, every\n value in the new column is ``values``.\n\n If a list or array, the new column contains the values in\n ``values``, which must be the same length as the table.\n\n Returns:\n Original table with new or replaced column\n\n Raises:\n ``ValueError``: If\n - ``label`` is not a string.\n - ``values`` is a list/array and does not have the same length\n as the number of rows in the table.\n\n >>> table = Table().with_columns(\n ... 'letter', make_array('a', 'b', 'c', 'z'),\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> table\n letter | count | points\n a | 9 | 1\n b | 3 | 2\n c | 3 | 2\n z | 1 | 10\n >>> table.append_column('new_col1', make_array(10, 20, 30, 40))\n >>> table\n letter | count | points | new_col1\n a | 9 | 1 | 10\n b | 3 | 2 | 20\n c | 3 | 2 | 30\n z | 1 | 10 | 40\n >>> table.append_column('new_col2', 'hello')\n >>> table\n letter | count | points | new_col1 | new_col2\n a | 9 | 1 | 10 | hello\n b | 3 | 2 | 20 | hello\n c | 3 | 2 | 30 | hello\n z | 1 | 10 | 40 | hello\n >>> table.append_column(123, make_array(1, 2, 3, 4))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> table.append_column('bad_col', [1, 2])\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same\n number of rows as table.\n "
if (not isinstance(label, str)):
raise ValueError('The column label must be a string, but a {} was given'.format(label.__class__.__name__))
if (not isinstance(values, np.ndarray)):
if (not _is_non_string_iterable(values)):
values = ([values] * max(self.num_rows, 1))
values = np.array(tuple(values))
if ((self.num_rows != 0) and (len(values) != self.num_rows)):
raise ValueError('Column length mismatch. New column does not have the same number of rows as table.')
else:
self._num_rows = len(values)
self._columns[label] = values | def append_column(self, label, values):
"Appends a column to the table or replaces a column.\n\n ``__setitem__`` is aliased to this method:\n ``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to\n ``table['new_col'] = make_array(1, 2, 3)``.\n\n Args:\n ``label`` (str): The label of the new column.\n\n ``values`` (single value or list/array): If a single value, every\n value in the new column is ``values``.\n\n If a list or array, the new column contains the values in\n ``values``, which must be the same length as the table.\n\n Returns:\n Original table with new or replaced column\n\n Raises:\n ``ValueError``: If\n - ``label`` is not a string.\n - ``values`` is a list/array and does not have the same length\n as the number of rows in the table.\n\n >>> table = Table().with_columns(\n ... 'letter', make_array('a', 'b', 'c', 'z'),\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> table\n letter | count | points\n a | 9 | 1\n b | 3 | 2\n c | 3 | 2\n z | 1 | 10\n >>> table.append_column('new_col1', make_array(10, 20, 30, 40))\n >>> table\n letter | count | points | new_col1\n a | 9 | 1 | 10\n b | 3 | 2 | 20\n c | 3 | 2 | 30\n z | 1 | 10 | 40\n >>> table.append_column('new_col2', 'hello')\n >>> table\n letter | count | points | new_col1 | new_col2\n a | 9 | 1 | 10 | hello\n b | 3 | 2 | 20 | hello\n c | 3 | 2 | 30 | hello\n z | 1 | 10 | 40 | hello\n >>> table.append_column(123, make_array(1, 2, 3, 4))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> table.append_column('bad_col', [1, 2])\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same\n number of rows as table.\n "
if (not isinstance(label, str)):
raise ValueError('The column label must be a string, but a {} was given'.format(label.__class__.__name__))
if (not isinstance(values, np.ndarray)):
if (not _is_non_string_iterable(values)):
values = ([values] * max(self.num_rows, 1))
values = np.array(tuple(values))
if ((self.num_rows != 0) and (len(values) != self.num_rows)):
raise ValueError('Column length mismatch. New column does not have the same number of rows as table.')
else:
self._num_rows = len(values)
self._columns[label] = values<|docstring|>Appends a column to the table or replaces a column.
``__setitem__`` is aliased to this method:
``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to
``table['new_col'] = make_array(1, 2, 3)``.
Args:
``label`` (str): The label of the new column.
``values`` (single value or list/array): If a single value, every
value in the new column is ``values``.
If a list or array, the new column contains the values in
``values``, which must be the same length as the table.
Returns:
Original table with new or replaced column
Raises:
``ValueError``: If
- ``label`` is not a string.
- ``values`` is a list/array and does not have the same length
as the number of rows in the table.
>>> table = Table().with_columns(
... 'letter', make_array('a', 'b', 'c', 'z'),
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
>>> table.append_column('new_col1', make_array(10, 20, 30, 40))
>>> table
letter | count | points | new_col1
a | 9 | 1 | 10
b | 3 | 2 | 20
c | 3 | 2 | 30
z | 1 | 10 | 40
>>> table.append_column('new_col2', 'hello')
>>> table
letter | count | points | new_col1 | new_col2
a | 9 | 1 | 10 | hello
b | 3 | 2 | 20 | hello
c | 3 | 2 | 30 | hello
z | 1 | 10 | 40 | hello
>>> table.append_column(123, make_array(1, 2, 3, 4))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> table.append_column('bad_col', [1, 2])
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same
number of rows as table.<|endoftext|> |
956b53d4d8fc6a185558e2a6d490ce42e18f06437f111f32f02e4f784e948510 | def relabel(self, column_label, new_label):
"Changes the label(s) of column(s) specified by ``column_label`` to\n labels in ``new_label``.\n\n Args:\n ``column_label`` -- (single str or array of str) The label(s) of\n columns to be changed to ``new_label``.\n\n ``new_label`` -- (single str or array of str): The label name(s)\n of columns to replace ``column_label``.\n\n Raises:\n ``ValueError`` -- if ``column_label`` is not in table, or if\n ``column_label`` and ``new_label`` are not of equal length.\n ``TypeError`` -- if ``column_label`` and/or ``new_label`` is not\n ``str``.\n\n Returns:\n Original table with ``new_label`` in place of ``column_label``.\n\n >>> table = Table().with_columns(\n ... 'points', make_array(1, 2, 3),\n ... 'id', make_array(12345, 123, 5123))\n >>> table.relabel('id', 'yolo')\n points | yolo\n 1 | 12345\n 2 | 123\n 3 | 5123\n >>> table.relabel(make_array('points', 'yolo'),\n ... make_array('red', 'blue'))\n red | blue\n 1 | 12345\n 2 | 123\n 3 | 5123\n >>> table.relabel(make_array('red', 'green', 'blue'),\n ... make_array('cyan', 'magenta', 'yellow', 'key'))\n Traceback (most recent call last):\n ...\n ValueError: Invalid arguments. column_label and new_label must be of equal length.\n "
if isinstance(column_label, numbers.Integral):
column_label = self._as_label(column_label)
if (isinstance(column_label, str) and isinstance(new_label, str)):
(column_label, new_label) = ([column_label], [new_label])
if (len(column_label) != len(new_label)):
raise ValueError('Invalid arguments. column_label and new_label must be of equal length.')
old_to_new = dict(zip(column_label, new_label))
for label in column_label:
if (not (label in self.labels)):
raise ValueError('Invalid labels. Column labels must already exist in table in order to be replaced.')
rewrite = (lambda s: (old_to_new[s] if (s in old_to_new) else s))
columns = [(rewrite(s), c) for (s, c) in self._columns.items()]
self._columns = collections.OrderedDict(columns)
for label in self._formats:
if (label in column_label):
formatter = self._formats.pop(label)
self._formats[old_to_new[label]] = formatter
return self | Changes the label(s) of column(s) specified by ``column_label`` to
labels in ``new_label``.
Args:
``column_label`` -- (single str or array of str) The label(s) of
columns to be changed to ``new_label``.
``new_label`` -- (single str or array of str): The label name(s)
of columns to replace ``column_label``.
Raises:
``ValueError`` -- if ``column_label`` is not in table, or if
``column_label`` and ``new_label`` are not of equal length.
``TypeError`` -- if ``column_label`` and/or ``new_label`` is not
``str``.
Returns:
Original table with ``new_label`` in place of ``column_label``.
>>> table = Table().with_columns(
... 'points', make_array(1, 2, 3),
... 'id', make_array(12345, 123, 5123))
>>> table.relabel('id', 'yolo')
points | yolo
1 | 12345
2 | 123
3 | 5123
>>> table.relabel(make_array('points', 'yolo'),
... make_array('red', 'blue'))
red | blue
1 | 12345
2 | 123
3 | 5123
>>> table.relabel(make_array('red', 'green', 'blue'),
... make_array('cyan', 'magenta', 'yellow', 'key'))
Traceback (most recent call last):
...
ValueError: Invalid arguments. column_label and new_label must be of equal length. | digital-assyriology-review/datascience/tables.py | relabel | ds-modules/NESTUD-190A | 6 | python | def relabel(self, column_label, new_label):
"Changes the label(s) of column(s) specified by ``column_label`` to\n labels in ``new_label``.\n\n Args:\n ``column_label`` -- (single str or array of str) The label(s) of\n columns to be changed to ``new_label``.\n\n ``new_label`` -- (single str or array of str): The label name(s)\n of columns to replace ``column_label``.\n\n Raises:\n ``ValueError`` -- if ``column_label`` is not in table, or if\n ``column_label`` and ``new_label`` are not of equal length.\n ``TypeError`` -- if ``column_label`` and/or ``new_label`` is not\n ``str``.\n\n Returns:\n Original table with ``new_label`` in place of ``column_label``.\n\n >>> table = Table().with_columns(\n ... 'points', make_array(1, 2, 3),\n ... 'id', make_array(12345, 123, 5123))\n >>> table.relabel('id', 'yolo')\n points | yolo\n 1 | 12345\n 2 | 123\n 3 | 5123\n >>> table.relabel(make_array('points', 'yolo'),\n ... make_array('red', 'blue'))\n red | blue\n 1 | 12345\n 2 | 123\n 3 | 5123\n >>> table.relabel(make_array('red', 'green', 'blue'),\n ... make_array('cyan', 'magenta', 'yellow', 'key'))\n Traceback (most recent call last):\n ...\n ValueError: Invalid arguments. column_label and new_label must be of equal length.\n "
if isinstance(column_label, numbers.Integral):
column_label = self._as_label(column_label)
if (isinstance(column_label, str) and isinstance(new_label, str)):
(column_label, new_label) = ([column_label], [new_label])
if (len(column_label) != len(new_label)):
raise ValueError('Invalid arguments. column_label and new_label must be of equal length.')
old_to_new = dict(zip(column_label, new_label))
for label in column_label:
if (not (label in self.labels)):
raise ValueError('Invalid labels. Column labels must already exist in table in order to be replaced.')
rewrite = (lambda s: (old_to_new[s] if (s in old_to_new) else s))
columns = [(rewrite(s), c) for (s, c) in self._columns.items()]
self._columns = collections.OrderedDict(columns)
for label in self._formats:
if (label in column_label):
formatter = self._formats.pop(label)
self._formats[old_to_new[label]] = formatter
return self | def relabel(self, column_label, new_label):
"Changes the label(s) of column(s) specified by ``column_label`` to\n labels in ``new_label``.\n\n Args:\n ``column_label`` -- (single str or array of str) The label(s) of\n columns to be changed to ``new_label``.\n\n ``new_label`` -- (single str or array of str): The label name(s)\n of columns to replace ``column_label``.\n\n Raises:\n ``ValueError`` -- if ``column_label`` is not in table, or if\n ``column_label`` and ``new_label`` are not of equal length.\n ``TypeError`` -- if ``column_label`` and/or ``new_label`` is not\n ``str``.\n\n Returns:\n Original table with ``new_label`` in place of ``column_label``.\n\n >>> table = Table().with_columns(\n ... 'points', make_array(1, 2, 3),\n ... 'id', make_array(12345, 123, 5123))\n >>> table.relabel('id', 'yolo')\n points | yolo\n 1 | 12345\n 2 | 123\n 3 | 5123\n >>> table.relabel(make_array('points', 'yolo'),\n ... make_array('red', 'blue'))\n red | blue\n 1 | 12345\n 2 | 123\n 3 | 5123\n >>> table.relabel(make_array('red', 'green', 'blue'),\n ... make_array('cyan', 'magenta', 'yellow', 'key'))\n Traceback (most recent call last):\n ...\n ValueError: Invalid arguments. column_label and new_label must be of equal length.\n "
if isinstance(column_label, numbers.Integral):
column_label = self._as_label(column_label)
if (isinstance(column_label, str) and isinstance(new_label, str)):
(column_label, new_label) = ([column_label], [new_label])
if (len(column_label) != len(new_label)):
raise ValueError('Invalid arguments. column_label and new_label must be of equal length.')
old_to_new = dict(zip(column_label, new_label))
for label in column_label:
if (not (label in self.labels)):
raise ValueError('Invalid labels. Column labels must already exist in table in order to be replaced.')
rewrite = (lambda s: (old_to_new[s] if (s in old_to_new) else s))
columns = [(rewrite(s), c) for (s, c) in self._columns.items()]
self._columns = collections.OrderedDict(columns)
for label in self._formats:
if (label in column_label):
formatter = self._formats.pop(label)
self._formats[old_to_new[label]] = formatter
return self<|docstring|>Changes the label(s) of column(s) specified by ``column_label`` to
labels in ``new_label``.
Args:
``column_label`` -- (single str or array of str) The label(s) of
columns to be changed to ``new_label``.
``new_label`` -- (single str or array of str): The label name(s)
of columns to replace ``column_label``.
Raises:
``ValueError`` -- if ``column_label`` is not in table, or if
``column_label`` and ``new_label`` are not of equal length.
``TypeError`` -- if ``column_label`` and/or ``new_label`` is not
``str``.
Returns:
Original table with ``new_label`` in place of ``column_label``.
>>> table = Table().with_columns(
... 'points', make_array(1, 2, 3),
... 'id', make_array(12345, 123, 5123))
>>> table.relabel('id', 'yolo')
points | yolo
1 | 12345
2 | 123
3 | 5123
>>> table.relabel(make_array('points', 'yolo'),
... make_array('red', 'blue'))
red | blue
1 | 12345
2 | 123
3 | 5123
>>> table.relabel(make_array('red', 'green', 'blue'),
... make_array('cyan', 'magenta', 'yellow', 'key'))
Traceback (most recent call last):
...
ValueError: Invalid arguments. column_label and new_label must be of equal length.<|endoftext|> |
54cc17d8fb92542ae82dd8b49dc8181a6bd2af89f78bd0949513ca7b81fa5fd5 | def remove(self, row_or_row_indices):
'Removes a row or multiple rows of a table in place.'
if (not row_or_row_indices):
return
if isinstance(row_or_row_indices, int):
rows_remove = [row_or_row_indices]
else:
rows_remove = row_or_row_indices
for col in self._columns:
self._columns[col] = [elem for (i, elem) in enumerate(self[col]) if (i not in rows_remove)]
return self | Removes a row or multiple rows of a table in place. | digital-assyriology-review/datascience/tables.py | remove | ds-modules/NESTUD-190A | 6 | python | def remove(self, row_or_row_indices):
if (not row_or_row_indices):
return
if isinstance(row_or_row_indices, int):
rows_remove = [row_or_row_indices]
else:
rows_remove = row_or_row_indices
for col in self._columns:
self._columns[col] = [elem for (i, elem) in enumerate(self[col]) if (i not in rows_remove)]
return self | def remove(self, row_or_row_indices):
if (not row_or_row_indices):
return
if isinstance(row_or_row_indices, int):
rows_remove = [row_or_row_indices]
else:
rows_remove = row_or_row_indices
for col in self._columns:
self._columns[col] = [elem for (i, elem) in enumerate(self[col]) if (i not in rows_remove)]
return self<|docstring|>Removes a row or multiple rows of a table in place.<|endoftext|> |
c72c6a9148e6b6946f4d1fdc75f2ce0a5f13c433f62abfa39abe437e86264349 | def copy(self, *, shallow=False):
'Return a copy of a table.'
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table | Return a copy of a table. | digital-assyriology-review/datascience/tables.py | copy | ds-modules/NESTUD-190A | 6 | python | def copy(self, *, shallow=False):
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table | def copy(self, *, shallow=False):
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table<|docstring|>Return a copy of a table.<|endoftext|> |
8afd94468f493e570bd3334114ce1c419f9df79b1e6029f1fa8a50789eacea08 | def select(self, *column_or_columns):
"Return a table with only the columns in ``column_or_columns``.\n\n Args:\n ``column_or_columns``: Columns to select from the ``Table`` as\n either column labels (``str``) or column indices (``int``).\n\n Returns:\n A new instance of ``Table`` containing only selected columns.\n The columns of the new ``Table`` are in the order given in\n ``column_or_columns``.\n\n Raises:\n ``KeyError`` if any of ``column_or_columns`` are not in the table.\n\n >>> flowers = Table().with_columns(\n ... 'Number of petals', make_array(8, 34, 5),\n ... 'Name', make_array('lotus', 'sunflower', 'rose'),\n ... 'Weight', make_array(10, 5, 6)\n ... )\n\n >>> flowers\n Number of petals | Name | Weight\n 8 | lotus | 10\n 34 | sunflower | 5\n 5 | rose | 6\n\n >>> flowers.select('Number of petals', 'Weight')\n Number of petals | Weight\n 8 | 10\n 34 | 5\n 5 | 6\n\n >>> flowers # original table unchanged\n Number of petals | Name | Weight\n 8 | lotus | 10\n 34 | sunflower | 5\n 5 | rose | 6\n\n >>> flowers.select(0, 2)\n Number of petals | Weight\n 8 | 10\n 34 | 5\n 5 | 6\n "
labels = self._varargs_as_labels(column_or_columns)
table = type(self)()
for label in labels:
self._add_column_and_format(table, label, np.copy(self[label]))
return table | Return a table with only the columns in ``column_or_columns``.
Args:
``column_or_columns``: Columns to select from the ``Table`` as
either column labels (``str``) or column indices (``int``).
Returns:
A new instance of ``Table`` containing only selected columns.
The columns of the new ``Table`` are in the order given in
``column_or_columns``.
Raises:
``KeyError`` if any of ``column_or_columns`` are not in the table.
>>> flowers = Table().with_columns(
... 'Number of petals', make_array(8, 34, 5),
... 'Name', make_array('lotus', 'sunflower', 'rose'),
... 'Weight', make_array(10, 5, 6)
... )
>>> flowers
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select('Number of petals', 'Weight')
Number of petals | Weight
8 | 10
34 | 5
5 | 6
>>> flowers # original table unchanged
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select(0, 2)
Number of petals | Weight
8 | 10
34 | 5
5 | 6 | digital-assyriology-review/datascience/tables.py | select | ds-modules/NESTUD-190A | 6 | python | def select(self, *column_or_columns):
"Return a table with only the columns in ``column_or_columns``.\n\n Args:\n ``column_or_columns``: Columns to select from the ``Table`` as\n either column labels (``str``) or column indices (``int``).\n\n Returns:\n A new instance of ``Table`` containing only selected columns.\n The columns of the new ``Table`` are in the order given in\n ``column_or_columns``.\n\n Raises:\n ``KeyError`` if any of ``column_or_columns`` are not in the table.\n\n >>> flowers = Table().with_columns(\n ... 'Number of petals', make_array(8, 34, 5),\n ... 'Name', make_array('lotus', 'sunflower', 'rose'),\n ... 'Weight', make_array(10, 5, 6)\n ... )\n\n >>> flowers\n Number of petals | Name | Weight\n 8 | lotus | 10\n 34 | sunflower | 5\n 5 | rose | 6\n\n >>> flowers.select('Number of petals', 'Weight')\n Number of petals | Weight\n 8 | 10\n 34 | 5\n 5 | 6\n\n >>> flowers # original table unchanged\n Number of petals | Name | Weight\n 8 | lotus | 10\n 34 | sunflower | 5\n 5 | rose | 6\n\n >>> flowers.select(0, 2)\n Number of petals | Weight\n 8 | 10\n 34 | 5\n 5 | 6\n "
labels = self._varargs_as_labels(column_or_columns)
table = type(self)()
for label in labels:
self._add_column_and_format(table, label, np.copy(self[label]))
return table | def select(self, *column_or_columns):
"Return a table with only the columns in ``column_or_columns``.\n\n Args:\n ``column_or_columns``: Columns to select from the ``Table`` as\n either column labels (``str``) or column indices (``int``).\n\n Returns:\n A new instance of ``Table`` containing only selected columns.\n The columns of the new ``Table`` are in the order given in\n ``column_or_columns``.\n\n Raises:\n ``KeyError`` if any of ``column_or_columns`` are not in the table.\n\n >>> flowers = Table().with_columns(\n ... 'Number of petals', make_array(8, 34, 5),\n ... 'Name', make_array('lotus', 'sunflower', 'rose'),\n ... 'Weight', make_array(10, 5, 6)\n ... )\n\n >>> flowers\n Number of petals | Name | Weight\n 8 | lotus | 10\n 34 | sunflower | 5\n 5 | rose | 6\n\n >>> flowers.select('Number of petals', 'Weight')\n Number of petals | Weight\n 8 | 10\n 34 | 5\n 5 | 6\n\n >>> flowers # original table unchanged\n Number of petals | Name | Weight\n 8 | lotus | 10\n 34 | sunflower | 5\n 5 | rose | 6\n\n >>> flowers.select(0, 2)\n Number of petals | Weight\n 8 | 10\n 34 | 5\n 5 | 6\n "
labels = self._varargs_as_labels(column_or_columns)
table = type(self)()
for label in labels:
self._add_column_and_format(table, label, np.copy(self[label]))
return table<|docstring|>Return a table with only the columns in ``column_or_columns``.
Args:
``column_or_columns``: Columns to select from the ``Table`` as
either column labels (``str``) or column indices (``int``).
Returns:
A new instance of ``Table`` containing only selected columns.
The columns of the new ``Table`` are in the order given in
``column_or_columns``.
Raises:
``KeyError`` if any of ``column_or_columns`` are not in the table.
>>> flowers = Table().with_columns(
... 'Number of petals', make_array(8, 34, 5),
... 'Name', make_array('lotus', 'sunflower', 'rose'),
... 'Weight', make_array(10, 5, 6)
... )
>>> flowers
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select('Number of petals', 'Weight')
Number of petals | Weight
8 | 10
34 | 5
5 | 6
>>> flowers # original table unchanged
Number of petals | Name | Weight
8 | lotus | 10
34 | sunflower | 5
5 | rose | 6
>>> flowers.select(0, 2)
Number of petals | Weight
8 | 10
34 | 5
5 | 6<|endoftext|> |
bc9a88cb01fb46f2749ace46a659a5b46fd22a413e03d65959837fd1df2ed2ff | def drop(self, *column_or_columns):
"Return a Table with only columns other than selected label or\n labels.\n\n Args:\n ``column_or_columns`` (string or list of strings): The header\n names or indices of the columns to be dropped.\n\n ``column_or_columns`` must be an existing header name, or a\n valid column index.\n\n Returns:\n An instance of ``Table`` with given columns removed.\n\n >>> t = Table().with_columns(\n ... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),\n ... 'prices', make_array(6, 5, 5),\n ... 'calories', make_array(743, 651, 582))\n >>> t\n burgers | prices | calories\n cheeseburger | 6 | 743\n hamburger | 5 | 651\n veggie burger | 5 | 582\n >>> t.drop('prices')\n burgers | calories\n cheeseburger | 743\n hamburger | 651\n veggie burger | 582\n >>> t.drop(['burgers', 'calories'])\n prices\n 6\n 5\n 5\n >>> t.drop('burgers', 'calories')\n prices\n 6\n 5\n 5\n >>> t.drop([0, 2])\n prices\n 6\n 5\n 5\n >>> t.drop(0, 2)\n prices\n 6\n 5\n 5\n >>> t.drop(1)\n burgers | calories\n cheeseburger | 743\n hamburger | 651\n veggie burger | 582\n "
exclude = _varargs_labels_as_list(column_or_columns)
return self.select([c for (i, c) in enumerate(self.labels) if ((i not in exclude) and (c not in exclude))]) | Return a Table with only columns other than selected label or
labels.
Args:
``column_or_columns`` (string or list of strings): The header
names or indices of the columns to be dropped.
``column_or_columns`` must be an existing header name, or a
valid column index.
Returns:
An instance of ``Table`` with given columns removed.
>>> t = Table().with_columns(
... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),
... 'prices', make_array(6, 5, 5),
... 'calories', make_array(743, 651, 582))
>>> t
burgers | prices | calories
cheeseburger | 6 | 743
hamburger | 5 | 651
veggie burger | 5 | 582
>>> t.drop('prices')
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
>>> t.drop(['burgers', 'calories'])
prices
6
5
5
>>> t.drop('burgers', 'calories')
prices
6
5
5
>>> t.drop([0, 2])
prices
6
5
5
>>> t.drop(0, 2)
prices
6
5
5
>>> t.drop(1)
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582 | digital-assyriology-review/datascience/tables.py | drop | ds-modules/NESTUD-190A | 6 | python | def drop(self, *column_or_columns):
"Return a Table with only columns other than selected label or\n labels.\n\n Args:\n ``column_or_columns`` (string or list of strings): The header\n names or indices of the columns to be dropped.\n\n ``column_or_columns`` must be an existing header name, or a\n valid column index.\n\n Returns:\n An instance of ``Table`` with given columns removed.\n\n >>> t = Table().with_columns(\n ... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),\n ... 'prices', make_array(6, 5, 5),\n ... 'calories', make_array(743, 651, 582))\n >>> t\n burgers | prices | calories\n cheeseburger | 6 | 743\n hamburger | 5 | 651\n veggie burger | 5 | 582\n >>> t.drop('prices')\n burgers | calories\n cheeseburger | 743\n hamburger | 651\n veggie burger | 582\n >>> t.drop(['burgers', 'calories'])\n prices\n 6\n 5\n 5\n >>> t.drop('burgers', 'calories')\n prices\n 6\n 5\n 5\n >>> t.drop([0, 2])\n prices\n 6\n 5\n 5\n >>> t.drop(0, 2)\n prices\n 6\n 5\n 5\n >>> t.drop(1)\n burgers | calories\n cheeseburger | 743\n hamburger | 651\n veggie burger | 582\n "
exclude = _varargs_labels_as_list(column_or_columns)
return self.select([c for (i, c) in enumerate(self.labels) if ((i not in exclude) and (c not in exclude))]) | def drop(self, *column_or_columns):
"Return a Table with only columns other than selected label or\n labels.\n\n Args:\n ``column_or_columns`` (string or list of strings): The header\n names or indices of the columns to be dropped.\n\n ``column_or_columns`` must be an existing header name, or a\n valid column index.\n\n Returns:\n An instance of ``Table`` with given columns removed.\n\n >>> t = Table().with_columns(\n ... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),\n ... 'prices', make_array(6, 5, 5),\n ... 'calories', make_array(743, 651, 582))\n >>> t\n burgers | prices | calories\n cheeseburger | 6 | 743\n hamburger | 5 | 651\n veggie burger | 5 | 582\n >>> t.drop('prices')\n burgers | calories\n cheeseburger | 743\n hamburger | 651\n veggie burger | 582\n >>> t.drop(['burgers', 'calories'])\n prices\n 6\n 5\n 5\n >>> t.drop('burgers', 'calories')\n prices\n 6\n 5\n 5\n >>> t.drop([0, 2])\n prices\n 6\n 5\n 5\n >>> t.drop(0, 2)\n prices\n 6\n 5\n 5\n >>> t.drop(1)\n burgers | calories\n cheeseburger | 743\n hamburger | 651\n veggie burger | 582\n "
exclude = _varargs_labels_as_list(column_or_columns)
return self.select([c for (i, c) in enumerate(self.labels) if ((i not in exclude) and (c not in exclude))])<|docstring|>Return a Table with only columns other than selected label or
labels.
Args:
``column_or_columns`` (string or list of strings): The header
names or indices of the columns to be dropped.
``column_or_columns`` must be an existing header name, or a
valid column index.
Returns:
An instance of ``Table`` with given columns removed.
>>> t = Table().with_columns(
... 'burgers', make_array('cheeseburger', 'hamburger', 'veggie burger'),
... 'prices', make_array(6, 5, 5),
... 'calories', make_array(743, 651, 582))
>>> t
burgers | prices | calories
cheeseburger | 6 | 743
hamburger | 5 | 651
veggie burger | 5 | 582
>>> t.drop('prices')
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582
>>> t.drop(['burgers', 'calories'])
prices
6
5
5
>>> t.drop('burgers', 'calories')
prices
6
5
5
>>> t.drop([0, 2])
prices
6
5
5
>>> t.drop(0, 2)
prices
6
5
5
>>> t.drop(1)
burgers | calories
cheeseburger | 743
hamburger | 651
veggie burger | 582<|endoftext|> |
750df0223705e993186d5434091d976e391410c4cac00389f945f07cafcdfc35 | def where(self, column_or_label, value_or_predicate=None, other=None):
'\n Return a new ``Table`` containing rows where ``value_or_predicate``\n returns True for values in ``column_or_label``.\n\n Args:\n ``column_or_label``: A column of the ``Table`` either as a label\n (``str``) or an index (``int``). Can also be an array of booleans;\n only the rows where the array value is ``True`` are kept.\n\n ``value_or_predicate``: If a function, it is applied to every value\n in ``column_or_label``. Only the rows where ``value_or_predicate``\n returns True are kept. If a single value, only the rows where the\n values in ``column_or_label`` are equal to ``value_or_predicate``\n are kept.\n\n ``other``: Optional additional column label for\n ``value_or_predicate`` to make pairwise comparisons. See the\n examples below for usage. When ``other`` is supplied,\n ``value_or_predicate`` must be a callable function.\n\n Returns:\n If ``value_or_predicate`` is a function, returns a new ``Table``\n containing only the rows where ``value_or_predicate(val)`` is True\n for the ``val``s in ``column_or_label``.\n\n If ``value_or_predicate`` is a value, returns a new ``Table``\n containing only the rows where the values in ``column_or_label``\n are equal to ``value_or_predicate``.\n\n If ``column_or_label`` is an array of booleans, returns a new\n ``Table`` containing only the rows where ``column_or_label`` is\n ``True``.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue",\n ... "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular",\n ... "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))\n\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.2\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 0\n Green | Round | 2 | 3\n\n Use a value to select matching rows\n\n >>> marbles.where("Price", 1.3)\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n\n In general, a higher order predicate function such as the functions in\n ``datascience.predicates.are`` can be used.\n\n >>> from datascience.predicates import are\n >>> # equivalent to previous example\n >>> marbles.where("Price", are.equal_to(1.3))\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n\n >>> marbles.where("Price", are.above(1.5))\n Color | Shape | Amount | Price\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Round | 2 | 3\n\n Use the optional argument ``other`` to apply predicates to compare\n columns.\n\n >>> marbles.where("Price", are.above, "Amount")\n Color | Shape | Amount | Price\n Green | Round | 2 | 3\n\n >>> marbles.where("Price", are.equal_to, "Amount") # empty table\n Color | Shape | Amount | Price\n '
column = self._get_column(column_or_label)
if (other is not None):
assert callable(value_or_predicate), 'Predicate required for 3-arg where'
predicate = value_or_predicate
other = self._get_column(other)
column = [predicate(y)(x) for (x, y) in zip(column, other)]
elif (value_or_predicate is not None):
if (not callable(value_or_predicate)):
predicate = _predicates.are.equal_to(value_or_predicate)
else:
predicate = value_or_predicate
column = [predicate(x) for x in column]
return self.take(np.nonzero(column)[0]) | Return a new ``Table`` containing rows where ``value_or_predicate``
returns True for values in ``column_or_label``.
Args:
``column_or_label``: A column of the ``Table`` either as a label
(``str``) or an index (``int``). Can also be an array of booleans;
only the rows where the array value is ``True`` are kept.
``value_or_predicate``: If a function, it is applied to every value
in ``column_or_label``. Only the rows where ``value_or_predicate``
returns True are kept. If a single value, only the rows where the
values in ``column_or_label`` are equal to ``value_or_predicate``
are kept.
``other``: Optional additional column label for
``value_or_predicate`` to make pairwise comparisons. See the
examples below for usage. When ``other`` is supplied,
``value_or_predicate`` must be a callable function.
Returns:
If ``value_or_predicate`` is a function, returns a new ``Table``
containing only the rows where ``value_or_predicate(val)`` is True
for the ``val``s in ``column_or_label``.
If ``value_or_predicate`` is a value, returns a new ``Table``
containing only the rows where the values in ``column_or_label``
are equal to ``value_or_predicate``.
If ``column_or_label`` is an array of booleans, returns a new
``Table`` containing only the rows where ``column_or_label`` is
``True``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue",
... "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular",
... "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.2
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 0
Green | Round | 2 | 3
Use a value to select matching rows
>>> marbles.where("Price", 1.3)
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
In general, a higher order predicate function such as the functions in
``datascience.predicates.are`` can be used.
>>> from datascience.predicates import are
>>> # equivalent to previous example
>>> marbles.where("Price", are.equal_to(1.3))
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
>>> marbles.where("Price", are.above(1.5))
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Round | 2 | 3
Use the optional argument ``other`` to apply predicates to compare
columns.
>>> marbles.where("Price", are.above, "Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 3
>>> marbles.where("Price", are.equal_to, "Amount") # empty table
Color | Shape | Amount | Price | digital-assyriology-review/datascience/tables.py | where | ds-modules/NESTUD-190A | 6 | python | def where(self, column_or_label, value_or_predicate=None, other=None):
'\n Return a new ``Table`` containing rows where ``value_or_predicate``\n returns True for values in ``column_or_label``.\n\n Args:\n ``column_or_label``: A column of the ``Table`` either as a label\n (``str``) or an index (``int``). Can also be an array of booleans;\n only the rows where the array value is ``True`` are kept.\n\n ``value_or_predicate``: If a function, it is applied to every value\n in ``column_or_label``. Only the rows where ``value_or_predicate``\n returns True are kept. If a single value, only the rows where the\n values in ``column_or_label`` are equal to ``value_or_predicate``\n are kept.\n\n ``other``: Optional additional column label for\n ``value_or_predicate`` to make pairwise comparisons. See the\n examples below for usage. When ``other`` is supplied,\n ``value_or_predicate`` must be a callable function.\n\n Returns:\n If ``value_or_predicate`` is a function, returns a new ``Table``\n containing only the rows where ``value_or_predicate(val)`` is True\n for the ``val``s in ``column_or_label``.\n\n If ``value_or_predicate`` is a value, returns a new ``Table``\n containing only the rows where the values in ``column_or_label``\n are equal to ``value_or_predicate``.\n\n If ``column_or_label`` is an array of booleans, returns a new\n ``Table`` containing only the rows where ``column_or_label`` is\n ``True``.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue",\n ... "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular",\n ... "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))\n\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.2\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 0\n Green | Round | 2 | 3\n\n Use a value to select matching rows\n\n >>> marbles.where("Price", 1.3)\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n\n In general, a higher order predicate function such as the functions in\n ``datascience.predicates.are`` can be used.\n\n >>> from datascience.predicates import are\n >>> # equivalent to previous example\n >>> marbles.where("Price", are.equal_to(1.3))\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n\n >>> marbles.where("Price", are.above(1.5))\n Color | Shape | Amount | Price\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Round | 2 | 3\n\n Use the optional argument ``other`` to apply predicates to compare\n columns.\n\n >>> marbles.where("Price", are.above, "Amount")\n Color | Shape | Amount | Price\n Green | Round | 2 | 3\n\n >>> marbles.where("Price", are.equal_to, "Amount") # empty table\n Color | Shape | Amount | Price\n '
column = self._get_column(column_or_label)
if (other is not None):
assert callable(value_or_predicate), 'Predicate required for 3-arg where'
predicate = value_or_predicate
other = self._get_column(other)
column = [predicate(y)(x) for (x, y) in zip(column, other)]
elif (value_or_predicate is not None):
if (not callable(value_or_predicate)):
predicate = _predicates.are.equal_to(value_or_predicate)
else:
predicate = value_or_predicate
column = [predicate(x) for x in column]
return self.take(np.nonzero(column)[0]) | def where(self, column_or_label, value_or_predicate=None, other=None):
'\n Return a new ``Table`` containing rows where ``value_or_predicate``\n returns True for values in ``column_or_label``.\n\n Args:\n ``column_or_label``: A column of the ``Table`` either as a label\n (``str``) or an index (``int``). Can also be an array of booleans;\n only the rows where the array value is ``True`` are kept.\n\n ``value_or_predicate``: If a function, it is applied to every value\n in ``column_or_label``. Only the rows where ``value_or_predicate``\n returns True are kept. If a single value, only the rows where the\n values in ``column_or_label`` are equal to ``value_or_predicate``\n are kept.\n\n ``other``: Optional additional column label for\n ``value_or_predicate`` to make pairwise comparisons. See the\n examples below for usage. When ``other`` is supplied,\n ``value_or_predicate`` must be a callable function.\n\n Returns:\n If ``value_or_predicate`` is a function, returns a new ``Table``\n containing only the rows where ``value_or_predicate(val)`` is True\n for the ``val``s in ``column_or_label``.\n\n If ``value_or_predicate`` is a value, returns a new ``Table``\n containing only the rows where the values in ``column_or_label``\n are equal to ``value_or_predicate``.\n\n If ``column_or_label`` is an array of booleans, returns a new\n ``Table`` containing only the rows where ``column_or_label`` is\n ``True``.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue",\n ... "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular",\n ... "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))\n\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.2\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 0\n Green | Round | 2 | 3\n\n Use a value to select matching rows\n\n >>> marbles.where("Price", 1.3)\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n\n In general, a higher order predicate function such as the functions in\n ``datascience.predicates.are`` can be used.\n\n >>> from datascience.predicates import are\n >>> # equivalent to previous example\n >>> marbles.where("Price", are.equal_to(1.3))\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n\n >>> marbles.where("Price", are.above(1.5))\n Color | Shape | Amount | Price\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Round | 2 | 3\n\n Use the optional argument ``other`` to apply predicates to compare\n columns.\n\n >>> marbles.where("Price", are.above, "Amount")\n Color | Shape | Amount | Price\n Green | Round | 2 | 3\n\n >>> marbles.where("Price", are.equal_to, "Amount") # empty table\n Color | Shape | Amount | Price\n '
column = self._get_column(column_or_label)
if (other is not None):
assert callable(value_or_predicate), 'Predicate required for 3-arg where'
predicate = value_or_predicate
other = self._get_column(other)
column = [predicate(y)(x) for (x, y) in zip(column, other)]
elif (value_or_predicate is not None):
if (not callable(value_or_predicate)):
predicate = _predicates.are.equal_to(value_or_predicate)
else:
predicate = value_or_predicate
column = [predicate(x) for x in column]
return self.take(np.nonzero(column)[0])<|docstring|>Return a new ``Table`` containing rows where ``value_or_predicate``
returns True for values in ``column_or_label``.
Args:
``column_or_label``: A column of the ``Table`` either as a label
(``str``) or an index (``int``). Can also be an array of booleans;
only the rows where the array value is ``True`` are kept.
``value_or_predicate``: If a function, it is applied to every value
in ``column_or_label``. Only the rows where ``value_or_predicate``
returns True are kept. If a single value, only the rows where the
values in ``column_or_label`` are equal to ``value_or_predicate``
are kept.
``other``: Optional additional column label for
``value_or_predicate`` to make pairwise comparisons. See the
examples below for usage. When ``other`` is supplied,
``value_or_predicate`` must be a callable function.
Returns:
If ``value_or_predicate`` is a function, returns a new ``Table``
containing only the rows where ``value_or_predicate(val)`` is True
for the ``val``s in ``column_or_label``.
If ``value_or_predicate`` is a value, returns a new ``Table``
containing only the rows where the values in ``column_or_label``
are equal to ``value_or_predicate``.
If ``column_or_label`` is an array of booleans, returns a new
``Table`` containing only the rows where ``column_or_label`` is
``True``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue",
... "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular",
... "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.2
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 0
Green | Round | 2 | 3
Use a value to select matching rows
>>> marbles.where("Price", 1.3)
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
In general, a higher order predicate function such as the functions in
``datascience.predicates.are`` can be used.
>>> from datascience.predicates import are
>>> # equivalent to previous example
>>> marbles.where("Price", are.equal_to(1.3))
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
>>> marbles.where("Price", are.above(1.5))
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Round | 2 | 3
Use the optional argument ``other`` to apply predicates to compare
columns.
>>> marbles.where("Price", are.above, "Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 3
>>> marbles.where("Price", are.equal_to, "Amount") # empty table
Color | Shape | Amount | Price<|endoftext|> |
f0ee064d44a2d35a85c449230fe4e97a64f0fb6e95c0ed2e9b07492bba9c6388 | def sort(self, column_or_label, descending=False, distinct=False):
'Return a Table of rows sorted according to the values in a column.\n\n Args:\n ``column_or_label``: the column whose values are used for sorting.\n\n ``descending``: if True, sorting will be in descending, rather than\n ascending order.\n\n ``distinct``: if True, repeated values in ``column_or_label`` will\n be omitted.\n\n Returns:\n An instance of ``Table`` containing rows sorted based on the values\n in ``column_or_label``.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.sort("Amount")\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Blue | Rectangular | 12 | 2\n >>> marbles.sort("Amount", descending = True)\n Color | Shape | Amount | Price\n Blue | Rectangular | 12 | 2\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Green | Rectangular | 6 | 1.3\n Red | Round | 4 | 1.3\n Green | Round | 2 | 1\n >>> marbles.sort(3) # the Price column\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Blue | Rectangular | 12 | 2\n >>> marbles.sort(3, distinct = True)\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Blue | Rectangular | 12 | 2\n '
column = self._get_column(column_or_label)
if distinct:
(_, row_numbers) = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::(- 1)])
return self.take(row_numbers) | Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2 | digital-assyriology-review/datascience/tables.py | sort | ds-modules/NESTUD-190A | 6 | python | def sort(self, column_or_label, descending=False, distinct=False):
'Return a Table of rows sorted according to the values in a column.\n\n Args:\n ``column_or_label``: the column whose values are used for sorting.\n\n ``descending``: if True, sorting will be in descending, rather than\n ascending order.\n\n ``distinct``: if True, repeated values in ``column_or_label`` will\n be omitted.\n\n Returns:\n An instance of ``Table`` containing rows sorted based on the values\n in ``column_or_label``.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.sort("Amount")\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Blue | Rectangular | 12 | 2\n >>> marbles.sort("Amount", descending = True)\n Color | Shape | Amount | Price\n Blue | Rectangular | 12 | 2\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Green | Rectangular | 6 | 1.3\n Red | Round | 4 | 1.3\n Green | Round | 2 | 1\n >>> marbles.sort(3) # the Price column\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Blue | Rectangular | 12 | 2\n >>> marbles.sort(3, distinct = True)\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Blue | Rectangular | 12 | 2\n '
column = self._get_column(column_or_label)
if distinct:
(_, row_numbers) = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::(- 1)])
return self.take(row_numbers) | def sort(self, column_or_label, descending=False, distinct=False):
'Return a Table of rows sorted according to the values in a column.\n\n Args:\n ``column_or_label``: the column whose values are used for sorting.\n\n ``descending``: if True, sorting will be in descending, rather than\n ascending order.\n\n ``distinct``: if True, repeated values in ``column_or_label`` will\n be omitted.\n\n Returns:\n An instance of ``Table`` containing rows sorted based on the values\n in ``column_or_label``.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.sort("Amount")\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Blue | Rectangular | 12 | 2\n >>> marbles.sort("Amount", descending = True)\n Color | Shape | Amount | Price\n Blue | Rectangular | 12 | 2\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Green | Rectangular | 6 | 1.3\n Red | Round | 4 | 1.3\n Green | Round | 2 | 1\n >>> marbles.sort(3) # the Price column\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Blue | Rectangular | 12 | 2\n >>> marbles.sort(3, distinct = True)\n Color | Shape | Amount | Price\n Green | Round | 2 | 1\n Red | Round | 4 | 1.3\n Green | Rectangular | 9 | 1.4\n Red | Round | 7 | 1.75\n Blue | Rectangular | 12 | 2\n '
column = self._get_column(column_or_label)
if distinct:
(_, row_numbers) = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::(- 1)])
return self.take(row_numbers)<|docstring|>Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2<|endoftext|> |
8dd1b9f42d7f10dbdb1c57f7ed0296c7a83c5c1f97c6fe96cbb5b0f86195f72f | def group(self, column_or_label, collect=None):
'Group rows by unique values in a column; count or aggregate others.\n\n Args:\n ``column_or_label``: values to group (column label or index, or array)\n\n ``collect``: a function applied to values in other columns for each group\n\n Returns:\n A Table with each row corresponding to a unique value in ``column_or_label``,\n where the first column contains the unique values from ``column_or_label``, and the\n second contains counts for each of the unique values. If ``collect`` is\n provided, a Table is returned with all original columns, each containing values\n calculated by first grouping rows according to ``column_or_label``, then applying\n ``collect`` to each set of grouped values in the other columns.\n\n Note:\n The grouped column will appear first in the result table. If ``collect`` does not\n accept arguments with one of the column types, that column will be empty in the resulting\n table.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.group("Color") # just gives counts\n Color | count\n Blue | 1\n Green | 3\n Red | 2\n >>> marbles.group("Color", max) # takes the max of each grouping, in each column\n Color | Shape max | Amount max | Price max\n Blue | Rectangular | 12 | 2\n Green | Round | 9 | 1.4\n Red | Round | 7 | 1.75\n >>> marbles.group("Shape", sum) # sum doesn\'t make sense for strings\n Shape | Color sum | Amount sum | Price sum\n Rectangular | | 27 | 4.7\n Round | | 13 | 4.05\n '
if (_is_non_string_iterable(column_or_label) and (len(column_or_label) != self._num_rows)):
return self.groups(column_or_label, collect)
self = self.copy(shallow=True)
collect = _zero_on_type_error(collect)
column = self._get_column(column_or_label)
if (isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral)):
column_label = self._as_label(column_or_label)
del self[column_label]
else:
column_label = self._unused_label('group')
groups = self.index_by(column)
keys = sorted(groups.keys())
if (collect is None):
labels = [column_label, ('count' if (column_label != 'count') else self._unused_label('count'))]
columns = [keys, [len(groups[k]) for k in keys]]
else:
(columns, labels) = ([], [])
for (i, label) in enumerate(self.labels):
labels.append(_collected_label(collect, label))
c = [collect(np.array([row[i] for row in groups[k]])) for k in keys]
columns.append(c)
grouped = type(self)().with_columns(zip(labels, columns))
assert (column_label == self._unused_label(column_label))
grouped[column_label] = keys
grouped.move_to_start(column_label)
return grouped | Group rows by unique values in a column; count or aggregate others.
Args:
``column_or_label``: values to group (column label or index, or array)
``collect``: a function applied to values in other columns for each group
Returns:
A Table with each row corresponding to a unique value in ``column_or_label``,
where the first column contains the unique values from ``column_or_label``, and the
second contains counts for each of the unique values. If ``collect`` is
provided, a Table is returned with all original columns, each containing values
calculated by first grouping rows according to ``column_or_label``, then applying
``collect`` to each set of grouped values in the other columns.
Note:
The grouped column will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.group("Color") # just gives counts
Color | count
Blue | 1
Green | 3
Red | 2
>>> marbles.group("Color", max) # takes the max of each grouping, in each column
Color | Shape max | Amount max | Price max
Blue | Rectangular | 12 | 2
Green | Round | 9 | 1.4
Red | Round | 7 | 1.75
>>> marbles.group("Shape", sum) # sum doesn't make sense for strings
Shape | Color sum | Amount sum | Price sum
Rectangular | | 27 | 4.7
Round | | 13 | 4.05 | digital-assyriology-review/datascience/tables.py | group | ds-modules/NESTUD-190A | 6 | python | def group(self, column_or_label, collect=None):
'Group rows by unique values in a column; count or aggregate others.\n\n Args:\n ``column_or_label``: values to group (column label or index, or array)\n\n ``collect``: a function applied to values in other columns for each group\n\n Returns:\n A Table with each row corresponding to a unique value in ``column_or_label``,\n where the first column contains the unique values from ``column_or_label``, and the\n second contains counts for each of the unique values. If ``collect`` is\n provided, a Table is returned with all original columns, each containing values\n calculated by first grouping rows according to ``column_or_label``, then applying\n ``collect`` to each set of grouped values in the other columns.\n\n Note:\n The grouped column will appear first in the result table. If ``collect`` does not\n accept arguments with one of the column types, that column will be empty in the resulting\n table.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.group("Color") # just gives counts\n Color | count\n Blue | 1\n Green | 3\n Red | 2\n >>> marbles.group("Color", max) # takes the max of each grouping, in each column\n Color | Shape max | Amount max | Price max\n Blue | Rectangular | 12 | 2\n Green | Round | 9 | 1.4\n Red | Round | 7 | 1.75\n >>> marbles.group("Shape", sum) # sum doesn\'t make sense for strings\n Shape | Color sum | Amount sum | Price sum\n Rectangular | | 27 | 4.7\n Round | | 13 | 4.05\n '
if (_is_non_string_iterable(column_or_label) and (len(column_or_label) != self._num_rows)):
return self.groups(column_or_label, collect)
self = self.copy(shallow=True)
collect = _zero_on_type_error(collect)
column = self._get_column(column_or_label)
if (isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral)):
column_label = self._as_label(column_or_label)
del self[column_label]
else:
column_label = self._unused_label('group')
groups = self.index_by(column)
keys = sorted(groups.keys())
if (collect is None):
labels = [column_label, ('count' if (column_label != 'count') else self._unused_label('count'))]
columns = [keys, [len(groups[k]) for k in keys]]
else:
(columns, labels) = ([], [])
for (i, label) in enumerate(self.labels):
labels.append(_collected_label(collect, label))
c = [collect(np.array([row[i] for row in groups[k]])) for k in keys]
columns.append(c)
grouped = type(self)().with_columns(zip(labels, columns))
assert (column_label == self._unused_label(column_label))
grouped[column_label] = keys
grouped.move_to_start(column_label)
return grouped | def group(self, column_or_label, collect=None):
'Group rows by unique values in a column; count or aggregate others.\n\n Args:\n ``column_or_label``: values to group (column label or index, or array)\n\n ``collect``: a function applied to values in other columns for each group\n\n Returns:\n A Table with each row corresponding to a unique value in ``column_or_label``,\n where the first column contains the unique values from ``column_or_label``, and the\n second contains counts for each of the unique values. If ``collect`` is\n provided, a Table is returned with all original columns, each containing values\n calculated by first grouping rows according to ``column_or_label``, then applying\n ``collect`` to each set of grouped values in the other columns.\n\n Note:\n The grouped column will appear first in the result table. If ``collect`` does not\n accept arguments with one of the column types, that column will be empty in the resulting\n table.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.group("Color") # just gives counts\n Color | count\n Blue | 1\n Green | 3\n Red | 2\n >>> marbles.group("Color", max) # takes the max of each grouping, in each column\n Color | Shape max | Amount max | Price max\n Blue | Rectangular | 12 | 2\n Green | Round | 9 | 1.4\n Red | Round | 7 | 1.75\n >>> marbles.group("Shape", sum) # sum doesn\'t make sense for strings\n Shape | Color sum | Amount sum | Price sum\n Rectangular | | 27 | 4.7\n Round | | 13 | 4.05\n '
if (_is_non_string_iterable(column_or_label) and (len(column_or_label) != self._num_rows)):
return self.groups(column_or_label, collect)
self = self.copy(shallow=True)
collect = _zero_on_type_error(collect)
column = self._get_column(column_or_label)
if (isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral)):
column_label = self._as_label(column_or_label)
del self[column_label]
else:
column_label = self._unused_label('group')
groups = self.index_by(column)
keys = sorted(groups.keys())
if (collect is None):
labels = [column_label, ('count' if (column_label != 'count') else self._unused_label('count'))]
columns = [keys, [len(groups[k]) for k in keys]]
else:
(columns, labels) = ([], [])
for (i, label) in enumerate(self.labels):
labels.append(_collected_label(collect, label))
c = [collect(np.array([row[i] for row in groups[k]])) for k in keys]
columns.append(c)
grouped = type(self)().with_columns(zip(labels, columns))
assert (column_label == self._unused_label(column_label))
grouped[column_label] = keys
grouped.move_to_start(column_label)
return grouped<|docstring|>Group rows by unique values in a column; count or aggregate others.
Args:
``column_or_label``: values to group (column label or index, or array)
``collect``: a function applied to values in other columns for each group
Returns:
A Table with each row corresponding to a unique value in ``column_or_label``,
where the first column contains the unique values from ``column_or_label``, and the
second contains counts for each of the unique values. If ``collect`` is
provided, a Table is returned with all original columns, each containing values
calculated by first grouping rows according to ``column_or_label``, then applying
``collect`` to each set of grouped values in the other columns.
Note:
The grouped column will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.group("Color") # just gives counts
Color | count
Blue | 1
Green | 3
Red | 2
>>> marbles.group("Color", max) # takes the max of each grouping, in each column
Color | Shape max | Amount max | Price max
Blue | Rectangular | 12 | 2
Green | Round | 9 | 1.4
Red | Round | 7 | 1.75
>>> marbles.group("Shape", sum) # sum doesn't make sense for strings
Shape | Color sum | Amount sum | Price sum
Rectangular | | 27 | 4.7
Round | | 13 | 4.05<|endoftext|> |
26eafe84c2f6158d10b05e5b9912b0752a2493e7b9747f75311e7cd2fbaba542 | def groups(self, labels, collect=None):
'Group rows by multiple columns, count or aggregate others.\n\n Args:\n ``labels``: list of column names (or indices) to group on\n\n ``collect``: a function applied to values in other columns for each group\n\n Returns: A Table with each row corresponding to a unique combination of values in\n the columns specified in ``labels``, where the first columns are those\n specified in ``labels``, followed by a column of counts for each of the unique\n values. If ``collect`` is provided, a Table is returned with all original\n columns, each containing values calculated by first grouping rows according to\n to values in the ``labels`` column, then applying ``collect`` to each set of\n grouped values in the other columns.\n\n Note:\n The grouped columns will appear first in the result table. If ``collect`` does not\n accept arguments with one of the column types, that column will be empty in the resulting\n table.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.groups(["Color", "Shape"])\n Color | Shape | count\n Blue | Rectangular | 1\n Green | Rectangular | 2\n Green | Round | 1\n Red | Round | 2\n >>> marbles.groups(["Color", "Shape"], sum)\n Color | Shape | Amount sum | Price sum\n Blue | Rectangular | 12 | 2\n Green | Rectangular | 15 | 2.7\n Green | Round | 2 | 1\n Red | Round | 11 | 3.05\n '
if (not _is_non_string_iterable(labels)):
return self.group(labels, collect=collect)
collect = _zero_on_type_error(collect)
columns = []
labels = self._as_labels(labels)
for label in labels:
if (label not in self.labels):
raise ValueError('All labels must exist in the table')
columns.append(self._get_column(label))
grouped = self.group(list(zip(*columns)), (lambda s: s))
grouped._columns.popitem(last=False)
counts = [len(v) for v in grouped[0]]
for label in labels[::(- 1)]:
grouped[label] = grouped.apply(_assert_same, label)
grouped.move_to_start(label)
if (collect is None):
count = ('count' if ('count' not in labels) else self._unused_label('count'))
return grouped.select(labels).with_column(count, counts)
else:
for label in grouped.labels:
if (label in labels):
continue
column = [collect(v) for v in grouped[label]]
del grouped[label]
grouped[_collected_label(collect, label)] = column
return grouped | Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
to values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05 | digital-assyriology-review/datascience/tables.py | groups | ds-modules/NESTUD-190A | 6 | python | def groups(self, labels, collect=None):
'Group rows by multiple columns, count or aggregate others.\n\n Args:\n ``labels``: list of column names (or indices) to group on\n\n ``collect``: a function applied to values in other columns for each group\n\n Returns: A Table with each row corresponding to a unique combination of values in\n the columns specified in ``labels``, where the first columns are those\n specified in ``labels``, followed by a column of counts for each of the unique\n values. If ``collect`` is provided, a Table is returned with all original\n columns, each containing values calculated by first grouping rows according to\n to values in the ``labels`` column, then applying ``collect`` to each set of\n grouped values in the other columns.\n\n Note:\n The grouped columns will appear first in the result table. If ``collect`` does not\n accept arguments with one of the column types, that column will be empty in the resulting\n table.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.groups(["Color", "Shape"])\n Color | Shape | count\n Blue | Rectangular | 1\n Green | Rectangular | 2\n Green | Round | 1\n Red | Round | 2\n >>> marbles.groups(["Color", "Shape"], sum)\n Color | Shape | Amount sum | Price sum\n Blue | Rectangular | 12 | 2\n Green | Rectangular | 15 | 2.7\n Green | Round | 2 | 1\n Red | Round | 11 | 3.05\n '
if (not _is_non_string_iterable(labels)):
return self.group(labels, collect=collect)
collect = _zero_on_type_error(collect)
columns = []
labels = self._as_labels(labels)
for label in labels:
if (label not in self.labels):
raise ValueError('All labels must exist in the table')
columns.append(self._get_column(label))
grouped = self.group(list(zip(*columns)), (lambda s: s))
grouped._columns.popitem(last=False)
counts = [len(v) for v in grouped[0]]
for label in labels[::(- 1)]:
grouped[label] = grouped.apply(_assert_same, label)
grouped.move_to_start(label)
if (collect is None):
count = ('count' if ('count' not in labels) else self._unused_label('count'))
return grouped.select(labels).with_column(count, counts)
else:
for label in grouped.labels:
if (label in labels):
continue
column = [collect(v) for v in grouped[label]]
del grouped[label]
grouped[_collected_label(collect, label)] = column
return grouped | def groups(self, labels, collect=None):
'Group rows by multiple columns, count or aggregate others.\n\n Args:\n ``labels``: list of column names (or indices) to group on\n\n ``collect``: a function applied to values in other columns for each group\n\n Returns: A Table with each row corresponding to a unique combination of values in\n the columns specified in ``labels``, where the first columns are those\n specified in ``labels``, followed by a column of counts for each of the unique\n values. If ``collect`` is provided, a Table is returned with all original\n columns, each containing values calculated by first grouping rows according to\n to values in the ``labels`` column, then applying ``collect`` to each set of\n grouped values in the other columns.\n\n Note:\n The grouped columns will appear first in the result table. If ``collect`` does not\n accept arguments with one of the column types, that column will be empty in the resulting\n table.\n\n >>> marbles = Table().with_columns(\n ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),\n ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),\n ... "Amount", make_array(4, 6, 12, 7, 9, 2),\n ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))\n >>> marbles\n Color | Shape | Amount | Price\n Red | Round | 4 | 1.3\n Green | Rectangular | 6 | 1.3\n Blue | Rectangular | 12 | 2\n Red | Round | 7 | 1.75\n Green | Rectangular | 9 | 1.4\n Green | Round | 2 | 1\n >>> marbles.groups(["Color", "Shape"])\n Color | Shape | count\n Blue | Rectangular | 1\n Green | Rectangular | 2\n Green | Round | 1\n Red | Round | 2\n >>> marbles.groups(["Color", "Shape"], sum)\n Color | Shape | Amount sum | Price sum\n Blue | Rectangular | 12 | 2\n Green | Rectangular | 15 | 2.7\n Green | Round | 2 | 1\n Red | Round | 11 | 3.05\n '
if (not _is_non_string_iterable(labels)):
return self.group(labels, collect=collect)
collect = _zero_on_type_error(collect)
columns = []
labels = self._as_labels(labels)
for label in labels:
if (label not in self.labels):
raise ValueError('All labels must exist in the table')
columns.append(self._get_column(label))
grouped = self.group(list(zip(*columns)), (lambda s: s))
grouped._columns.popitem(last=False)
counts = [len(v) for v in grouped[0]]
for label in labels[::(- 1)]:
grouped[label] = grouped.apply(_assert_same, label)
grouped.move_to_start(label)
if (collect is None):
count = ('count' if ('count' not in labels) else self._unused_label('count'))
return grouped.select(labels).with_column(count, counts)
else:
for label in grouped.labels:
if (label in labels):
continue
column = [collect(v) for v in grouped[label]]
del grouped[label]
grouped[_collected_label(collect, label)] = column
return grouped<|docstring|>Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
to values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05<|endoftext|> |
7b29546f991264a8fdf010d7da1e7f601069eb52496ba5cef51c8e3a6ee4b6c3 | def pivot(self, columns, rows, values=None, collect=None, zero=None):
"Generate a table with a column for each unique value in ``columns``,\n with rows for each unique value in ``rows``. Each row counts/aggregates\n the values that match both row and column based on ``collect``.\n\n Args:\n ``columns`` -- a single column label or index, (``str`` or ``int``),\n used to create new columns, based on its unique values.\n ``rows`` -- row labels or indices, (``str`` or ``int`` or list),\n used to create new rows based on it's unique values.\n ``values`` -- column label in table for use in aggregation.\n Default None.\n ``collect`` -- aggregation function, used to group ``values``\n over row-column combinations. Default None.\n ``zero`` -- zero value for non-existent row-column combinations.\n\n Raises:\n TypeError -- if ``collect`` is passed in and ``values`` is not,\n vice versa.\n\n Returns:\n New pivot table, with row-column combinations, as specified, with\n aggregated ``values`` by ``collect`` across the intersection of\n ``columns`` and ``rows``. Simple counts provided if values and\n collect are None, as default.\n\n >>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95\n ... , 40, 80, 45), 'survival', make_array(0,0,0,1, 1, 1, 0, 1),\n ... 'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),\n ... 'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))\n >>> titanic\n age | survival | gender | prediction\n 21 | 0 | M | 0\n 44 | 0 | M | 0\n 56 | 0 | M | 1\n 89 | 1 | M | 1\n 95 | 1 | F | 0\n 40 | 1 | F | 1\n 80 | 0 | F | 0\n 45 | 1 | F | 1\n >>> titanic.pivot('survival', 'gender')\n gender | 0 | 1\n F | 1 | 3\n M | 3 | 1\n >>> titanic.pivot('prediction', 'gender')\n gender | 0 | 1\n F | 2 | 2\n M | 2 | 2\n >>> titanic.pivot('survival', 'gender', values='age', collect = np.mean)\n gender | 0 | 1\n F | 80 | 60\n M | 40.3333 | 89\n >>> titanic.pivot('survival', make_array('prediction', 'gender'))\n prediction | gender | 0 | 1\n 0 | F | 1 | 1\n 0 | M | 2 | 0\n 1 | F | 0 | 2\n 1 | M | 1 | 1\n >>> titanic.pivot('survival', 'gender', values = 'age')\n Traceback (most recent call last):\n ...\n TypeError: values requires collect to be specified\n >>> titanic.pivot('survival', 'gender', collect = np.mean)\n Traceback (most recent call last):\n ...\n TypeError: collect requires values to be specified\n "
if ((collect is not None) and (values is None)):
raise TypeError('collect requires values to be specified')
if ((values is not None) and (collect is None)):
raise TypeError('values requires collect to be specified')
columns = self._as_label(columns)
rows = self._as_labels(rows)
if (values is None):
selected = self.select(([columns] + rows))
else:
selected = self.select(([columns, values] + rows))
grouped = selected.groups(([columns] + rows), collect)
rows_values = sorted(list(set(self.select(rows).rows)))
pivoted = type(self)(rows).with_rows(rows_values)
by_columns = grouped.index_by(columns)
for label in sorted(by_columns):
tuples = [t[1:] for t in by_columns[label]]
column = _fill_with_zeros(rows_values, tuples, zero)
pivot = self._unused_label(str(label))
pivoted[pivot] = column
return pivoted | Generate a table with a column for each unique value in ``columns``,
with rows for each unique value in ``rows``. Each row counts/aggregates
the values that match both row and column based on ``collect``.
Args:
``columns`` -- a single column label or index, (``str`` or ``int``),
used to create new columns, based on its unique values.
``rows`` -- row labels or indices, (``str`` or ``int`` or list),
used to create new rows based on it's unique values.
``values`` -- column label in table for use in aggregation.
Default None.
``collect`` -- aggregation function, used to group ``values``
over row-column combinations. Default None.
``zero`` -- zero value for non-existent row-column combinations.
Raises:
TypeError -- if ``collect`` is passed in and ``values`` is not,
vice versa.
Returns:
New pivot table, with row-column combinations, as specified, with
aggregated ``values`` by ``collect`` across the intersection of
``columns`` and ``rows``. Simple counts provided if values and
collect are None, as default.
>>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95
... , 40, 80, 45), 'survival', make_array(0,0,0,1, 1, 1, 0, 1),
... 'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),
... 'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))
>>> titanic
age | survival | gender | prediction
21 | 0 | M | 0
44 | 0 | M | 0
56 | 0 | M | 1
89 | 1 | M | 1
95 | 1 | F | 0
40 | 1 | F | 1
80 | 0 | F | 0
45 | 1 | F | 1
>>> titanic.pivot('survival', 'gender')
gender | 0 | 1
F | 1 | 3
M | 3 | 1
>>> titanic.pivot('prediction', 'gender')
gender | 0 | 1
F | 2 | 2
M | 2 | 2
>>> titanic.pivot('survival', 'gender', values='age', collect = np.mean)
gender | 0 | 1
F | 80 | 60
M | 40.3333 | 89
>>> titanic.pivot('survival', make_array('prediction', 'gender'))
prediction | gender | 0 | 1
0 | F | 1 | 1
0 | M | 2 | 0
1 | F | 0 | 2
1 | M | 1 | 1
>>> titanic.pivot('survival', 'gender', values = 'age')
Traceback (most recent call last):
...
TypeError: values requires collect to be specified
>>> titanic.pivot('survival', 'gender', collect = np.mean)
Traceback (most recent call last):
...
TypeError: collect requires values to be specified | digital-assyriology-review/datascience/tables.py | pivot | ds-modules/NESTUD-190A | 6 | python | def pivot(self, columns, rows, values=None, collect=None, zero=None):
"Generate a table with a column for each unique value in ``columns``,\n with rows for each unique value in ``rows``. Each row counts/aggregates\n the values that match both row and column based on ``collect``.\n\n Args:\n ``columns`` -- a single column label or index, (``str`` or ``int``),\n used to create new columns, based on its unique values.\n ``rows`` -- row labels or indices, (``str`` or ``int`` or list),\n used to create new rows based on it's unique values.\n ``values`` -- column label in table for use in aggregation.\n Default None.\n ``collect`` -- aggregation function, used to group ``values``\n over row-column combinations. Default None.\n ``zero`` -- zero value for non-existent row-column combinations.\n\n Raises:\n TypeError -- if ``collect`` is passed in and ``values`` is not,\n vice versa.\n\n Returns:\n New pivot table, with row-column combinations, as specified, with\n aggregated ``values`` by ``collect`` across the intersection of\n ``columns`` and ``rows``. Simple counts provided if values and\n collect are None, as default.\n\n >>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95\n ... , 40, 80, 45), 'survival', make_array(0,0,0,1, 1, 1, 0, 1),\n ... 'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),\n ... 'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))\n >>> titanic\n age | survival | gender | prediction\n 21 | 0 | M | 0\n 44 | 0 | M | 0\n 56 | 0 | M | 1\n 89 | 1 | M | 1\n 95 | 1 | F | 0\n 40 | 1 | F | 1\n 80 | 0 | F | 0\n 45 | 1 | F | 1\n >>> titanic.pivot('survival', 'gender')\n gender | 0 | 1\n F | 1 | 3\n M | 3 | 1\n >>> titanic.pivot('prediction', 'gender')\n gender | 0 | 1\n F | 2 | 2\n M | 2 | 2\n >>> titanic.pivot('survival', 'gender', values='age', collect = np.mean)\n gender | 0 | 1\n F | 80 | 60\n M | 40.3333 | 89\n >>> titanic.pivot('survival', make_array('prediction', 'gender'))\n prediction | gender | 0 | 1\n 0 | F | 1 | 1\n 0 | M | 2 | 0\n 1 | F | 0 | 2\n 1 | M | 1 | 1\n >>> titanic.pivot('survival', 'gender', values = 'age')\n Traceback (most recent call last):\n ...\n TypeError: values requires collect to be specified\n >>> titanic.pivot('survival', 'gender', collect = np.mean)\n Traceback (most recent call last):\n ...\n TypeError: collect requires values to be specified\n "
if ((collect is not None) and (values is None)):
raise TypeError('collect requires values to be specified')
if ((values is not None) and (collect is None)):
raise TypeError('values requires collect to be specified')
columns = self._as_label(columns)
rows = self._as_labels(rows)
if (values is None):
selected = self.select(([columns] + rows))
else:
selected = self.select(([columns, values] + rows))
grouped = selected.groups(([columns] + rows), collect)
rows_values = sorted(list(set(self.select(rows).rows)))
pivoted = type(self)(rows).with_rows(rows_values)
by_columns = grouped.index_by(columns)
for label in sorted(by_columns):
tuples = [t[1:] for t in by_columns[label]]
column = _fill_with_zeros(rows_values, tuples, zero)
pivot = self._unused_label(str(label))
pivoted[pivot] = column
return pivoted | def pivot(self, columns, rows, values=None, collect=None, zero=None):
"Generate a table with a column for each unique value in ``columns``,\n with rows for each unique value in ``rows``. Each row counts/aggregates\n the values that match both row and column based on ``collect``.\n\n Args:\n ``columns`` -- a single column label or index, (``str`` or ``int``),\n used to create new columns, based on its unique values.\n ``rows`` -- row labels or indices, (``str`` or ``int`` or list),\n used to create new rows based on it's unique values.\n ``values`` -- column label in table for use in aggregation.\n Default None.\n ``collect`` -- aggregation function, used to group ``values``\n over row-column combinations. Default None.\n ``zero`` -- zero value for non-existent row-column combinations.\n\n Raises:\n TypeError -- if ``collect`` is passed in and ``values`` is not,\n vice versa.\n\n Returns:\n New pivot table, with row-column combinations, as specified, with\n aggregated ``values`` by ``collect`` across the intersection of\n ``columns`` and ``rows``. Simple counts provided if values and\n collect are None, as default.\n\n >>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95\n ... , 40, 80, 45), 'survival', make_array(0,0,0,1, 1, 1, 0, 1),\n ... 'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),\n ... 'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))\n >>> titanic\n age | survival | gender | prediction\n 21 | 0 | M | 0\n 44 | 0 | M | 0\n 56 | 0 | M | 1\n 89 | 1 | M | 1\n 95 | 1 | F | 0\n 40 | 1 | F | 1\n 80 | 0 | F | 0\n 45 | 1 | F | 1\n >>> titanic.pivot('survival', 'gender')\n gender | 0 | 1\n F | 1 | 3\n M | 3 | 1\n >>> titanic.pivot('prediction', 'gender')\n gender | 0 | 1\n F | 2 | 2\n M | 2 | 2\n >>> titanic.pivot('survival', 'gender', values='age', collect = np.mean)\n gender | 0 | 1\n F | 80 | 60\n M | 40.3333 | 89\n >>> titanic.pivot('survival', make_array('prediction', 'gender'))\n prediction | gender | 0 | 1\n 0 | F | 1 | 1\n 0 | M | 2 | 0\n 1 | F | 0 | 2\n 1 | M | 1 | 1\n >>> titanic.pivot('survival', 'gender', values = 'age')\n Traceback (most recent call last):\n ...\n TypeError: values requires collect to be specified\n >>> titanic.pivot('survival', 'gender', collect = np.mean)\n Traceback (most recent call last):\n ...\n TypeError: collect requires values to be specified\n "
if ((collect is not None) and (values is None)):
raise TypeError('collect requires values to be specified')
if ((values is not None) and (collect is None)):
raise TypeError('values requires collect to be specified')
columns = self._as_label(columns)
rows = self._as_labels(rows)
if (values is None):
selected = self.select(([columns] + rows))
else:
selected = self.select(([columns, values] + rows))
grouped = selected.groups(([columns] + rows), collect)
rows_values = sorted(list(set(self.select(rows).rows)))
pivoted = type(self)(rows).with_rows(rows_values)
by_columns = grouped.index_by(columns)
for label in sorted(by_columns):
tuples = [t[1:] for t in by_columns[label]]
column = _fill_with_zeros(rows_values, tuples, zero)
pivot = self._unused_label(str(label))
pivoted[pivot] = column
return pivoted<|docstring|>Generate a table with a column for each unique value in ``columns``,
with rows for each unique value in ``rows``. Each row counts/aggregates
the values that match both row and column based on ``collect``.
Args:
``columns`` -- a single column label or index, (``str`` or ``int``),
used to create new columns, based on its unique values.
``rows`` -- row labels or indices, (``str`` or ``int`` or list),
used to create new rows based on it's unique values.
``values`` -- column label in table for use in aggregation.
Default None.
``collect`` -- aggregation function, used to group ``values``
over row-column combinations. Default None.
``zero`` -- zero value for non-existent row-column combinations.
Raises:
TypeError -- if ``collect`` is passed in and ``values`` is not,
vice versa.
Returns:
New pivot table, with row-column combinations, as specified, with
aggregated ``values`` by ``collect`` across the intersection of
``columns`` and ``rows``. Simple counts provided if values and
collect are None, as default.
>>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95
... , 40, 80, 45), 'survival', make_array(0,0,0,1, 1, 1, 0, 1),
... 'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),
... 'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))
>>> titanic
age | survival | gender | prediction
21 | 0 | M | 0
44 | 0 | M | 0
56 | 0 | M | 1
89 | 1 | M | 1
95 | 1 | F | 0
40 | 1 | F | 1
80 | 0 | F | 0
45 | 1 | F | 1
>>> titanic.pivot('survival', 'gender')
gender | 0 | 1
F | 1 | 3
M | 3 | 1
>>> titanic.pivot('prediction', 'gender')
gender | 0 | 1
F | 2 | 2
M | 2 | 2
>>> titanic.pivot('survival', 'gender', values='age', collect = np.mean)
gender | 0 | 1
F | 80 | 60
M | 40.3333 | 89
>>> titanic.pivot('survival', make_array('prediction', 'gender'))
prediction | gender | 0 | 1
0 | F | 1 | 1
0 | M | 2 | 0
1 | F | 0 | 2
1 | M | 1 | 1
>>> titanic.pivot('survival', 'gender', values = 'age')
Traceback (most recent call last):
...
TypeError: values requires collect to be specified
>>> titanic.pivot('survival', 'gender', collect = np.mean)
Traceback (most recent call last):
...
TypeError: collect requires values to be specified<|endoftext|> |
16dee0f26e4747d32332af68d4bc2ecb84fc6ec1f9795ee8ecdfc40609d3f964 | def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs):
'Form a table with columns formed by the unique tuples in pivot_columns\n containing counts per bin of the values associated with each tuple in the value_column.\n\n By default, bins are chosen to contain all values in the value_column. The\n following named arguments from numpy.histogram can be applied to\n specialize bin widths:\n\n Args:\n ``bins`` (int or sequence of scalars): If bins is an int,\n it defines the number of equal-width bins in the given range\n (10, by default). If bins is a sequence, it defines the bin\n edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n ``range`` ((float, float)): The lower and upper range of\n the bins. If not provided, range contains all values in the\n table. Values outside the range are ignored.\n\n ``normed`` (bool): If False, the result will contain the number of\n samples in each bin. If True, the result is normalized such that\n the integral over the range is 1.\n '
pivot_columns = _as_labels(pivot_columns)
selected = self.select((pivot_columns + [value_column]))
grouped = selected.groups(pivot_columns, collect=(lambda x: x))
if (bins is not None):
vargs['bins'] = bins
(_, rbins) = np.histogram(self[value_column], **vargs)
vargs['bins'] = rbins
binned = type(self)().with_column('bin', rbins)
for group in grouped.rows:
col_label = '-'.join(map(str, group[0:(- 1)]))
col_vals = group[(- 1)]
(counts, _) = np.histogram(col_vals, **vargs)
binned[col_label] = np.append(counts, 0)
return binned | Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1. | digital-assyriology-review/datascience/tables.py | pivot_bin | ds-modules/NESTUD-190A | 6 | python | def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs):
'Form a table with columns formed by the unique tuples in pivot_columns\n containing counts per bin of the values associated with each tuple in the value_column.\n\n By default, bins are chosen to contain all values in the value_column. The\n following named arguments from numpy.histogram can be applied to\n specialize bin widths:\n\n Args:\n ``bins`` (int or sequence of scalars): If bins is an int,\n it defines the number of equal-width bins in the given range\n (10, by default). If bins is a sequence, it defines the bin\n edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n ``range`` ((float, float)): The lower and upper range of\n the bins. If not provided, range contains all values in the\n table. Values outside the range are ignored.\n\n ``normed`` (bool): If False, the result will contain the number of\n samples in each bin. If True, the result is normalized such that\n the integral over the range is 1.\n '
pivot_columns = _as_labels(pivot_columns)
selected = self.select((pivot_columns + [value_column]))
grouped = selected.groups(pivot_columns, collect=(lambda x: x))
if (bins is not None):
vargs['bins'] = bins
(_, rbins) = np.histogram(self[value_column], **vargs)
vargs['bins'] = rbins
binned = type(self)().with_column('bin', rbins)
for group in grouped.rows:
col_label = '-'.join(map(str, group[0:(- 1)]))
col_vals = group[(- 1)]
(counts, _) = np.histogram(col_vals, **vargs)
binned[col_label] = np.append(counts, 0)
return binned | def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs):
'Form a table with columns formed by the unique tuples in pivot_columns\n containing counts per bin of the values associated with each tuple in the value_column.\n\n By default, bins are chosen to contain all values in the value_column. The\n following named arguments from numpy.histogram can be applied to\n specialize bin widths:\n\n Args:\n ``bins`` (int or sequence of scalars): If bins is an int,\n it defines the number of equal-width bins in the given range\n (10, by default). If bins is a sequence, it defines the bin\n edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n ``range`` ((float, float)): The lower and upper range of\n the bins. If not provided, range contains all values in the\n table. Values outside the range are ignored.\n\n ``normed`` (bool): If False, the result will contain the number of\n samples in each bin. If True, the result is normalized such that\n the integral over the range is 1.\n '
pivot_columns = _as_labels(pivot_columns)
selected = self.select((pivot_columns + [value_column]))
grouped = selected.groups(pivot_columns, collect=(lambda x: x))
if (bins is not None):
vargs['bins'] = bins
(_, rbins) = np.histogram(self[value_column], **vargs)
vargs['bins'] = rbins
binned = type(self)().with_column('bin', rbins)
for group in grouped.rows:
col_label = '-'.join(map(str, group[0:(- 1)]))
col_vals = group[(- 1)]
(counts, _) = np.histogram(col_vals, **vargs)
binned[col_label] = np.append(counts, 0)
return binned<|docstring|>Form a table with columns formed by the unique tuples in pivot_columns
containing counts per bin of the values associated with each tuple in the value_column.
By default, bins are chosen to contain all values in the value_column. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
Args:
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``normed`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is normalized such that
the integral over the range is 1.<|endoftext|> |
649941de072bbd3228819f832f320d8c083b83d17e7feab130aea7f5ba346c4f | def stack(self, key, labels=None):
'Takes k original columns and returns two columns, with col. 1 of\n all column names and col. 2 of all associated data.\n '
(rows, labels) = ([], (labels or self.labels))
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for (k, v) in row.asdict().items() if ((k != key) and (k in labels))]
return type(self)([key, 'column', 'value']).with_rows(rows) | Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data. | digital-assyriology-review/datascience/tables.py | stack | ds-modules/NESTUD-190A | 6 | python | def stack(self, key, labels=None):
'Takes k original columns and returns two columns, with col. 1 of\n all column names and col. 2 of all associated data.\n '
(rows, labels) = ([], (labels or self.labels))
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for (k, v) in row.asdict().items() if ((k != key) and (k in labels))]
return type(self)([key, 'column', 'value']).with_rows(rows) | def stack(self, key, labels=None):
'Takes k original columns and returns two columns, with col. 1 of\n all column names and col. 2 of all associated data.\n '
(rows, labels) = ([], (labels or self.labels))
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for (k, v) in row.asdict().items() if ((k != key) and (k in labels))]
return type(self)([key, 'column', 'value']).with_rows(rows)<|docstring|>Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data.<|endoftext|> |
699a32a8104a926a310d053ae485bd49b9f0287fd284e1231082ce03eb2033c2 | def join(self, column_label, other, other_label=None):
"Creates a new table with the columns of self and other, containing\n rows for all values of a column that appear in both tables.\n\n Args:\n ``column_label`` (``str``): label of column in self that is used to\n join rows of ``other``.\n ``other``: Table object to join with self on matching values of\n ``column_label``.\n\n Kwargs:\n ``other_label`` (``str``): default None, assumes ``column_label``.\n Otherwise in ``other`` used to join rows.\n\n Returns:\n New table self joined with ``other`` by matching values in\n ``column_label`` and ``other_label``. If the resulting join is\n empty, returns None. If a join value appears more than once in\n ``self``, each row with that value will appear in resulting join,\n but in ``other``, only the first row with that value will be used.\n\n >>> table = Table().with_columns('a', make_array(9, 3, 3, 1),\n ... 'b', make_array(1, 2, 2, 10),\n ... 'c', make_array(3, 4, 5, 6))\n >>> table\n a | b | c\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),\n ... 'd', make_array(1, 2, 2, 10),\n ... 'e', make_array(3, 4, 5, 6))\n >>> table2\n a | d | e\n 9 | 1 | 3\n 1 | 2 | 4\n 1 | 2 | 5\n 1 | 10 | 6\n >>> table.join('a', table2)\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2, 'a') # Equivalent to previous join\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2, 'd') # Repeat column labels relabeled\n a | b | c | a_2 | e\n 1 | 10 | 6 | 9 | 3\n >>> table2 #table2 has three rows with a = 1\n a | d | e\n 9 | 1 | 3\n 1 | 2 | 4\n 1 | 2 | 5\n 1 | 10 | 6\n >>> table #table has only one row with a = 1\n a | b | c\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table2.join('a', table) # When we join, we get all three rows in table2 where a = 1\n a | d | e | b | c\n 1 | 2 | 4 | 10 | 6\n 1 | 2 | 5 | 10 | 6\n 1 | 10 | 6 | 10 | 6\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2) # Opposite join only keeps first row in table2 with a = 1\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n "
if ((self.num_rows == 0) or (other.num_rows == 0)):
return None
if (not other_label):
other_label = column_label
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
joined_rows = []
for (label, rows) in self_rows.items():
if (label in other_rows):
other_row = other_rows[label][0]
joined_rows += [(row + other_row) for row in rows]
if (not joined_rows):
return None
self_labels = list(self.labels)
other_labels = [self._unused_label(s) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)((self_labels + other_labels)).with_rows(joined_rows)
joined._formats.update(self._formats)
for label in other._formats:
joined._formats[other_labels_map[label]] = other._formats[label]
del joined[other_labels_map[other_label]]
if ((column_label not in self._formats) and (other_label in other._formats)):
joined._formats[column_label] = other._formats[other_label]
return joined.move_to_start(column_label).sort(column_label) | Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label`` (``str``): label of column in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label`` (``str``): default None, assumes ``column_label``.
Otherwise in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None. If a join value appears more than once in
``self``, each row with that value will appear in resulting join,
but in ``other``, only the first row with that value will be used.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2.join('a', table) # When we join, we get all three rows in table2 where a = 1
a | d | e | b | c
1 | 2 | 4 | 10 | 6
1 | 2 | 5 | 10 | 6
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2) # Opposite join only keeps first row in table2 with a = 1
a | b | c | d | e
1 | 10 | 6 | 2 | 4
9 | 1 | 3 | 1 | 3 | digital-assyriology-review/datascience/tables.py | join | ds-modules/NESTUD-190A | 6 | python | def join(self, column_label, other, other_label=None):
"Creates a new table with the columns of self and other, containing\n rows for all values of a column that appear in both tables.\n\n Args:\n ``column_label`` (``str``): label of column in self that is used to\n join rows of ``other``.\n ``other``: Table object to join with self on matching values of\n ``column_label``.\n\n Kwargs:\n ``other_label`` (``str``): default None, assumes ``column_label``.\n Otherwise in ``other`` used to join rows.\n\n Returns:\n New table self joined with ``other`` by matching values in\n ``column_label`` and ``other_label``. If the resulting join is\n empty, returns None. If a join value appears more than once in\n ``self``, each row with that value will appear in resulting join,\n but in ``other``, only the first row with that value will be used.\n\n >>> table = Table().with_columns('a', make_array(9, 3, 3, 1),\n ... 'b', make_array(1, 2, 2, 10),\n ... 'c', make_array(3, 4, 5, 6))\n >>> table\n a | b | c\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),\n ... 'd', make_array(1, 2, 2, 10),\n ... 'e', make_array(3, 4, 5, 6))\n >>> table2\n a | d | e\n 9 | 1 | 3\n 1 | 2 | 4\n 1 | 2 | 5\n 1 | 10 | 6\n >>> table.join('a', table2)\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2, 'a') # Equivalent to previous join\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2, 'd') # Repeat column labels relabeled\n a | b | c | a_2 | e\n 1 | 10 | 6 | 9 | 3\n >>> table2 #table2 has three rows with a = 1\n a | d | e\n 9 | 1 | 3\n 1 | 2 | 4\n 1 | 2 | 5\n 1 | 10 | 6\n >>> table #table has only one row with a = 1\n a | b | c\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table2.join('a', table) # When we join, we get all three rows in table2 where a = 1\n a | d | e | b | c\n 1 | 2 | 4 | 10 | 6\n 1 | 2 | 5 | 10 | 6\n 1 | 10 | 6 | 10 | 6\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2) # Opposite join only keeps first row in table2 with a = 1\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n "
if ((self.num_rows == 0) or (other.num_rows == 0)):
return None
if (not other_label):
other_label = column_label
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
joined_rows = []
for (label, rows) in self_rows.items():
if (label in other_rows):
other_row = other_rows[label][0]
joined_rows += [(row + other_row) for row in rows]
if (not joined_rows):
return None
self_labels = list(self.labels)
other_labels = [self._unused_label(s) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)((self_labels + other_labels)).with_rows(joined_rows)
joined._formats.update(self._formats)
for label in other._formats:
joined._formats[other_labels_map[label]] = other._formats[label]
del joined[other_labels_map[other_label]]
if ((column_label not in self._formats) and (other_label in other._formats)):
joined._formats[column_label] = other._formats[other_label]
return joined.move_to_start(column_label).sort(column_label) | def join(self, column_label, other, other_label=None):
"Creates a new table with the columns of self and other, containing\n rows for all values of a column that appear in both tables.\n\n Args:\n ``column_label`` (``str``): label of column in self that is used to\n join rows of ``other``.\n ``other``: Table object to join with self on matching values of\n ``column_label``.\n\n Kwargs:\n ``other_label`` (``str``): default None, assumes ``column_label``.\n Otherwise in ``other`` used to join rows.\n\n Returns:\n New table self joined with ``other`` by matching values in\n ``column_label`` and ``other_label``. If the resulting join is\n empty, returns None. If a join value appears more than once in\n ``self``, each row with that value will appear in resulting join,\n but in ``other``, only the first row with that value will be used.\n\n >>> table = Table().with_columns('a', make_array(9, 3, 3, 1),\n ... 'b', make_array(1, 2, 2, 10),\n ... 'c', make_array(3, 4, 5, 6))\n >>> table\n a | b | c\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),\n ... 'd', make_array(1, 2, 2, 10),\n ... 'e', make_array(3, 4, 5, 6))\n >>> table2\n a | d | e\n 9 | 1 | 3\n 1 | 2 | 4\n 1 | 2 | 5\n 1 | 10 | 6\n >>> table.join('a', table2)\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2, 'a') # Equivalent to previous join\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2, 'd') # Repeat column labels relabeled\n a | b | c | a_2 | e\n 1 | 10 | 6 | 9 | 3\n >>> table2 #table2 has three rows with a = 1\n a | d | e\n 9 | 1 | 3\n 1 | 2 | 4\n 1 | 2 | 5\n 1 | 10 | 6\n >>> table #table has only one row with a = 1\n a | b | c\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table2.join('a', table) # When we join, we get all three rows in table2 where a = 1\n a | d | e | b | c\n 1 | 2 | 4 | 10 | 6\n 1 | 2 | 5 | 10 | 6\n 1 | 10 | 6 | 10 | 6\n 9 | 1 | 3 | 1 | 3\n >>> table.join('a', table2) # Opposite join only keeps first row in table2 with a = 1\n a | b | c | d | e\n 1 | 10 | 6 | 2 | 4\n 9 | 1 | 3 | 1 | 3\n "
if ((self.num_rows == 0) or (other.num_rows == 0)):
return None
if (not other_label):
other_label = column_label
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
joined_rows = []
for (label, rows) in self_rows.items():
if (label in other_rows):
other_row = other_rows[label][0]
joined_rows += [(row + other_row) for row in rows]
if (not joined_rows):
return None
self_labels = list(self.labels)
other_labels = [self._unused_label(s) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)((self_labels + other_labels)).with_rows(joined_rows)
joined._formats.update(self._formats)
for label in other._formats:
joined._formats[other_labels_map[label]] = other._formats[label]
del joined[other_labels_map[other_label]]
if ((column_label not in self._formats) and (other_label in other._formats)):
joined._formats[column_label] = other._formats[other_label]
return joined.move_to_start(column_label).sort(column_label)<|docstring|>Creates a new table with the columns of self and other, containing
rows for all values of a column that appear in both tables.
Args:
``column_label`` (``str``): label of column in self that is used to
join rows of ``other``.
``other``: Table object to join with self on matching values of
``column_label``.
Kwargs:
``other_label`` (``str``): default None, assumes ``column_label``.
Otherwise in ``other`` used to join rows.
Returns:
New table self joined with ``other`` by matching values in
``column_label`` and ``other_label``. If the resulting join is
empty, returns None. If a join value appears more than once in
``self``, each row with that value will appear in resulting join,
but in ``other``, only the first row with that value will be used.
>>> table = Table().with_columns('a', make_array(9, 3, 3, 1),
... 'b', make_array(1, 2, 2, 10),
... 'c', make_array(3, 4, 5, 6))
>>> table
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2 = Table().with_columns( 'a', make_array(9, 1, 1, 1),
... 'd', make_array(1, 2, 2, 10),
... 'e', make_array(3, 4, 5, 6))
>>> table2
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table.join('a', table2)
a | b | c | d | e
1 | 10 | 6 | 2 | 4
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'a') # Equivalent to previous join
a | b | c | d | e
1 | 10 | 6 | 2 | 4
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2, 'd') # Repeat column labels relabeled
a | b | c | a_2 | e
1 | 10 | 6 | 9 | 3
>>> table2 #table2 has three rows with a = 1
a | d | e
9 | 1 | 3
1 | 2 | 4
1 | 2 | 5
1 | 10 | 6
>>> table #table has only one row with a = 1
a | b | c
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table2.join('a', table) # When we join, we get all three rows in table2 where a = 1
a | d | e | b | c
1 | 2 | 4 | 10 | 6
1 | 2 | 5 | 10 | 6
1 | 10 | 6 | 10 | 6
9 | 1 | 3 | 1 | 3
>>> table.join('a', table2) # Opposite join only keeps first row in table2 with a = 1
a | b | c | d | e
1 | 10 | 6 | 2 | 4
9 | 1 | 3 | 1 | 3<|endoftext|> |
1ed0b57c0d9d8ccbbf9f48307ab5643b36e580d7a8ebd79ab59bb79ba93cff8a | def stats(self, ops=(min, max, np.median, sum)):
'Compute statistics for each column and place them in a table.'
names = [op.__name__ for op in ops]
ops = [_zero_on_type_error(op) for op in ops]
columns = [[op(column) for op in ops] for column in self.columns]
table = type(self)().with_columns(zip(self.labels, columns))
stats = table._unused_label('statistic')
table[stats] = names
table.move_to_start(stats)
return table | Compute statistics for each column and place them in a table. | digital-assyriology-review/datascience/tables.py | stats | ds-modules/NESTUD-190A | 6 | python | def stats(self, ops=(min, max, np.median, sum)):
names = [op.__name__ for op in ops]
ops = [_zero_on_type_error(op) for op in ops]
columns = [[op(column) for op in ops] for column in self.columns]
table = type(self)().with_columns(zip(self.labels, columns))
stats = table._unused_label('statistic')
table[stats] = names
table.move_to_start(stats)
return table | def stats(self, ops=(min, max, np.median, sum)):
names = [op.__name__ for op in ops]
ops = [_zero_on_type_error(op) for op in ops]
columns = [[op(column) for op in ops] for column in self.columns]
table = type(self)().with_columns(zip(self.labels, columns))
stats = table._unused_label('statistic')
table[stats] = names
table.move_to_start(stats)
return table<|docstring|>Compute statistics for each column and place them in a table.<|endoftext|> |
4eb788e9daa2105226d9468a18d48a9170333fc09aed00da86ee8b7f708f91a6 | def _as_label(self, index_or_label):
'Convert index to label.'
if isinstance(index_or_label, str):
return index_or_label
if isinstance(index_or_label, numbers.Integral):
return self.labels[index_or_label]
else:
raise ValueError((str(index_or_label) + ' is not a label or index')) | Convert index to label. | digital-assyriology-review/datascience/tables.py | _as_label | ds-modules/NESTUD-190A | 6 | python | def _as_label(self, index_or_label):
if isinstance(index_or_label, str):
return index_or_label
if isinstance(index_or_label, numbers.Integral):
return self.labels[index_or_label]
else:
raise ValueError((str(index_or_label) + ' is not a label or index')) | def _as_label(self, index_or_label):
if isinstance(index_or_label, str):
return index_or_label
if isinstance(index_or_label, numbers.Integral):
return self.labels[index_or_label]
else:
raise ValueError((str(index_or_label) + ' is not a label or index'))<|docstring|>Convert index to label.<|endoftext|> |
5ec750847bb8945bb45e4674632a7e74020e8fcf78ce0bea1b3bbb97ae7f98d8 | def _as_labels(self, label_or_labels):
'Convert single label to list and convert indices to labels.'
return [self._as_label(s) for s in _as_labels(label_or_labels)] | Convert single label to list and convert indices to labels. | digital-assyriology-review/datascience/tables.py | _as_labels | ds-modules/NESTUD-190A | 6 | python | def _as_labels(self, label_or_labels):
return [self._as_label(s) for s in _as_labels(label_or_labels)] | def _as_labels(self, label_or_labels):
return [self._as_label(s) for s in _as_labels(label_or_labels)]<|docstring|>Convert single label to list and convert indices to labels.<|endoftext|> |
c02b9ef9e62c7c964dee444a94ad0c672de59788efbb29c04bf0c87e0f9fbb6f | def _varargs_as_labels(self, label_list):
'Converts a list of labels or singleton list of list of labels into\n a list of labels. Useful when labels are passed as varargs.'
return self._as_labels(_varargs_labels_as_list(label_list)) | Converts a list of labels or singleton list of list of labels into
a list of labels. Useful when labels are passed as varargs. | digital-assyriology-review/datascience/tables.py | _varargs_as_labels | ds-modules/NESTUD-190A | 6 | python | def _varargs_as_labels(self, label_list):
'Converts a list of labels or singleton list of list of labels into\n a list of labels. Useful when labels are passed as varargs.'
return self._as_labels(_varargs_labels_as_list(label_list)) | def _varargs_as_labels(self, label_list):
'Converts a list of labels or singleton list of list of labels into\n a list of labels. Useful when labels are passed as varargs.'
return self._as_labels(_varargs_labels_as_list(label_list))<|docstring|>Converts a list of labels or singleton list of list of labels into
a list of labels. Useful when labels are passed as varargs.<|endoftext|> |
fda66f3e1ae5603a5ea5ae1c6fcde7e3593ab13ee277c41678cbfd69dc6240ba | def _unused_label(self, label):
'Generate an unused label.'
original = label
existing = self.labels
i = 2
while (label in existing):
label = '{}_{}'.format(original, i)
i += 1
return label | Generate an unused label. | digital-assyriology-review/datascience/tables.py | _unused_label | ds-modules/NESTUD-190A | 6 | python | def _unused_label(self, label):
original = label
existing = self.labels
i = 2
while (label in existing):
label = '{}_{}'.format(original, i)
i += 1
return label | def _unused_label(self, label):
original = label
existing = self.labels
i = 2
while (label in existing):
label = '{}_{}'.format(original, i)
i += 1
return label<|docstring|>Generate an unused label.<|endoftext|> |
a0dc4bd012cb0dbc477cf8f907a5b1799924a3191aa352f5b18509264500f303 | def _get_column(self, column_or_label):
'Convert label to column and check column length.'
c = column_or_label
if (isinstance(c, collections.Hashable) and (c in self.labels)):
return self[c]
elif isinstance(c, numbers.Integral):
return self[c]
elif isinstance(c, str):
raise ValueError('label "{}" not in labels {}'.format(c, self.labels))
else:
assert (len(c) == self.num_rows), 'column length mismatch'
return c | Convert label to column and check column length. | digital-assyriology-review/datascience/tables.py | _get_column | ds-modules/NESTUD-190A | 6 | python | def _get_column(self, column_or_label):
c = column_or_label
if (isinstance(c, collections.Hashable) and (c in self.labels)):
return self[c]
elif isinstance(c, numbers.Integral):
return self[c]
elif isinstance(c, str):
raise ValueError('label "{}" not in labels {}'.format(c, self.labels))
else:
assert (len(c) == self.num_rows), 'column length mismatch'
return c | def _get_column(self, column_or_label):
c = column_or_label
if (isinstance(c, collections.Hashable) and (c in self.labels)):
return self[c]
elif isinstance(c, numbers.Integral):
return self[c]
elif isinstance(c, str):
raise ValueError('label "{}" not in labels {}'.format(c, self.labels))
else:
assert (len(c) == self.num_rows), 'column length mismatch'
return c<|docstring|>Convert label to column and check column length.<|endoftext|> |
21f9f7ddaecab18c31e9afd4d53e131c93c4273e166302f17b0c2fa7b41f80d0 | def percentile(self, p):
"Return a new table with one row containing the pth percentile for\n each column.\n\n Assumes that each column only contains one type of value.\n\n Returns a new table with one row and the same column labels.\n The row contains the pth percentile of the original column, where the\n pth percentile of a column is the smallest value that at at least as\n large as the p% of numbers in the column.\n\n >>> table = Table().with_columns(\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> table\n count | points\n 9 | 1\n 3 | 2\n 3 | 2\n 1 | 10\n >>> table.percentile(80)\n count | points\n 9 | 10\n "
percentiles = [[_util.percentile(p, column)] for column in self.columns]
return self._with_columns(percentiles) | Return a new table with one row containing the pth percentile for
each column.
Assumes that each column only contains one type of value.
Returns a new table with one row and the same column labels.
The row contains the pth percentile of the original column, where the
pth percentile of a column is the smallest value that at at least as
large as the p% of numbers in the column.
>>> table = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> table.percentile(80)
count | points
9 | 10 | digital-assyriology-review/datascience/tables.py | percentile | ds-modules/NESTUD-190A | 6 | python | def percentile(self, p):
"Return a new table with one row containing the pth percentile for\n each column.\n\n Assumes that each column only contains one type of value.\n\n Returns a new table with one row and the same column labels.\n The row contains the pth percentile of the original column, where the\n pth percentile of a column is the smallest value that at at least as\n large as the p% of numbers in the column.\n\n >>> table = Table().with_columns(\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> table\n count | points\n 9 | 1\n 3 | 2\n 3 | 2\n 1 | 10\n >>> table.percentile(80)\n count | points\n 9 | 10\n "
percentiles = [[_util.percentile(p, column)] for column in self.columns]
return self._with_columns(percentiles) | def percentile(self, p):
"Return a new table with one row containing the pth percentile for\n each column.\n\n Assumes that each column only contains one type of value.\n\n Returns a new table with one row and the same column labels.\n The row contains the pth percentile of the original column, where the\n pth percentile of a column is the smallest value that at at least as\n large as the p% of numbers in the column.\n\n >>> table = Table().with_columns(\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> table\n count | points\n 9 | 1\n 3 | 2\n 3 | 2\n 1 | 10\n >>> table.percentile(80)\n count | points\n 9 | 10\n "
percentiles = [[_util.percentile(p, column)] for column in self.columns]
return self._with_columns(percentiles)<|docstring|>Return a new table with one row containing the pth percentile for
each column.
Assumes that each column only contains one type of value.
Returns a new table with one row and the same column labels.
The row contains the pth percentile of the original column, where the
pth percentile of a column is the smallest value that at at least as
large as the p% of numbers in the column.
>>> table = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> table
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> table.percentile(80)
count | points
9 | 10<|endoftext|> |
05ff441bda2fb90194cf3648fc6daa33d5d22b361dc662fd8d49364a13928d78 | def sample(self, k=None, with_replacement=True, weights=None):
"Return a new table where k rows are randomly sampled from the\n original table.\n\n Args:\n ``k`` -- specifies the number of rows (``int``) to be sampled from\n the table. Default is k equal to number of rows in the table.\n\n ``with_replacement`` -- (``bool``) By default True;\n Samples ``k`` rows with replacement from table, else samples\n ``k`` rows without replacement.\n\n ``weights`` -- Array specifying probability the ith row of the\n table is sampled. Defaults to None, which samples each row\n with equal probability. ``weights`` must be a valid probability\n distribution -- i.e. an array the length of the number of rows,\n summing to 1.\n\n Raises:\n ValueError -- if ``weights`` is not length equal to number of rows\n in the table; or, if ``weights`` does not sum to 1.\n\n Returns:\n A new instance of ``Table`` with ``k`` rows resampled.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> jobs.sample() # doctest: +SKIP\n job | wage\n b | 20\n b | 20\n a | 10\n d | 8\n >>> jobs.sample(with_replacement=True) # doctest: +SKIP\n job | wage\n d | 8\n b | 20\n c | 15\n a | 10\n >>> jobs.sample(k = 2) # doctest: +SKIP\n job | wage\n b | 20\n c | 15\n >>> ws = make_array(0.5, 0.5, 0, 0)\n >>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP\n job | wage\n a | 10\n a | 10\n >>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))\n Traceback (most recent call last):\n ...\n ValueError: probabilities do not sum to 1\n\n # Weights must be length of table.\n >>> jobs.sample(k=2, weights=make_array(1, 0, 0))\n Traceback (most recent call last):\n ...\n ValueError: a and p must have same size\n "
n = self.num_rows
if (k is None):
k = n
index = np.random.choice(n, k, replace=with_replacement, p=weights)
columns = [[c[i] for i in index] for c in self.columns]
sample = self._with_columns(columns)
return sample | Return a new table where k rows are randomly sampled from the
original table.
Args:
``k`` -- specifies the number of rows (``int``) to be sampled from
the table. Default is k equal to number of rows in the table.
``with_replacement`` -- (``bool``) By default True;
Samples ``k`` rows with replacement from table, else samples
``k`` rows without replacement.
``weights`` -- Array specifying probability the ith row of the
table is sampled. Defaults to None, which samples each row
with equal probability. ``weights`` must be a valid probability
distribution -- i.e. an array the length of the number of rows,
summing to 1.
Raises:
ValueError -- if ``weights`` is not length equal to number of rows
in the table; or, if ``weights`` does not sum to 1.
Returns:
A new instance of ``Table`` with ``k`` rows resampled.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.sample() # doctest: +SKIP
job | wage
b | 20
b | 20
a | 10
d | 8
>>> jobs.sample(with_replacement=True) # doctest: +SKIP
job | wage
d | 8
b | 20
c | 15
a | 10
>>> jobs.sample(k = 2) # doctest: +SKIP
job | wage
b | 20
c | 15
>>> ws = make_array(0.5, 0.5, 0, 0)
>>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP
job | wage
a | 10
a | 10
>>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))
Traceback (most recent call last):
...
ValueError: probabilities do not sum to 1
# Weights must be length of table.
>>> jobs.sample(k=2, weights=make_array(1, 0, 0))
Traceback (most recent call last):
...
ValueError: a and p must have same size | digital-assyriology-review/datascience/tables.py | sample | ds-modules/NESTUD-190A | 6 | python | def sample(self, k=None, with_replacement=True, weights=None):
"Return a new table where k rows are randomly sampled from the\n original table.\n\n Args:\n ``k`` -- specifies the number of rows (``int``) to be sampled from\n the table. Default is k equal to number of rows in the table.\n\n ``with_replacement`` -- (``bool``) By default True;\n Samples ``k`` rows with replacement from table, else samples\n ``k`` rows without replacement.\n\n ``weights`` -- Array specifying probability the ith row of the\n table is sampled. Defaults to None, which samples each row\n with equal probability. ``weights`` must be a valid probability\n distribution -- i.e. an array the length of the number of rows,\n summing to 1.\n\n Raises:\n ValueError -- if ``weights`` is not length equal to number of rows\n in the table; or, if ``weights`` does not sum to 1.\n\n Returns:\n A new instance of ``Table`` with ``k`` rows resampled.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> jobs.sample() # doctest: +SKIP\n job | wage\n b | 20\n b | 20\n a | 10\n d | 8\n >>> jobs.sample(with_replacement=True) # doctest: +SKIP\n job | wage\n d | 8\n b | 20\n c | 15\n a | 10\n >>> jobs.sample(k = 2) # doctest: +SKIP\n job | wage\n b | 20\n c | 15\n >>> ws = make_array(0.5, 0.5, 0, 0)\n >>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP\n job | wage\n a | 10\n a | 10\n >>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))\n Traceback (most recent call last):\n ...\n ValueError: probabilities do not sum to 1\n\n # Weights must be length of table.\n >>> jobs.sample(k=2, weights=make_array(1, 0, 0))\n Traceback (most recent call last):\n ...\n ValueError: a and p must have same size\n "
n = self.num_rows
if (k is None):
k = n
index = np.random.choice(n, k, replace=with_replacement, p=weights)
columns = [[c[i] for i in index] for c in self.columns]
sample = self._with_columns(columns)
return sample | def sample(self, k=None, with_replacement=True, weights=None):
"Return a new table where k rows are randomly sampled from the\n original table.\n\n Args:\n ``k`` -- specifies the number of rows (``int``) to be sampled from\n the table. Default is k equal to number of rows in the table.\n\n ``with_replacement`` -- (``bool``) By default True;\n Samples ``k`` rows with replacement from table, else samples\n ``k`` rows without replacement.\n\n ``weights`` -- Array specifying probability the ith row of the\n table is sampled. Defaults to None, which samples each row\n with equal probability. ``weights`` must be a valid probability\n distribution -- i.e. an array the length of the number of rows,\n summing to 1.\n\n Raises:\n ValueError -- if ``weights`` is not length equal to number of rows\n in the table; or, if ``weights`` does not sum to 1.\n\n Returns:\n A new instance of ``Table`` with ``k`` rows resampled.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> jobs.sample() # doctest: +SKIP\n job | wage\n b | 20\n b | 20\n a | 10\n d | 8\n >>> jobs.sample(with_replacement=True) # doctest: +SKIP\n job | wage\n d | 8\n b | 20\n c | 15\n a | 10\n >>> jobs.sample(k = 2) # doctest: +SKIP\n job | wage\n b | 20\n c | 15\n >>> ws = make_array(0.5, 0.5, 0, 0)\n >>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP\n job | wage\n a | 10\n a | 10\n >>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))\n Traceback (most recent call last):\n ...\n ValueError: probabilities do not sum to 1\n\n # Weights must be length of table.\n >>> jobs.sample(k=2, weights=make_array(1, 0, 0))\n Traceback (most recent call last):\n ...\n ValueError: a and p must have same size\n "
n = self.num_rows
if (k is None):
k = n
index = np.random.choice(n, k, replace=with_replacement, p=weights)
columns = [[c[i] for i in index] for c in self.columns]
sample = self._with_columns(columns)
return sample<|docstring|>Return a new table where k rows are randomly sampled from the
original table.
Args:
``k`` -- specifies the number of rows (``int``) to be sampled from
the table. Default is k equal to number of rows in the table.
``with_replacement`` -- (``bool``) By default True;
Samples ``k`` rows with replacement from table, else samples
``k`` rows without replacement.
``weights`` -- Array specifying probability the ith row of the
table is sampled. Defaults to None, which samples each row
with equal probability. ``weights`` must be a valid probability
distribution -- i.e. an array the length of the number of rows,
summing to 1.
Raises:
ValueError -- if ``weights`` is not length equal to number of rows
in the table; or, if ``weights`` does not sum to 1.
Returns:
A new instance of ``Table`` with ``k`` rows resampled.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.sample() # doctest: +SKIP
job | wage
b | 20
b | 20
a | 10
d | 8
>>> jobs.sample(with_replacement=True) # doctest: +SKIP
job | wage
d | 8
b | 20
c | 15
a | 10
>>> jobs.sample(k = 2) # doctest: +SKIP
job | wage
b | 20
c | 15
>>> ws = make_array(0.5, 0.5, 0, 0)
>>> jobs.sample(k=2, with_replacement=True, weights=ws) # doctest: +SKIP
job | wage
a | 10
a | 10
>>> jobs.sample(k=2, weights=make_array(1, 0, 1, 0))
Traceback (most recent call last):
...
ValueError: probabilities do not sum to 1
# Weights must be length of table.
>>> jobs.sample(k=2, weights=make_array(1, 0, 0))
Traceback (most recent call last):
...
ValueError: a and p must have same size<|endoftext|> |
b8504730b08dbf6b33e9c62fd63a2fc32d96d2858286d81d3dcce592946b657d | def sample_from_distribution(self, distribution, k, proportions=False):
"Return a new table with the same number of rows and a new column.\n The values in the distribution column are define a multinomial.\n They are replaced by sample counts/proportions in the output.\n\n >>> sizes = Table(['size', 'count']).with_rows([\n ... ['small', 50],\n ... ['medium', 100],\n ... ['big', 50],\n ... ])\n >>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP\n size | count | count sample\n small | 50 | 239\n medium | 100 | 496\n big | 50 | 265\n >>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP\n size | count | count sample\n small | 50 | 0.24\n medium | 100 | 0.51\n big | 50 | 0.25\n "
dist = self._get_column(distribution)
total = sum(dist)
assert ((total > 0) and np.all((dist >= 0))), 'Counts or a distribution required'
dist = (dist / sum(dist))
sample = np.random.multinomial(k, dist)
if proportions:
sample = (sample / sum(sample))
label = self._unused_label((self._as_label(distribution) + ' sample'))
return self.with_column(label, sample) | Return a new table with the same number of rows and a new column.
The values in the distribution column are define a multinomial.
They are replaced by sample counts/proportions in the output.
>>> sizes = Table(['size', 'count']).with_rows([
... ['small', 50],
... ['medium', 100],
... ['big', 50],
... ])
>>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP
size | count | count sample
small | 50 | 239
medium | 100 | 496
big | 50 | 265
>>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP
size | count | count sample
small | 50 | 0.24
medium | 100 | 0.51
big | 50 | 0.25 | digital-assyriology-review/datascience/tables.py | sample_from_distribution | ds-modules/NESTUD-190A | 6 | python | def sample_from_distribution(self, distribution, k, proportions=False):
"Return a new table with the same number of rows and a new column.\n The values in the distribution column are define a multinomial.\n They are replaced by sample counts/proportions in the output.\n\n >>> sizes = Table(['size', 'count']).with_rows([\n ... ['small', 50],\n ... ['medium', 100],\n ... ['big', 50],\n ... ])\n >>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP\n size | count | count sample\n small | 50 | 239\n medium | 100 | 496\n big | 50 | 265\n >>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP\n size | count | count sample\n small | 50 | 0.24\n medium | 100 | 0.51\n big | 50 | 0.25\n "
dist = self._get_column(distribution)
total = sum(dist)
assert ((total > 0) and np.all((dist >= 0))), 'Counts or a distribution required'
dist = (dist / sum(dist))
sample = np.random.multinomial(k, dist)
if proportions:
sample = (sample / sum(sample))
label = self._unused_label((self._as_label(distribution) + ' sample'))
return self.with_column(label, sample) | def sample_from_distribution(self, distribution, k, proportions=False):
"Return a new table with the same number of rows and a new column.\n The values in the distribution column are define a multinomial.\n They are replaced by sample counts/proportions in the output.\n\n >>> sizes = Table(['size', 'count']).with_rows([\n ... ['small', 50],\n ... ['medium', 100],\n ... ['big', 50],\n ... ])\n >>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP\n size | count | count sample\n small | 50 | 239\n medium | 100 | 496\n big | 50 | 265\n >>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP\n size | count | count sample\n small | 50 | 0.24\n medium | 100 | 0.51\n big | 50 | 0.25\n "
dist = self._get_column(distribution)
total = sum(dist)
assert ((total > 0) and np.all((dist >= 0))), 'Counts or a distribution required'
dist = (dist / sum(dist))
sample = np.random.multinomial(k, dist)
if proportions:
sample = (sample / sum(sample))
label = self._unused_label((self._as_label(distribution) + ' sample'))
return self.with_column(label, sample)<|docstring|>Return a new table with the same number of rows and a new column.
The values in the distribution column are define a multinomial.
They are replaced by sample counts/proportions in the output.
>>> sizes = Table(['size', 'count']).with_rows([
... ['small', 50],
... ['medium', 100],
... ['big', 50],
... ])
>>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP
size | count | count sample
small | 50 | 239
medium | 100 | 496
big | 50 | 265
>>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP
size | count | count sample
small | 50 | 0.24
medium | 100 | 0.51
big | 50 | 0.25<|endoftext|> |
cf8830a1a0337862ec9b020ddfd8b9a6ce55055497e460e31dfc969ee7b2995f | def split(self, k):
"Return a tuple of two tables where the first table contains\n ``k`` rows randomly sampled and the second contains the remaining rows.\n\n Args:\n ``k`` (int): The number of rows randomly sampled into the first\n table. ``k`` must be between 1 and ``num_rows - 1``.\n\n Raises:\n ``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.\n\n Returns:\n A tuple containing two instances of ``Table``.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> sample, rest = jobs.split(3)\n >>> sample # doctest: +SKIP\n job | wage\n c | 15\n a | 10\n b | 20\n >>> rest # doctest: +SKIP\n job | wage\n d | 8\n "
if (not (1 <= k <= (self.num_rows - 1))):
raise ValueError('Invalid value of k. k must be between 1 and thenumber of rows - 1')
rows = np.random.permutation(self.num_rows)
first = self.take(rows[:k])
rest = self.take(rows[k:])
for column_label in self._formats:
first._formats[column_label] = self._formats[column_label]
rest._formats[column_label] = self._formats[column_label]
return (first, rest) | Return a tuple of two tables where the first table contains
``k`` rows randomly sampled and the second contains the remaining rows.
Args:
``k`` (int): The number of rows randomly sampled into the first
table. ``k`` must be between 1 and ``num_rows - 1``.
Raises:
``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.
Returns:
A tuple containing two instances of ``Table``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> sample, rest = jobs.split(3)
>>> sample # doctest: +SKIP
job | wage
c | 15
a | 10
b | 20
>>> rest # doctest: +SKIP
job | wage
d | 8 | digital-assyriology-review/datascience/tables.py | split | ds-modules/NESTUD-190A | 6 | python | def split(self, k):
"Return a tuple of two tables where the first table contains\n ``k`` rows randomly sampled and the second contains the remaining rows.\n\n Args:\n ``k`` (int): The number of rows randomly sampled into the first\n table. ``k`` must be between 1 and ``num_rows - 1``.\n\n Raises:\n ``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.\n\n Returns:\n A tuple containing two instances of ``Table``.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> sample, rest = jobs.split(3)\n >>> sample # doctest: +SKIP\n job | wage\n c | 15\n a | 10\n b | 20\n >>> rest # doctest: +SKIP\n job | wage\n d | 8\n "
if (not (1 <= k <= (self.num_rows - 1))):
raise ValueError('Invalid value of k. k must be between 1 and thenumber of rows - 1')
rows = np.random.permutation(self.num_rows)
first = self.take(rows[:k])
rest = self.take(rows[k:])
for column_label in self._formats:
first._formats[column_label] = self._formats[column_label]
rest._formats[column_label] = self._formats[column_label]
return (first, rest) | def split(self, k):
"Return a tuple of two tables where the first table contains\n ``k`` rows randomly sampled and the second contains the remaining rows.\n\n Args:\n ``k`` (int): The number of rows randomly sampled into the first\n table. ``k`` must be between 1 and ``num_rows - 1``.\n\n Raises:\n ``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.\n\n Returns:\n A tuple containing two instances of ``Table``.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> sample, rest = jobs.split(3)\n >>> sample # doctest: +SKIP\n job | wage\n c | 15\n a | 10\n b | 20\n >>> rest # doctest: +SKIP\n job | wage\n d | 8\n "
if (not (1 <= k <= (self.num_rows - 1))):
raise ValueError('Invalid value of k. k must be between 1 and thenumber of rows - 1')
rows = np.random.permutation(self.num_rows)
first = self.take(rows[:k])
rest = self.take(rows[k:])
for column_label in self._formats:
first._formats[column_label] = self._formats[column_label]
rest._formats[column_label] = self._formats[column_label]
return (first, rest)<|docstring|>Return a tuple of two tables where the first table contains
``k`` rows randomly sampled and the second contains the remaining rows.
Args:
``k`` (int): The number of rows randomly sampled into the first
table. ``k`` must be between 1 and ``num_rows - 1``.
Raises:
``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.
Returns:
A tuple containing two instances of ``Table``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> sample, rest = jobs.split(3)
>>> sample # doctest: +SKIP
job | wage
c | 15
a | 10
b | 20
>>> rest # doctest: +SKIP
job | wage
d | 8<|endoftext|> |
1a7ad5ef32632579d4083853a495dd29a411f08205d5ca178c052ab18037bdc3 | def with_row(self, row):
"Return a table with an additional row.\n\n Args:\n ``row`` (sequence): A value for each column.\n\n Raises:\n ``ValueError``: If the row length differs from the column count.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])\n letter | count | points\n c | 2 | 3\n d | 4 | 2\n "
self = self.copy()
self.append(row)
return self | Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2 | digital-assyriology-review/datascience/tables.py | with_row | ds-modules/NESTUD-190A | 6 | python | def with_row(self, row):
"Return a table with an additional row.\n\n Args:\n ``row`` (sequence): A value for each column.\n\n Raises:\n ``ValueError``: If the row length differs from the column count.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])\n letter | count | points\n c | 2 | 3\n d | 4 | 2\n "
self = self.copy()
self.append(row)
return self | def with_row(self, row):
"Return a table with an additional row.\n\n Args:\n ``row`` (sequence): A value for each column.\n\n Raises:\n ``ValueError``: If the row length differs from the column count.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])\n letter | count | points\n c | 2 | 3\n d | 4 | 2\n "
self = self.copy()
self.append(row)
return self<|docstring|>Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2<|endoftext|> |
9ce814cb55746e682899c0fbdee4bd073a42cd95655d52361231142d9d43b47c | def with_rows(self, rows):
"Return a table with additional rows.\n\n Args:\n ``rows`` (sequence of sequences): Each row has a value per column.\n\n If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.\n\n Raises:\n ``ValueError``: If a row length differs from the column count.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles.with_rows(make_array(make_array('c', 2, 3),\n ... make_array('d', 4, 2)))\n letter | count | points\n c | 2 | 3\n d | 4 | 2\n "
self = self.copy()
self.append(self._with_columns(zip(*rows)))
return self | Return a table with additional rows.
Args:
``rows`` (sequence of sequences): Each row has a value per column.
If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.
Raises:
``ValueError``: If a row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_rows(make_array(make_array('c', 2, 3),
... make_array('d', 4, 2)))
letter | count | points
c | 2 | 3
d | 4 | 2 | digital-assyriology-review/datascience/tables.py | with_rows | ds-modules/NESTUD-190A | 6 | python | def with_rows(self, rows):
"Return a table with additional rows.\n\n Args:\n ``rows`` (sequence of sequences): Each row has a value per column.\n\n If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.\n\n Raises:\n ``ValueError``: If a row length differs from the column count.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles.with_rows(make_array(make_array('c', 2, 3),\n ... make_array('d', 4, 2)))\n letter | count | points\n c | 2 | 3\n d | 4 | 2\n "
self = self.copy()
self.append(self._with_columns(zip(*rows)))
return self | def with_rows(self, rows):
"Return a table with additional rows.\n\n Args:\n ``rows`` (sequence of sequences): Each row has a value per column.\n\n If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.\n\n Raises:\n ``ValueError``: If a row length differs from the column count.\n\n >>> tiles = Table(make_array('letter', 'count', 'points'))\n >>> tiles.with_rows(make_array(make_array('c', 2, 3),\n ... make_array('d', 4, 2)))\n letter | count | points\n c | 2 | 3\n d | 4 | 2\n "
self = self.copy()
self.append(self._with_columns(zip(*rows)))
return self<|docstring|>Return a table with additional rows.
Args:
``rows`` (sequence of sequences): Each row has a value per column.
If ``rows`` is a 2-d array, its shape must be (_, n) for n columns.
Raises:
``ValueError``: If a row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_rows(make_array(make_array('c', 2, 3),
... make_array('d', 4, 2)))
letter | count | points
c | 2 | 3
d | 4 | 2<|endoftext|> |
de71d9ed514429f51d83d1bd91f4c4dc19a4b40900fa52ab3d8e94059c54f81d | def with_column(self, label, values, *rest):
"Return a new table with an additional or replaced column.\n\n Args:\n ``label`` (str): The column label. If an existing label is used,\n the existing column will be replaced in the new table.\n\n ``values`` (single value or sequence): If a single value, every\n value in the new column is ``values``. If sequence of values,\n new column takes on values in ``values``.\n\n ``rest``: An alternating list of labels and values describing\n additional columns. See with_columns for a full description.\n\n Raises:\n ``ValueError``: If\n - ``label`` is not a valid column name\n - if ``label`` is not of type (str)\n - ``values`` is a list/array that does not have the same\n length as the number of rows in the table.\n\n Returns:\n copy of original table with new or replaced column\n\n >>> alphabet = Table().with_column('letter', make_array('c','d'))\n >>> alphabet = alphabet.with_column('count', make_array(2, 4))\n >>> alphabet\n letter | count\n c | 2\n d | 4\n >>> alphabet.with_column('permutes', make_array('a', 'g'))\n letter | count | permutes\n c | 2 | a\n d | 4 | g\n >>> alphabet\n letter | count\n c | 2\n d | 4\n >>> alphabet.with_column('count', 1)\n letter | count\n c | 1\n d | 1\n >>> alphabet.with_column(1, make_array(1, 2))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> alphabet.with_column('bad_col', make_array(1))\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same number of rows as table.\n "
if rest:
return self.with_columns(label, values, *rest)
new_table = self.copy()
new_table.append_column(label, values)
return new_table | Return a new table with an additional or replaced column.
Args:
``label`` (str): The column label. If an existing label is used,
the existing column will be replaced in the new table.
``values`` (single value or sequence): If a single value, every
value in the new column is ``values``. If sequence of values,
new column takes on values in ``values``.
``rest``: An alternating list of labels and values describing
additional columns. See with_columns for a full description.
Raises:
``ValueError``: If
- ``label`` is not a valid column name
- if ``label`` is not of type (str)
- ``values`` is a list/array that does not have the same
length as the number of rows in the table.
Returns:
copy of original table with new or replaced column
>>> alphabet = Table().with_column('letter', make_array('c','d'))
>>> alphabet = alphabet.with_column('count', make_array(2, 4))
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('permutes', make_array('a', 'g'))
letter | count | permutes
c | 2 | a
d | 4 | g
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('count', 1)
letter | count
c | 1
d | 1
>>> alphabet.with_column(1, make_array(1, 2))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> alphabet.with_column('bad_col', make_array(1))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table. | digital-assyriology-review/datascience/tables.py | with_column | ds-modules/NESTUD-190A | 6 | python | def with_column(self, label, values, *rest):
"Return a new table with an additional or replaced column.\n\n Args:\n ``label`` (str): The column label. If an existing label is used,\n the existing column will be replaced in the new table.\n\n ``values`` (single value or sequence): If a single value, every\n value in the new column is ``values``. If sequence of values,\n new column takes on values in ``values``.\n\n ``rest``: An alternating list of labels and values describing\n additional columns. See with_columns for a full description.\n\n Raises:\n ``ValueError``: If\n - ``label`` is not a valid column name\n - if ``label`` is not of type (str)\n - ``values`` is a list/array that does not have the same\n length as the number of rows in the table.\n\n Returns:\n copy of original table with new or replaced column\n\n >>> alphabet = Table().with_column('letter', make_array('c','d'))\n >>> alphabet = alphabet.with_column('count', make_array(2, 4))\n >>> alphabet\n letter | count\n c | 2\n d | 4\n >>> alphabet.with_column('permutes', make_array('a', 'g'))\n letter | count | permutes\n c | 2 | a\n d | 4 | g\n >>> alphabet\n letter | count\n c | 2\n d | 4\n >>> alphabet.with_column('count', 1)\n letter | count\n c | 1\n d | 1\n >>> alphabet.with_column(1, make_array(1, 2))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> alphabet.with_column('bad_col', make_array(1))\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same number of rows as table.\n "
if rest:
return self.with_columns(label, values, *rest)
new_table = self.copy()
new_table.append_column(label, values)
return new_table | def with_column(self, label, values, *rest):
"Return a new table with an additional or replaced column.\n\n Args:\n ``label`` (str): The column label. If an existing label is used,\n the existing column will be replaced in the new table.\n\n ``values`` (single value or sequence): If a single value, every\n value in the new column is ``values``. If sequence of values,\n new column takes on values in ``values``.\n\n ``rest``: An alternating list of labels and values describing\n additional columns. See with_columns for a full description.\n\n Raises:\n ``ValueError``: If\n - ``label`` is not a valid column name\n - if ``label`` is not of type (str)\n - ``values`` is a list/array that does not have the same\n length as the number of rows in the table.\n\n Returns:\n copy of original table with new or replaced column\n\n >>> alphabet = Table().with_column('letter', make_array('c','d'))\n >>> alphabet = alphabet.with_column('count', make_array(2, 4))\n >>> alphabet\n letter | count\n c | 2\n d | 4\n >>> alphabet.with_column('permutes', make_array('a', 'g'))\n letter | count | permutes\n c | 2 | a\n d | 4 | g\n >>> alphabet\n letter | count\n c | 2\n d | 4\n >>> alphabet.with_column('count', 1)\n letter | count\n c | 1\n d | 1\n >>> alphabet.with_column(1, make_array(1, 2))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> alphabet.with_column('bad_col', make_array(1))\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same number of rows as table.\n "
if rest:
return self.with_columns(label, values, *rest)
new_table = self.copy()
new_table.append_column(label, values)
return new_table<|docstring|>Return a new table with an additional or replaced column.
Args:
``label`` (str): The column label. If an existing label is used,
the existing column will be replaced in the new table.
``values`` (single value or sequence): If a single value, every
value in the new column is ``values``. If sequence of values,
new column takes on values in ``values``.
``rest``: An alternating list of labels and values describing
additional columns. See with_columns for a full description.
Raises:
``ValueError``: If
- ``label`` is not a valid column name
- if ``label`` is not of type (str)
- ``values`` is a list/array that does not have the same
length as the number of rows in the table.
Returns:
copy of original table with new or replaced column
>>> alphabet = Table().with_column('letter', make_array('c','d'))
>>> alphabet = alphabet.with_column('count', make_array(2, 4))
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('permutes', make_array('a', 'g'))
letter | count | permutes
c | 2 | a
d | 4 | g
>>> alphabet
letter | count
c | 2
d | 4
>>> alphabet.with_column('count', 1)
letter | count
c | 1
d | 1
>>> alphabet.with_column(1, make_array(1, 2))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> alphabet.with_column('bad_col', make_array(1))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.<|endoftext|> |
d13233b3ff095e5d2b0d53e19b04dc1a9475d56a6bbbf3ace3f72b3e09424c50 | def with_columns(self, *labels_and_values):
"Return a table with additional or replaced columns.\n\n\n Args:\n ``labels_and_values``: An alternating list of labels and values or\n a list of label-value pairs. If one of the labels is in\n existing table, then every value in the corresponding column is\n set to that value. If label has only a single value (``int``),\n every row of corresponding column takes on that value.\n\n Raises:\n ``ValueError``: If\n - any label in ``labels_and_values`` is not a valid column\n name, i.e if label is not of type (str).\n - if any value in ``labels_and_values`` is a list/array and\n does not have the same length as the number of rows in the\n table.\n ``AssertionError``:\n - 'incorrect columns format', if passed more than one sequence\n (iterables) for ``labels_and_values``.\n - 'even length sequence required' if missing a pair in\n label-value pairs.\n\n\n Returns:\n Copy of original table with new or replaced columns. Columns added\n in order of labels. Equivalent to ``with_column(label, value)``\n when passed only one label-value pair.\n\n\n >>> players = Table().with_columns('player_id',\n ... make_array(110234, 110235), 'wOBA', make_array(.354, .236))\n >>> players\n player_id | wOBA\n 110234 | 0.354\n 110235 | 0.236\n >>> players = players.with_columns('salaries', 'N/A', 'season', 2016)\n >>> players\n player_id | wOBA | salaries | season\n 110234 | 0.354 | N/A | 2016\n 110235 | 0.236 | N/A | 2016\n >>> salaries = Table().with_column('salary',\n ... make_array('$500,000', '$15,500,000'))\n >>> players.with_columns('salaries', salaries.column('salary'),\n ... 'years', make_array(6, 1))\n player_id | wOBA | salaries | season | years\n 110234 | 0.354 | $500,000 | 2016 | 6\n 110235 | 0.236 | $15,500,000 | 2016 | 1\n >>> players.with_columns(2, make_array('$600,000', '$20,000,000'))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> players.with_columns('salaries', make_array('$600,000'))\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same number of rows as table.\n "
if (len(labels_and_values) == 1):
labels_and_values = labels_and_values[0]
if isinstance(labels_and_values, collections.abc.Mapping):
labels_and_values = list(labels_and_values.items())
if (not isinstance(labels_and_values, collections.abc.Sequence)):
labels_and_values = list(labels_and_values)
if (not labels_and_values):
return self
first = labels_and_values[0]
if ((not isinstance(first, str)) and hasattr(first, '__iter__')):
for pair in labels_and_values:
assert (len(pair) == 2), 'incorrect columns format'
labels_and_values = [x for pair in labels_and_values for x in pair]
assert ((len(labels_and_values) % 2) == 0), 'Even length sequence required'
for i in range(0, len(labels_and_values), 2):
(label, values) = (labels_and_values[i], labels_and_values[(i + 1)])
self = self.with_column(label, values)
return self | Return a table with additional or replaced columns.
Args:
``labels_and_values``: An alternating list of labels and values or
a list of label-value pairs. If one of the labels is in
existing table, then every value in the corresponding column is
set to that value. If label has only a single value (``int``),
every row of corresponding column takes on that value.
Raises:
``ValueError``: If
- any label in ``labels_and_values`` is not a valid column
name, i.e if label is not of type (str).
- if any value in ``labels_and_values`` is a list/array and
does not have the same length as the number of rows in the
table.
``AssertionError``:
- 'incorrect columns format', if passed more than one sequence
(iterables) for ``labels_and_values``.
- 'even length sequence required' if missing a pair in
label-value pairs.
Returns:
Copy of original table with new or replaced columns. Columns added
in order of labels. Equivalent to ``with_column(label, value)``
when passed only one label-value pair.
>>> players = Table().with_columns('player_id',
... make_array(110234, 110235), 'wOBA', make_array(.354, .236))
>>> players
player_id | wOBA
110234 | 0.354
110235 | 0.236
>>> players = players.with_columns('salaries', 'N/A', 'season', 2016)
>>> players
player_id | wOBA | salaries | season
110234 | 0.354 | N/A | 2016
110235 | 0.236 | N/A | 2016
>>> salaries = Table().with_column('salary',
... make_array('$500,000', '$15,500,000'))
>>> players.with_columns('salaries', salaries.column('salary'),
... 'years', make_array(6, 1))
player_id | wOBA | salaries | season | years
110234 | 0.354 | $500,000 | 2016 | 6
110235 | 0.236 | $15,500,000 | 2016 | 1
>>> players.with_columns(2, make_array('$600,000', '$20,000,000'))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> players.with_columns('salaries', make_array('$600,000'))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table. | digital-assyriology-review/datascience/tables.py | with_columns | ds-modules/NESTUD-190A | 6 | python | def with_columns(self, *labels_and_values):
"Return a table with additional or replaced columns.\n\n\n Args:\n ``labels_and_values``: An alternating list of labels and values or\n a list of label-value pairs. If one of the labels is in\n existing table, then every value in the corresponding column is\n set to that value. If label has only a single value (``int``),\n every row of corresponding column takes on that value.\n\n Raises:\n ``ValueError``: If\n - any label in ``labels_and_values`` is not a valid column\n name, i.e if label is not of type (str).\n - if any value in ``labels_and_values`` is a list/array and\n does not have the same length as the number of rows in the\n table.\n ``AssertionError``:\n - 'incorrect columns format', if passed more than one sequence\n (iterables) for ``labels_and_values``.\n - 'even length sequence required' if missing a pair in\n label-value pairs.\n\n\n Returns:\n Copy of original table with new or replaced columns. Columns added\n in order of labels. Equivalent to ``with_column(label, value)``\n when passed only one label-value pair.\n\n\n >>> players = Table().with_columns('player_id',\n ... make_array(110234, 110235), 'wOBA', make_array(.354, .236))\n >>> players\n player_id | wOBA\n 110234 | 0.354\n 110235 | 0.236\n >>> players = players.with_columns('salaries', 'N/A', 'season', 2016)\n >>> players\n player_id | wOBA | salaries | season\n 110234 | 0.354 | N/A | 2016\n 110235 | 0.236 | N/A | 2016\n >>> salaries = Table().with_column('salary',\n ... make_array('$500,000', '$15,500,000'))\n >>> players.with_columns('salaries', salaries.column('salary'),\n ... 'years', make_array(6, 1))\n player_id | wOBA | salaries | season | years\n 110234 | 0.354 | $500,000 | 2016 | 6\n 110235 | 0.236 | $15,500,000 | 2016 | 1\n >>> players.with_columns(2, make_array('$600,000', '$20,000,000'))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> players.with_columns('salaries', make_array('$600,000'))\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same number of rows as table.\n "
if (len(labels_and_values) == 1):
labels_and_values = labels_and_values[0]
if isinstance(labels_and_values, collections.abc.Mapping):
labels_and_values = list(labels_and_values.items())
if (not isinstance(labels_and_values, collections.abc.Sequence)):
labels_and_values = list(labels_and_values)
if (not labels_and_values):
return self
first = labels_and_values[0]
if ((not isinstance(first, str)) and hasattr(first, '__iter__')):
for pair in labels_and_values:
assert (len(pair) == 2), 'incorrect columns format'
labels_and_values = [x for pair in labels_and_values for x in pair]
assert ((len(labels_and_values) % 2) == 0), 'Even length sequence required'
for i in range(0, len(labels_and_values), 2):
(label, values) = (labels_and_values[i], labels_and_values[(i + 1)])
self = self.with_column(label, values)
return self | def with_columns(self, *labels_and_values):
"Return a table with additional or replaced columns.\n\n\n Args:\n ``labels_and_values``: An alternating list of labels and values or\n a list of label-value pairs. If one of the labels is in\n existing table, then every value in the corresponding column is\n set to that value. If label has only a single value (``int``),\n every row of corresponding column takes on that value.\n\n Raises:\n ``ValueError``: If\n - any label in ``labels_and_values`` is not a valid column\n name, i.e if label is not of type (str).\n - if any value in ``labels_and_values`` is a list/array and\n does not have the same length as the number of rows in the\n table.\n ``AssertionError``:\n - 'incorrect columns format', if passed more than one sequence\n (iterables) for ``labels_and_values``.\n - 'even length sequence required' if missing a pair in\n label-value pairs.\n\n\n Returns:\n Copy of original table with new or replaced columns. Columns added\n in order of labels. Equivalent to ``with_column(label, value)``\n when passed only one label-value pair.\n\n\n >>> players = Table().with_columns('player_id',\n ... make_array(110234, 110235), 'wOBA', make_array(.354, .236))\n >>> players\n player_id | wOBA\n 110234 | 0.354\n 110235 | 0.236\n >>> players = players.with_columns('salaries', 'N/A', 'season', 2016)\n >>> players\n player_id | wOBA | salaries | season\n 110234 | 0.354 | N/A | 2016\n 110235 | 0.236 | N/A | 2016\n >>> salaries = Table().with_column('salary',\n ... make_array('$500,000', '$15,500,000'))\n >>> players.with_columns('salaries', salaries.column('salary'),\n ... 'years', make_array(6, 1))\n player_id | wOBA | salaries | season | years\n 110234 | 0.354 | $500,000 | 2016 | 6\n 110235 | 0.236 | $15,500,000 | 2016 | 1\n >>> players.with_columns(2, make_array('$600,000', '$20,000,000'))\n Traceback (most recent call last):\n ...\n ValueError: The column label must be a string, but a int was given\n >>> players.with_columns('salaries', make_array('$600,000'))\n Traceback (most recent call last):\n ...\n ValueError: Column length mismatch. New column does not have the same number of rows as table.\n "
if (len(labels_and_values) == 1):
labels_and_values = labels_and_values[0]
if isinstance(labels_and_values, collections.abc.Mapping):
labels_and_values = list(labels_and_values.items())
if (not isinstance(labels_and_values, collections.abc.Sequence)):
labels_and_values = list(labels_and_values)
if (not labels_and_values):
return self
first = labels_and_values[0]
if ((not isinstance(first, str)) and hasattr(first, '__iter__')):
for pair in labels_and_values:
assert (len(pair) == 2), 'incorrect columns format'
labels_and_values = [x for pair in labels_and_values for x in pair]
assert ((len(labels_and_values) % 2) == 0), 'Even length sequence required'
for i in range(0, len(labels_and_values), 2):
(label, values) = (labels_and_values[i], labels_and_values[(i + 1)])
self = self.with_column(label, values)
return self<|docstring|>Return a table with additional or replaced columns.
Args:
``labels_and_values``: An alternating list of labels and values or
a list of label-value pairs. If one of the labels is in
existing table, then every value in the corresponding column is
set to that value. If label has only a single value (``int``),
every row of corresponding column takes on that value.
Raises:
``ValueError``: If
- any label in ``labels_and_values`` is not a valid column
name, i.e if label is not of type (str).
- if any value in ``labels_and_values`` is a list/array and
does not have the same length as the number of rows in the
table.
``AssertionError``:
- 'incorrect columns format', if passed more than one sequence
(iterables) for ``labels_and_values``.
- 'even length sequence required' if missing a pair in
label-value pairs.
Returns:
Copy of original table with new or replaced columns. Columns added
in order of labels. Equivalent to ``with_column(label, value)``
when passed only one label-value pair.
>>> players = Table().with_columns('player_id',
... make_array(110234, 110235), 'wOBA', make_array(.354, .236))
>>> players
player_id | wOBA
110234 | 0.354
110235 | 0.236
>>> players = players.with_columns('salaries', 'N/A', 'season', 2016)
>>> players
player_id | wOBA | salaries | season
110234 | 0.354 | N/A | 2016
110235 | 0.236 | N/A | 2016
>>> salaries = Table().with_column('salary',
... make_array('$500,000', '$15,500,000'))
>>> players.with_columns('salaries', salaries.column('salary'),
... 'years', make_array(6, 1))
player_id | wOBA | salaries | season | years
110234 | 0.354 | $500,000 | 2016 | 6
110235 | 0.236 | $15,500,000 | 2016 | 1
>>> players.with_columns(2, make_array('$600,000', '$20,000,000'))
Traceback (most recent call last):
...
ValueError: The column label must be a string, but a int was given
>>> players.with_columns('salaries', make_array('$600,000'))
Traceback (most recent call last):
...
ValueError: Column length mismatch. New column does not have the same number of rows as table.<|endoftext|> |
55f13025cb02e611fabf9e1afbbd9a581ea8df448ddba52944dacb2cf92e6096 | def relabeled(self, label, new_label):
"Return a new table with ``label`` specifying column label(s)\n replaced by corresponding ``new_label``.\n\n Args:\n ``label`` -- (str or array of str) The label(s) of\n columns to be changed.\n\n ``new_label`` -- (str or array of str): The new label(s) of\n columns to be changed. Same number of elements as label.\n\n Raises:\n ``ValueError`` -- if ``label`` does not exist in\n table, or if the ``label`` and ``new_label`` are not not of\n equal length. Also, raised if ``label`` and/or ``new_label``\n are not ``str``.\n\n Returns:\n New table with ``new_label`` in place of ``label``.\n\n >>> tiles = Table().with_columns('letter', make_array('c', 'd'),\n ... 'count', make_array(2, 4))\n >>> tiles\n letter | count\n c | 2\n d | 4\n >>> tiles.relabeled('count', 'number')\n letter | number\n c | 2\n d | 4\n >>> tiles # original table unmodified\n letter | count\n c | 2\n d | 4\n >>> tiles.relabeled(make_array('letter', 'count'),\n ... make_array('column1', 'column2'))\n column1 | column2\n c | 2\n d | 4\n >>> tiles.relabeled(make_array('letter', 'number'),\n ... make_array('column1', 'column2'))\n Traceback (most recent call last):\n ...\n ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.\n "
copy = self.copy()
copy.relabel(label, new_label)
return copy | Return a new table with ``label`` specifying column label(s)
replaced by corresponding ``new_label``.
Args:
``label`` -- (str or array of str) The label(s) of
columns to be changed.
``new_label`` -- (str or array of str): The new label(s) of
columns to be changed. Same number of elements as label.
Raises:
``ValueError`` -- if ``label`` does not exist in
table, or if the ``label`` and ``new_label`` are not not of
equal length. Also, raised if ``label`` and/or ``new_label``
are not ``str``.
Returns:
New table with ``new_label`` in place of ``label``.
>>> tiles = Table().with_columns('letter', make_array('c', 'd'),
... 'count', make_array(2, 4))
>>> tiles
letter | count
c | 2
d | 4
>>> tiles.relabeled('count', 'number')
letter | number
c | 2
d | 4
>>> tiles # original table unmodified
letter | count
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'count'),
... make_array('column1', 'column2'))
column1 | column2
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'number'),
... make_array('column1', 'column2'))
Traceback (most recent call last):
...
ValueError: Invalid labels. Column labels must already exist in table in order to be replaced. | digital-assyriology-review/datascience/tables.py | relabeled | ds-modules/NESTUD-190A | 6 | python | def relabeled(self, label, new_label):
"Return a new table with ``label`` specifying column label(s)\n replaced by corresponding ``new_label``.\n\n Args:\n ``label`` -- (str or array of str) The label(s) of\n columns to be changed.\n\n ``new_label`` -- (str or array of str): The new label(s) of\n columns to be changed. Same number of elements as label.\n\n Raises:\n ``ValueError`` -- if ``label`` does not exist in\n table, or if the ``label`` and ``new_label`` are not not of\n equal length. Also, raised if ``label`` and/or ``new_label``\n are not ``str``.\n\n Returns:\n New table with ``new_label`` in place of ``label``.\n\n >>> tiles = Table().with_columns('letter', make_array('c', 'd'),\n ... 'count', make_array(2, 4))\n >>> tiles\n letter | count\n c | 2\n d | 4\n >>> tiles.relabeled('count', 'number')\n letter | number\n c | 2\n d | 4\n >>> tiles # original table unmodified\n letter | count\n c | 2\n d | 4\n >>> tiles.relabeled(make_array('letter', 'count'),\n ... make_array('column1', 'column2'))\n column1 | column2\n c | 2\n d | 4\n >>> tiles.relabeled(make_array('letter', 'number'),\n ... make_array('column1', 'column2'))\n Traceback (most recent call last):\n ...\n ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.\n "
copy = self.copy()
copy.relabel(label, new_label)
return copy | def relabeled(self, label, new_label):
"Return a new table with ``label`` specifying column label(s)\n replaced by corresponding ``new_label``.\n\n Args:\n ``label`` -- (str or array of str) The label(s) of\n columns to be changed.\n\n ``new_label`` -- (str or array of str): The new label(s) of\n columns to be changed. Same number of elements as label.\n\n Raises:\n ``ValueError`` -- if ``label`` does not exist in\n table, or if the ``label`` and ``new_label`` are not not of\n equal length. Also, raised if ``label`` and/or ``new_label``\n are not ``str``.\n\n Returns:\n New table with ``new_label`` in place of ``label``.\n\n >>> tiles = Table().with_columns('letter', make_array('c', 'd'),\n ... 'count', make_array(2, 4))\n >>> tiles\n letter | count\n c | 2\n d | 4\n >>> tiles.relabeled('count', 'number')\n letter | number\n c | 2\n d | 4\n >>> tiles # original table unmodified\n letter | count\n c | 2\n d | 4\n >>> tiles.relabeled(make_array('letter', 'count'),\n ... make_array('column1', 'column2'))\n column1 | column2\n c | 2\n d | 4\n >>> tiles.relabeled(make_array('letter', 'number'),\n ... make_array('column1', 'column2'))\n Traceback (most recent call last):\n ...\n ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.\n "
copy = self.copy()
copy.relabel(label, new_label)
return copy<|docstring|>Return a new table with ``label`` specifying column label(s)
replaced by corresponding ``new_label``.
Args:
``label`` -- (str or array of str) The label(s) of
columns to be changed.
``new_label`` -- (str or array of str): The new label(s) of
columns to be changed. Same number of elements as label.
Raises:
``ValueError`` -- if ``label`` does not exist in
table, or if the ``label`` and ``new_label`` are not not of
equal length. Also, raised if ``label`` and/or ``new_label``
are not ``str``.
Returns:
New table with ``new_label`` in place of ``label``.
>>> tiles = Table().with_columns('letter', make_array('c', 'd'),
... 'count', make_array(2, 4))
>>> tiles
letter | count
c | 2
d | 4
>>> tiles.relabeled('count', 'number')
letter | number
c | 2
d | 4
>>> tiles # original table unmodified
letter | count
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'count'),
... make_array('column1', 'column2'))
column1 | column2
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'number'),
... make_array('column1', 'column2'))
Traceback (most recent call last):
...
ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.<|endoftext|> |
3ec787e85d0ef781bab86f31cb4b3cec7bcfc412b9d9db1c88b559bdd9b251ed | def bin(self, *columns, **vargs):
'Group values by bin and compute counts per bin by column.\n\n By default, bins are chosen to contain all values in all columns. The\n following named arguments from numpy.histogram can be applied to\n specialize bin widths:\n\n If the original table has n columns, the resulting binned table has\n n+1 columns, where column 0 contains the lower bound of each bin.\n\n Args:\n ``columns`` (str or int): Labels or indices of columns to be\n binned. If empty, all columns are binned.\n\n ``bins`` (int or sequence of scalars): If bins is an int,\n it defines the number of equal-width bins in the given range\n (10, by default). If bins is a sequence, it defines the bin\n edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n ``range`` ((float, float)): The lower and upper range of\n the bins. If not provided, range contains all values in the\n table. Values outside the range are ignored.\n\n ``density`` (bool): If False, the result will contain the number of\n samples in each bin. If True, the result is the value of the\n probability density function at the bin, normalized such that\n the integral over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability mass function.\n '
if columns:
self = self.select(*columns)
if ('normed' in vargs):
vargs.setdefault('density', vargs.pop('normed'))
density = vargs.get('density', False)
tag = ('density' if density else 'count')
cols = list(self._columns.values())
(_, bins) = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
(counts, _) = np.histogram(self[label], bins=bins, density=density)
binned[((label + ' ') + tag)] = np.append(counts, 0)
return binned | Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function. | digital-assyriology-review/datascience/tables.py | bin | ds-modules/NESTUD-190A | 6 | python | def bin(self, *columns, **vargs):
'Group values by bin and compute counts per bin by column.\n\n By default, bins are chosen to contain all values in all columns. The\n following named arguments from numpy.histogram can be applied to\n specialize bin widths:\n\n If the original table has n columns, the resulting binned table has\n n+1 columns, where column 0 contains the lower bound of each bin.\n\n Args:\n ``columns`` (str or int): Labels or indices of columns to be\n binned. If empty, all columns are binned.\n\n ``bins`` (int or sequence of scalars): If bins is an int,\n it defines the number of equal-width bins in the given range\n (10, by default). If bins is a sequence, it defines the bin\n edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n ``range`` ((float, float)): The lower and upper range of\n the bins. If not provided, range contains all values in the\n table. Values outside the range are ignored.\n\n ``density`` (bool): If False, the result will contain the number of\n samples in each bin. If True, the result is the value of the\n probability density function at the bin, normalized such that\n the integral over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability mass function.\n '
if columns:
self = self.select(*columns)
if ('normed' in vargs):
vargs.setdefault('density', vargs.pop('normed'))
density = vargs.get('density', False)
tag = ('density' if density else 'count')
cols = list(self._columns.values())
(_, bins) = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
(counts, _) = np.histogram(self[label], bins=bins, density=density)
binned[((label + ' ') + tag)] = np.append(counts, 0)
return binned | def bin(self, *columns, **vargs):
'Group values by bin and compute counts per bin by column.\n\n By default, bins are chosen to contain all values in all columns. The\n following named arguments from numpy.histogram can be applied to\n specialize bin widths:\n\n If the original table has n columns, the resulting binned table has\n n+1 columns, where column 0 contains the lower bound of each bin.\n\n Args:\n ``columns`` (str or int): Labels or indices of columns to be\n binned. If empty, all columns are binned.\n\n ``bins`` (int or sequence of scalars): If bins is an int,\n it defines the number of equal-width bins in the given range\n (10, by default). If bins is a sequence, it defines the bin\n edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n ``range`` ((float, float)): The lower and upper range of\n the bins. If not provided, range contains all values in the\n table. Values outside the range are ignored.\n\n ``density`` (bool): If False, the result will contain the number of\n samples in each bin. If True, the result is the value of the\n probability density function at the bin, normalized such that\n the integral over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability mass function.\n '
if columns:
self = self.select(*columns)
if ('normed' in vargs):
vargs.setdefault('density', vargs.pop('normed'))
density = vargs.get('density', False)
tag = ('density' if density else 'count')
cols = list(self._columns.values())
(_, bins) = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
(counts, _) = np.histogram(self[label], bins=bins, density=density)
binned[((label + ' ') + tag)] = np.append(counts, 0)
return binned<|docstring|>Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function.<|endoftext|> |
8350209e3aa1d1c06f68b4171000597f82ef64e7335cf1d54924057fdb544ce6 | def show(self, max_rows=0):
'Display the table.'
IPython.display.display(IPython.display.HTML(self.as_html(max_rows))) | Display the table. | digital-assyriology-review/datascience/tables.py | show | ds-modules/NESTUD-190A | 6 | python | def show(self, max_rows=0):
IPython.display.display(IPython.display.HTML(self.as_html(max_rows))) | def show(self, max_rows=0):
IPython.display.display(IPython.display.HTML(self.as_html(max_rows)))<|docstring|>Display the table.<|endoftext|> |
ce851ef84aab07e31baee4a18b9fec237f365340b26a588051df4c0ab782191c | @staticmethod
def _use_html_if_available(format_fn):
"Use the value's HTML rendering if available, overriding format_fn."
def format_using_as_html(v, label=False):
if ((not label) and hasattr(v, 'as_html')):
return v.as_html()
else:
return format_fn(v, label)
return format_using_as_html | Use the value's HTML rendering if available, overriding format_fn. | digital-assyriology-review/datascience/tables.py | _use_html_if_available | ds-modules/NESTUD-190A | 6 | python | @staticmethod
def _use_html_if_available(format_fn):
def format_using_as_html(v, label=False):
if ((not label) and hasattr(v, 'as_html')):
return v.as_html()
else:
return format_fn(v, label)
return format_using_as_html | @staticmethod
def _use_html_if_available(format_fn):
def format_using_as_html(v, label=False):
if ((not label) and hasattr(v, 'as_html')):
return v.as_html()
else:
return format_fn(v, label)
return format_using_as_html<|docstring|>Use the value's HTML rendering if available, overriding format_fn.<|endoftext|> |
e2719443b3a104035a72086be130c4feee5df9add816fe2308b8500aef8547d4 | def _get_column_formatters(self, max_rows, as_html):
'Return one value formatting function per column.\n\n Each function has the signature f(value, label=False) -> str\n '
formats = {s: self._formats.get(s, self.formatter) for s in self.labels}
cols = self._columns.items()
fmts = [formats[k].format_column(k, v[:max_rows]) for (k, v) in cols]
if as_html:
fmts = list(map(type(self)._use_html_if_available, fmts))
return fmts | Return one value formatting function per column.
Each function has the signature f(value, label=False) -> str | digital-assyriology-review/datascience/tables.py | _get_column_formatters | ds-modules/NESTUD-190A | 6 | python | def _get_column_formatters(self, max_rows, as_html):
'Return one value formatting function per column.\n\n Each function has the signature f(value, label=False) -> str\n '
formats = {s: self._formats.get(s, self.formatter) for s in self.labels}
cols = self._columns.items()
fmts = [formats[k].format_column(k, v[:max_rows]) for (k, v) in cols]
if as_html:
fmts = list(map(type(self)._use_html_if_available, fmts))
return fmts | def _get_column_formatters(self, max_rows, as_html):
'Return one value formatting function per column.\n\n Each function has the signature f(value, label=False) -> str\n '
formats = {s: self._formats.get(s, self.formatter) for s in self.labels}
cols = self._columns.items()
fmts = [formats[k].format_column(k, v[:max_rows]) for (k, v) in cols]
if as_html:
fmts = list(map(type(self)._use_html_if_available, fmts))
return fmts<|docstring|>Return one value formatting function per column.
Each function has the signature f(value, label=False) -> str<|endoftext|> |
2b206f16e2f120d2fbe54b6759ca9ab5b9261b850688c2081be1bdc7df6a5c35 | def as_text(self, max_rows=0, sep=' | '):
'Format table as text.'
if ((not max_rows) or (max_rows > self.num_rows)):
max_rows = self.num_rows
omitted = max(0, (self.num_rows - max_rows))
labels = self._columns.keys()
fmts = self._get_column_formatters(max_rows, False)
rows = [[fmt(label, label=True) for (fmt, label) in zip(fmts, labels)]]
for row in itertools.islice(self.rows, max_rows):
rows.append([f(v, label=False) for (v, f) in zip(row, fmts)])
lines = [sep.join(row) for row in rows]
if omitted:
lines.append('... ({} rows omitted)'.format(omitted))
return '\n'.join([line.rstrip() for line in lines]) | Format table as text. | digital-assyriology-review/datascience/tables.py | as_text | ds-modules/NESTUD-190A | 6 | python | def as_text(self, max_rows=0, sep=' | '):
if ((not max_rows) or (max_rows > self.num_rows)):
max_rows = self.num_rows
omitted = max(0, (self.num_rows - max_rows))
labels = self._columns.keys()
fmts = self._get_column_formatters(max_rows, False)
rows = [[fmt(label, label=True) for (fmt, label) in zip(fmts, labels)]]
for row in itertools.islice(self.rows, max_rows):
rows.append([f(v, label=False) for (v, f) in zip(row, fmts)])
lines = [sep.join(row) for row in rows]
if omitted:
lines.append('... ({} rows omitted)'.format(omitted))
return '\n'.join([line.rstrip() for line in lines]) | def as_text(self, max_rows=0, sep=' | '):
if ((not max_rows) or (max_rows > self.num_rows)):
max_rows = self.num_rows
omitted = max(0, (self.num_rows - max_rows))
labels = self._columns.keys()
fmts = self._get_column_formatters(max_rows, False)
rows = [[fmt(label, label=True) for (fmt, label) in zip(fmts, labels)]]
for row in itertools.islice(self.rows, max_rows):
rows.append([f(v, label=False) for (v, f) in zip(row, fmts)])
lines = [sep.join(row) for row in rows]
if omitted:
lines.append('... ({} rows omitted)'.format(omitted))
return '\n'.join([line.rstrip() for line in lines])<|docstring|>Format table as text.<|endoftext|> |
f34ed48555ea0f5343106d1f68d4828e6474f19bcf4d756ec4ccc0af8ff65b50 | def as_html(self, max_rows=0):
'Format table as HTML.'
if ((not max_rows) or (max_rows > self.num_rows)):
max_rows = self.num_rows
omitted = max(0, (self.num_rows - max_rows))
labels = self.labels
lines = [(0, '<table border="1" class="dataframe">'), (1, '<thead>'), (2, '<tr>'), (3, ' '.join(((('<th>' + label) + '</th>') for label in labels))), (2, '</tr>'), (1, '</thead>'), (1, '<tbody>')]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [(2, '<tr>'), (3, ' '.join(((('<td>' + fmt(v, label=False)) + '</td>') for (v, fmt) in zip(row, fmts)))), (2, '</tr>'), (1, '</tbody>')]
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p'.format(omitted)))
return '\n'.join(((((4 * indent) * ' ') + text) for (indent, text) in lines)) | Format table as HTML. | digital-assyriology-review/datascience/tables.py | as_html | ds-modules/NESTUD-190A | 6 | python | def as_html(self, max_rows=0):
if ((not max_rows) or (max_rows > self.num_rows)):
max_rows = self.num_rows
omitted = max(0, (self.num_rows - max_rows))
labels = self.labels
lines = [(0, '<table border="1" class="dataframe">'), (1, '<thead>'), (2, '<tr>'), (3, ' '.join(((('<th>' + label) + '</th>') for label in labels))), (2, '</tr>'), (1, '</thead>'), (1, '<tbody>')]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [(2, '<tr>'), (3, ' '.join(((('<td>' + fmt(v, label=False)) + '</td>') for (v, fmt) in zip(row, fmts)))), (2, '</tr>'), (1, '</tbody>')]
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p'.format(omitted)))
return '\n'.join(((((4 * indent) * ' ') + text) for (indent, text) in lines)) | def as_html(self, max_rows=0):
if ((not max_rows) or (max_rows > self.num_rows)):
max_rows = self.num_rows
omitted = max(0, (self.num_rows - max_rows))
labels = self.labels
lines = [(0, '<table border="1" class="dataframe">'), (1, '<thead>'), (2, '<tr>'), (3, ' '.join(((('<th>' + label) + '</th>') for label in labels))), (2, '</tr>'), (1, '</thead>'), (1, '<tbody>')]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [(2, '<tr>'), (3, ' '.join(((('<td>' + fmt(v, label=False)) + '</td>') for (v, fmt) in zip(row, fmts)))), (2, '</tr>'), (1, '</tbody>')]
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p'.format(omitted)))
return '\n'.join(((((4 * indent) * ' ') + text) for (indent, text) in lines))<|docstring|>Format table as HTML.<|endoftext|> |
09959eccb0f1ea48c4e6a23bbb4a5fdd71dc2c7efaf3b70b4b09f8b62afe83f2 | def index_by(self, column_or_label):
'Return a dict keyed by values in a column that contains lists of\n rows corresponding to each value.\n '
column = self._get_column(column_or_label)
index = {}
for (key, row) in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index | Return a dict keyed by values in a column that contains lists of
rows corresponding to each value. | digital-assyriology-review/datascience/tables.py | index_by | ds-modules/NESTUD-190A | 6 | python | def index_by(self, column_or_label):
'Return a dict keyed by values in a column that contains lists of\n rows corresponding to each value.\n '
column = self._get_column(column_or_label)
index = {}
for (key, row) in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index | def index_by(self, column_or_label):
'Return a dict keyed by values in a column that contains lists of\n rows corresponding to each value.\n '
column = self._get_column(column_or_label)
index = {}
for (key, row) in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index<|docstring|>Return a dict keyed by values in a column that contains lists of
rows corresponding to each value.<|endoftext|> |
7f63f35685aa3d4e466bfe32c96e97c326ac665860b10260babc26d00958bf49 | def to_df(self):
'Convert the table to a Pandas DataFrame.'
return pandas.DataFrame(self._columns) | Convert the table to a Pandas DataFrame. | digital-assyriology-review/datascience/tables.py | to_df | ds-modules/NESTUD-190A | 6 | python | def to_df(self):
return pandas.DataFrame(self._columns) | def to_df(self):
return pandas.DataFrame(self._columns)<|docstring|>Convert the table to a Pandas DataFrame.<|endoftext|> |
843defce53fed80c3a5fb9287ab6820a5a649142615e4c37ddfd5bf605e14715 | def to_csv(self, filename):
"Creates a CSV file with the provided filename.\n\n The CSV is created in such a way that if we run\n ``table.to_csv('my_table.csv')`` we can recreate the same table with\n ``Table.read_table('my_table.csv')``.\n\n Args:\n ``filename`` (str): The filename of the output CSV file.\n\n Returns:\n None, outputs a file with name ``filename``.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> jobs.to_csv('my_table.csv') # doctest: +SKIP\n <outputs a file called my_table.csv in the current directory>\n "
self.to_df().to_csv(filename, index=False) | Creates a CSV file with the provided filename.
The CSV is created in such a way that if we run
``table.to_csv('my_table.csv')`` we can recreate the same table with
``Table.read_table('my_table.csv')``.
Args:
``filename`` (str): The filename of the output CSV file.
Returns:
None, outputs a file with name ``filename``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.to_csv('my_table.csv') # doctest: +SKIP
<outputs a file called my_table.csv in the current directory> | digital-assyriology-review/datascience/tables.py | to_csv | ds-modules/NESTUD-190A | 6 | python | def to_csv(self, filename):
"Creates a CSV file with the provided filename.\n\n The CSV is created in such a way that if we run\n ``table.to_csv('my_table.csv')`` we can recreate the same table with\n ``Table.read_table('my_table.csv')``.\n\n Args:\n ``filename`` (str): The filename of the output CSV file.\n\n Returns:\n None, outputs a file with name ``filename``.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> jobs.to_csv('my_table.csv') # doctest: +SKIP\n <outputs a file called my_table.csv in the current directory>\n "
self.to_df().to_csv(filename, index=False) | def to_csv(self, filename):
"Creates a CSV file with the provided filename.\n\n The CSV is created in such a way that if we run\n ``table.to_csv('my_table.csv')`` we can recreate the same table with\n ``Table.read_table('my_table.csv')``.\n\n Args:\n ``filename`` (str): The filename of the output CSV file.\n\n Returns:\n None, outputs a file with name ``filename``.\n\n >>> jobs = Table().with_columns(\n ... 'job', make_array('a', 'b', 'c', 'd'),\n ... 'wage', make_array(10, 20, 15, 8))\n >>> jobs\n job | wage\n a | 10\n b | 20\n c | 15\n d | 8\n >>> jobs.to_csv('my_table.csv') # doctest: +SKIP\n <outputs a file called my_table.csv in the current directory>\n "
self.to_df().to_csv(filename, index=False)<|docstring|>Creates a CSV file with the provided filename.
The CSV is created in such a way that if we run
``table.to_csv('my_table.csv')`` we can recreate the same table with
``Table.read_table('my_table.csv')``.
Args:
``filename`` (str): The filename of the output CSV file.
Returns:
None, outputs a file with name ``filename``.
>>> jobs = Table().with_columns(
... 'job', make_array('a', 'b', 'c', 'd'),
... 'wage', make_array(10, 20, 15, 8))
>>> jobs
job | wage
a | 10
b | 20
c | 15
d | 8
>>> jobs.to_csv('my_table.csv') # doctest: +SKIP
<outputs a file called my_table.csv in the current directory><|endoftext|> |
6f9bba8b4d4b79cfe266503bdf921fc69de0bfb4c9edd13331f1b174ea9839af | def to_array(self):
'Convert the table to a structured NumPy array.'
dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
arr = np.empty_like(self.columns[0], dt)
for label in self.labels:
arr[label] = self[label]
return arr | Convert the table to a structured NumPy array. | digital-assyriology-review/datascience/tables.py | to_array | ds-modules/NESTUD-190A | 6 | python | def to_array(self):
dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
arr = np.empty_like(self.columns[0], dt)
for label in self.labels:
arr[label] = self[label]
return arr | def to_array(self):
dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
arr = np.empty_like(self.columns[0], dt)
for label in self.labels:
arr[label] = self[label]
return arr<|docstring|>Convert the table to a structured NumPy array.<|endoftext|> |
8eac5c0241ee895d6084e6515abb38c5bd183bfd6dac1fa6da08ac78fa50eefb | def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
"Plot line charts for the table.\n\n Args:\n column_for_xticks (``str/array``): A column containing x-axis labels\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each plot will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.plot`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot\n for additional arguments that can be passed into vargs.\n Raises:\n ValueError -- Every selected column must be numerical.\n\n Returns:\n Returns a line plot (connected scatter). Each plot is labeled using\n the values in `column_for_xticks` and one plot is produced for all\n other columns in self (or for the columns designated by `select`).\n >>> table = Table().with_columns(\n ... 'days', make_array(0, 1, 2, 3, 4, 5),\n ... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),\n ... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))\n >>> table\n days | price | projection\n 0 | 90.5 | 90.75\n 1 | 90 | 82\n 2 | 83 | 82.5\n 3 | 95.5 | 82.5\n 4 | 82 | 83\n 5 | 82 | 82.5\n >>> table.plot('days') # doctest: +SKIP\n <line graph with days as x-axis and lines for price and projection>\n >>> table.plot('days', overlay=False) # doctest: +SKIP\n <line graph with days as x-axis and line for price>\n <line graph with days as x-axis and line for projection>\n >>> table.plot('days', 'price') # doctest: +SKIP\n <line graph with days as x-axis and line for price>\n "
options = self.default_options.copy()
options.update(vargs)
if (column_for_xticks is not None):
(x_data, y_labels) = self._split_column_and_labels(column_for_xticks)
x_label = self._as_label(column_for_xticks)
else:
(x_data, y_labels) = (None, self.labels)
x_label = None
if (select is not None):
y_labels = self._as_labels(select)
if (x_data is not None):
self = self.sort(x_data)
x_data = np.sort(x_data)
def draw(axis, label, color):
if (x_data is None):
axis.plot(self[label], color=color, **options)
else:
axis.plot(x_data, self[label], color=color, **options)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height) | Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
Returns a line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price> | digital-assyriology-review/datascience/tables.py | plot | ds-modules/NESTUD-190A | 6 | python | def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
"Plot line charts for the table.\n\n Args:\n column_for_xticks (``str/array``): A column containing x-axis labels\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each plot will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.plot`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot\n for additional arguments that can be passed into vargs.\n Raises:\n ValueError -- Every selected column must be numerical.\n\n Returns:\n Returns a line plot (connected scatter). Each plot is labeled using\n the values in `column_for_xticks` and one plot is produced for all\n other columns in self (or for the columns designated by `select`).\n >>> table = Table().with_columns(\n ... 'days', make_array(0, 1, 2, 3, 4, 5),\n ... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),\n ... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))\n >>> table\n days | price | projection\n 0 | 90.5 | 90.75\n 1 | 90 | 82\n 2 | 83 | 82.5\n 3 | 95.5 | 82.5\n 4 | 82 | 83\n 5 | 82 | 82.5\n >>> table.plot('days') # doctest: +SKIP\n <line graph with days as x-axis and lines for price and projection>\n >>> table.plot('days', overlay=False) # doctest: +SKIP\n <line graph with days as x-axis and line for price>\n <line graph with days as x-axis and line for projection>\n >>> table.plot('days', 'price') # doctest: +SKIP\n <line graph with days as x-axis and line for price>\n "
options = self.default_options.copy()
options.update(vargs)
if (column_for_xticks is not None):
(x_data, y_labels) = self._split_column_and_labels(column_for_xticks)
x_label = self._as_label(column_for_xticks)
else:
(x_data, y_labels) = (None, self.labels)
x_label = None
if (select is not None):
y_labels = self._as_labels(select)
if (x_data is not None):
self = self.sort(x_data)
x_data = np.sort(x_data)
def draw(axis, label, color):
if (x_data is None):
axis.plot(self[label], color=color, **options)
else:
axis.plot(x_data, self[label], color=color, **options)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height) | def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
"Plot line charts for the table.\n\n Args:\n column_for_xticks (``str/array``): A column containing x-axis labels\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each plot will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.plot`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot\n for additional arguments that can be passed into vargs.\n Raises:\n ValueError -- Every selected column must be numerical.\n\n Returns:\n Returns a line plot (connected scatter). Each plot is labeled using\n the values in `column_for_xticks` and one plot is produced for all\n other columns in self (or for the columns designated by `select`).\n >>> table = Table().with_columns(\n ... 'days', make_array(0, 1, 2, 3, 4, 5),\n ... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),\n ... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))\n >>> table\n days | price | projection\n 0 | 90.5 | 90.75\n 1 | 90 | 82\n 2 | 83 | 82.5\n 3 | 95.5 | 82.5\n 4 | 82 | 83\n 5 | 82 | 82.5\n >>> table.plot('days') # doctest: +SKIP\n <line graph with days as x-axis and lines for price and projection>\n >>> table.plot('days', overlay=False) # doctest: +SKIP\n <line graph with days as x-axis and line for price>\n <line graph with days as x-axis and line for projection>\n >>> table.plot('days', 'price') # doctest: +SKIP\n <line graph with days as x-axis and line for price>\n "
options = self.default_options.copy()
options.update(vargs)
if (column_for_xticks is not None):
(x_data, y_labels) = self._split_column_and_labels(column_for_xticks)
x_label = self._as_label(column_for_xticks)
else:
(x_data, y_labels) = (None, self.labels)
x_label = None
if (select is not None):
y_labels = self._as_labels(select)
if (x_data is not None):
self = self.sort(x_data)
x_data = np.sort(x_data)
def draw(axis, label, color):
if (x_data is None):
axis.plot(self[label], color=color, **options)
else:
axis.plot(x_data, self[label], color=color, **options)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height)<|docstring|>Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
Returns a line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price><|endoftext|> |
1a382f10d132ef9366cebe707386ab7a40c6f1643eb4b6e01d90b31ad6d53c03 | def bar(self, column_for_categories=None, select=None, overlay=True, width=6, height=4, **vargs):
'Plot bar charts for the table.\n\n Each plot is labeled using the values in `column_for_categories` and\n one plot is produced for every other column (or for the columns\n designated by `select`).\n\n Every selected except column for `column_for_categories` must be numerical.\n\n Args:\n column_for_categories (str): A column containing x-axis categories\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.bar`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar\n for additional arguments that can be passed into vargs.\n '
options = self.default_options.copy()
options.update(vargs)
(xticks, labels) = self._split_column_and_labels(column_for_categories)
if (select is not None):
labels = self._as_labels(select)
index = np.arange(self.num_rows)
def draw(axis, label, color):
axis.bar((index - 0.5), self[label], 1.0, color=color, **options)
def annotate(axis, ticks):
if (ticks is not None):
tick_labels = [(ticks[int(l)] if (0 <= l < len(ticks)) else '') for l in axis.get_xticks()]
axis.set_xticklabels(tick_labels, stretch='ultra-condensed')
self._visualize(column_for_categories, labels, xticks, overlay, draw, annotate, width=width, height=height) | Plot bar charts for the table.
Each plot is labeled using the values in `column_for_categories` and
one plot is produced for every other column (or for the columns
designated by `select`).
Every selected except column for `column_for_categories` must be numerical.
Args:
column_for_categories (str): A column containing x-axis categories
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs. | digital-assyriology-review/datascience/tables.py | bar | ds-modules/NESTUD-190A | 6 | python | def bar(self, column_for_categories=None, select=None, overlay=True, width=6, height=4, **vargs):
'Plot bar charts for the table.\n\n Each plot is labeled using the values in `column_for_categories` and\n one plot is produced for every other column (or for the columns\n designated by `select`).\n\n Every selected except column for `column_for_categories` must be numerical.\n\n Args:\n column_for_categories (str): A column containing x-axis categories\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.bar`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar\n for additional arguments that can be passed into vargs.\n '
options = self.default_options.copy()
options.update(vargs)
(xticks, labels) = self._split_column_and_labels(column_for_categories)
if (select is not None):
labels = self._as_labels(select)
index = np.arange(self.num_rows)
def draw(axis, label, color):
axis.bar((index - 0.5), self[label], 1.0, color=color, **options)
def annotate(axis, ticks):
if (ticks is not None):
tick_labels = [(ticks[int(l)] if (0 <= l < len(ticks)) else ) for l in axis.get_xticks()]
axis.set_xticklabels(tick_labels, stretch='ultra-condensed')
self._visualize(column_for_categories, labels, xticks, overlay, draw, annotate, width=width, height=height) | def bar(self, column_for_categories=None, select=None, overlay=True, width=6, height=4, **vargs):
'Plot bar charts for the table.\n\n Each plot is labeled using the values in `column_for_categories` and\n one plot is produced for every other column (or for the columns\n designated by `select`).\n\n Every selected except column for `column_for_categories` must be numerical.\n\n Args:\n column_for_categories (str): A column containing x-axis categories\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.bar`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar\n for additional arguments that can be passed into vargs.\n '
options = self.default_options.copy()
options.update(vargs)
(xticks, labels) = self._split_column_and_labels(column_for_categories)
if (select is not None):
labels = self._as_labels(select)
index = np.arange(self.num_rows)
def draw(axis, label, color):
axis.bar((index - 0.5), self[label], 1.0, color=color, **options)
def annotate(axis, ticks):
if (ticks is not None):
tick_labels = [(ticks[int(l)] if (0 <= l < len(ticks)) else ) for l in axis.get_xticks()]
axis.set_xticklabels(tick_labels, stretch='ultra-condensed')
self._visualize(column_for_categories, labels, xticks, overlay, draw, annotate, width=width, height=height)<|docstring|>Plot bar charts for the table.
Each plot is labeled using the values in `column_for_categories` and
one plot is produced for every other column (or for the columns
designated by `select`).
Every selected except column for `column_for_categories` must be numerical.
Args:
column_for_categories (str): A column containing x-axis categories
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.bar`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
for additional arguments that can be passed into vargs.<|endoftext|> |
014cb6906b6c03f876ad514e76a91f91b758ce43b0b58401d688ecbc31491685 | def barh(self, column_for_categories=None, select=None, overlay=True, width=6, **vargs):
"Plot horizontal bar charts for the table.\n\n Args:\n ``column_for_categories`` (``str``): A column containing y-axis categories\n used to create buckets for bar chart.\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.barh`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh\n for additional arguments that can be passed into vargs.\n\n Raises:\n ValueError -- Every selected except column for ``column_for_categories``\n must be numerical.\n\n Returns:\n Horizontal bar graph with buckets specified by ``column_for_categories``.\n Each plot is labeled using the values in ``column_for_categories``\n and one plot is produced for every other column (or for the columns\n designated by ``select``).\n\n >>> t = Table().with_columns(\n ... 'Furniture', make_array('chairs', 'tables', 'desks'),\n ... 'Count', make_array(6, 1, 2),\n ... 'Price', make_array(10, 20, 30)\n ... )\n >>> t\n Furniture | Count | Price\n chairs | 6 | 10\n tables | 1 | 20\n desks | 2 | 30\n >>> furniture_table.barh('Furniture') # doctest: +SKIP\n <bar graph with furniture as categories and bars for count and price>\n >>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP\n <bar graph with furniture as categories and bars for price>\n >>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP\n <bar graph with furniture as categories and bars for count and price>\n "
options = self.default_options.copy()
options.update(vargs)
(yticks, labels) = self._split_column_and_labels(column_for_categories)
if (select is not None):
labels = self._as_labels(select)
n = len(labels)
index = np.arange(self.num_rows)
margin = 0.1
bwidth = (1 - (2 * margin))
if overlay:
bwidth /= len(labels)
if ('height' in options):
height = options.pop('height')
else:
height = max(4, (len(index) / 2))
def draw(axis, label, color):
if overlay:
ypos = ((index + margin) + (((1 - (2 * margin)) * ((n - 1) - labels.index(label))) / n))
else:
ypos = index
axis.barh(ypos, self[label][::(- 1)], bwidth, color=color, **options)
ylabel = self._as_label(column_for_categories)
def annotate(axis, ticks):
axis.set_yticks((index + 0.5))
axis.set_yticklabels(ticks[::(- 1)], stretch='ultra-condensed')
axis.set_xlabel(axis.get_ylabel())
axis.set_ylabel(ylabel)
self._visualize('', labels, yticks, overlay, draw, annotate, width=width, height=height) | Plot horizontal bar charts for the table.
Args:
``column_for_categories`` (``str``): A column containing y-axis categories
used to create buckets for bar chart.
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.barh`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected except column for ``column_for_categories``
must be numerical.
Returns:
Horizontal bar graph with buckets specified by ``column_for_categories``.
Each plot is labeled using the values in ``column_for_categories``
and one plot is produced for every other column (or for the columns
designated by ``select``).
>>> t = Table().with_columns(
... 'Furniture', make_array('chairs', 'tables', 'desks'),
... 'Count', make_array(6, 1, 2),
... 'Price', make_array(10, 20, 30)
... )
>>> t
Furniture | Count | Price
chairs | 6 | 10
tables | 1 | 20
desks | 2 | 30
>>> furniture_table.barh('Furniture') # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
>>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP
<bar graph with furniture as categories and bars for price>
>>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price> | digital-assyriology-review/datascience/tables.py | barh | ds-modules/NESTUD-190A | 6 | python | def barh(self, column_for_categories=None, select=None, overlay=True, width=6, **vargs):
"Plot horizontal bar charts for the table.\n\n Args:\n ``column_for_categories`` (``str``): A column containing y-axis categories\n used to create buckets for bar chart.\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.barh`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh\n for additional arguments that can be passed into vargs.\n\n Raises:\n ValueError -- Every selected except column for ``column_for_categories``\n must be numerical.\n\n Returns:\n Horizontal bar graph with buckets specified by ``column_for_categories``.\n Each plot is labeled using the values in ``column_for_categories``\n and one plot is produced for every other column (or for the columns\n designated by ``select``).\n\n >>> t = Table().with_columns(\n ... 'Furniture', make_array('chairs', 'tables', 'desks'),\n ... 'Count', make_array(6, 1, 2),\n ... 'Price', make_array(10, 20, 30)\n ... )\n >>> t\n Furniture | Count | Price\n chairs | 6 | 10\n tables | 1 | 20\n desks | 2 | 30\n >>> furniture_table.barh('Furniture') # doctest: +SKIP\n <bar graph with furniture as categories and bars for count and price>\n >>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP\n <bar graph with furniture as categories and bars for price>\n >>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP\n <bar graph with furniture as categories and bars for count and price>\n "
options = self.default_options.copy()
options.update(vargs)
(yticks, labels) = self._split_column_and_labels(column_for_categories)
if (select is not None):
labels = self._as_labels(select)
n = len(labels)
index = np.arange(self.num_rows)
margin = 0.1
bwidth = (1 - (2 * margin))
if overlay:
bwidth /= len(labels)
if ('height' in options):
height = options.pop('height')
else:
height = max(4, (len(index) / 2))
def draw(axis, label, color):
if overlay:
ypos = ((index + margin) + (((1 - (2 * margin)) * ((n - 1) - labels.index(label))) / n))
else:
ypos = index
axis.barh(ypos, self[label][::(- 1)], bwidth, color=color, **options)
ylabel = self._as_label(column_for_categories)
def annotate(axis, ticks):
axis.set_yticks((index + 0.5))
axis.set_yticklabels(ticks[::(- 1)], stretch='ultra-condensed')
axis.set_xlabel(axis.get_ylabel())
axis.set_ylabel(ylabel)
self._visualize(, labels, yticks, overlay, draw, annotate, width=width, height=height) | def barh(self, column_for_categories=None, select=None, overlay=True, width=6, **vargs):
"Plot horizontal bar charts for the table.\n\n Args:\n ``column_for_categories`` (``str``): A column containing y-axis categories\n used to create buckets for bar chart.\n\n Kwargs:\n overlay (bool): create a chart with one color per data column;\n if False, each will be displayed separately.\n\n vargs: Additional arguments that get passed into `plt.barh`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh\n for additional arguments that can be passed into vargs.\n\n Raises:\n ValueError -- Every selected except column for ``column_for_categories``\n must be numerical.\n\n Returns:\n Horizontal bar graph with buckets specified by ``column_for_categories``.\n Each plot is labeled using the values in ``column_for_categories``\n and one plot is produced for every other column (or for the columns\n designated by ``select``).\n\n >>> t = Table().with_columns(\n ... 'Furniture', make_array('chairs', 'tables', 'desks'),\n ... 'Count', make_array(6, 1, 2),\n ... 'Price', make_array(10, 20, 30)\n ... )\n >>> t\n Furniture | Count | Price\n chairs | 6 | 10\n tables | 1 | 20\n desks | 2 | 30\n >>> furniture_table.barh('Furniture') # doctest: +SKIP\n <bar graph with furniture as categories and bars for count and price>\n >>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP\n <bar graph with furniture as categories and bars for price>\n >>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP\n <bar graph with furniture as categories and bars for count and price>\n "
options = self.default_options.copy()
options.update(vargs)
(yticks, labels) = self._split_column_and_labels(column_for_categories)
if (select is not None):
labels = self._as_labels(select)
n = len(labels)
index = np.arange(self.num_rows)
margin = 0.1
bwidth = (1 - (2 * margin))
if overlay:
bwidth /= len(labels)
if ('height' in options):
height = options.pop('height')
else:
height = max(4, (len(index) / 2))
def draw(axis, label, color):
if overlay:
ypos = ((index + margin) + (((1 - (2 * margin)) * ((n - 1) - labels.index(label))) / n))
else:
ypos = index
axis.barh(ypos, self[label][::(- 1)], bwidth, color=color, **options)
ylabel = self._as_label(column_for_categories)
def annotate(axis, ticks):
axis.set_yticks((index + 0.5))
axis.set_yticklabels(ticks[::(- 1)], stretch='ultra-condensed')
axis.set_xlabel(axis.get_ylabel())
axis.set_ylabel(ylabel)
self._visualize(, labels, yticks, overlay, draw, annotate, width=width, height=height)<|docstring|>Plot horizontal bar charts for the table.
Args:
``column_for_categories`` (``str``): A column containing y-axis categories
used to create buckets for bar chart.
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each will be displayed separately.
vargs: Additional arguments that get passed into `plt.barh`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected except column for ``column_for_categories``
must be numerical.
Returns:
Horizontal bar graph with buckets specified by ``column_for_categories``.
Each plot is labeled using the values in ``column_for_categories``
and one plot is produced for every other column (or for the columns
designated by ``select``).
>>> t = Table().with_columns(
... 'Furniture', make_array('chairs', 'tables', 'desks'),
... 'Count', make_array(6, 1, 2),
... 'Price', make_array(10, 20, 30)
... )
>>> t
Furniture | Count | Price
chairs | 6 | 10
tables | 1 | 20
desks | 2 | 30
>>> furniture_table.barh('Furniture') # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price>
>>> furniture_table.barh('Furniture', 'Price') # doctest: +SKIP
<bar graph with furniture as categories and bars for price>
>>> furniture_table.barh('Furniture', make_array(1, 2)) # doctest: +SKIP
<bar graph with furniture as categories and bars for count and price><|endoftext|> |
75ffa986ddc648f85cca2285e15b3e437145846288961f04fd8f5230c25e3a3f | def scatter(self, column_for_x, select=None, overlay=True, fit_line=False, colors=None, labels=None, sizes=None, width=5, height=5, s=20, **vargs):
"Creates scatterplots, optionally adding a line of best fit.\n\n Args:\n ``column_for_x`` (``str``): The column to use for the x-axis values\n and label of the scatter plots.\n\n Kwargs:\n ``overlay`` (``bool``): If true, creates a chart with one color\n per data column; if False, each plot will be displayed separately.\n\n ``fit_line`` (``bool``): draw a line of best fit for each set of points.\n\n ``vargs``: Additional arguments that get passed into `plt.scatter`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter\n for additional arguments that can be passed into vargs. These\n include: `marker` and `norm`, to name a couple.\n\n ``colors``: A column of categories to be used for coloring dots.\n\n ``labels``: A column of text labels to annotate dots.\n\n ``sizes``: A column of values to set the relative areas of dots.\n\n ``s``: Size of dots. If sizes is also provided, then dots will be\n in the range 0 to 2 * s.\n\n Raises:\n ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical\n\n Returns:\n Scatter plot of values of ``column_for_x`` plotted against\n values for all other columns in self. Each plot uses the values in\n `column_for_x` for horizontal positions. One plot is produced for\n all other columns in self as y (or for the columns designated by\n `select`).\n\n\n >>> table = Table().with_columns(\n ... 'x', make_array(9, 3, 3, 1),\n ... 'y', make_array(1, 2, 2, 10),\n ... 'z', make_array(3, 4, 5, 6))\n >>> table\n x | y | z\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table.scatter('x') # doctest: +SKIP\n <scatterplot of values in y and z on x>\n\n >>> table.scatter('x', overlay=False) # doctest: +SKIP\n <scatterplot of values in y on x>\n <scatterplot of values in z on x>\n\n >>> table.scatter('x', fit_line=True) # doctest: +SKIP\n <scatterplot of values in y and z on x with lines of best fit>\n "
options = self.default_options.copy()
options.update(vargs)
(x_data, y_labels) = self._split_column_and_labels(column_for_x)
if (colors is not None):
y_labels.remove(self._as_label(colors))
if (sizes is not None):
y_labels.remove(self._as_label(sizes))
if (select is not None):
y_labels = self._as_labels(select)
if ((len(y_labels) > 1) and (colors is not None) and overlay):
warnings.warn('Colors and overlay are incompatible in a scatter')
overlay = False
def draw(axis, label, color):
if (colors is not None):
colored = sorted(np.unique(self.column(colors)))
color_list = list(itertools.islice(itertools.cycle(self.chart_colors), len(colored)))
color_map = collections.OrderedDict(zip(colored, color_list))
color = [color_map[x] for x in self.column(colors)]
elif ('color' in options):
color = options.pop('color')
y_data = self[label]
if (sizes is not None):
max_size = (max(self[sizes]) ** 0.5)
size = (((2 * s) * (self[sizes] ** 0.5)) / max_size)
else:
size = s
axis.scatter(x_data, y_data, color=color, s=size, **options)
if fit_line:
(m, b) = np.polyfit(x_data, self[label], 1)
(minx, maxx) = (np.min(x_data), np.max(x_data))
axis.plot([minx, maxx], [((m * minx) + b), ((m * maxx) + b)], color=color)
if (labels is not None):
for (x, y, label) in zip(x_data, y_data, self[labels]):
axis.annotate(label, (x, y), xytext=((- 20), 20), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.7), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0', color='black'))
if (colors is not None):
import matplotlib.patches as mpatches
patches = [mpatches.Patch(color=c, label=v) for (v, c) in color_map.items()]
axis.legend(loc=2, bbox_to_anchor=(1.05, 1), handles=patches)
x_label = self._as_label(column_for_x)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height) | Creates scatterplots, optionally adding a line of best fit.
Args:
``column_for_x`` (``str``): The column to use for the x-axis values
and label of the scatter plots.
Kwargs:
``overlay`` (``bool``): If true, creates a chart with one color
per data column; if False, each plot will be displayed separately.
``fit_line`` (``bool``): draw a line of best fit for each set of points.
``vargs``: Additional arguments that get passed into `plt.scatter`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
for additional arguments that can be passed into vargs. These
include: `marker` and `norm`, to name a couple.
``colors``: A column of categories to be used for coloring dots.
``labels``: A column of text labels to annotate dots.
``sizes``: A column of values to set the relative areas of dots.
``s``: Size of dots. If sizes is also provided, then dots will be
in the range 0 to 2 * s.
Raises:
ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical
Returns:
Scatter plot of values of ``column_for_x`` plotted against
values for all other columns in self. Each plot uses the values in
`column_for_x` for horizontal positions. One plot is produced for
all other columns in self as y (or for the columns designated by
`select`).
>>> table = Table().with_columns(
... 'x', make_array(9, 3, 3, 1),
... 'y', make_array(1, 2, 2, 10),
... 'z', make_array(3, 4, 5, 6))
>>> table
x | y | z
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table.scatter('x') # doctest: +SKIP
<scatterplot of values in y and z on x>
>>> table.scatter('x', overlay=False) # doctest: +SKIP
<scatterplot of values in y on x>
<scatterplot of values in z on x>
>>> table.scatter('x', fit_line=True) # doctest: +SKIP
<scatterplot of values in y and z on x with lines of best fit> | digital-assyriology-review/datascience/tables.py | scatter | ds-modules/NESTUD-190A | 6 | python | def scatter(self, column_for_x, select=None, overlay=True, fit_line=False, colors=None, labels=None, sizes=None, width=5, height=5, s=20, **vargs):
"Creates scatterplots, optionally adding a line of best fit.\n\n Args:\n ``column_for_x`` (``str``): The column to use for the x-axis values\n and label of the scatter plots.\n\n Kwargs:\n ``overlay`` (``bool``): If true, creates a chart with one color\n per data column; if False, each plot will be displayed separately.\n\n ``fit_line`` (``bool``): draw a line of best fit for each set of points.\n\n ``vargs``: Additional arguments that get passed into `plt.scatter`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter\n for additional arguments that can be passed into vargs. These\n include: `marker` and `norm`, to name a couple.\n\n ``colors``: A column of categories to be used for coloring dots.\n\n ``labels``: A column of text labels to annotate dots.\n\n ``sizes``: A column of values to set the relative areas of dots.\n\n ``s``: Size of dots. If sizes is also provided, then dots will be\n in the range 0 to 2 * s.\n\n Raises:\n ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical\n\n Returns:\n Scatter plot of values of ``column_for_x`` plotted against\n values for all other columns in self. Each plot uses the values in\n `column_for_x` for horizontal positions. One plot is produced for\n all other columns in self as y (or for the columns designated by\n `select`).\n\n\n >>> table = Table().with_columns(\n ... 'x', make_array(9, 3, 3, 1),\n ... 'y', make_array(1, 2, 2, 10),\n ... 'z', make_array(3, 4, 5, 6))\n >>> table\n x | y | z\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table.scatter('x') # doctest: +SKIP\n <scatterplot of values in y and z on x>\n\n >>> table.scatter('x', overlay=False) # doctest: +SKIP\n <scatterplot of values in y on x>\n <scatterplot of values in z on x>\n\n >>> table.scatter('x', fit_line=True) # doctest: +SKIP\n <scatterplot of values in y and z on x with lines of best fit>\n "
options = self.default_options.copy()
options.update(vargs)
(x_data, y_labels) = self._split_column_and_labels(column_for_x)
if (colors is not None):
y_labels.remove(self._as_label(colors))
if (sizes is not None):
y_labels.remove(self._as_label(sizes))
if (select is not None):
y_labels = self._as_labels(select)
if ((len(y_labels) > 1) and (colors is not None) and overlay):
warnings.warn('Colors and overlay are incompatible in a scatter')
overlay = False
def draw(axis, label, color):
if (colors is not None):
colored = sorted(np.unique(self.column(colors)))
color_list = list(itertools.islice(itertools.cycle(self.chart_colors), len(colored)))
color_map = collections.OrderedDict(zip(colored, color_list))
color = [color_map[x] for x in self.column(colors)]
elif ('color' in options):
color = options.pop('color')
y_data = self[label]
if (sizes is not None):
max_size = (max(self[sizes]) ** 0.5)
size = (((2 * s) * (self[sizes] ** 0.5)) / max_size)
else:
size = s
axis.scatter(x_data, y_data, color=color, s=size, **options)
if fit_line:
(m, b) = np.polyfit(x_data, self[label], 1)
(minx, maxx) = (np.min(x_data), np.max(x_data))
axis.plot([minx, maxx], [((m * minx) + b), ((m * maxx) + b)], color=color)
if (labels is not None):
for (x, y, label) in zip(x_data, y_data, self[labels]):
axis.annotate(label, (x, y), xytext=((- 20), 20), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.7), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0', color='black'))
if (colors is not None):
import matplotlib.patches as mpatches
patches = [mpatches.Patch(color=c, label=v) for (v, c) in color_map.items()]
axis.legend(loc=2, bbox_to_anchor=(1.05, 1), handles=patches)
x_label = self._as_label(column_for_x)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height) | def scatter(self, column_for_x, select=None, overlay=True, fit_line=False, colors=None, labels=None, sizes=None, width=5, height=5, s=20, **vargs):
"Creates scatterplots, optionally adding a line of best fit.\n\n Args:\n ``column_for_x`` (``str``): The column to use for the x-axis values\n and label of the scatter plots.\n\n Kwargs:\n ``overlay`` (``bool``): If true, creates a chart with one color\n per data column; if False, each plot will be displayed separately.\n\n ``fit_line`` (``bool``): draw a line of best fit for each set of points.\n\n ``vargs``: Additional arguments that get passed into `plt.scatter`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter\n for additional arguments that can be passed into vargs. These\n include: `marker` and `norm`, to name a couple.\n\n ``colors``: A column of categories to be used for coloring dots.\n\n ``labels``: A column of text labels to annotate dots.\n\n ``sizes``: A column of values to set the relative areas of dots.\n\n ``s``: Size of dots. If sizes is also provided, then dots will be\n in the range 0 to 2 * s.\n\n Raises:\n ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical\n\n Returns:\n Scatter plot of values of ``column_for_x`` plotted against\n values for all other columns in self. Each plot uses the values in\n `column_for_x` for horizontal positions. One plot is produced for\n all other columns in self as y (or for the columns designated by\n `select`).\n\n\n >>> table = Table().with_columns(\n ... 'x', make_array(9, 3, 3, 1),\n ... 'y', make_array(1, 2, 2, 10),\n ... 'z', make_array(3, 4, 5, 6))\n >>> table\n x | y | z\n 9 | 1 | 3\n 3 | 2 | 4\n 3 | 2 | 5\n 1 | 10 | 6\n >>> table.scatter('x') # doctest: +SKIP\n <scatterplot of values in y and z on x>\n\n >>> table.scatter('x', overlay=False) # doctest: +SKIP\n <scatterplot of values in y on x>\n <scatterplot of values in z on x>\n\n >>> table.scatter('x', fit_line=True) # doctest: +SKIP\n <scatterplot of values in y and z on x with lines of best fit>\n "
options = self.default_options.copy()
options.update(vargs)
(x_data, y_labels) = self._split_column_and_labels(column_for_x)
if (colors is not None):
y_labels.remove(self._as_label(colors))
if (sizes is not None):
y_labels.remove(self._as_label(sizes))
if (select is not None):
y_labels = self._as_labels(select)
if ((len(y_labels) > 1) and (colors is not None) and overlay):
warnings.warn('Colors and overlay are incompatible in a scatter')
overlay = False
def draw(axis, label, color):
if (colors is not None):
colored = sorted(np.unique(self.column(colors)))
color_list = list(itertools.islice(itertools.cycle(self.chart_colors), len(colored)))
color_map = collections.OrderedDict(zip(colored, color_list))
color = [color_map[x] for x in self.column(colors)]
elif ('color' in options):
color = options.pop('color')
y_data = self[label]
if (sizes is not None):
max_size = (max(self[sizes]) ** 0.5)
size = (((2 * s) * (self[sizes] ** 0.5)) / max_size)
else:
size = s
axis.scatter(x_data, y_data, color=color, s=size, **options)
if fit_line:
(m, b) = np.polyfit(x_data, self[label], 1)
(minx, maxx) = (np.min(x_data), np.max(x_data))
axis.plot([minx, maxx], [((m * minx) + b), ((m * maxx) + b)], color=color)
if (labels is not None):
for (x, y, label) in zip(x_data, y_data, self[labels]):
axis.annotate(label, (x, y), xytext=((- 20), 20), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.7), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0', color='black'))
if (colors is not None):
import matplotlib.patches as mpatches
patches = [mpatches.Patch(color=c, label=v) for (v, c) in color_map.items()]
axis.legend(loc=2, bbox_to_anchor=(1.05, 1), handles=patches)
x_label = self._as_label(column_for_x)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height)<|docstring|>Creates scatterplots, optionally adding a line of best fit.
Args:
``column_for_x`` (``str``): The column to use for the x-axis values
and label of the scatter plots.
Kwargs:
``overlay`` (``bool``): If true, creates a chart with one color
per data column; if False, each plot will be displayed separately.
``fit_line`` (``bool``): draw a line of best fit for each set of points.
``vargs``: Additional arguments that get passed into `plt.scatter`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
for additional arguments that can be passed into vargs. These
include: `marker` and `norm`, to name a couple.
``colors``: A column of categories to be used for coloring dots.
``labels``: A column of text labels to annotate dots.
``sizes``: A column of values to set the relative areas of dots.
``s``: Size of dots. If sizes is also provided, then dots will be
in the range 0 to 2 * s.
Raises:
ValueError -- Every column, ``column_for_x`` or ``select``, must be numerical
Returns:
Scatter plot of values of ``column_for_x`` plotted against
values for all other columns in self. Each plot uses the values in
`column_for_x` for horizontal positions. One plot is produced for
all other columns in self as y (or for the columns designated by
`select`).
>>> table = Table().with_columns(
... 'x', make_array(9, 3, 3, 1),
... 'y', make_array(1, 2, 2, 10),
... 'z', make_array(3, 4, 5, 6))
>>> table
x | y | z
9 | 1 | 3
3 | 2 | 4
3 | 2 | 5
1 | 10 | 6
>>> table.scatter('x') # doctest: +SKIP
<scatterplot of values in y and z on x>
>>> table.scatter('x', overlay=False) # doctest: +SKIP
<scatterplot of values in y on x>
<scatterplot of values in z on x>
>>> table.scatter('x', fit_line=True) # doctest: +SKIP
<scatterplot of values in y and z on x with lines of best fit><|endoftext|> |
37f6c73f87293b0029305ffa5f88eb5b0905a4638f711e60325e91af25d49086 | def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):
'Generic visualization that overlays or separates the draw function.\n\n Raises:\n ValueError: The Table contains non-numerical values in columns\n other than `column_for_categories`\n '
for label in y_labels:
if (not all((isinstance(x, numbers.Real) for x in self[label]))):
raise ValueError("The column '{0}' contains non-numerical values. A plot cannot be drawn for this column.".format(label))
n = len(y_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if (overlay and (n > 1)):
(_, axis) = plt.subplots(figsize=(width, height))
if (x_label is not None):
axis.set_xlabel(x_label)
for (label, color) in zip(y_labels, colors):
draw(axis, label, color)
if (ticks is not None):
annotate(axis, ticks)
axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
(fig, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
if (not isinstance(axes, collections.Iterable)):
axes = [axes]
for (axis, y_label, color) in zip(axes, y_labels, colors):
draw(axis, y_label, color)
axis.set_ylabel(y_label, fontsize=16)
if (x_label is not None):
axis.set_xlabel(x_label, fontsize=16)
if (ticks is not None):
annotate(axis, ticks)
type(self).plots.append(axis) | Generic visualization that overlays or separates the draw function.
Raises:
ValueError: The Table contains non-numerical values in columns
other than `column_for_categories` | digital-assyriology-review/datascience/tables.py | _visualize | ds-modules/NESTUD-190A | 6 | python | def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):
'Generic visualization that overlays or separates the draw function.\n\n Raises:\n ValueError: The Table contains non-numerical values in columns\n other than `column_for_categories`\n '
for label in y_labels:
if (not all((isinstance(x, numbers.Real) for x in self[label]))):
raise ValueError("The column '{0}' contains non-numerical values. A plot cannot be drawn for this column.".format(label))
n = len(y_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if (overlay and (n > 1)):
(_, axis) = plt.subplots(figsize=(width, height))
if (x_label is not None):
axis.set_xlabel(x_label)
for (label, color) in zip(y_labels, colors):
draw(axis, label, color)
if (ticks is not None):
annotate(axis, ticks)
axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
(fig, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
if (not isinstance(axes, collections.Iterable)):
axes = [axes]
for (axis, y_label, color) in zip(axes, y_labels, colors):
draw(axis, y_label, color)
axis.set_ylabel(y_label, fontsize=16)
if (x_label is not None):
axis.set_xlabel(x_label, fontsize=16)
if (ticks is not None):
annotate(axis, ticks)
type(self).plots.append(axis) | def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):
'Generic visualization that overlays or separates the draw function.\n\n Raises:\n ValueError: The Table contains non-numerical values in columns\n other than `column_for_categories`\n '
for label in y_labels:
if (not all((isinstance(x, numbers.Real) for x in self[label]))):
raise ValueError("The column '{0}' contains non-numerical values. A plot cannot be drawn for this column.".format(label))
n = len(y_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if (overlay and (n > 1)):
(_, axis) = plt.subplots(figsize=(width, height))
if (x_label is not None):
axis.set_xlabel(x_label)
for (label, color) in zip(y_labels, colors):
draw(axis, label, color)
if (ticks is not None):
annotate(axis, ticks)
axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
(fig, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
if (not isinstance(axes, collections.Iterable)):
axes = [axes]
for (axis, y_label, color) in zip(axes, y_labels, colors):
draw(axis, y_label, color)
axis.set_ylabel(y_label, fontsize=16)
if (x_label is not None):
axis.set_xlabel(x_label, fontsize=16)
if (ticks is not None):
annotate(axis, ticks)
type(self).plots.append(axis)<|docstring|>Generic visualization that overlays or separates the draw function.
Raises:
ValueError: The Table contains non-numerical values in columns
other than `column_for_categories`<|endoftext|> |
15a48f1107e71731b29ff722d84a7535b4eab008d6ef35c01cfea9e91903cf6f | def _split_column_and_labels(self, column_or_label):
'Return the specified column and labels of other columns.'
column = (None if (column_or_label is None) else self._get_column(column_or_label))
labels = [label for (i, label) in enumerate(self.labels) if (column_or_label not in (i, label))]
return (column, labels) | Return the specified column and labels of other columns. | digital-assyriology-review/datascience/tables.py | _split_column_and_labels | ds-modules/NESTUD-190A | 6 | python | def _split_column_and_labels(self, column_or_label):
column = (None if (column_or_label is None) else self._get_column(column_or_label))
labels = [label for (i, label) in enumerate(self.labels) if (column_or_label not in (i, label))]
return (column, labels) | def _split_column_and_labels(self, column_or_label):
column = (None if (column_or_label is None) else self._get_column(column_or_label))
labels = [label for (i, label) in enumerate(self.labels) if (column_or_label not in (i, label))]
return (column, labels)<|docstring|>Return the specified column and labels of other columns.<|endoftext|> |
f1b21bac42707973ec79aafaff0b8a711241d6b8e18aa963167fdfc0f8cd4360 | def pivot_hist(self, pivot_column_label, value_column_label, overlay=True, width=6, height=4, **vargs):
'Draw histograms of each category in a column.'
pvt_labels = np.unique(self[pivot_column_label])
pvt_columns = [self[value_column_label][np.where((self[pivot_column_label] == pivot))] for pivot in pvt_labels]
n = len(pvt_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if overlay:
plt.figure(figsize=(width, height))
(vals, bins, patches) = plt.hist(pvt_columns, color=colors, **vargs)
plt.legend(pvt_labels)
else:
(_, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
vals = []
bins = None
for (axis, label, column, color) in zip(axes, pvt_labels, pvt_columns, colors):
if isinstance(bins, np.ndarray):
(avals, abins, patches) = axis.hist(column, color=color, bins=bins, **vargs)
else:
(avals, abins, patches) = axis.hist(column, color=color, **vargs)
axis.set_xlabel(label, fontsize=16)
vals.append(avals)
if (not isinstance(bins, np.ndarray)):
bins = abins
else:
assert (bins.all() == abins.all()), 'Inconsistent bins in hist'
t = type(self)()
t['start'] = bins[0:(- 1)]
t['end'] = bins[1:]
for (label, column) in zip(pvt_labels, vals):
t[label] = column | Draw histograms of each category in a column. | digital-assyriology-review/datascience/tables.py | pivot_hist | ds-modules/NESTUD-190A | 6 | python | def pivot_hist(self, pivot_column_label, value_column_label, overlay=True, width=6, height=4, **vargs):
pvt_labels = np.unique(self[pivot_column_label])
pvt_columns = [self[value_column_label][np.where((self[pivot_column_label] == pivot))] for pivot in pvt_labels]
n = len(pvt_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if overlay:
plt.figure(figsize=(width, height))
(vals, bins, patches) = plt.hist(pvt_columns, color=colors, **vargs)
plt.legend(pvt_labels)
else:
(_, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
vals = []
bins = None
for (axis, label, column, color) in zip(axes, pvt_labels, pvt_columns, colors):
if isinstance(bins, np.ndarray):
(avals, abins, patches) = axis.hist(column, color=color, bins=bins, **vargs)
else:
(avals, abins, patches) = axis.hist(column, color=color, **vargs)
axis.set_xlabel(label, fontsize=16)
vals.append(avals)
if (not isinstance(bins, np.ndarray)):
bins = abins
else:
assert (bins.all() == abins.all()), 'Inconsistent bins in hist'
t = type(self)()
t['start'] = bins[0:(- 1)]
t['end'] = bins[1:]
for (label, column) in zip(pvt_labels, vals):
t[label] = column | def pivot_hist(self, pivot_column_label, value_column_label, overlay=True, width=6, height=4, **vargs):
pvt_labels = np.unique(self[pivot_column_label])
pvt_columns = [self[value_column_label][np.where((self[pivot_column_label] == pivot))] for pivot in pvt_labels]
n = len(pvt_labels)
colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
if overlay:
plt.figure(figsize=(width, height))
(vals, bins, patches) = plt.hist(pvt_columns, color=colors, **vargs)
plt.legend(pvt_labels)
else:
(_, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
vals = []
bins = None
for (axis, label, column, color) in zip(axes, pvt_labels, pvt_columns, colors):
if isinstance(bins, np.ndarray):
(avals, abins, patches) = axis.hist(column, color=color, bins=bins, **vargs)
else:
(avals, abins, patches) = axis.hist(column, color=color, **vargs)
axis.set_xlabel(label, fontsize=16)
vals.append(avals)
if (not isinstance(bins, np.ndarray)):
bins = abins
else:
assert (bins.all() == abins.all()), 'Inconsistent bins in hist'
t = type(self)()
t['start'] = bins[0:(- 1)]
t['end'] = bins[1:]
for (label, column) in zip(pvt_labels, vals):
t[label] = column<|docstring|>Draw histograms of each category in a column.<|endoftext|> |
9403651e1a5828ae54235a792bcaba9cc40775a25f6220bb03cc19b9f067f20c | def hist(self, *columns, overlay=True, bins=None, bin_column=None, unit=None, counts=None, width=6, height=4, **vargs):
"Plots one histogram for each column in columns. If no column is\n specificed, plot all columns.\n\n Kwargs:\n overlay (bool): If True, plots 1 chart with all the histograms\n overlaid on top of each other (instead of the default behavior\n of one histogram for each column in the table). Also adds a\n legend that matches each bar color to its column.\n\n bins (list or int): Lower bound for each bin in the\n histogram or number of bins. If None, bins will\n be chosen automatically.\n\n bin_column (column name or index): A column of bin lower bounds.\n All other columns are treated as counts of these bins.\n If None, each value in each row is assigned a count of 1.\n\n counts (column name or index): Deprecated name for bin_column.\n\n vargs: Additional arguments that get passed into :func:plt.hist.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist\n for additional arguments that can be passed into vargs. These\n include: `range`, `normed`, `cumulative`, and `orientation`,\n to name a few.\n\n >>> t = Table().with_columns(\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> t\n count | points\n 9 | 1\n 3 | 2\n 3 | 2\n 1 | 10\n >>> t.hist() # doctest: +SKIP\n <histogram of values in count>\n <histogram of values in points>\n\n >>> t = Table().with_columns(\n ... 'value', make_array(101, 102, 103),\n ... 'proportion', make_array(0.25, 0.5, 0.25))\n >>> t.hist(bin_column='value') # doctest: +SKIP\n <histogram of values weighted by corresponding proportions>\n "
if ((counts is not None) and (bin_column is None)):
warnings.warn('counts arg of hist is deprecated; use bin_column')
bin_column = counts
if columns:
if (bin_column is not None):
columns = (list(columns) + [bin_column])
self = self.select(*columns)
for col in self:
if any((isinstance(cell, np.flexible) for cell in self[col])):
raise ValueError("The column '{0}' contains non-numerical values. A histogram cannot be drawn for this table.".format(col))
columns = self._columns.copy()
if ((bin_column is not None) and (bins is None)):
bins = np.unique(self.column(bin_column))
if (bins is not None):
vargs['bins'] = bins
if ('normed' not in vargs):
vargs['normed'] = True
percentage = plt.FuncFormatter((lambda x, _: '{:g}'.format((100 * x))))
counted_values = counted_label = None
if (bin_column is not None):
counted_label = self._as_label(bin_column)
counted_values = self.column(counted_label)
columns.pop(counted_label)
n = len(columns)
colors = [(rgb_color + (self.default_alpha,)) for rgb_color in itertools.islice(itertools.cycle(self.chart_colors), n)]
if (overlay and (n > 1)):
column_keys = list(columns.keys())[::(- 1)]
values = list(columns.values())[::(- 1)]
colors = list(colors)[::(- 1)]
if (counted_values is not None):
vargs['weights'] = np.transpose(values)
values = np.repeat(counted_values, n).reshape((- 1), n)
vargs.setdefault('histtype', 'stepfilled')
figure = plt.figure(figsize=(width, height))
plt.hist(values, color=colors, **vargs)
axis = figure.get_axes()[0]
_vertical_x(axis)
if vargs['normed']:
axis.set_ylabel(('Percent per ' + (unit if unit else 'unit')))
axis.yaxis.set_major_formatter(percentage)
else:
axis.set_ylabel('Count')
if unit:
axis.set_xlabel((('(' + unit) + ')'), fontsize=16)
plt.legend(columns.keys(), loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
(_, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
if ((isinstance(bins, numbers.Integral) and (bins > 76)) or (hasattr(bins, '__len__') and (len(bins) > 76))):
vargs.setdefault('histtype', 'stepfilled')
if (n == 1):
axes = [axes]
for (axis, label, color) in zip(axes, columns.keys(), colors):
if vargs['normed']:
axis.set_ylabel(('Percent per ' + (unit if unit else 'unit')))
axis.yaxis.set_major_formatter(percentage)
else:
axis.set_ylabel('Count')
x_unit = (((' (' + unit) + ')') if unit else '')
if (counted_values is None):
values = columns[label]
axis.set_xlabel((label + x_unit), fontsize=16)
else:
values = counted_values
vargs['weights'] = columns[label]
axis.set_xlabel((label.rstrip(' count') + x_unit), fontsize=16)
axis.hist(values, color=color, **vargs)
_vertical_x(axis)
type(self).plots.append(axis) | Plots one histogram for each column in columns. If no column is
specificed, plot all columns.
Kwargs:
overlay (bool): If True, plots 1 chart with all the histograms
overlaid on top of each other (instead of the default behavior
of one histogram for each column in the table). Also adds a
legend that matches each bar color to its column.
bins (list or int): Lower bound for each bin in the
histogram or number of bins. If None, bins will
be chosen automatically.
bin_column (column name or index): A column of bin lower bounds.
All other columns are treated as counts of these bins.
If None, each value in each row is assigned a count of 1.
counts (column name or index): Deprecated name for bin_column.
vargs: Additional arguments that get passed into :func:plt.hist.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
for additional arguments that can be passed into vargs. These
include: `range`, `normed`, `cumulative`, and `orientation`,
to name a few.
>>> t = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> t.hist() # doctest: +SKIP
<histogram of values in count>
<histogram of values in points>
>>> t = Table().with_columns(
... 'value', make_array(101, 102, 103),
... 'proportion', make_array(0.25, 0.5, 0.25))
>>> t.hist(bin_column='value') # doctest: +SKIP
<histogram of values weighted by corresponding proportions> | digital-assyriology-review/datascience/tables.py | hist | ds-modules/NESTUD-190A | 6 | python | def hist(self, *columns, overlay=True, bins=None, bin_column=None, unit=None, counts=None, width=6, height=4, **vargs):
"Plots one histogram for each column in columns. If no column is\n specificed, plot all columns.\n\n Kwargs:\n overlay (bool): If True, plots 1 chart with all the histograms\n overlaid on top of each other (instead of the default behavior\n of one histogram for each column in the table). Also adds a\n legend that matches each bar color to its column.\n\n bins (list or int): Lower bound for each bin in the\n histogram or number of bins. If None, bins will\n be chosen automatically.\n\n bin_column (column name or index): A column of bin lower bounds.\n All other columns are treated as counts of these bins.\n If None, each value in each row is assigned a count of 1.\n\n counts (column name or index): Deprecated name for bin_column.\n\n vargs: Additional arguments that get passed into :func:plt.hist.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist\n for additional arguments that can be passed into vargs. These\n include: `range`, `normed`, `cumulative`, and `orientation`,\n to name a few.\n\n >>> t = Table().with_columns(\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> t\n count | points\n 9 | 1\n 3 | 2\n 3 | 2\n 1 | 10\n >>> t.hist() # doctest: +SKIP\n <histogram of values in count>\n <histogram of values in points>\n\n >>> t = Table().with_columns(\n ... 'value', make_array(101, 102, 103),\n ... 'proportion', make_array(0.25, 0.5, 0.25))\n >>> t.hist(bin_column='value') # doctest: +SKIP\n <histogram of values weighted by corresponding proportions>\n "
if ((counts is not None) and (bin_column is None)):
warnings.warn('counts arg of hist is deprecated; use bin_column')
bin_column = counts
if columns:
if (bin_column is not None):
columns = (list(columns) + [bin_column])
self = self.select(*columns)
for col in self:
if any((isinstance(cell, np.flexible) for cell in self[col])):
raise ValueError("The column '{0}' contains non-numerical values. A histogram cannot be drawn for this table.".format(col))
columns = self._columns.copy()
if ((bin_column is not None) and (bins is None)):
bins = np.unique(self.column(bin_column))
if (bins is not None):
vargs['bins'] = bins
if ('normed' not in vargs):
vargs['normed'] = True
percentage = plt.FuncFormatter((lambda x, _: '{:g}'.format((100 * x))))
counted_values = counted_label = None
if (bin_column is not None):
counted_label = self._as_label(bin_column)
counted_values = self.column(counted_label)
columns.pop(counted_label)
n = len(columns)
colors = [(rgb_color + (self.default_alpha,)) for rgb_color in itertools.islice(itertools.cycle(self.chart_colors), n)]
if (overlay and (n > 1)):
column_keys = list(columns.keys())[::(- 1)]
values = list(columns.values())[::(- 1)]
colors = list(colors)[::(- 1)]
if (counted_values is not None):
vargs['weights'] = np.transpose(values)
values = np.repeat(counted_values, n).reshape((- 1), n)
vargs.setdefault('histtype', 'stepfilled')
figure = plt.figure(figsize=(width, height))
plt.hist(values, color=colors, **vargs)
axis = figure.get_axes()[0]
_vertical_x(axis)
if vargs['normed']:
axis.set_ylabel(('Percent per ' + (unit if unit else 'unit')))
axis.yaxis.set_major_formatter(percentage)
else:
axis.set_ylabel('Count')
if unit:
axis.set_xlabel((('(' + unit) + ')'), fontsize=16)
plt.legend(columns.keys(), loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
(_, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
if ((isinstance(bins, numbers.Integral) and (bins > 76)) or (hasattr(bins, '__len__') and (len(bins) > 76))):
vargs.setdefault('histtype', 'stepfilled')
if (n == 1):
axes = [axes]
for (axis, label, color) in zip(axes, columns.keys(), colors):
if vargs['normed']:
axis.set_ylabel(('Percent per ' + (unit if unit else 'unit')))
axis.yaxis.set_major_formatter(percentage)
else:
axis.set_ylabel('Count')
x_unit = (((' (' + unit) + ')') if unit else )
if (counted_values is None):
values = columns[label]
axis.set_xlabel((label + x_unit), fontsize=16)
else:
values = counted_values
vargs['weights'] = columns[label]
axis.set_xlabel((label.rstrip(' count') + x_unit), fontsize=16)
axis.hist(values, color=color, **vargs)
_vertical_x(axis)
type(self).plots.append(axis) | def hist(self, *columns, overlay=True, bins=None, bin_column=None, unit=None, counts=None, width=6, height=4, **vargs):
"Plots one histogram for each column in columns. If no column is\n specificed, plot all columns.\n\n Kwargs:\n overlay (bool): If True, plots 1 chart with all the histograms\n overlaid on top of each other (instead of the default behavior\n of one histogram for each column in the table). Also adds a\n legend that matches each bar color to its column.\n\n bins (list or int): Lower bound for each bin in the\n histogram or number of bins. If None, bins will\n be chosen automatically.\n\n bin_column (column name or index): A column of bin lower bounds.\n All other columns are treated as counts of these bins.\n If None, each value in each row is assigned a count of 1.\n\n counts (column name or index): Deprecated name for bin_column.\n\n vargs: Additional arguments that get passed into :func:plt.hist.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist\n for additional arguments that can be passed into vargs. These\n include: `range`, `normed`, `cumulative`, and `orientation`,\n to name a few.\n\n >>> t = Table().with_columns(\n ... 'count', make_array(9, 3, 3, 1),\n ... 'points', make_array(1, 2, 2, 10))\n >>> t\n count | points\n 9 | 1\n 3 | 2\n 3 | 2\n 1 | 10\n >>> t.hist() # doctest: +SKIP\n <histogram of values in count>\n <histogram of values in points>\n\n >>> t = Table().with_columns(\n ... 'value', make_array(101, 102, 103),\n ... 'proportion', make_array(0.25, 0.5, 0.25))\n >>> t.hist(bin_column='value') # doctest: +SKIP\n <histogram of values weighted by corresponding proportions>\n "
if ((counts is not None) and (bin_column is None)):
warnings.warn('counts arg of hist is deprecated; use bin_column')
bin_column = counts
if columns:
if (bin_column is not None):
columns = (list(columns) + [bin_column])
self = self.select(*columns)
for col in self:
if any((isinstance(cell, np.flexible) for cell in self[col])):
raise ValueError("The column '{0}' contains non-numerical values. A histogram cannot be drawn for this table.".format(col))
columns = self._columns.copy()
if ((bin_column is not None) and (bins is None)):
bins = np.unique(self.column(bin_column))
if (bins is not None):
vargs['bins'] = bins
if ('normed' not in vargs):
vargs['normed'] = True
percentage = plt.FuncFormatter((lambda x, _: '{:g}'.format((100 * x))))
counted_values = counted_label = None
if (bin_column is not None):
counted_label = self._as_label(bin_column)
counted_values = self.column(counted_label)
columns.pop(counted_label)
n = len(columns)
colors = [(rgb_color + (self.default_alpha,)) for rgb_color in itertools.islice(itertools.cycle(self.chart_colors), n)]
if (overlay and (n > 1)):
column_keys = list(columns.keys())[::(- 1)]
values = list(columns.values())[::(- 1)]
colors = list(colors)[::(- 1)]
if (counted_values is not None):
vargs['weights'] = np.transpose(values)
values = np.repeat(counted_values, n).reshape((- 1), n)
vargs.setdefault('histtype', 'stepfilled')
figure = plt.figure(figsize=(width, height))
plt.hist(values, color=colors, **vargs)
axis = figure.get_axes()[0]
_vertical_x(axis)
if vargs['normed']:
axis.set_ylabel(('Percent per ' + (unit if unit else 'unit')))
axis.yaxis.set_major_formatter(percentage)
else:
axis.set_ylabel('Count')
if unit:
axis.set_xlabel((('(' + unit) + ')'), fontsize=16)
plt.legend(columns.keys(), loc=2, bbox_to_anchor=(1.05, 1))
type(self).plots.append(axis)
else:
(_, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))
if ((isinstance(bins, numbers.Integral) and (bins > 76)) or (hasattr(bins, '__len__') and (len(bins) > 76))):
vargs.setdefault('histtype', 'stepfilled')
if (n == 1):
axes = [axes]
for (axis, label, color) in zip(axes, columns.keys(), colors):
if vargs['normed']:
axis.set_ylabel(('Percent per ' + (unit if unit else 'unit')))
axis.yaxis.set_major_formatter(percentage)
else:
axis.set_ylabel('Count')
x_unit = (((' (' + unit) + ')') if unit else )
if (counted_values is None):
values = columns[label]
axis.set_xlabel((label + x_unit), fontsize=16)
else:
values = counted_values
vargs['weights'] = columns[label]
axis.set_xlabel((label.rstrip(' count') + x_unit), fontsize=16)
axis.hist(values, color=color, **vargs)
_vertical_x(axis)
type(self).plots.append(axis)<|docstring|>Plots one histogram for each column in columns. If no column is
specificed, plot all columns.
Kwargs:
overlay (bool): If True, plots 1 chart with all the histograms
overlaid on top of each other (instead of the default behavior
of one histogram for each column in the table). Also adds a
legend that matches each bar color to its column.
bins (list or int): Lower bound for each bin in the
histogram or number of bins. If None, bins will
be chosen automatically.
bin_column (column name or index): A column of bin lower bounds.
All other columns are treated as counts of these bins.
If None, each value in each row is assigned a count of 1.
counts (column name or index): Deprecated name for bin_column.
vargs: Additional arguments that get passed into :func:plt.hist.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
for additional arguments that can be passed into vargs. These
include: `range`, `normed`, `cumulative`, and `orientation`,
to name a few.
>>> t = Table().with_columns(
... 'count', make_array(9, 3, 3, 1),
... 'points', make_array(1, 2, 2, 10))
>>> t
count | points
9 | 1
3 | 2
3 | 2
1 | 10
>>> t.hist() # doctest: +SKIP
<histogram of values in count>
<histogram of values in points>
>>> t = Table().with_columns(
... 'value', make_array(101, 102, 103),
... 'proportion', make_array(0.25, 0.5, 0.25))
>>> t.hist(bin_column='value') # doctest: +SKIP
<histogram of values weighted by corresponding proportions><|endoftext|> |
697dedfa6c97170e57fded7aab15826ea318fdfe28955953a69aaa55cc7117a4 | def boxplot(self, **vargs):
"Plots a boxplot for the table.\n\n Every column must be numerical.\n\n Kwargs:\n vargs: Additional arguments that get passed into `plt.boxplot`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot\n for additional arguments that can be passed into vargs. These include\n `vert` and `showmeans`.\n\n Returns:\n None\n\n Raises:\n ValueError: The Table contains columns with non-numerical values.\n\n >>> table = Table().with_columns(\n ... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),\n ... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))\n >>> table\n test1 | test2\n 92.5 | 89\n 88 | 84\n 72 | 74\n 71 | 66\n 99 | 92\n 100 | 99\n 95 | 88\n 83 | 81\n 94 | 95\n 93 | 94\n >>> table.boxplot() # doctest: +SKIP\n <boxplot of test1 and boxplot of test2 side-by-side on the same figure>\n "
for col in self:
if any((isinstance(cell, np.flexible) for cell in self[col])):
raise ValueError("The column '{0}' contains non-numerical values. A histogram cannot be drawn for this table.".format(col))
columns = self._columns.copy()
vargs['labels'] = columns.keys()
values = list(columns.values())
plt.boxplot(values, **vargs) | Plots a boxplot for the table.
Every column must be numerical.
Kwargs:
vargs: Additional arguments that get passed into `plt.boxplot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot
for additional arguments that can be passed into vargs. These include
`vert` and `showmeans`.
Returns:
None
Raises:
ValueError: The Table contains columns with non-numerical values.
>>> table = Table().with_columns(
... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),
... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))
>>> table
test1 | test2
92.5 | 89
88 | 84
72 | 74
71 | 66
99 | 92
100 | 99
95 | 88
83 | 81
94 | 95
93 | 94
>>> table.boxplot() # doctest: +SKIP
<boxplot of test1 and boxplot of test2 side-by-side on the same figure> | digital-assyriology-review/datascience/tables.py | boxplot | ds-modules/NESTUD-190A | 6 | python | def boxplot(self, **vargs):
"Plots a boxplot for the table.\n\n Every column must be numerical.\n\n Kwargs:\n vargs: Additional arguments that get passed into `plt.boxplot`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot\n for additional arguments that can be passed into vargs. These include\n `vert` and `showmeans`.\n\n Returns:\n None\n\n Raises:\n ValueError: The Table contains columns with non-numerical values.\n\n >>> table = Table().with_columns(\n ... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),\n ... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))\n >>> table\n test1 | test2\n 92.5 | 89\n 88 | 84\n 72 | 74\n 71 | 66\n 99 | 92\n 100 | 99\n 95 | 88\n 83 | 81\n 94 | 95\n 93 | 94\n >>> table.boxplot() # doctest: +SKIP\n <boxplot of test1 and boxplot of test2 side-by-side on the same figure>\n "
for col in self:
if any((isinstance(cell, np.flexible) for cell in self[col])):
raise ValueError("The column '{0}' contains non-numerical values. A histogram cannot be drawn for this table.".format(col))
columns = self._columns.copy()
vargs['labels'] = columns.keys()
values = list(columns.values())
plt.boxplot(values, **vargs) | def boxplot(self, **vargs):
"Plots a boxplot for the table.\n\n Every column must be numerical.\n\n Kwargs:\n vargs: Additional arguments that get passed into `plt.boxplot`.\n See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot\n for additional arguments that can be passed into vargs. These include\n `vert` and `showmeans`.\n\n Returns:\n None\n\n Raises:\n ValueError: The Table contains columns with non-numerical values.\n\n >>> table = Table().with_columns(\n ... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),\n ... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))\n >>> table\n test1 | test2\n 92.5 | 89\n 88 | 84\n 72 | 74\n 71 | 66\n 99 | 92\n 100 | 99\n 95 | 88\n 83 | 81\n 94 | 95\n 93 | 94\n >>> table.boxplot() # doctest: +SKIP\n <boxplot of test1 and boxplot of test2 side-by-side on the same figure>\n "
for col in self:
if any((isinstance(cell, np.flexible) for cell in self[col])):
raise ValueError("The column '{0}' contains non-numerical values. A histogram cannot be drawn for this table.".format(col))
columns = self._columns.copy()
vargs['labels'] = columns.keys()
values = list(columns.values())
plt.boxplot(values, **vargs)<|docstring|>Plots a boxplot for the table.
Every column must be numerical.
Kwargs:
vargs: Additional arguments that get passed into `plt.boxplot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot
for additional arguments that can be passed into vargs. These include
`vert` and `showmeans`.
Returns:
None
Raises:
ValueError: The Table contains columns with non-numerical values.
>>> table = Table().with_columns(
... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),
... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))
>>> table
test1 | test2
92.5 | 89
88 | 84
72 | 74
71 | 66
99 | 92
100 | 99
95 | 88
83 | 81
94 | 95
93 | 94
>>> table.boxplot() # doctest: +SKIP
<boxplot of test1 and boxplot of test2 side-by-side on the same figure><|endoftext|> |
c8328a9f5965fdc63f5af9d8477c17f3e52de3df3a0763303a54eeb0e15edeaa | def __getitem__(self, row_indices_or_slice):
"Return a new Table with selected rows taken by index.\n\n Args:\n ``row_indices_or_slice`` (integer or array of integers):\n The row index, list of row indices or a slice of row indices to\n be selected.\n\n Returns:\n A new instance of ``Table`` with selected rows in order\n corresponding to ``row_indices_or_slice``.\n\n Raises:\n ``IndexError``, if any ``row_indices_or_slice`` is out of bounds\n with respect to column length.\n\n >>> grades = Table().with_columns('letter grade',\n ... make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),\n ... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))\n >>> grades\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n B- | 2.7\n >>> grades.take(0)\n letter grade | gpa\n A+ | 4\n >>> grades.take(-1)\n letter grade | gpa\n B- | 2.7\n >>> grades.take(make_array(2, 1, 0))\n letter grade | gpa\n A- | 3.7\n A | 4\n A+ | 4\n >>> grades.take[:3]\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n >>> grades.take(np.arange(0,3))\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n >>> grades.take(10)\n Traceback (most recent call last):\n ...\n IndexError: index 10 is out of bounds for axis 0 with size 6\n "
if isinstance(row_indices_or_slice, collections.Iterable):
columns = [np.take(column, row_indices_or_slice, axis=0) for column in self._table._columns.values()]
return self._table._with_columns(columns)
rows = self._table.rows[row_indices_or_slice]
if isinstance(rows, Table.Row):
rows = [rows]
return self._table._with_columns(zip(*rows)) | Return a new Table with selected rows taken by index.
Args:
``row_indices_or_slice`` (integer or array of integers):
The row index, list of row indices or a slice of row indices to
be selected.
Returns:
A new instance of ``Table`` with selected rows in order
corresponding to ``row_indices_or_slice``.
Raises:
``IndexError``, if any ``row_indices_or_slice`` is out of bounds
with respect to column length.
>>> grades = Table().with_columns('letter grade',
... make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),
... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))
>>> grades
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B | 3
B- | 2.7
>>> grades.take(0)
letter grade | gpa
A+ | 4
>>> grades.take(-1)
letter grade | gpa
B- | 2.7
>>> grades.take(make_array(2, 1, 0))
letter grade | gpa
A- | 3.7
A | 4
A+ | 4
>>> grades.take[:3]
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
>>> grades.take(np.arange(0,3))
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
>>> grades.take(10)
Traceback (most recent call last):
...
IndexError: index 10 is out of bounds for axis 0 with size 6 | digital-assyriology-review/datascience/tables.py | __getitem__ | ds-modules/NESTUD-190A | 6 | python | def __getitem__(self, row_indices_or_slice):
"Return a new Table with selected rows taken by index.\n\n Args:\n ``row_indices_or_slice`` (integer or array of integers):\n The row index, list of row indices or a slice of row indices to\n be selected.\n\n Returns:\n A new instance of ``Table`` with selected rows in order\n corresponding to ``row_indices_or_slice``.\n\n Raises:\n ``IndexError``, if any ``row_indices_or_slice`` is out of bounds\n with respect to column length.\n\n >>> grades = Table().with_columns('letter grade',\n ... make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),\n ... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))\n >>> grades\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n B- | 2.7\n >>> grades.take(0)\n letter grade | gpa\n A+ | 4\n >>> grades.take(-1)\n letter grade | gpa\n B- | 2.7\n >>> grades.take(make_array(2, 1, 0))\n letter grade | gpa\n A- | 3.7\n A | 4\n A+ | 4\n >>> grades.take[:3]\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n >>> grades.take(np.arange(0,3))\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n >>> grades.take(10)\n Traceback (most recent call last):\n ...\n IndexError: index 10 is out of bounds for axis 0 with size 6\n "
if isinstance(row_indices_or_slice, collections.Iterable):
columns = [np.take(column, row_indices_or_slice, axis=0) for column in self._table._columns.values()]
return self._table._with_columns(columns)
rows = self._table.rows[row_indices_or_slice]
if isinstance(rows, Table.Row):
rows = [rows]
return self._table._with_columns(zip(*rows)) | def __getitem__(self, row_indices_or_slice):
"Return a new Table with selected rows taken by index.\n\n Args:\n ``row_indices_or_slice`` (integer or array of integers):\n The row index, list of row indices or a slice of row indices to\n be selected.\n\n Returns:\n A new instance of ``Table`` with selected rows in order\n corresponding to ``row_indices_or_slice``.\n\n Raises:\n ``IndexError``, if any ``row_indices_or_slice`` is out of bounds\n with respect to column length.\n\n >>> grades = Table().with_columns('letter grade',\n ... make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),\n ... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))\n >>> grades\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n B- | 2.7\n >>> grades.take(0)\n letter grade | gpa\n A+ | 4\n >>> grades.take(-1)\n letter grade | gpa\n B- | 2.7\n >>> grades.take(make_array(2, 1, 0))\n letter grade | gpa\n A- | 3.7\n A | 4\n A+ | 4\n >>> grades.take[:3]\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n >>> grades.take(np.arange(0,3))\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n >>> grades.take(10)\n Traceback (most recent call last):\n ...\n IndexError: index 10 is out of bounds for axis 0 with size 6\n "
if isinstance(row_indices_or_slice, collections.Iterable):
columns = [np.take(column, row_indices_or_slice, axis=0) for column in self._table._columns.values()]
return self._table._with_columns(columns)
rows = self._table.rows[row_indices_or_slice]
if isinstance(rows, Table.Row):
rows = [rows]
return self._table._with_columns(zip(*rows))<|docstring|>Return a new Table with selected rows taken by index.
Args:
``row_indices_or_slice`` (integer or array of integers):
The row index, list of row indices or a slice of row indices to
be selected.
Returns:
A new instance of ``Table`` with selected rows in order
corresponding to ``row_indices_or_slice``.
Raises:
``IndexError``, if any ``row_indices_or_slice`` is out of bounds
with respect to column length.
>>> grades = Table().with_columns('letter grade',
... make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),
... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))
>>> grades
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B | 3
B- | 2.7
>>> grades.take(0)
letter grade | gpa
A+ | 4
>>> grades.take(-1)
letter grade | gpa
B- | 2.7
>>> grades.take(make_array(2, 1, 0))
letter grade | gpa
A- | 3.7
A | 4
A+ | 4
>>> grades.take[:3]
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
>>> grades.take(np.arange(0,3))
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
>>> grades.take(10)
Traceback (most recent call last):
...
IndexError: index 10 is out of bounds for axis 0 with size 6<|endoftext|> |
0ea46dc45fc74c393c99917bd40ea77d0bef0e97bba689533e4191b21affbbc9 | def __getitem__(self, row_indices_or_slice):
"Return a new Table without a sequence of rows excluded by number.\n\n Args:\n ``row_indices_or_slice`` (integer or list of integers or slice):\n The row index, list of row indices or a slice of row indices\n to be excluded.\n\n Returns:\n A new instance of ``Table``.\n\n >>> t = Table().with_columns(\n ... 'letter grade', make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),\n ... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))\n >>> t\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n B- | 2.7\n >>> t.exclude(4)\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B- | 2.7\n >>> t.exclude(-1)\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n >>> t.exclude(make_array(1, 3, 4))\n letter grade | gpa\n A+ | 4\n A- | 3.7\n B- | 2.7\n >>> t.exclude(range(3))\n letter grade | gpa\n B+ | 3.3\n B | 3\n B- | 2.7\n\n Note that ``exclude`` also supports NumPy-like indexing and slicing:\n\n >>> t.exclude[:3]\n letter grade | gpa\n B+ | 3.3\n B | 3\n B- | 2.7\n\n >>> t.exclude[1, 3, 4]\n letter grade | gpa\n A+ | 4\n A- | 3.7\n B- | 2.7\n "
if isinstance(row_indices_or_slice, collections.Iterable):
without_row_indices = set(row_indices_or_slice)
rows = [row for (index, row) in enumerate(self._table.rows[:]) if (index not in without_row_indices)]
return self._table._with_columns(zip(*rows))
row_slice = row_indices_or_slice
if (not isinstance(row_slice, slice)):
row_slice %= self._table.num_rows
row_slice = slice(row_slice, (row_slice + 1))
rows = itertools.chain(self._table.rows[:(row_slice.start or 0)], self._table.rows[row_slice.stop:])
return self._table._with_columns(zip(*rows)) | Return a new Table without a sequence of rows excluded by number.
Args:
``row_indices_or_slice`` (integer or list of integers or slice):
The row index, list of row indices or a slice of row indices
to be excluded.
Returns:
A new instance of ``Table``.
>>> t = Table().with_columns(
... 'letter grade', make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),
... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))
>>> t
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B | 3
B- | 2.7
>>> t.exclude(4)
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B- | 2.7
>>> t.exclude(-1)
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B | 3
>>> t.exclude(make_array(1, 3, 4))
letter grade | gpa
A+ | 4
A- | 3.7
B- | 2.7
>>> t.exclude(range(3))
letter grade | gpa
B+ | 3.3
B | 3
B- | 2.7
Note that ``exclude`` also supports NumPy-like indexing and slicing:
>>> t.exclude[:3]
letter grade | gpa
B+ | 3.3
B | 3
B- | 2.7
>>> t.exclude[1, 3, 4]
letter grade | gpa
A+ | 4
A- | 3.7
B- | 2.7 | digital-assyriology-review/datascience/tables.py | __getitem__ | ds-modules/NESTUD-190A | 6 | python | def __getitem__(self, row_indices_or_slice):
"Return a new Table without a sequence of rows excluded by number.\n\n Args:\n ``row_indices_or_slice`` (integer or list of integers or slice):\n The row index, list of row indices or a slice of row indices\n to be excluded.\n\n Returns:\n A new instance of ``Table``.\n\n >>> t = Table().with_columns(\n ... 'letter grade', make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),\n ... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))\n >>> t\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n B- | 2.7\n >>> t.exclude(4)\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B- | 2.7\n >>> t.exclude(-1)\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n >>> t.exclude(make_array(1, 3, 4))\n letter grade | gpa\n A+ | 4\n A- | 3.7\n B- | 2.7\n >>> t.exclude(range(3))\n letter grade | gpa\n B+ | 3.3\n B | 3\n B- | 2.7\n\n Note that ``exclude`` also supports NumPy-like indexing and slicing:\n\n >>> t.exclude[:3]\n letter grade | gpa\n B+ | 3.3\n B | 3\n B- | 2.7\n\n >>> t.exclude[1, 3, 4]\n letter grade | gpa\n A+ | 4\n A- | 3.7\n B- | 2.7\n "
if isinstance(row_indices_or_slice, collections.Iterable):
without_row_indices = set(row_indices_or_slice)
rows = [row for (index, row) in enumerate(self._table.rows[:]) if (index not in without_row_indices)]
return self._table._with_columns(zip(*rows))
row_slice = row_indices_or_slice
if (not isinstance(row_slice, slice)):
row_slice %= self._table.num_rows
row_slice = slice(row_slice, (row_slice + 1))
rows = itertools.chain(self._table.rows[:(row_slice.start or 0)], self._table.rows[row_slice.stop:])
return self._table._with_columns(zip(*rows)) | def __getitem__(self, row_indices_or_slice):
"Return a new Table without a sequence of rows excluded by number.\n\n Args:\n ``row_indices_or_slice`` (integer or list of integers or slice):\n The row index, list of row indices or a slice of row indices\n to be excluded.\n\n Returns:\n A new instance of ``Table``.\n\n >>> t = Table().with_columns(\n ... 'letter grade', make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),\n ... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))\n >>> t\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n B- | 2.7\n >>> t.exclude(4)\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B- | 2.7\n >>> t.exclude(-1)\n letter grade | gpa\n A+ | 4\n A | 4\n A- | 3.7\n B+ | 3.3\n B | 3\n >>> t.exclude(make_array(1, 3, 4))\n letter grade | gpa\n A+ | 4\n A- | 3.7\n B- | 2.7\n >>> t.exclude(range(3))\n letter grade | gpa\n B+ | 3.3\n B | 3\n B- | 2.7\n\n Note that ``exclude`` also supports NumPy-like indexing and slicing:\n\n >>> t.exclude[:3]\n letter grade | gpa\n B+ | 3.3\n B | 3\n B- | 2.7\n\n >>> t.exclude[1, 3, 4]\n letter grade | gpa\n A+ | 4\n A- | 3.7\n B- | 2.7\n "
if isinstance(row_indices_or_slice, collections.Iterable):
without_row_indices = set(row_indices_or_slice)
rows = [row for (index, row) in enumerate(self._table.rows[:]) if (index not in without_row_indices)]
return self._table._with_columns(zip(*rows))
row_slice = row_indices_or_slice
if (not isinstance(row_slice, slice)):
row_slice %= self._table.num_rows
row_slice = slice(row_slice, (row_slice + 1))
rows = itertools.chain(self._table.rows[:(row_slice.start or 0)], self._table.rows[row_slice.stop:])
return self._table._with_columns(zip(*rows))<|docstring|>Return a new Table without a sequence of rows excluded by number.
Args:
``row_indices_or_slice`` (integer or list of integers or slice):
The row index, list of row indices or a slice of row indices
to be excluded.
Returns:
A new instance of ``Table``.
>>> t = Table().with_columns(
... 'letter grade', make_array('A+', 'A', 'A-', 'B+', 'B', 'B-'),
... 'gpa', make_array(4, 4, 3.7, 3.3, 3, 2.7))
>>> t
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B | 3
B- | 2.7
>>> t.exclude(4)
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B- | 2.7
>>> t.exclude(-1)
letter grade | gpa
A+ | 4
A | 4
A- | 3.7
B+ | 3.3
B | 3
>>> t.exclude(make_array(1, 3, 4))
letter grade | gpa
A+ | 4
A- | 3.7
B- | 2.7
>>> t.exclude(range(3))
letter grade | gpa
B+ | 3.3
B | 3
B- | 2.7
Note that ``exclude`` also supports NumPy-like indexing and slicing:
>>> t.exclude[:3]
letter grade | gpa
B+ | 3.3
B | 3
B- | 2.7
>>> t.exclude[1, 3, 4]
letter grade | gpa
A+ | 4
A- | 3.7
B- | 2.7<|endoftext|> |
c85c550cf292a69bf91fa853a52181250e74281a3037697cc180eb60a8540596 | def item(self, index_or_label):
'Return the item at an index or label.'
if isinstance(index_or_label, numbers.Integral):
index = index_or_label
else:
index = self._table.column_index(index_or_label)
return self[index] | Return the item at an index or label. | digital-assyriology-review/datascience/tables.py | item | ds-modules/NESTUD-190A | 6 | python | def item(self, index_or_label):
if isinstance(index_or_label, numbers.Integral):
index = index_or_label
else:
index = self._table.column_index(index_or_label)
return self[index] | def item(self, index_or_label):
if isinstance(index_or_label, numbers.Integral):
index = index_or_label
else:
index = self._table.column_index(index_or_label)
return self[index]<|docstring|>Return the item at an index or label.<|endoftext|> |
d7b6658cd0fe49eba63131070ab6f0d3d56dd2ca4c35e2f90d684a70ffe55597 | @functools.wraps(attrs[0])
def method(*args, **vargs):
'Create a table from the results of calling attrs.'
columns = [attr(*args, **vargs) for attr in attrs]
return self._with_columns(columns) | Create a table from the results of calling attrs. | digital-assyriology-review/datascience/tables.py | method | ds-modules/NESTUD-190A | 6 | python | @functools.wraps(attrs[0])
def method(*args, **vargs):
columns = [attr(*args, **vargs) for attr in attrs]
return self._with_columns(columns) | @functools.wraps(attrs[0])
def method(*args, **vargs):
columns = [attr(*args, **vargs) for attr in attrs]
return self._with_columns(columns)<|docstring|>Create a table from the results of calling attrs.<|endoftext|> |
bed6576fdfc913c836aa838a3e85dbaead62831af8a7e15121b8ae9906dc1d8d | def train_test_proportions(freq, X, y):
'\n Split data into training (freq proportion) and testing (1-freq) datasets. Different classes will be represented in\n proportions from y, and observations will be shuffled.\n\n freq - percentage of data to go to train, between 0 and 1\n X - data\n y - classes\n '
assert ((freq < 1) and (0 < freq)), 'freq must be a proportion'
c = collections.Counter(y)
idx = {k: np.where((np.array(y) == k))[0] for k in c.keys()}
train_idx = []
test_idx = []
for k in c.keys():
n_to_choose_train = int(((c[k] * freq) + 0.5))
train_idx.extend(list(np.random.choice(idx[k], n_to_choose_train, replace=False)))
test_idx.extend([x for x in idx[k] if (x not in train_idx)])
if ((len(train_idx) == 0) or (len(test_idx) == 0)):
print('Frequence too high or two low, unable to form 2 groups')
raise SystemExit()
y_train = np.array(y)[train_idx]
y_test = np.array(y)[test_idx]
if (len(y) == X.shape[0]):
X_train = X[(train_idx, :)]
X_test = X[(test_idx, :)]
else:
X_train = X[(:, train_idx)]
X_test = X[(:, test_idx)]
return (X_train, X_test, y_train, y_test, train_idx, test_idx) | Split data into training (freq proportion) and testing (1-freq) datasets. Different classes will be represented in
proportions from y, and observations will be shuffled.
freq - percentage of data to go to train, between 0 and 1
X - data
y - classes | utils_general.py | train_test_proportions | AWebZen/FunctionalPrediction5000species | 1 | python | def train_test_proportions(freq, X, y):
'\n Split data into training (freq proportion) and testing (1-freq) datasets. Different classes will be represented in\n proportions from y, and observations will be shuffled.\n\n freq - percentage of data to go to train, between 0 and 1\n X - data\n y - classes\n '
assert ((freq < 1) and (0 < freq)), 'freq must be a proportion'
c = collections.Counter(y)
idx = {k: np.where((np.array(y) == k))[0] for k in c.keys()}
train_idx = []
test_idx = []
for k in c.keys():
n_to_choose_train = int(((c[k] * freq) + 0.5))
train_idx.extend(list(np.random.choice(idx[k], n_to_choose_train, replace=False)))
test_idx.extend([x for x in idx[k] if (x not in train_idx)])
if ((len(train_idx) == 0) or (len(test_idx) == 0)):
print('Frequence too high or two low, unable to form 2 groups')
raise SystemExit()
y_train = np.array(y)[train_idx]
y_test = np.array(y)[test_idx]
if (len(y) == X.shape[0]):
X_train = X[(train_idx, :)]
X_test = X[(test_idx, :)]
else:
X_train = X[(:, train_idx)]
X_test = X[(:, test_idx)]
return (X_train, X_test, y_train, y_test, train_idx, test_idx) | def train_test_proportions(freq, X, y):
'\n Split data into training (freq proportion) and testing (1-freq) datasets. Different classes will be represented in\n proportions from y, and observations will be shuffled.\n\n freq - percentage of data to go to train, between 0 and 1\n X - data\n y - classes\n '
assert ((freq < 1) and (0 < freq)), 'freq must be a proportion'
c = collections.Counter(y)
idx = {k: np.where((np.array(y) == k))[0] for k in c.keys()}
train_idx = []
test_idx = []
for k in c.keys():
n_to_choose_train = int(((c[k] * freq) + 0.5))
train_idx.extend(list(np.random.choice(idx[k], n_to_choose_train, replace=False)))
test_idx.extend([x for x in idx[k] if (x not in train_idx)])
if ((len(train_idx) == 0) or (len(test_idx) == 0)):
print('Frequence too high or two low, unable to form 2 groups')
raise SystemExit()
y_train = np.array(y)[train_idx]
y_test = np.array(y)[test_idx]
if (len(y) == X.shape[0]):
X_train = X[(train_idx, :)]
X_test = X[(test_idx, :)]
else:
X_train = X[(:, train_idx)]
X_test = X[(:, test_idx)]
return (X_train, X_test, y_train, y_test, train_idx, test_idx)<|docstring|>Split data into training (freq proportion) and testing (1-freq) datasets. Different classes will be represented in
proportions from y, and observations will be shuffled.
freq - percentage of data to go to train, between 0 and 1
X - data
y - classes<|endoftext|> |
21c5b7ddc4569d7bea20bceedce8700df3971cf4f8487bc2a7beab789c145261 | def is_enz(node):
'Tells if node is an enzyme (EC) or not'
split = node.split('.')
if (len(split) == 4):
return True
else:
return False | Tells if node is an enzyme (EC) or not | utils_general.py | is_enz | AWebZen/FunctionalPrediction5000species | 1 | python | def is_enz(node):
split = node.split('.')
if (len(split) == 4):
return True
else:
return False | def is_enz(node):
split = node.split('.')
if (len(split) == 4):
return True
else:
return False<|docstring|>Tells if node is an enzyme (EC) or not<|endoftext|> |
2430215b3611665b2970fc443c2d3443b160cf3975c4243f26f173a573948508 | def from_y_to_dict(y):
'From vector of values, build dict where keys are the values and dict values\n are indexes of vector with such values'
return {k: np.where((y == k))[0] for k in np.unique(y)} | From vector of values, build dict where keys are the values and dict values
are indexes of vector with such values | utils_general.py | from_y_to_dict | AWebZen/FunctionalPrediction5000species | 1 | python | def from_y_to_dict(y):
'From vector of values, build dict where keys are the values and dict values\n are indexes of vector with such values'
return {k: np.where((y == k))[0] for k in np.unique(y)} | def from_y_to_dict(y):
'From vector of values, build dict where keys are the values and dict values\n are indexes of vector with such values'
return {k: np.where((y == k))[0] for k in np.unique(y)}<|docstring|>From vector of values, build dict where keys are the values and dict values
are indexes of vector with such values<|endoftext|> |
6abbbf276bdffcc165b122b5f7b426c566998d7f274497557575dfb4fc01e67b | def common_nodes(nodes1, nodes2):
'\n Returns list of the intersection of two sets/lists of nodes\n '
nodes1 = set(nodes1)
nodes2 = set(nodes2)
return (nodes1 & nodes2) | Returns list of the intersection of two sets/lists of nodes | utils_general.py | common_nodes | AWebZen/FunctionalPrediction5000species | 1 | python | def common_nodes(nodes1, nodes2):
'\n \n '
nodes1 = set(nodes1)
nodes2 = set(nodes2)
return (nodes1 & nodes2) | def common_nodes(nodes1, nodes2):
'\n \n '
nodes1 = set(nodes1)
nodes2 = set(nodes2)
return (nodes1 & nodes2)<|docstring|>Returns list of the intersection of two sets/lists of nodes<|endoftext|> |
dd1bb10d1b1343cc2d458cd277c0f8ff17268ed5f75c29b806891d6bcdc20c32 | def invert_ko_to_ec_dict(ko_dict):
'\n Inverts KO to EC dict (ko_ec_dict.cpkl in backup_cpkl) so as to have a\n {ec : set of kos} list, only with the KOs that interest us (that are in ko_dict).\n '
inverted_dict = collections.defaultdict(set)
for (ko, ecs) in ko_dict.items():
if (ecs == ''):
continue
ecs = ecs.strip(' ').split()
for ec in ecs:
inverted_dict[ec].add(ko)
return inverted_dict | Inverts KO to EC dict (ko_ec_dict.cpkl in backup_cpkl) so as to have a
{ec : set of kos} list, only with the KOs that interest us (that are in ko_dict). | utils_general.py | invert_ko_to_ec_dict | AWebZen/FunctionalPrediction5000species | 1 | python | def invert_ko_to_ec_dict(ko_dict):
'\n Inverts KO to EC dict (ko_ec_dict.cpkl in backup_cpkl) so as to have a\n {ec : set of kos} list, only with the KOs that interest us (that are in ko_dict).\n '
inverted_dict = collections.defaultdict(set)
for (ko, ecs) in ko_dict.items():
if (ecs == ):
continue
ecs = ecs.strip(' ').split()
for ec in ecs:
inverted_dict[ec].add(ko)
return inverted_dict | def invert_ko_to_ec_dict(ko_dict):
'\n Inverts KO to EC dict (ko_ec_dict.cpkl in backup_cpkl) so as to have a\n {ec : set of kos} list, only with the KOs that interest us (that are in ko_dict).\n '
inverted_dict = collections.defaultdict(set)
for (ko, ecs) in ko_dict.items():
if (ecs == ):
continue
ecs = ecs.strip(' ').split()
for ec in ecs:
inverted_dict[ec].add(ko)
return inverted_dict<|docstring|>Inverts KO to EC dict (ko_ec_dict.cpkl in backup_cpkl) so as to have a
{ec : set of kos} list, only with the KOs that interest us (that are in ko_dict).<|endoftext|> |
e5b37a3b85c097b34d3ae8fa5e5090b3c89c62e0bb0e1078e87cb6f6b82063d0 | def sim_matrix(X):
'\n For a species x other thing (nodes) matrix, build a species x species\n jaccard index similarity matrix of species vectors.\n '
sim = np.zeros((len(X), len(X)))
for (i, pati) in enumerate(X):
for (j, pati2) in enumerate(X):
if (j > i):
break
if issparse(pati):
pati = pati.todense()
if issparse(pati2):
pati2 = pati2.todense()
sim[(i, j)] = jaccard_index(pati, pati2)
return ((sim + sim.T) - np.diag(sim.diagonal())) | For a species x other thing (nodes) matrix, build a species x species
jaccard index similarity matrix of species vectors. | utils_general.py | sim_matrix | AWebZen/FunctionalPrediction5000species | 1 | python | def sim_matrix(X):
'\n For a species x other thing (nodes) matrix, build a species x species\n jaccard index similarity matrix of species vectors.\n '
sim = np.zeros((len(X), len(X)))
for (i, pati) in enumerate(X):
for (j, pati2) in enumerate(X):
if (j > i):
break
if issparse(pati):
pati = pati.todense()
if issparse(pati2):
pati2 = pati2.todense()
sim[(i, j)] = jaccard_index(pati, pati2)
return ((sim + sim.T) - np.diag(sim.diagonal())) | def sim_matrix(X):
'\n For a species x other thing (nodes) matrix, build a species x species\n jaccard index similarity matrix of species vectors.\n '
sim = np.zeros((len(X), len(X)))
for (i, pati) in enumerate(X):
for (j, pati2) in enumerate(X):
if (j > i):
break
if issparse(pati):
pati = pati.todense()
if issparse(pati2):
pati2 = pati2.todense()
sim[(i, j)] = jaccard_index(pati, pati2)
return ((sim + sim.T) - np.diag(sim.diagonal()))<|docstring|>For a species x other thing (nodes) matrix, build a species x species
jaccard index similarity matrix of species vectors.<|endoftext|> |
49723903e003e0dd99debc4e56e507d4f80240ab928a376be43b348160d5ca83 | def fitting_tsne(X, n_comp, perp, metric='euclidean'):
'Fit t-SNE'
tsne = TSNE(n_components=n_comp, perplexity=perp, metric=metric)
X_tsne = tsne.fit_transform(X)
return X_tsne | Fit t-SNE | utils_general.py | fitting_tsne | AWebZen/FunctionalPrediction5000species | 1 | python | def fitting_tsne(X, n_comp, perp, metric='euclidean'):
tsne = TSNE(n_components=n_comp, perplexity=perp, metric=metric)
X_tsne = tsne.fit_transform(X)
return X_tsne | def fitting_tsne(X, n_comp, perp, metric='euclidean'):
tsne = TSNE(n_components=n_comp, perplexity=perp, metric=metric)
X_tsne = tsne.fit_transform(X)
return X_tsne<|docstring|>Fit t-SNE<|endoftext|> |
f55b95ce77f15172b56374790a88e69b224b8d17b0d037bb1db73ad9e9321082 | def plot_tsne(X_tsne, t_idx, perp):
'Plot t-SNE'
(t1_idx, t2_idx, t3_idx, t4_idx) = t_idx
colours = ['r', 'g', 'b', 'm']
plt.figure()
plt.scatter(X_tsne[(t1_idx, 0)], X_tsne[(t1_idx, 1)], c=colours[0], label='T0')
plt.scatter(X_tsne[(t2_idx, 0)], X_tsne[(t2_idx, 1)], c=colours[1], label='T1')
plt.scatter(X_tsne[(t3_idx, 0)], X_tsne[(t3_idx, 1)], c=colours[2], label='T2')
plt.scatter(X_tsne[(t4_idx, 0)], X_tsne[(t4_idx, 1)], c=colours[3], label='T3')
plt.legend()
plt.xlabel('t-SNE dim 1')
plt.ylabel('t-SNE dim 2')
plt.title(('Perplexity:' + str(perp)))
plt.show() | Plot t-SNE | utils_general.py | plot_tsne | AWebZen/FunctionalPrediction5000species | 1 | python | def plot_tsne(X_tsne, t_idx, perp):
(t1_idx, t2_idx, t3_idx, t4_idx) = t_idx
colours = ['r', 'g', 'b', 'm']
plt.figure()
plt.scatter(X_tsne[(t1_idx, 0)], X_tsne[(t1_idx, 1)], c=colours[0], label='T0')
plt.scatter(X_tsne[(t2_idx, 0)], X_tsne[(t2_idx, 1)], c=colours[1], label='T1')
plt.scatter(X_tsne[(t3_idx, 0)], X_tsne[(t3_idx, 1)], c=colours[2], label='T2')
plt.scatter(X_tsne[(t4_idx, 0)], X_tsne[(t4_idx, 1)], c=colours[3], label='T3')
plt.legend()
plt.xlabel('t-SNE dim 1')
plt.ylabel('t-SNE dim 2')
plt.title(('Perplexity:' + str(perp)))
plt.show() | def plot_tsne(X_tsne, t_idx, perp):
(t1_idx, t2_idx, t3_idx, t4_idx) = t_idx
colours = ['r', 'g', 'b', 'm']
plt.figure()
plt.scatter(X_tsne[(t1_idx, 0)], X_tsne[(t1_idx, 1)], c=colours[0], label='T0')
plt.scatter(X_tsne[(t2_idx, 0)], X_tsne[(t2_idx, 1)], c=colours[1], label='T1')
plt.scatter(X_tsne[(t3_idx, 0)], X_tsne[(t3_idx, 1)], c=colours[2], label='T2')
plt.scatter(X_tsne[(t4_idx, 0)], X_tsne[(t4_idx, 1)], c=colours[3], label='T3')
plt.legend()
plt.xlabel('t-SNE dim 1')
plt.ylabel('t-SNE dim 2')
plt.title(('Perplexity:' + str(perp)))
plt.show()<|docstring|>Plot t-SNE<|endoftext|> |
514e6d04905cedbe1b1cd1acde807f4be6ed5b2eb253f86df57b86c50affa93e | def _parse_get_pathway(txt):
'txt - list of lines of the entry'
assert (type(txt) == list), 'Takes list as input'
pathways = []
i = 0
line = txt[i].rstrip('\n')
while (((i + 1) < len(txt)) and (not line.startswith('PATHWAY'))):
i += 1
line = txt[i].rstrip('\n')
if line.startswith('PATHWAY'):
while (line.startswith('PATHWAY') or (line[0] == ' ')):
line = line.lstrip('PATHWAY ')
line_sp = line.split(' ')
if (len(line_sp) == 2):
pathways.append(tuple(line_sp))
else:
logger.error(('Uh oh, something wrong with the parsing : %s' % line))
i += 1
line = txt[i].rstrip('\n')
if (len(pathways) == 0):
logger.error('No pathway for entry.')
return pathways | txt - list of lines of the entry | utils_general.py | _parse_get_pathway | AWebZen/FunctionalPrediction5000species | 1 | python | def _parse_get_pathway(txt):
assert (type(txt) == list), 'Takes list as input'
pathways = []
i = 0
line = txt[i].rstrip('\n')
while (((i + 1) < len(txt)) and (not line.startswith('PATHWAY'))):
i += 1
line = txt[i].rstrip('\n')
if line.startswith('PATHWAY'):
while (line.startswith('PATHWAY') or (line[0] == ' ')):
line = line.lstrip('PATHWAY ')
line_sp = line.split(' ')
if (len(line_sp) == 2):
pathways.append(tuple(line_sp))
else:
logger.error(('Uh oh, something wrong with the parsing : %s' % line))
i += 1
line = txt[i].rstrip('\n')
if (len(pathways) == 0):
logger.error('No pathway for entry.')
return pathways | def _parse_get_pathway(txt):
assert (type(txt) == list), 'Takes list as input'
pathways = []
i = 0
line = txt[i].rstrip('\n')
while (((i + 1) < len(txt)) and (not line.startswith('PATHWAY'))):
i += 1
line = txt[i].rstrip('\n')
if line.startswith('PATHWAY'):
while (line.startswith('PATHWAY') or (line[0] == ' ')):
line = line.lstrip('PATHWAY ')
line_sp = line.split(' ')
if (len(line_sp) == 2):
pathways.append(tuple(line_sp))
else:
logger.error(('Uh oh, something wrong with the parsing : %s' % line))
i += 1
line = txt[i].rstrip('\n')
if (len(pathways) == 0):
logger.error('No pathway for entry.')
return pathways<|docstring|>txt - list of lines of the entry<|endoftext|> |
e30392b88c571ce6d62ea98ea00f7b4f162569133c7151f69890c60d205af81f | def split_in_n(n, y):
'\n Split my instances into n groups, with each class in its right proportion being represented in each\n split.\n\n /!\\ If there are too few instances of each class to distribute among splits, the splits will be imbalanced.\n\n\n Parameters\n ----------\n n : int\n Number of splits.\n y : array-like\n Class array.\n\n Returns\n -------\n splits : list\n List of instances indices split into n lists.\n '
idx = {k: np.array_split(np.random.permutation(np.where((np.array(y) == k))[0]), n) for k in np.unique(y)}
splits = [[] for _ in xrange(n)]
for i in xrange(n):
for k in idx.keys():
splits[i].extend(list(idx[k][i]))
return splits | Split my instances into n groups, with each class in its right proportion being represented in each
split.
/!\ If there are too few instances of each class to distribute among splits, the splits will be imbalanced.
Parameters
----------
n : int
Number of splits.
y : array-like
Class array.
Returns
-------
splits : list
List of instances indices split into n lists. | utils_general.py | split_in_n | AWebZen/FunctionalPrediction5000species | 1 | python | def split_in_n(n, y):
'\n Split my instances into n groups, with each class in its right proportion being represented in each\n split.\n\n /!\\ If there are too few instances of each class to distribute among splits, the splits will be imbalanced.\n\n\n Parameters\n ----------\n n : int\n Number of splits.\n y : array-like\n Class array.\n\n Returns\n -------\n splits : list\n List of instances indices split into n lists.\n '
idx = {k: np.array_split(np.random.permutation(np.where((np.array(y) == k))[0]), n) for k in np.unique(y)}
splits = [[] for _ in xrange(n)]
for i in xrange(n):
for k in idx.keys():
splits[i].extend(list(idx[k][i]))
return splits | def split_in_n(n, y):
'\n Split my instances into n groups, with each class in its right proportion being represented in each\n split.\n\n /!\\ If there are too few instances of each class to distribute among splits, the splits will be imbalanced.\n\n\n Parameters\n ----------\n n : int\n Number of splits.\n y : array-like\n Class array.\n\n Returns\n -------\n splits : list\n List of instances indices split into n lists.\n '
idx = {k: np.array_split(np.random.permutation(np.where((np.array(y) == k))[0]), n) for k in np.unique(y)}
splits = [[] for _ in xrange(n)]
for i in xrange(n):
for k in idx.keys():
splits[i].extend(list(idx[k][i]))
return splits<|docstring|>Split my instances into n groups, with each class in its right proportion being represented in each
split.
/!\ If there are too few instances of each class to distribute among splits, the splits will be imbalanced.
Parameters
----------
n : int
Number of splits.
y : array-like
Class array.
Returns
-------
splits : list
List of instances indices split into n lists.<|endoftext|> |
d21c468e6cd2ea2cb1925cdbd2a72cb794a417ecf4d84f3eae58b6427c16433d | def crossvalidation_splits_n(n, y):
'\n For each group in splits, will be considered testing group, the others will\n be fused and considered training group. Will thus have n (train, test) data\n splits\n\n Parameters\n ----------\n n : int\n Number of splits.\n y : array-like\n Class array.\n\n Returns\n -------\n train_test : iterator\n Iterator of (train indices, test indices) couples\n\n '
split = split_in_n(n, y)
train_test = []
for test in split:
train = []
for tr in split:
if (tr is not test):
train += tr
train_test.append((train, test))
return iter(train_test) | For each group in splits, will be considered testing group, the others will
be fused and considered training group. Will thus have n (train, test) data
splits
Parameters
----------
n : int
Number of splits.
y : array-like
Class array.
Returns
-------
train_test : iterator
Iterator of (train indices, test indices) couples | utils_general.py | crossvalidation_splits_n | AWebZen/FunctionalPrediction5000species | 1 | python | def crossvalidation_splits_n(n, y):
'\n For each group in splits, will be considered testing group, the others will\n be fused and considered training group. Will thus have n (train, test) data\n splits\n\n Parameters\n ----------\n n : int\n Number of splits.\n y : array-like\n Class array.\n\n Returns\n -------\n train_test : iterator\n Iterator of (train indices, test indices) couples\n\n '
split = split_in_n(n, y)
train_test = []
for test in split:
train = []
for tr in split:
if (tr is not test):
train += tr
train_test.append((train, test))
return iter(train_test) | def crossvalidation_splits_n(n, y):
'\n For each group in splits, will be considered testing group, the others will\n be fused and considered training group. Will thus have n (train, test) data\n splits\n\n Parameters\n ----------\n n : int\n Number of splits.\n y : array-like\n Class array.\n\n Returns\n -------\n train_test : iterator\n Iterator of (train indices, test indices) couples\n\n '
split = split_in_n(n, y)
train_test = []
for test in split:
train = []
for tr in split:
if (tr is not test):
train += tr
train_test.append((train, test))
return iter(train_test)<|docstring|>For each group in splits, will be considered testing group, the others will
be fused and considered training group. Will thus have n (train, test) data
splits
Parameters
----------
n : int
Number of splits.
y : array-like
Class array.
Returns
-------
train_test : iterator
Iterator of (train indices, test indices) couples<|endoftext|> |
ae2e2f2a9853e4f149418e56cc8e315c8704e73ea55f1dc9d45cd2c00fcfde0b | def reorder_matrix(m, d):
'\n Reorder similarity matrix : put species in same cluster together.\n\n INPUT:\n m - similarity matrix\n d - medoid dictionary : {medoid : [list of species index in cluster]}\n\n OUTPUT :\n m in new order\n new_order - order of species indexes in matrix\n '
new_order = []
for (i, med_class) in enumerate(d.values()):
new_order.append(med_class)
return (m[(np.concatenate(new_order), :)], new_order) | Reorder similarity matrix : put species in same cluster together.
INPUT:
m - similarity matrix
d - medoid dictionary : {medoid : [list of species index in cluster]}
OUTPUT :
m in new order
new_order - order of species indexes in matrix | utils_general.py | reorder_matrix | AWebZen/FunctionalPrediction5000species | 1 | python | def reorder_matrix(m, d):
'\n Reorder similarity matrix : put species in same cluster together.\n\n INPUT:\n m - similarity matrix\n d - medoid dictionary : {medoid : [list of species index in cluster]}\n\n OUTPUT :\n m in new order\n new_order - order of species indexes in matrix\n '
new_order = []
for (i, med_class) in enumerate(d.values()):
new_order.append(med_class)
return (m[(np.concatenate(new_order), :)], new_order) | def reorder_matrix(m, d):
'\n Reorder similarity matrix : put species in same cluster together.\n\n INPUT:\n m - similarity matrix\n d - medoid dictionary : {medoid : [list of species index in cluster]}\n\n OUTPUT :\n m in new order\n new_order - order of species indexes in matrix\n '
new_order = []
for (i, med_class) in enumerate(d.values()):
new_order.append(med_class)
return (m[(np.concatenate(new_order), :)], new_order)<|docstring|>Reorder similarity matrix : put species in same cluster together.
INPUT:
m - similarity matrix
d - medoid dictionary : {medoid : [list of species index in cluster]}
OUTPUT :
m in new order
new_order - order of species indexes in matrix<|endoftext|> |
21ff6fcc3b34b56f76c61d42cf3234f325e00e2de24d846b5d8af5456fb8f561 | def plot_similitude_matrix_clusters(similitude, d, hba1c='no_fig', parameters_txt_xlim=[(- 6), (- 7)], wratio=8, xbar=0.92, label_hba1c='HbA1c', xlab_acc='Nodes in scope', ylab_acc='Patients', figsiz=(20, 13), xlim_hba1c=[], colormap='viridis', subset_sp=[], hba1c_barplot_width=15, colorbar=True):
"\n Heatmap of common node similarity by pair of species, with species in the order\n of clusters (contrasts to the other heatmap in the order of temperature classes).\n A colored line at the top gives the cluster\n\n Careful! Cluster order may vary (may not be the same as temperature classes\n even when there is a correspondance) : the cluster more or less corresponding\n to the first temperature class may not be the first one (and therefore won't\n be the first one in plot neither)\n "
if subset_sp:
d2 = {}
for k in d.keys():
d2[k] = np.array([sp for sp in d[k] if (sp in subset_sp)])
else:
d2 = d
(sim, new_order) = reorder_matrix(similitude, d2)
colors = ['k', 'mediumseagreen', 'dodgerblue', 'crimson', 'lightgrey', 'yellow', 'peru', 'magenta', 'b', 'darkorchid', 'brown', 'olive', 'wheat', 'purple', 'cadetblue', 'pink', 'red', 'grey', 'turquoise', 'lime', 'orange', 'salmon', 'cyan', 'g', 'hotpink', 'tan', 'lavender', 'teal', 'darkorange', 'seagreen']
fig = plt.figure(figsize=figsiz)
if (hba1c != 'no_fig'):
gs = gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[wratio, 1], wspace=0.0, hspace=0.0)
else:
gs = gridspec.GridSpec(nrows=1, ncols=1)
ax0 = plt.subplot(gs[0])
im = ax0.imshow(sim, vmin=0, vmax=1, cmap=colormap)
print(im.axes.get_position(), im.axes.get_position(original=True))
ax0.set_xlabel(xlab_acc)
for tick in ax0.xaxis.get_major_ticks():
tick.label1.set_horizontalalignment('right')
ax0.set_ylabel(ylab_acc)
ax0.set_ylim((- 0.5), sim.shape[0])
plt.subplots_adjust(left=0.07, right=0.9, top=0.98, bottom=0.07)
length = 0
pos = []
for (i, med_class) in enumerate(new_order):
pos.append(((((2 * length) + len(med_class)) - 1) / 2.0))
ax0.plot([(- 1), (- 1)], [length, ((length + len(med_class)) - 1)], lw=4, c=colors[i])
ax0.text(parameters_txt_xlim[0], ((((2 * length) + len(med_class)) - 1) / 2.0), ('Cl.' + str(i)), fontsize=10, fontweight='bold', color=colors[i])
if (i < (len(new_order) - 1)):
ax0.plot([(- 1), sim.shape[1]], [((length + len(med_class)) - 0.5), ((length + len(med_class)) - 0.5)], c='white')
length += len(med_class)
ax0.set_xlim(parameters_txt_xlim[1], sim.shape[1])
if (hba1c != 'no_fig'):
hba1c = np.array(hba1c)
ax1 = plt.subplot(gs[1], sharey=ax0)
ax1.barh(width=[np.mean(hba1c[order]) for order in new_order], y=pos, height=[hba1c_barplot_width for _ in new_order], xerr=[np.std(hba1c[order]) for order in new_order], capsize=5)
ax1.set_ylim((- 0.5), sim.shape[0])
if xlim_hba1c:
ax1.set_xlim(*xlim_hba1c)
ax1.set_xlabel(label_hba1c)
ax1.axes.get_yaxis().set_visible(False)
position = ax0.axes.get_position()
print(position, position.y0)
if (figsiz[0] < 5):
k = 0.05
elif (figsiz[0] < 10):
k = 0.02
else:
k = 0.01
if colorbar:
cbax = fig.add_axes([xbar, position.y0, k, (position.y1 - position.y0)])
fig.colorbar(im, cax=cbax)
fig.subplots_adjust(wspace=0.0) | Heatmap of common node similarity by pair of species, with species in the order
of clusters (contrasts to the other heatmap in the order of temperature classes).
A colored line at the top gives the cluster
Careful! Cluster order may vary (may not be the same as temperature classes
even when there is a correspondance) : the cluster more or less corresponding
to the first temperature class may not be the first one (and therefore won't
be the first one in plot neither) | utils_general.py | plot_similitude_matrix_clusters | AWebZen/FunctionalPrediction5000species | 1 | python | def plot_similitude_matrix_clusters(similitude, d, hba1c='no_fig', parameters_txt_xlim=[(- 6), (- 7)], wratio=8, xbar=0.92, label_hba1c='HbA1c', xlab_acc='Nodes in scope', ylab_acc='Patients', figsiz=(20, 13), xlim_hba1c=[], colormap='viridis', subset_sp=[], hba1c_barplot_width=15, colorbar=True):
"\n Heatmap of common node similarity by pair of species, with species in the order\n of clusters (contrasts to the other heatmap in the order of temperature classes).\n A colored line at the top gives the cluster\n\n Careful! Cluster order may vary (may not be the same as temperature classes\n even when there is a correspondance) : the cluster more or less corresponding\n to the first temperature class may not be the first one (and therefore won't\n be the first one in plot neither)\n "
if subset_sp:
d2 = {}
for k in d.keys():
d2[k] = np.array([sp for sp in d[k] if (sp in subset_sp)])
else:
d2 = d
(sim, new_order) = reorder_matrix(similitude, d2)
colors = ['k', 'mediumseagreen', 'dodgerblue', 'crimson', 'lightgrey', 'yellow', 'peru', 'magenta', 'b', 'darkorchid', 'brown', 'olive', 'wheat', 'purple', 'cadetblue', 'pink', 'red', 'grey', 'turquoise', 'lime', 'orange', 'salmon', 'cyan', 'g', 'hotpink', 'tan', 'lavender', 'teal', 'darkorange', 'seagreen']
fig = plt.figure(figsize=figsiz)
if (hba1c != 'no_fig'):
gs = gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[wratio, 1], wspace=0.0, hspace=0.0)
else:
gs = gridspec.GridSpec(nrows=1, ncols=1)
ax0 = plt.subplot(gs[0])
im = ax0.imshow(sim, vmin=0, vmax=1, cmap=colormap)
print(im.axes.get_position(), im.axes.get_position(original=True))
ax0.set_xlabel(xlab_acc)
for tick in ax0.xaxis.get_major_ticks():
tick.label1.set_horizontalalignment('right')
ax0.set_ylabel(ylab_acc)
ax0.set_ylim((- 0.5), sim.shape[0])
plt.subplots_adjust(left=0.07, right=0.9, top=0.98, bottom=0.07)
length = 0
pos = []
for (i, med_class) in enumerate(new_order):
pos.append(((((2 * length) + len(med_class)) - 1) / 2.0))
ax0.plot([(- 1), (- 1)], [length, ((length + len(med_class)) - 1)], lw=4, c=colors[i])
ax0.text(parameters_txt_xlim[0], ((((2 * length) + len(med_class)) - 1) / 2.0), ('Cl.' + str(i)), fontsize=10, fontweight='bold', color=colors[i])
if (i < (len(new_order) - 1)):
ax0.plot([(- 1), sim.shape[1]], [((length + len(med_class)) - 0.5), ((length + len(med_class)) - 0.5)], c='white')
length += len(med_class)
ax0.set_xlim(parameters_txt_xlim[1], sim.shape[1])
if (hba1c != 'no_fig'):
hba1c = np.array(hba1c)
ax1 = plt.subplot(gs[1], sharey=ax0)
ax1.barh(width=[np.mean(hba1c[order]) for order in new_order], y=pos, height=[hba1c_barplot_width for _ in new_order], xerr=[np.std(hba1c[order]) for order in new_order], capsize=5)
ax1.set_ylim((- 0.5), sim.shape[0])
if xlim_hba1c:
ax1.set_xlim(*xlim_hba1c)
ax1.set_xlabel(label_hba1c)
ax1.axes.get_yaxis().set_visible(False)
position = ax0.axes.get_position()
print(position, position.y0)
if (figsiz[0] < 5):
k = 0.05
elif (figsiz[0] < 10):
k = 0.02
else:
k = 0.01
if colorbar:
cbax = fig.add_axes([xbar, position.y0, k, (position.y1 - position.y0)])
fig.colorbar(im, cax=cbax)
fig.subplots_adjust(wspace=0.0) | def plot_similitude_matrix_clusters(similitude, d, hba1c='no_fig', parameters_txt_xlim=[(- 6), (- 7)], wratio=8, xbar=0.92, label_hba1c='HbA1c', xlab_acc='Nodes in scope', ylab_acc='Patients', figsiz=(20, 13), xlim_hba1c=[], colormap='viridis', subset_sp=[], hba1c_barplot_width=15, colorbar=True):
"\n Heatmap of common node similarity by pair of species, with species in the order\n of clusters (contrasts to the other heatmap in the order of temperature classes).\n A colored line at the top gives the cluster\n\n Careful! Cluster order may vary (may not be the same as temperature classes\n even when there is a correspondance) : the cluster more or less corresponding\n to the first temperature class may not be the first one (and therefore won't\n be the first one in plot neither)\n "
if subset_sp:
d2 = {}
for k in d.keys():
d2[k] = np.array([sp for sp in d[k] if (sp in subset_sp)])
else:
d2 = d
(sim, new_order) = reorder_matrix(similitude, d2)
colors = ['k', 'mediumseagreen', 'dodgerblue', 'crimson', 'lightgrey', 'yellow', 'peru', 'magenta', 'b', 'darkorchid', 'brown', 'olive', 'wheat', 'purple', 'cadetblue', 'pink', 'red', 'grey', 'turquoise', 'lime', 'orange', 'salmon', 'cyan', 'g', 'hotpink', 'tan', 'lavender', 'teal', 'darkorange', 'seagreen']
fig = plt.figure(figsize=figsiz)
if (hba1c != 'no_fig'):
gs = gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[wratio, 1], wspace=0.0, hspace=0.0)
else:
gs = gridspec.GridSpec(nrows=1, ncols=1)
ax0 = plt.subplot(gs[0])
im = ax0.imshow(sim, vmin=0, vmax=1, cmap=colormap)
print(im.axes.get_position(), im.axes.get_position(original=True))
ax0.set_xlabel(xlab_acc)
for tick in ax0.xaxis.get_major_ticks():
tick.label1.set_horizontalalignment('right')
ax0.set_ylabel(ylab_acc)
ax0.set_ylim((- 0.5), sim.shape[0])
plt.subplots_adjust(left=0.07, right=0.9, top=0.98, bottom=0.07)
length = 0
pos = []
for (i, med_class) in enumerate(new_order):
pos.append(((((2 * length) + len(med_class)) - 1) / 2.0))
ax0.plot([(- 1), (- 1)], [length, ((length + len(med_class)) - 1)], lw=4, c=colors[i])
ax0.text(parameters_txt_xlim[0], ((((2 * length) + len(med_class)) - 1) / 2.0), ('Cl.' + str(i)), fontsize=10, fontweight='bold', color=colors[i])
if (i < (len(new_order) - 1)):
ax0.plot([(- 1), sim.shape[1]], [((length + len(med_class)) - 0.5), ((length + len(med_class)) - 0.5)], c='white')
length += len(med_class)
ax0.set_xlim(parameters_txt_xlim[1], sim.shape[1])
if (hba1c != 'no_fig'):
hba1c = np.array(hba1c)
ax1 = plt.subplot(gs[1], sharey=ax0)
ax1.barh(width=[np.mean(hba1c[order]) for order in new_order], y=pos, height=[hba1c_barplot_width for _ in new_order], xerr=[np.std(hba1c[order]) for order in new_order], capsize=5)
ax1.set_ylim((- 0.5), sim.shape[0])
if xlim_hba1c:
ax1.set_xlim(*xlim_hba1c)
ax1.set_xlabel(label_hba1c)
ax1.axes.get_yaxis().set_visible(False)
position = ax0.axes.get_position()
print(position, position.y0)
if (figsiz[0] < 5):
k = 0.05
elif (figsiz[0] < 10):
k = 0.02
else:
k = 0.01
if colorbar:
cbax = fig.add_axes([xbar, position.y0, k, (position.y1 - position.y0)])
fig.colorbar(im, cax=cbax)
fig.subplots_adjust(wspace=0.0)<|docstring|>Heatmap of common node similarity by pair of species, with species in the order
of clusters (contrasts to the other heatmap in the order of temperature classes).
A colored line at the top gives the cluster
Careful! Cluster order may vary (may not be the same as temperature classes
even when there is a correspondance) : the cluster more or less corresponding
to the first temperature class may not be the first one (and therefore won't
be the first one in plot neither)<|endoftext|> |
e4a3bb3cd0e88f42bf9d1162747c51a4f9a369e5f56f53ee77e0a22f4feb8432 | def get_domain(codes):
'From list of KEGG codes, get organism domain'
domain = []
orgs = k.list('organism')
for code in codes:
hit = orgs.find((('\t' + code) + '\t'))
if (hit == (- 1)):
logger.error(('Did not find species %s' % code))
domain.append(None)
else:
dom = orgs[hit:].split('\n')[0].split('\t')[3].split(';')[1]
domain.append(dom)
return domain | From list of KEGG codes, get organism domain | utils_general.py | get_domain | AWebZen/FunctionalPrediction5000species | 1 | python | def get_domain(codes):
domain = []
orgs = k.list('organism')
for code in codes:
hit = orgs.find((('\t' + code) + '\t'))
if (hit == (- 1)):
logger.error(('Did not find species %s' % code))
domain.append(None)
else:
dom = orgs[hit:].split('\n')[0].split('\t')[3].split(';')[1]
domain.append(dom)
return domain | def get_domain(codes):
domain = []
orgs = k.list('organism')
for code in codes:
hit = orgs.find((('\t' + code) + '\t'))
if (hit == (- 1)):
logger.error(('Did not find species %s' % code))
domain.append(None)
else:
dom = orgs[hit:].split('\n')[0].split('\t')[3].split(';')[1]
domain.append(dom)
return domain<|docstring|>From list of KEGG codes, get organism domain<|endoftext|> |
be2ebb19f5dffddf9ad2a6e0c9dabc5b5ee61471baebeb2646fcfd0bdc9c87a8 | def get_pathways_from_nodes(nodes):
'\n Get KEGG pathways associated to nodes in list. Nodes are compound codes and EC codes.\n :param nodes: list of node names\n :return:\n\n '
global k
nodes = [nod.split('_')[0] for nod in nodes]
pathways = {}
for (p, cpd) in enumerate(nodes):
print(cpd)
ok = False
txt = k.get(('ec:' + cpd))
try:
int(txt)
txt = k.get(('cpd:' + cpd))
try:
int(txt)
except ValueError:
ok = True
txt = txt.split('\n')
except ValueError:
ok = True
txt = txt.split('\n')
if ok:
pathways[cpd] = _parse_get_pathway(txt)
else:
print(cpd, 'did not work')
try:
c = collections.Counter(np.concatenate(pathways.values())[(:, 1)])
except ValueError:
pathways_without = {k: v for (k, v) in pathways.items() if (len(v) > 0)}
c = collections.Counter(np.concatenate(pathways_without.values())[(:, 1)])
return (c, pathways) | Get KEGG pathways associated to nodes in list. Nodes are compound codes and EC codes.
:param nodes: list of node names
:return: | utils_general.py | get_pathways_from_nodes | AWebZen/FunctionalPrediction5000species | 1 | python | def get_pathways_from_nodes(nodes):
'\n Get KEGG pathways associated to nodes in list. Nodes are compound codes and EC codes.\n :param nodes: list of node names\n :return:\n\n '
global k
nodes = [nod.split('_')[0] for nod in nodes]
pathways = {}
for (p, cpd) in enumerate(nodes):
print(cpd)
ok = False
txt = k.get(('ec:' + cpd))
try:
int(txt)
txt = k.get(('cpd:' + cpd))
try:
int(txt)
except ValueError:
ok = True
txt = txt.split('\n')
except ValueError:
ok = True
txt = txt.split('\n')
if ok:
pathways[cpd] = _parse_get_pathway(txt)
else:
print(cpd, 'did not work')
try:
c = collections.Counter(np.concatenate(pathways.values())[(:, 1)])
except ValueError:
pathways_without = {k: v for (k, v) in pathways.items() if (len(v) > 0)}
c = collections.Counter(np.concatenate(pathways_without.values())[(:, 1)])
return (c, pathways) | def get_pathways_from_nodes(nodes):
'\n Get KEGG pathways associated to nodes in list. Nodes are compound codes and EC codes.\n :param nodes: list of node names\n :return:\n\n '
global k
nodes = [nod.split('_')[0] for nod in nodes]
pathways = {}
for (p, cpd) in enumerate(nodes):
print(cpd)
ok = False
txt = k.get(('ec:' + cpd))
try:
int(txt)
txt = k.get(('cpd:' + cpd))
try:
int(txt)
except ValueError:
ok = True
txt = txt.split('\n')
except ValueError:
ok = True
txt = txt.split('\n')
if ok:
pathways[cpd] = _parse_get_pathway(txt)
else:
print(cpd, 'did not work')
try:
c = collections.Counter(np.concatenate(pathways.values())[(:, 1)])
except ValueError:
pathways_without = {k: v for (k, v) in pathways.items() if (len(v) > 0)}
c = collections.Counter(np.concatenate(pathways_without.values())[(:, 1)])
return (c, pathways)<|docstring|>Get KEGG pathways associated to nodes in list. Nodes are compound codes and EC codes.
:param nodes: list of node names
:return:<|endoftext|> |
1e2b8d38b66e0ca1937a72cc222210e8fa0644468457e75b27d63866bd8c3634 | def parse_column_categories(self, categories):
'For categories with columns separated by a double space, a list of\n each column values will be created.'
for cat in categories:
if (not (cat in self.entry.keys())):
continue
values = [[val] for val in self.entry[cat][0].split(' ')]
for val in self.entry[cat][1:]:
for (i, col_val) in enumerate(val.split(' ')):
values[i].append(col_val)
self.entry[cat] = values | For categories with columns separated by a double space, a list of
each column values will be created. | utils_general.py | parse_column_categories | AWebZen/FunctionalPrediction5000species | 1 | python | def parse_column_categories(self, categories):
'For categories with columns separated by a double space, a list of\n each column values will be created.'
for cat in categories:
if (not (cat in self.entry.keys())):
continue
values = [[val] for val in self.entry[cat][0].split(' ')]
for val in self.entry[cat][1:]:
for (i, col_val) in enumerate(val.split(' ')):
values[i].append(col_val)
self.entry[cat] = values | def parse_column_categories(self, categories):
'For categories with columns separated by a double space, a list of\n each column values will be created.'
for cat in categories:
if (not (cat in self.entry.keys())):
continue
values = [[val] for val in self.entry[cat][0].split(' ')]
for val in self.entry[cat][1:]:
for (i, col_val) in enumerate(val.split(' ')):
values[i].append(col_val)
self.entry[cat] = values<|docstring|>For categories with columns separated by a double space, a list of
each column values will be created.<|endoftext|> |
940801ad5a635647f9aefb2b5da69726682ff2502bcd9c4e4f07325d52332842 | def __init__(self, dbfile, engine='SQLite', user=None, password=None, host='localhost', LOG=fLOG, attach=None):
'\n\n @param dbfile database file (use ``:memory:`` to avoid creating a file and using only memory)\n it can also contain several files separated by ;\n ``name_file ; nickname,second_file ; ...``\n @param engine SQLite or MySQL (if it is installed)\n @param user user if needed\n @param password password if needed\n @param host to connect to a MSSQL database\n @param LOG LOG function\n @param attach dictionary: { nickname: filename }, list of database to attach\n\n @warning If the folder does not exist, it will be created\n '
DatabaseJoinGroup.__init__(self)
DatabaseCore.__init__(self, sql_file=dbfile, engine=engine, user=user, password=password, host=host, LOG=LOG, attach=attach) | @param dbfile database file (use ``:memory:`` to avoid creating a file and using only memory)
it can also contain several files separated by ;
``name_file ; nickname,second_file ; ...``
@param engine SQLite or MySQL (if it is installed)
@param user user if needed
@param password password if needed
@param host to connect to a MSSQL database
@param LOG LOG function
@param attach dictionary: { nickname: filename }, list of database to attach
@warning If the folder does not exist, it will be created | src/pyensae/sql/database_main.py | __init__ | sdpython/pyensae | 28 | python | def __init__(self, dbfile, engine='SQLite', user=None, password=None, host='localhost', LOG=fLOG, attach=None):
'\n\n @param dbfile database file (use ``:memory:`` to avoid creating a file and using only memory)\n it can also contain several files separated by ;\n ``name_file ; nickname,second_file ; ...``\n @param engine SQLite or MySQL (if it is installed)\n @param user user if needed\n @param password password if needed\n @param host to connect to a MSSQL database\n @param LOG LOG function\n @param attach dictionary: { nickname: filename }, list of database to attach\n\n @warning If the folder does not exist, it will be created\n '
DatabaseJoinGroup.__init__(self)
DatabaseCore.__init__(self, sql_file=dbfile, engine=engine, user=user, password=password, host=host, LOG=LOG, attach=attach) | def __init__(self, dbfile, engine='SQLite', user=None, password=None, host='localhost', LOG=fLOG, attach=None):
'\n\n @param dbfile database file (use ``:memory:`` to avoid creating a file and using only memory)\n it can also contain several files separated by ;\n ``name_file ; nickname,second_file ; ...``\n @param engine SQLite or MySQL (if it is installed)\n @param user user if needed\n @param password password if needed\n @param host to connect to a MSSQL database\n @param LOG LOG function\n @param attach dictionary: { nickname: filename }, list of database to attach\n\n @warning If the folder does not exist, it will be created\n '
DatabaseJoinGroup.__init__(self)
DatabaseCore.__init__(self, sql_file=dbfile, engine=engine, user=user, password=password, host=host, LOG=LOG, attach=attach)<|docstring|>@param dbfile database file (use ``:memory:`` to avoid creating a file and using only memory)
it can also contain several files separated by ;
``name_file ; nickname,second_file ; ...``
@param engine SQLite or MySQL (if it is installed)
@param user user if needed
@param password password if needed
@param host to connect to a MSSQL database
@param LOG LOG function
@param attach dictionary: { nickname: filename }, list of database to attach
@warning If the folder does not exist, it will be created<|endoftext|> |
07c5ddfae662396ed33ccdded51a1e29703cc04f2d936b5a23483614ae28064d | @staticmethod
def schema_database(df, add_id=True):
'\n Returns the schema for a database which would contains this database.\n\n @param df pandas DataFrame\n @param add_id if True, adds an index "PRIMARYKEY"\n @return dictionary { index_column: (name, type) }\n '
schema = {i: (l, str) for (i, l) in enumerate(df.columns)}
if (add_id is not None):
if isinstance(add_id, bool):
if add_id:
add_id = 'PRIMARYKEY'
schema[(- 1)] = (add_id, int, 'PRIMARYKEY', 'AUTOINCREMENT')
else:
schema[(- 1)] = (add_id, int, 'PRIMARYKEY', 'AUTOINCREMENT')
if (len(df) > 0):
for (i, v) in enumerate(df.values[0]):
if (not isinstance(v, str)):
schema[i] = (schema[i][0], type(v))
return schema | Returns the schema for a database which would contains this database.
@param df pandas DataFrame
@param add_id if True, adds an index "PRIMARYKEY"
@return dictionary { index_column: (name, type) } | src/pyensae/sql/database_main.py | schema_database | sdpython/pyensae | 28 | python | @staticmethod
def schema_database(df, add_id=True):
'\n Returns the schema for a database which would contains this database.\n\n @param df pandas DataFrame\n @param add_id if True, adds an index "PRIMARYKEY"\n @return dictionary { index_column: (name, type) }\n '
schema = {i: (l, str) for (i, l) in enumerate(df.columns)}
if (add_id is not None):
if isinstance(add_id, bool):
if add_id:
add_id = 'PRIMARYKEY'
schema[(- 1)] = (add_id, int, 'PRIMARYKEY', 'AUTOINCREMENT')
else:
schema[(- 1)] = (add_id, int, 'PRIMARYKEY', 'AUTOINCREMENT')
if (len(df) > 0):
for (i, v) in enumerate(df.values[0]):
if (not isinstance(v, str)):
schema[i] = (schema[i][0], type(v))
return schema | @staticmethod
def schema_database(df, add_id=True):
'\n Returns the schema for a database which would contains this database.\n\n @param df pandas DataFrame\n @param add_id if True, adds an index "PRIMARYKEY"\n @return dictionary { index_column: (name, type) }\n '
schema = {i: (l, str) for (i, l) in enumerate(df.columns)}
if (add_id is not None):
if isinstance(add_id, bool):
if add_id:
add_id = 'PRIMARYKEY'
schema[(- 1)] = (add_id, int, 'PRIMARYKEY', 'AUTOINCREMENT')
else:
schema[(- 1)] = (add_id, int, 'PRIMARYKEY', 'AUTOINCREMENT')
if (len(df) > 0):
for (i, v) in enumerate(df.values[0]):
if (not isinstance(v, str)):
schema[i] = (schema[i][0], type(v))
return schema<|docstring|>Returns the schema for a database which would contains this database.
@param df pandas DataFrame
@param add_id if True, adds an index "PRIMARYKEY"
@return dictionary { index_column: (name, type) }<|endoftext|> |
d55b8ae44c3754d1f07903841b28d7fbd62660155d99dbc52ec9541da14758fe | @staticmethod
def fill_sql_table(df, filename_or_database, tablename, add_id='idr', **kwargs):
'\n Returns a Database object, creates the database if it does not exists,\n same for the table.\n\n @param df pandas DataFrame\n @param filename_or_database filename or Database object,\n in that second case, we assume method connect was called before\n @param tablename table name\n @param add_id if != None then the function adds an id, it first takes the\n ``max(id)`` and goes on incrementing it\n @param kwargs sent to @see cl Database\n @return ``Database`` object (new or the one from the parameters),\n in both case, the database is not disconnected\n\n .. exref::\n :title: import a DataFrame into a SQL table\n :tag: SQL\n\n ::\n\n values = [ {"name":"A", "age":10, "score":34.5 },\n {"name":"B", "age":20, "score":-34.5 }, ]\n df = pandas.DataFrame(values)\n dbf = "something.db3"\n db = Database.fill_sql_table(df, dbf, "mytable")\n\n This example could be replaced by:\n\n ::\n\n values = [ {"name":"A", "age":10, "score":34.5 },\n {"name":"B", "age":20, "score":-34.5 }, ]\n df = pandas.DataFrame(values)\n dbf = "something.db3"\n db = Database(dbf)\n db.connect()\n db.import_dataframe(df, "mytable)\n db.close()\n '
schema = Database.schema_database(df, add_id)
if isinstance(filename_or_database, str):
db = Database(filename_or_database, **kwargs)
db.connect()
if (tablename not in db.get_table_list()):
cursor = db.create_table(tablename, schema)
db.append_values(df.values, tablename, schema, cursor=cursor)
else:
db.append_values(df.values, tablename, schema)
else:
db = filename_or_database
if (tablename not in db.get_table_list()):
cursor = db.create_table(tablename, schema)
db.append_values(df.values, tablename, schema, cursor=cursor)
else:
db.append_values(df.values, tablename, schema)
return db | Returns a Database object, creates the database if it does not exists,
same for the table.
@param df pandas DataFrame
@param filename_or_database filename or Database object,
in that second case, we assume method connect was called before
@param tablename table name
@param add_id if != None then the function adds an id, it first takes the
``max(id)`` and goes on incrementing it
@param kwargs sent to @see cl Database
@return ``Database`` object (new or the one from the parameters),
in both case, the database is not disconnected
.. exref::
:title: import a DataFrame into a SQL table
:tag: SQL
::
values = [ {"name":"A", "age":10, "score":34.5 },
{"name":"B", "age":20, "score":-34.5 }, ]
df = pandas.DataFrame(values)
dbf = "something.db3"
db = Database.fill_sql_table(df, dbf, "mytable")
This example could be replaced by:
::
values = [ {"name":"A", "age":10, "score":34.5 },
{"name":"B", "age":20, "score":-34.5 }, ]
df = pandas.DataFrame(values)
dbf = "something.db3"
db = Database(dbf)
db.connect()
db.import_dataframe(df, "mytable)
db.close() | src/pyensae/sql/database_main.py | fill_sql_table | sdpython/pyensae | 28 | python | @staticmethod
def fill_sql_table(df, filename_or_database, tablename, add_id='idr', **kwargs):
'\n Returns a Database object, creates the database if it does not exists,\n same for the table.\n\n @param df pandas DataFrame\n @param filename_or_database filename or Database object,\n in that second case, we assume method connect was called before\n @param tablename table name\n @param add_id if != None then the function adds an id, it first takes the\n ``max(id)`` and goes on incrementing it\n @param kwargs sent to @see cl Database\n @return ``Database`` object (new or the one from the parameters),\n in both case, the database is not disconnected\n\n .. exref::\n :title: import a DataFrame into a SQL table\n :tag: SQL\n\n ::\n\n values = [ {"name":"A", "age":10, "score":34.5 },\n {"name":"B", "age":20, "score":-34.5 }, ]\n df = pandas.DataFrame(values)\n dbf = "something.db3"\n db = Database.fill_sql_table(df, dbf, "mytable")\n\n This example could be replaced by:\n\n ::\n\n values = [ {"name":"A", "age":10, "score":34.5 },\n {"name":"B", "age":20, "score":-34.5 }, ]\n df = pandas.DataFrame(values)\n dbf = "something.db3"\n db = Database(dbf)\n db.connect()\n db.import_dataframe(df, "mytable)\n db.close()\n '
schema = Database.schema_database(df, add_id)
if isinstance(filename_or_database, str):
db = Database(filename_or_database, **kwargs)
db.connect()
if (tablename not in db.get_table_list()):
cursor = db.create_table(tablename, schema)
db.append_values(df.values, tablename, schema, cursor=cursor)
else:
db.append_values(df.values, tablename, schema)
else:
db = filename_or_database
if (tablename not in db.get_table_list()):
cursor = db.create_table(tablename, schema)
db.append_values(df.values, tablename, schema, cursor=cursor)
else:
db.append_values(df.values, tablename, schema)
return db | @staticmethod
def fill_sql_table(df, filename_or_database, tablename, add_id='idr', **kwargs):
'\n Returns a Database object, creates the database if it does not exists,\n same for the table.\n\n @param df pandas DataFrame\n @param filename_or_database filename or Database object,\n in that second case, we assume method connect was called before\n @param tablename table name\n @param add_id if != None then the function adds an id, it first takes the\n ``max(id)`` and goes on incrementing it\n @param kwargs sent to @see cl Database\n @return ``Database`` object (new or the one from the parameters),\n in both case, the database is not disconnected\n\n .. exref::\n :title: import a DataFrame into a SQL table\n :tag: SQL\n\n ::\n\n values = [ {"name":"A", "age":10, "score":34.5 },\n {"name":"B", "age":20, "score":-34.5 }, ]\n df = pandas.DataFrame(values)\n dbf = "something.db3"\n db = Database.fill_sql_table(df, dbf, "mytable")\n\n This example could be replaced by:\n\n ::\n\n values = [ {"name":"A", "age":10, "score":34.5 },\n {"name":"B", "age":20, "score":-34.5 }, ]\n df = pandas.DataFrame(values)\n dbf = "something.db3"\n db = Database(dbf)\n db.connect()\n db.import_dataframe(df, "mytable)\n db.close()\n '
schema = Database.schema_database(df, add_id)
if isinstance(filename_or_database, str):
db = Database(filename_or_database, **kwargs)
db.connect()
if (tablename not in db.get_table_list()):
cursor = db.create_table(tablename, schema)
db.append_values(df.values, tablename, schema, cursor=cursor)
else:
db.append_values(df.values, tablename, schema)
else:
db = filename_or_database
if (tablename not in db.get_table_list()):
cursor = db.create_table(tablename, schema)
db.append_values(df.values, tablename, schema, cursor=cursor)
else:
db.append_values(df.values, tablename, schema)
return db<|docstring|>Returns a Database object, creates the database if it does not exists,
same for the table.
@param df pandas DataFrame
@param filename_or_database filename or Database object,
in that second case, we assume method connect was called before
@param tablename table name
@param add_id if != None then the function adds an id, it first takes the
``max(id)`` and goes on incrementing it
@param kwargs sent to @see cl Database
@return ``Database`` object (new or the one from the parameters),
in both case, the database is not disconnected
.. exref::
:title: import a DataFrame into a SQL table
:tag: SQL
::
values = [ {"name":"A", "age":10, "score":34.5 },
{"name":"B", "age":20, "score":-34.5 }, ]
df = pandas.DataFrame(values)
dbf = "something.db3"
db = Database.fill_sql_table(df, dbf, "mytable")
This example could be replaced by:
::
values = [ {"name":"A", "age":10, "score":34.5 },
{"name":"B", "age":20, "score":-34.5 }, ]
df = pandas.DataFrame(values)
dbf = "something.db3"
db = Database(dbf)
db.connect()
db.import_dataframe(df, "mytable)
db.close()<|endoftext|> |
8f6c698eb384c12af6b580a645158f79a42a2f3790b6887600f6f3c0feea5ea8 | def import_dataframe(self, df, tablename, add_id='idr'):
'\n Imports a DataFrame into a table.\n\n @param df pandas DataFrame\n @param tablename table name\n @param add_id an index, maybe to be added\n @return self\n '
return Database.fill_sql_table(df, self, tablename, add_id) | Imports a DataFrame into a table.
@param df pandas DataFrame
@param tablename table name
@param add_id an index, maybe to be added
@return self | src/pyensae/sql/database_main.py | import_dataframe | sdpython/pyensae | 28 | python | def import_dataframe(self, df, tablename, add_id='idr'):
'\n Imports a DataFrame into a table.\n\n @param df pandas DataFrame\n @param tablename table name\n @param add_id an index, maybe to be added\n @return self\n '
return Database.fill_sql_table(df, self, tablename, add_id) | def import_dataframe(self, df, tablename, add_id='idr'):
'\n Imports a DataFrame into a table.\n\n @param df pandas DataFrame\n @param tablename table name\n @param add_id an index, maybe to be added\n @return self\n '
return Database.fill_sql_table(df, self, tablename, add_id)<|docstring|>Imports a DataFrame into a table.
@param df pandas DataFrame
@param tablename table name
@param add_id an index, maybe to be added
@return self<|endoftext|> |
12833909d6000b897c88b8b2333df0ef57f260880001fdf8f5beee78d53e0d5e | def to_df(self, request):
'\n Converts a SQL request into a :epkg:`pandas:Dataframe`.\n\n @param request SQL request\n @return DataFrame\n '
import pandas
cols = self.get_sql_columns(request)
iter = self.execute_view(request, nolog=True)
return pandas.DataFrame(iter, columns=cols) | Converts a SQL request into a :epkg:`pandas:Dataframe`.
@param request SQL request
@return DataFrame | src/pyensae/sql/database_main.py | to_df | sdpython/pyensae | 28 | python | def to_df(self, request):
'\n Converts a SQL request into a :epkg:`pandas:Dataframe`.\n\n @param request SQL request\n @return DataFrame\n '
import pandas
cols = self.get_sql_columns(request)
iter = self.execute_view(request, nolog=True)
return pandas.DataFrame(iter, columns=cols) | def to_df(self, request):
'\n Converts a SQL request into a :epkg:`pandas:Dataframe`.\n\n @param request SQL request\n @return DataFrame\n '
import pandas
cols = self.get_sql_columns(request)
iter = self.execute_view(request, nolog=True)
return pandas.DataFrame(iter, columns=cols)<|docstring|>Converts a SQL request into a :epkg:`pandas:Dataframe`.
@param request SQL request
@return DataFrame<|endoftext|> |
f53be4cc112506cb805e0c60a2a253682fb5b7ac818d76ff06eb59f7dd8dfbc6 | def copy_to(self, db, subset=None):
'\n Copies all tables into db, we assume both database are not connected.\n\n @param db another database (possibly empty)\n @param subset list of tables to copy or None for all\n '
self.connect()
db.connect()
for tbl in self.get_table_list():
if ((subset is None) or (tbl in subset)):
self.LOG(('copy_to: create table ' + tbl))
sch = self.get_table_columns_list(tbl, True)
curins = db.create_table(tbl, sch)
cursor = self.execute(('SELECT * FROM %s' % tbl))
buffer = []
for row in cursor:
buffer.append(row)
if (len(buffer) >= 1000):
db.insert(tbl, buffer, cursor=curins)
buffer = []
if (len(buffer) > 0):
db.insert(tbl, buffer)
db.commit()
cursor.close()
self.close()
db.close() | Copies all tables into db, we assume both database are not connected.
@param db another database (possibly empty)
@param subset list of tables to copy or None for all | src/pyensae/sql/database_main.py | copy_to | sdpython/pyensae | 28 | python | def copy_to(self, db, subset=None):
'\n Copies all tables into db, we assume both database are not connected.\n\n @param db another database (possibly empty)\n @param subset list of tables to copy or None for all\n '
self.connect()
db.connect()
for tbl in self.get_table_list():
if ((subset is None) or (tbl in subset)):
self.LOG(('copy_to: create table ' + tbl))
sch = self.get_table_columns_list(tbl, True)
curins = db.create_table(tbl, sch)
cursor = self.execute(('SELECT * FROM %s' % tbl))
buffer = []
for row in cursor:
buffer.append(row)
if (len(buffer) >= 1000):
db.insert(tbl, buffer, cursor=curins)
buffer = []
if (len(buffer) > 0):
db.insert(tbl, buffer)
db.commit()
cursor.close()
self.close()
db.close() | def copy_to(self, db, subset=None):
'\n Copies all tables into db, we assume both database are not connected.\n\n @param db another database (possibly empty)\n @param subset list of tables to copy or None for all\n '
self.connect()
db.connect()
for tbl in self.get_table_list():
if ((subset is None) or (tbl in subset)):
self.LOG(('copy_to: create table ' + tbl))
sch = self.get_table_columns_list(tbl, True)
curins = db.create_table(tbl, sch)
cursor = self.execute(('SELECT * FROM %s' % tbl))
buffer = []
for row in cursor:
buffer.append(row)
if (len(buffer) >= 1000):
db.insert(tbl, buffer, cursor=curins)
buffer = []
if (len(buffer) > 0):
db.insert(tbl, buffer)
db.commit()
cursor.close()
self.close()
db.close()<|docstring|>Copies all tables into db, we assume both database are not connected.
@param db another database (possibly empty)
@param subset list of tables to copy or None for all<|endoftext|> |
835567680020f9b83a5933e436ed5f0c90c6cd03a432df5dcdbf1ed13a027e7d | def _start(queue, stop_event):
'\n Thread target function. Starts the ImageQueue._populate function which runs\n indefinitely until stop_event is set.\n\n Args:\n queue: A reference to the ImageQueue object onto which the threads\n apply.\n stop_event: An even that can be set in the main thread to stop\n population of the ImageQueue\n '
while (not stop_event.is_set()):
queue._populate() | Thread target function. Starts the ImageQueue._populate function which runs
indefinitely until stop_event is set.
Args:
queue: A reference to the ImageQueue object onto which the threads
apply.
stop_event: An even that can be set in the main thread to stop
population of the ImageQueue | mpunet/image/image_queue.py | _start | sandeepsinghsengar/MPUNet2Plus | 156 | python | def _start(queue, stop_event):
'\n Thread target function. Starts the ImageQueue._populate function which runs\n indefinitely until stop_event is set.\n\n Args:\n queue: A reference to the ImageQueue object onto which the threads\n apply.\n stop_event: An even that can be set in the main thread to stop\n population of the ImageQueue\n '
while (not stop_event.is_set()):
queue._populate() | def _start(queue, stop_event):
'\n Thread target function. Starts the ImageQueue._populate function which runs\n indefinitely until stop_event is set.\n\n Args:\n queue: A reference to the ImageQueue object onto which the threads\n apply.\n stop_event: An even that can be set in the main thread to stop\n population of the ImageQueue\n '
while (not stop_event.is_set()):
queue._populate()<|docstring|>Thread target function. Starts the ImageQueue._populate function which runs
indefinitely until stop_event is set.
Args:
queue: A reference to the ImageQueue object onto which the threads
apply.
stop_event: An even that can be set in the main thread to stop
population of the ImageQueue<|endoftext|> |
9c70517bb3b4f3e71f55a92bd778afba869fda4af6bc30b81fc514995317fb2b | def __init__(self, max_queue_size, image_pair_loader, entry_func=None, entry_func_kw=None, exit_func=None, exit_func_kw=None):
'\n Args:\n max_queue_size: Int, the maximum number of ImagePair objects\n to store in the queue at a given time\n image_pair_loader: The ImagePairLoader object from which images are\n fetched.\n entry_func: String giving name of method to call on the\n ImagePair object at queue entry time.\n entry_func_kw: Dict, keyword arguments to supply to entry_func\n exit_func: String giving name of method to call on the\n ImagePair object at queue exit time.\n exit_func_kw: Dict, keyword arguments to supply to exit_func\n '
self.queue = Queue(maxsize=max_queue_size)
self.image_pair_loader = image_pair_loader
self.load_new_prob = 1.0
self.entry_func = (entry_func, (entry_func_kw or {}))
self.exit_func = (exit_func, (exit_func_kw or {}))
self.threads = []
self.items_in_queue = 0
self._last = 0
self.no_new_counter = 0
self.num_times_in_queue = {image: 0 for image in self.image_pair_loader} | Args:
max_queue_size: Int, the maximum number of ImagePair objects
to store in the queue at a given time
image_pair_loader: The ImagePairLoader object from which images are
fetched.
entry_func: String giving name of method to call on the
ImagePair object at queue entry time.
entry_func_kw: Dict, keyword arguments to supply to entry_func
exit_func: String giving name of method to call on the
ImagePair object at queue exit time.
exit_func_kw: Dict, keyword arguments to supply to exit_func | mpunet/image/image_queue.py | __init__ | sandeepsinghsengar/MPUNet2Plus | 156 | python | def __init__(self, max_queue_size, image_pair_loader, entry_func=None, entry_func_kw=None, exit_func=None, exit_func_kw=None):
'\n Args:\n max_queue_size: Int, the maximum number of ImagePair objects\n to store in the queue at a given time\n image_pair_loader: The ImagePairLoader object from which images are\n fetched.\n entry_func: String giving name of method to call on the\n ImagePair object at queue entry time.\n entry_func_kw: Dict, keyword arguments to supply to entry_func\n exit_func: String giving name of method to call on the\n ImagePair object at queue exit time.\n exit_func_kw: Dict, keyword arguments to supply to exit_func\n '
self.queue = Queue(maxsize=max_queue_size)
self.image_pair_loader = image_pair_loader
self.load_new_prob = 1.0
self.entry_func = (entry_func, (entry_func_kw or {}))
self.exit_func = (exit_func, (exit_func_kw or {}))
self.threads = []
self.items_in_queue = 0
self._last = 0
self.no_new_counter = 0
self.num_times_in_queue = {image: 0 for image in self.image_pair_loader} | def __init__(self, max_queue_size, image_pair_loader, entry_func=None, entry_func_kw=None, exit_func=None, exit_func_kw=None):
'\n Args:\n max_queue_size: Int, the maximum number of ImagePair objects\n to store in the queue at a given time\n image_pair_loader: The ImagePairLoader object from which images are\n fetched.\n entry_func: String giving name of method to call on the\n ImagePair object at queue entry time.\n entry_func_kw: Dict, keyword arguments to supply to entry_func\n exit_func: String giving name of method to call on the\n ImagePair object at queue exit time.\n exit_func_kw: Dict, keyword arguments to supply to exit_func\n '
self.queue = Queue(maxsize=max_queue_size)
self.image_pair_loader = image_pair_loader
self.load_new_prob = 1.0
self.entry_func = (entry_func, (entry_func_kw or {}))
self.exit_func = (exit_func, (exit_func_kw or {}))
self.threads = []
self.items_in_queue = 0
self._last = 0
self.no_new_counter = 0
self.num_times_in_queue = {image: 0 for image in self.image_pair_loader}<|docstring|>Args:
max_queue_size: Int, the maximum number of ImagePair objects
to store in the queue at a given time
image_pair_loader: The ImagePairLoader object from which images are
fetched.
entry_func: String giving name of method to call on the
ImagePair object at queue entry time.
entry_func_kw: Dict, keyword arguments to supply to entry_func
exit_func: String giving name of method to call on the
ImagePair object at queue exit time.
exit_func_kw: Dict, keyword arguments to supply to exit_func<|endoftext|> |
958e3e8a0cf17831289c65c4481692a6075b12f0cf49b6a06f30baf52f6ed641 | def wait_N(self, N):
'\n Sleep until N images has been added to the queue\n\n Args:\n N: Int, number of images to wait for\n '
cur = self.items_in_queue
while (self.items_in_queue < ((cur + N) - 1)):
time.sleep(1) | Sleep until N images has been added to the queue
Args:
N: Int, number of images to wait for | mpunet/image/image_queue.py | wait_N | sandeepsinghsengar/MPUNet2Plus | 156 | python | def wait_N(self, N):
'\n Sleep until N images has been added to the queue\n\n Args:\n N: Int, number of images to wait for\n '
cur = self.items_in_queue
while (self.items_in_queue < ((cur + N) - 1)):
time.sleep(1) | def wait_N(self, N):
'\n Sleep until N images has been added to the queue\n\n Args:\n N: Int, number of images to wait for\n '
cur = self.items_in_queue
while (self.items_in_queue < ((cur + N) - 1)):
time.sleep(1)<|docstring|>Sleep until N images has been added to the queue
Args:
N: Int, number of images to wait for<|endoftext|> |
fb7b3ba29b934afce7f22d942dcfce4fb42a0ecfcea3bd94ec0ed6588fd45ca7 | @contextmanager
def get(self):
'\n Context manager method pulling an image from the queue and yielding it\n At yield return time the exit_func is called upon the image unless it\n has another reference later in the queue\n\n yields:\n an ImagePair from the queue\n '
if (self.items_in_queue < (0.1 * self.queue.maxsize)):
self.wait_N(N=3)
image = self.queue.get()
if self._last:
diff = (self.items_in_queue - self._last)
if ((diff > 0) or (self.items_in_queue >= (self.queue.maxsize - 1))):
self.load_new_prob *= 1.05
elif (diff < 0):
self.load_new_prob *= 0.95
else:
self._last = self.items_in_queue
(yield image)
self.items_in_queue -= 1
self.num_times_in_queue[image] -= 1
if (self.num_times_in_queue[image] == 0):
getattr(image, self.exit_func[0])(**self.exit_func[1])
image.load_state = None | Context manager method pulling an image from the queue and yielding it
At yield return time the exit_func is called upon the image unless it
has another reference later in the queue
yields:
an ImagePair from the queue | mpunet/image/image_queue.py | get | sandeepsinghsengar/MPUNet2Plus | 156 | python | @contextmanager
def get(self):
'\n Context manager method pulling an image from the queue and yielding it\n At yield return time the exit_func is called upon the image unless it\n has another reference later in the queue\n\n yields:\n an ImagePair from the queue\n '
if (self.items_in_queue < (0.1 * self.queue.maxsize)):
self.wait_N(N=3)
image = self.queue.get()
if self._last:
diff = (self.items_in_queue - self._last)
if ((diff > 0) or (self.items_in_queue >= (self.queue.maxsize - 1))):
self.load_new_prob *= 1.05
elif (diff < 0):
self.load_new_prob *= 0.95
else:
self._last = self.items_in_queue
(yield image)
self.items_in_queue -= 1
self.num_times_in_queue[image] -= 1
if (self.num_times_in_queue[image] == 0):
getattr(image, self.exit_func[0])(**self.exit_func[1])
image.load_state = None | @contextmanager
def get(self):
'\n Context manager method pulling an image from the queue and yielding it\n At yield return time the exit_func is called upon the image unless it\n has another reference later in the queue\n\n yields:\n an ImagePair from the queue\n '
if (self.items_in_queue < (0.1 * self.queue.maxsize)):
self.wait_N(N=3)
image = self.queue.get()
if self._last:
diff = (self.items_in_queue - self._last)
if ((diff > 0) or (self.items_in_queue >= (self.queue.maxsize - 1))):
self.load_new_prob *= 1.05
elif (diff < 0):
self.load_new_prob *= 0.95
else:
self._last = self.items_in_queue
(yield image)
self.items_in_queue -= 1
self.num_times_in_queue[image] -= 1
if (self.num_times_in_queue[image] == 0):
getattr(image, self.exit_func[0])(**self.exit_func[1])
image.load_state = None<|docstring|>Context manager method pulling an image from the queue and yielding it
At yield return time the exit_func is called upon the image unless it
has another reference later in the queue
yields:
an ImagePair from the queue<|endoftext|> |
91f3da00fa3070b43280f742532571857d73b1a55b7bc1942ebb77e09e533d93 | def start(self, n_threads=3):
'\n Start populating the queue in n_threads\n\n Args:\n n_threads: Number of threads to spin up\n '
for _ in range(n_threads):
stop_event = Event()
thread = Thread(target=_start, args=(self, stop_event))
thread.start()
self.threads.append((thread, stop_event)) | Start populating the queue in n_threads
Args:
n_threads: Number of threads to spin up | mpunet/image/image_queue.py | start | sandeepsinghsengar/MPUNet2Plus | 156 | python | def start(self, n_threads=3):
'\n Start populating the queue in n_threads\n\n Args:\n n_threads: Number of threads to spin up\n '
for _ in range(n_threads):
stop_event = Event()
thread = Thread(target=_start, args=(self, stop_event))
thread.start()
self.threads.append((thread, stop_event)) | def start(self, n_threads=3):
'\n Start populating the queue in n_threads\n\n Args:\n n_threads: Number of threads to spin up\n '
for _ in range(n_threads):
stop_event = Event()
thread = Thread(target=_start, args=(self, stop_event))
thread.start()
self.threads.append((thread, stop_event))<|docstring|>Start populating the queue in n_threads
Args:
n_threads: Number of threads to spin up<|endoftext|> |
dd79d35195f4d887a2c0d32ee9d39d54343f8ec0ac6763206ee9b71406da1ec9 | def stop(self):
'\n Stop populating the queue by invoking the stop event on all threads and\n wait for them to terminate.\n '
print(('Stopping %i threads' % len(self.threads)))
for (_, event) in self.threads:
event.set()
for (i, (t, _)) in enumerate(self.threads):
print((' %i/%i' % ((i + 1), len(self.threads))), end='\r', flush=True)
t.join()
print('') | Stop populating the queue by invoking the stop event on all threads and
wait for them to terminate. | mpunet/image/image_queue.py | stop | sandeepsinghsengar/MPUNet2Plus | 156 | python | def stop(self):
'\n Stop populating the queue by invoking the stop event on all threads and\n wait for them to terminate.\n '
print(('Stopping %i threads' % len(self.threads)))
for (_, event) in self.threads:
event.set()
for (i, (t, _)) in enumerate(self.threads):
print((' %i/%i' % ((i + 1), len(self.threads))), end='\r', flush=True)
t.join()
print() | def stop(self):
'\n Stop populating the queue by invoking the stop event on all threads and\n wait for them to terminate.\n '
print(('Stopping %i threads' % len(self.threads)))
for (_, event) in self.threads:
event.set()
for (i, (t, _)) in enumerate(self.threads):
print((' %i/%i' % ((i + 1), len(self.threads))), end='\r', flush=True)
t.join()
print()<|docstring|>Stop populating the queue by invoking the stop event on all threads and
wait for them to terminate.<|endoftext|> |
624181041ed0c48a15ee246de5a1ab2d8e5a5294f73f59fef50308e70cf3fc99 | @property
def unique_in_queue(self):
'\n Returns:\n Int, the current number of unique images in the queue\n '
return sum([bool(m) for m in self.num_times_in_queue.values()]) | Returns:
Int, the current number of unique images in the queue | mpunet/image/image_queue.py | unique_in_queue | sandeepsinghsengar/MPUNet2Plus | 156 | python | @property
def unique_in_queue(self):
'\n Returns:\n Int, the current number of unique images in the queue\n '
return sum([bool(m) for m in self.num_times_in_queue.values()]) | @property
def unique_in_queue(self):
'\n Returns:\n Int, the current number of unique images in the queue\n '
return sum([bool(m) for m in self.num_times_in_queue.values()])<|docstring|>Returns:
Int, the current number of unique images in the queue<|endoftext|> |
7f0516d84c018b82dbad0bfcae88eed8b49ea7363594590f193ae3aa571f0aa1 | def await_full(self):
'\n Halt main thread until queue object is populated to its max capacity\n '
while (self.items_in_queue < self.queue.maxsize):
print((' Data queue being populated %i/%i' % (self.items_in_queue, self.queue.maxsize)), end='\r', flush=True)
time.sleep(1) | Halt main thread until queue object is populated to its max capacity | mpunet/image/image_queue.py | await_full | sandeepsinghsengar/MPUNet2Plus | 156 | python | def await_full(self):
'\n \n '
while (self.items_in_queue < self.queue.maxsize):
print((' Data queue being populated %i/%i' % (self.items_in_queue, self.queue.maxsize)), end='\r', flush=True)
time.sleep(1) | def await_full(self):
'\n \n '
while (self.items_in_queue < self.queue.maxsize):
print((' Data queue being populated %i/%i' % (self.items_in_queue, self.queue.maxsize)), end='\r', flush=True)
time.sleep(1)<|docstring|>Halt main thread until queue object is populated to its max capacity<|endoftext|> |
54626c612d7d31f073866e636f7811d386e2682b7e96d96b993d660df7c74a59 | def _populate(self):
'\n Puts a random image into the queue. The ImagePair is either taken from\n the ImagePairLoader in an un-loaded state or from the already loaded,\n processed images stored in the current queue.\n\n This method should be continuously invoked from one of more threads\n to maintain a populated queue.\n '
load_new = ((np.random.rand() < self.load_new_prob) or (self.unique_in_queue < (0.2 * self.queue.maxsize)))
found = False
while (not found):
image = self.image_pair_loader.images[np.random.randint(len(self.image_pair_loader))]
already_loaded = bool(self.num_times_in_queue[image])
found = (load_new != already_loaded)
self.num_times_in_queue[image] += 1
if (getattr(image, 'load_state', None) != self.entry_func[0]):
image.load_state = self.entry_func[0]
getattr(image, self.entry_func[0])(**self.entry_func[1])
self.queue.put(image, block=True, timeout=None)
self.items_in_queue += 1 | Puts a random image into the queue. The ImagePair is either taken from
the ImagePairLoader in an un-loaded state or from the already loaded,
processed images stored in the current queue.
This method should be continuously invoked from one of more threads
to maintain a populated queue. | mpunet/image/image_queue.py | _populate | sandeepsinghsengar/MPUNet2Plus | 156 | python | def _populate(self):
'\n Puts a random image into the queue. The ImagePair is either taken from\n the ImagePairLoader in an un-loaded state or from the already loaded,\n processed images stored in the current queue.\n\n This method should be continuously invoked from one of more threads\n to maintain a populated queue.\n '
load_new = ((np.random.rand() < self.load_new_prob) or (self.unique_in_queue < (0.2 * self.queue.maxsize)))
found = False
while (not found):
image = self.image_pair_loader.images[np.random.randint(len(self.image_pair_loader))]
already_loaded = bool(self.num_times_in_queue[image])
found = (load_new != already_loaded)
self.num_times_in_queue[image] += 1
if (getattr(image, 'load_state', None) != self.entry_func[0]):
image.load_state = self.entry_func[0]
getattr(image, self.entry_func[0])(**self.entry_func[1])
self.queue.put(image, block=True, timeout=None)
self.items_in_queue += 1 | def _populate(self):
'\n Puts a random image into the queue. The ImagePair is either taken from\n the ImagePairLoader in an un-loaded state or from the already loaded,\n processed images stored in the current queue.\n\n This method should be continuously invoked from one of more threads\n to maintain a populated queue.\n '
load_new = ((np.random.rand() < self.load_new_prob) or (self.unique_in_queue < (0.2 * self.queue.maxsize)))
found = False
while (not found):
image = self.image_pair_loader.images[np.random.randint(len(self.image_pair_loader))]
already_loaded = bool(self.num_times_in_queue[image])
found = (load_new != already_loaded)
self.num_times_in_queue[image] += 1
if (getattr(image, 'load_state', None) != self.entry_func[0]):
image.load_state = self.entry_func[0]
getattr(image, self.entry_func[0])(**self.entry_func[1])
self.queue.put(image, block=True, timeout=None)
self.items_in_queue += 1<|docstring|>Puts a random image into the queue. The ImagePair is either taken from
the ImagePairLoader in an un-loaded state or from the already loaded,
processed images stored in the current queue.
This method should be continuously invoked from one of more threads
to maintain a populated queue.<|endoftext|> |
5fb913ac2ca7cedb7870ae9236bc4001d556dc62446e82cbe34e39ba851e7e83 | def check_sha1(filename, sha1_hash):
'Check whether the sha1 hash of the file content matches the expected hash.\n\n Codes borrowed from mxnet/gluon/utils.py\n\n Parameters\n ----------\n filename : str\n Path to the file.\n sha1_hash : str\n Expected sha1 hash in hexadecimal digits.\n\n Returns\n -------\n bool\n Whether the file content matches the expected hash.\n '
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if (not data):
break
sha1.update(data)
return (sha1.hexdigest() == sha1_hash) | Check whether the sha1 hash of the file content matches the expected hash.
Codes borrowed from mxnet/gluon/utils.py
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash. | knowledgegraphs/utils.py | check_sha1 | YaoShuang-long/KnowledgeGraphDataSets | 1 | python | def check_sha1(filename, sha1_hash):
'Check whether the sha1 hash of the file content matches the expected hash.\n\n Codes borrowed from mxnet/gluon/utils.py\n\n Parameters\n ----------\n filename : str\n Path to the file.\n sha1_hash : str\n Expected sha1 hash in hexadecimal digits.\n\n Returns\n -------\n bool\n Whether the file content matches the expected hash.\n '
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if (not data):
break
sha1.update(data)
return (sha1.hexdigest() == sha1_hash) | def check_sha1(filename, sha1_hash):
'Check whether the sha1 hash of the file content matches the expected hash.\n\n Codes borrowed from mxnet/gluon/utils.py\n\n Parameters\n ----------\n filename : str\n Path to the file.\n sha1_hash : str\n Expected sha1 hash in hexadecimal digits.\n\n Returns\n -------\n bool\n Whether the file content matches the expected hash.\n '
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if (not data):
break
sha1.update(data)
return (sha1.hexdigest() == sha1_hash)<|docstring|>Check whether the sha1 hash of the file content matches the expected hash.
Codes borrowed from mxnet/gluon/utils.py
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.<|endoftext|> |
fa3603e6b07ef22fcbf23cb58e3f745155b9ecf2394d03bd69f6dc056c804395 | def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True, log=True):
"Download a given URL.\n\n Codes borrowed from mxnet/gluon/utils.py\n\n Parameters\n ----------\n url : str\n URL to download.\n path : str, optional\n Destination path to store downloaded file. By default stores to the\n current directory with the same name as in url.\n overwrite : bool, optional\n Whether to overwrite the destination file if it already exists.\n sha1_hash : str, optional\n Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified\n but doesn't match.\n retries : integer, default 5\n The number of times to attempt downloading in case of failure or non 200 return codes.\n verify_ssl : bool, default True\n Verify SSL certificates.\n log : bool, default True\n Whether to print the progress for download\n\n Returns\n -------\n str\n The file path of the downloaded file.\n "
if (path is None):
fname = url.split('/')[(- 1)]
assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[(- 1)])
else:
fname = path
assert (retries >= 0), 'Number of retries should be at least 0'
if (not verify_ssl):
warnings.warn('Unverified HTTPS request is being made (verify_ssl=False). Adding certificate verification is strongly advised.')
if (overwrite or (not os.path.exists(fname)) or (sha1_hash and (not check_sha1(fname, sha1_hash)))):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if (not os.path.exists(dirname)):
os.makedirs(dirname)
while ((retries + 1) > 0):
try:
if log:
print(('Downloading %s from %s...' % (fname, url)))
r = requests.get(url, stream=True, verify=verify_ssl)
if (r.status_code != 200):
raise RuntimeError(('Failed downloading url %s' % url))
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
if (sha1_hash and (not check_sha1(fname, sha1_hash))):
raise UserWarning('File {} is downloaded but the content hash does not match. The repo may be outdated or download may be incomplete. If the "repo_url" is overridden, consider switching to the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if (retries <= 0):
raise e
elif log:
print('download failed, retrying, {} attempt{} left'.format(retries, ('s' if (retries > 1) else '')))
return fname | Download a given URL.
Codes borrowed from mxnet/gluon/utils.py
Parameters
----------
url : str
URL to download.
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with the same name as in url.
overwrite : bool, optional
Whether to overwrite the destination file if it already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
The number of times to attempt downloading in case of failure or non 200 return codes.
verify_ssl : bool, default True
Verify SSL certificates.
log : bool, default True
Whether to print the progress for download
Returns
-------
str
The file path of the downloaded file. | knowledgegraphs/utils.py | download | YaoShuang-long/KnowledgeGraphDataSets | 1 | python | def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True, log=True):
"Download a given URL.\n\n Codes borrowed from mxnet/gluon/utils.py\n\n Parameters\n ----------\n url : str\n URL to download.\n path : str, optional\n Destination path to store downloaded file. By default stores to the\n current directory with the same name as in url.\n overwrite : bool, optional\n Whether to overwrite the destination file if it already exists.\n sha1_hash : str, optional\n Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified\n but doesn't match.\n retries : integer, default 5\n The number of times to attempt downloading in case of failure or non 200 return codes.\n verify_ssl : bool, default True\n Verify SSL certificates.\n log : bool, default True\n Whether to print the progress for download\n\n Returns\n -------\n str\n The file path of the downloaded file.\n "
if (path is None):
fname = url.split('/')[(- 1)]
assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[(- 1)])
else:
fname = path
assert (retries >= 0), 'Number of retries should be at least 0'
if (not verify_ssl):
warnings.warn('Unverified HTTPS request is being made (verify_ssl=False). Adding certificate verification is strongly advised.')
if (overwrite or (not os.path.exists(fname)) or (sha1_hash and (not check_sha1(fname, sha1_hash)))):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if (not os.path.exists(dirname)):
os.makedirs(dirname)
while ((retries + 1) > 0):
try:
if log:
print(('Downloading %s from %s...' % (fname, url)))
r = requests.get(url, stream=True, verify=verify_ssl)
if (r.status_code != 200):
raise RuntimeError(('Failed downloading url %s' % url))
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
if (sha1_hash and (not check_sha1(fname, sha1_hash))):
raise UserWarning('File {} is downloaded but the content hash does not match. The repo may be outdated or download may be incomplete. If the "repo_url" is overridden, consider switching to the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if (retries <= 0):
raise e
elif log:
print('download failed, retrying, {} attempt{} left'.format(retries, ('s' if (retries > 1) else )))
return fname | def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True, log=True):
"Download a given URL.\n\n Codes borrowed from mxnet/gluon/utils.py\n\n Parameters\n ----------\n url : str\n URL to download.\n path : str, optional\n Destination path to store downloaded file. By default stores to the\n current directory with the same name as in url.\n overwrite : bool, optional\n Whether to overwrite the destination file if it already exists.\n sha1_hash : str, optional\n Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified\n but doesn't match.\n retries : integer, default 5\n The number of times to attempt downloading in case of failure or non 200 return codes.\n verify_ssl : bool, default True\n Verify SSL certificates.\n log : bool, default True\n Whether to print the progress for download\n\n Returns\n -------\n str\n The file path of the downloaded file.\n "
if (path is None):
fname = url.split('/')[(- 1)]
assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[(- 1)])
else:
fname = path
assert (retries >= 0), 'Number of retries should be at least 0'
if (not verify_ssl):
warnings.warn('Unverified HTTPS request is being made (verify_ssl=False). Adding certificate verification is strongly advised.')
if (overwrite or (not os.path.exists(fname)) or (sha1_hash and (not check_sha1(fname, sha1_hash)))):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if (not os.path.exists(dirname)):
os.makedirs(dirname)
while ((retries + 1) > 0):
try:
if log:
print(('Downloading %s from %s...' % (fname, url)))
r = requests.get(url, stream=True, verify=verify_ssl)
if (r.status_code != 200):
raise RuntimeError(('Failed downloading url %s' % url))
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
if (sha1_hash and (not check_sha1(fname, sha1_hash))):
raise UserWarning('File {} is downloaded but the content hash does not match. The repo may be outdated or download may be incomplete. If the "repo_url" is overridden, consider switching to the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if (retries <= 0):
raise e
elif log:
print('download failed, retrying, {} attempt{} left'.format(retries, ('s' if (retries > 1) else )))
return fname<|docstring|>Download a given URL.
Codes borrowed from mxnet/gluon/utils.py
Parameters
----------
url : str
URL to download.
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with the same name as in url.
overwrite : bool, optional
Whether to overwrite the destination file if it already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
The number of times to attempt downloading in case of failure or non 200 return codes.
verify_ssl : bool, default True
Verify SSL certificates.
log : bool, default True
Whether to print the progress for download
Returns
-------
str
The file path of the downloaded file.<|endoftext|> |
5b742cd21d1a3d6706930627028b90997d4145dbf1c8338a088a7992d6850bda | def extract_archive(file, target_dir):
'Extract archive file.\n\n Parameters\n ----------\n file : str\n Absolute path of the archive file.\n target_dir : str\n Target directory of the archive to be uncompressed.\n '
if os.path.exists(target_dir):
return
if (file.endswith('.gz') or file.endswith('.tar') or file.endswith('.tgz')):
archive = tarfile.open(file, 'r')
elif file.endswith('.zip'):
archive = zipfile.ZipFile(file, 'r')
else:
raise Exception(('Unrecognized file type: ' + file))
print('Extracting file to {}'.format(target_dir))
archive.extractall(path=target_dir)
archive.close() | Extract archive file.
Parameters
----------
file : str
Absolute path of the archive file.
target_dir : str
Target directory of the archive to be uncompressed. | knowledgegraphs/utils.py | extract_archive | YaoShuang-long/KnowledgeGraphDataSets | 1 | python | def extract_archive(file, target_dir):
'Extract archive file.\n\n Parameters\n ----------\n file : str\n Absolute path of the archive file.\n target_dir : str\n Target directory of the archive to be uncompressed.\n '
if os.path.exists(target_dir):
return
if (file.endswith('.gz') or file.endswith('.tar') or file.endswith('.tgz')):
archive = tarfile.open(file, 'r')
elif file.endswith('.zip'):
archive = zipfile.ZipFile(file, 'r')
else:
raise Exception(('Unrecognized file type: ' + file))
print('Extracting file to {}'.format(target_dir))
archive.extractall(path=target_dir)
archive.close() | def extract_archive(file, target_dir):
'Extract archive file.\n\n Parameters\n ----------\n file : str\n Absolute path of the archive file.\n target_dir : str\n Target directory of the archive to be uncompressed.\n '
if os.path.exists(target_dir):
return
if (file.endswith('.gz') or file.endswith('.tar') or file.endswith('.tgz')):
archive = tarfile.open(file, 'r')
elif file.endswith('.zip'):
archive = zipfile.ZipFile(file, 'r')
else:
raise Exception(('Unrecognized file type: ' + file))
print('Extracting file to {}'.format(target_dir))
archive.extractall(path=target_dir)
archive.close()<|docstring|>Extract archive file.
Parameters
----------
file : str
Absolute path of the archive file.
target_dir : str
Target directory of the archive to be uncompressed.<|endoftext|> |
5dd2599dd71df0080716d4cc8b6b9edbbda3233d48c8d4078c3e4a17700c2efa | @click.group()
def cli():
' SONiC PFC Watchdog ' | SONiC PFC Watchdog | pfcwd/main.py | cli | ljm625/sonic-utilities | 0 | python | @click.group()
def cli():
' ' | @click.group()
def cli():
' '<|docstring|>SONiC PFC Watchdog<|endoftext|> |
2304e95c935f68d31bb3f70efe4c68e3f3e9b107289fd62c4c192e7b717bc31b | @cli.group()
def show():
' Show PFC Watchdog information' | Show PFC Watchdog information | pfcwd/main.py | show | ljm625/sonic-utilities | 0 | python | @cli.group()
def show():
' ' | @cli.group()
def show():
' '<|docstring|>Show PFC Watchdog information<|endoftext|> |
c9a58f76ac6a25444aaa9b564e22f02c35d3950dcfd318ec05955a223ecce1d6 | @show.command()
@multi_asic_util.multi_asic_click_options
@click.option('-e', '--empty', is_flag=True)
@click.argument('queues', nargs=(- 1))
def stats(namespace, display, empty, queues):
' Show PFC Watchdog stats per queue '
if len(queues):
display = constants.DISPLAY_ALL
PfcwdCli(namespace, display).show_stats(empty, queues) | Show PFC Watchdog stats per queue | pfcwd/main.py | stats | ljm625/sonic-utilities | 0 | python | @show.command()
@multi_asic_util.multi_asic_click_options
@click.option('-e', '--empty', is_flag=True)
@click.argument('queues', nargs=(- 1))
def stats(namespace, display, empty, queues):
' '
if len(queues):
display = constants.DISPLAY_ALL
PfcwdCli(namespace, display).show_stats(empty, queues) | @show.command()
@multi_asic_util.multi_asic_click_options
@click.option('-e', '--empty', is_flag=True)
@click.argument('queues', nargs=(- 1))
def stats(namespace, display, empty, queues):
' '
if len(queues):
display = constants.DISPLAY_ALL
PfcwdCli(namespace, display).show_stats(empty, queues)<|docstring|>Show PFC Watchdog stats per queue<|endoftext|> |
2dc808f2630ebbb9777c66faa1832908274567fa50830aa2af507fd4d5062cd7 | @show.command()
@multi_asic_util.multi_asic_click_options
@click.argument('ports', nargs=(- 1))
def config(namespace, display, ports):
' Show PFC Watchdog configuration '
PfcwdCli(namespace, display).config(ports) | Show PFC Watchdog configuration | pfcwd/main.py | config | ljm625/sonic-utilities | 0 | python | @show.command()
@multi_asic_util.multi_asic_click_options
@click.argument('ports', nargs=(- 1))
def config(namespace, display, ports):
' '
PfcwdCli(namespace, display).config(ports) | @show.command()
@multi_asic_util.multi_asic_click_options
@click.argument('ports', nargs=(- 1))
def config(namespace, display, ports):
' '
PfcwdCli(namespace, display).config(ports)<|docstring|>Show PFC Watchdog configuration<|endoftext|> |
9eb8b5304a6cdd44a3a688706d870b2834ab9dc705d47ebcd18a45aceaff99be | @cli.command()
@click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert']))
@click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))
@click.argument('ports', nargs=(- 1))
@click.argument('detection-time', type=click.IntRange(100, 5000))
def start(action, restoration_time, ports, detection_time):
'\n Start PFC watchdog on port(s). To config all ports, use all as input.\n\n Example:\n\n sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400\n\n '
PfcwdCli().start(action, restoration_time, ports, detection_time) | Start PFC watchdog on port(s). To config all ports, use all as input.
Example:
sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400 | pfcwd/main.py | start | ljm625/sonic-utilities | 0 | python | @cli.command()
@click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert']))
@click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))
@click.argument('ports', nargs=(- 1))
@click.argument('detection-time', type=click.IntRange(100, 5000))
def start(action, restoration_time, ports, detection_time):
'\n Start PFC watchdog on port(s). To config all ports, use all as input.\n\n Example:\n\n sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400\n\n '
PfcwdCli().start(action, restoration_time, ports, detection_time) | @cli.command()
@click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert']))
@click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))
@click.argument('ports', nargs=(- 1))
@click.argument('detection-time', type=click.IntRange(100, 5000))
def start(action, restoration_time, ports, detection_time):
'\n Start PFC watchdog on port(s). To config all ports, use all as input.\n\n Example:\n\n sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400\n\n '
PfcwdCli().start(action, restoration_time, ports, detection_time)<|docstring|>Start PFC watchdog on port(s). To config all ports, use all as input.
Example:
sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400<|endoftext|> |
6bd14c6589b24cf5cf5b96b3502eaa36af546edf6c710dfb6707b8c9c3824cd4 | @cli.command()
@click.argument('poll_interval', type=click.IntRange(100, 3000))
def interval(poll_interval):
' Set PFC watchdog counter polling interval '
PfcwdCli().interval(poll_interval) | Set PFC watchdog counter polling interval | pfcwd/main.py | interval | ljm625/sonic-utilities | 0 | python | @cli.command()
@click.argument('poll_interval', type=click.IntRange(100, 3000))
def interval(poll_interval):
' '
PfcwdCli().interval(poll_interval) | @cli.command()
@click.argument('poll_interval', type=click.IntRange(100, 3000))
def interval(poll_interval):
' '
PfcwdCli().interval(poll_interval)<|docstring|>Set PFC watchdog counter polling interval<|endoftext|> |
892fbec213cfdb4f291a6831423385982f1a4aaa501335cc625f3736bd6e24a7 | @cli.command()
@click.argument('ports', nargs=(- 1))
def stop(ports):
' Stop PFC watchdog on port(s) '
PfcwdCli().stop(ports) | Stop PFC watchdog on port(s) | pfcwd/main.py | stop | ljm625/sonic-utilities | 0 | python | @cli.command()
@click.argument('ports', nargs=(- 1))
def stop(ports):
' '
PfcwdCli().stop(ports) | @cli.command()
@click.argument('ports', nargs=(- 1))
def stop(ports):
' '
PfcwdCli().stop(ports)<|docstring|>Stop PFC watchdog on port(s)<|endoftext|> |
4a81264b46e2da85417901caa1714edbdda4665c59f1d5a5fd66bafaf1f4b4a7 | @cli.command('start_default')
def start_default():
' Start PFC WD by default configurations '
PfcwdCli().start_default() | Start PFC WD by default configurations | pfcwd/main.py | start_default | ljm625/sonic-utilities | 0 | python | @cli.command('start_default')
def start_default():
' '
PfcwdCli().start_default() | @cli.command('start_default')
def start_default():
' '
PfcwdCli().start_default()<|docstring|>Start PFC WD by default configurations<|endoftext|> |
8a16f5aa202761209881b640245fe088abf0fd760c520af9bf3acba2b9ab6df7 | @cli.command('counter_poll')
@click.argument('counter_poll', type=click.Choice(['enable', 'disable']))
def counter_poll(counter_poll):
' Enable/disable counter polling '
PfcwdCli().counter_poll(counter_poll) | Enable/disable counter polling | pfcwd/main.py | counter_poll | ljm625/sonic-utilities | 0 | python | @cli.command('counter_poll')
@click.argument('counter_poll', type=click.Choice(['enable', 'disable']))
def counter_poll(counter_poll):
' '
PfcwdCli().counter_poll(counter_poll) | @cli.command('counter_poll')
@click.argument('counter_poll', type=click.Choice(['enable', 'disable']))
def counter_poll(counter_poll):
' '
PfcwdCli().counter_poll(counter_poll)<|docstring|>Enable/disable counter polling<|endoftext|> |
0b394e76cd8578cc194cdb10337243fa7231fde6c2cff8444a9b06d2320bde68 | @cli.command('big_red_switch')
@click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))
def big_red_switch(big_red_switch):
' Enable/disable BIG_RED_SWITCH mode '
PfcwdCli().big_red_switch(big_red_switch) | Enable/disable BIG_RED_SWITCH mode | pfcwd/main.py | big_red_switch | ljm625/sonic-utilities | 0 | python | @cli.command('big_red_switch')
@click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))
def big_red_switch(big_red_switch):
' '
PfcwdCli().big_red_switch(big_red_switch) | @cli.command('big_red_switch')
@click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))
def big_red_switch(big_red_switch):
' '
PfcwdCli().big_red_switch(big_red_switch)<|docstring|>Enable/disable BIG_RED_SWITCH mode<|endoftext|> |
c8b8a3eabc5c3ce5ba07d7a53110fcf5594965f7e74a1db4bd836bdcdca29fe6 | @nox.session(reuse_venv=True)
def lint(session):
'\n Run the linter.\n '
session.install('pre-commit')
session.run('pre-commit', 'run', '--all-files', *session.posargs) | Run the linter. | noxfile.py | lint | ink-splatters/plumbum | 1,918 | python | @nox.session(reuse_venv=True)
def lint(session):
'\n \n '
session.install('pre-commit')
session.run('pre-commit', 'run', '--all-files', *session.posargs) | @nox.session(reuse_venv=True)
def lint(session):
'\n \n '
session.install('pre-commit')
session.run('pre-commit', 'run', '--all-files', *session.posargs)<|docstring|>Run the linter.<|endoftext|> |
6031ea2917461fbaca32e6e46550186da4ffb2e7da105703eb349138f73faed9 | @nox.session(python=ALL_PYTHONS, reuse_venv=True)
def tests(session):
'\n Run the unit and regular tests.\n '
session.install('-e', '.[dev]')
session.run('pytest', '--cov', *session.posargs) | Run the unit and regular tests. | noxfile.py | tests | ink-splatters/plumbum | 1,918 | python | @nox.session(python=ALL_PYTHONS, reuse_venv=True)
def tests(session):
'\n \n '
session.install('-e', '.[dev]')
session.run('pytest', '--cov', *session.posargs) | @nox.session(python=ALL_PYTHONS, reuse_venv=True)
def tests(session):
'\n \n '
session.install('-e', '.[dev]')
session.run('pytest', '--cov', *session.posargs)<|docstring|>Run the unit and regular tests.<|endoftext|> |
41315579f337d6c47ce695c796820d9348e6beeebe69998985e9636a10258c1b | @nox.session(reuse_venv=True)
def docs(session):
'\n Build the docs. Pass "serve" to serve.\n '
session.install('-e', '.[docs]')
session.chdir('docs')
session.run('sphinx-build', '-M', 'html', '.', '_build')
if session.posargs:
if ('serve' in session.posargs):
print('Launching docs at http://localhost:8000/ - use Ctrl-C to quit')
session.run('python', '-m', 'http.server', '8000', '-d', '_build/html')
else:
print('Unsupported argument to docs') | Build the docs. Pass "serve" to serve. | noxfile.py | docs | ink-splatters/plumbum | 1,918 | python | @nox.session(reuse_venv=True)
def docs(session):
'\n \n '
session.install('-e', '.[docs]')
session.chdir('docs')
session.run('sphinx-build', '-M', 'html', '.', '_build')
if session.posargs:
if ('serve' in session.posargs):
print('Launching docs at http://localhost:8000/ - use Ctrl-C to quit')
session.run('python', '-m', 'http.server', '8000', '-d', '_build/html')
else:
print('Unsupported argument to docs') | @nox.session(reuse_venv=True)
def docs(session):
'\n \n '
session.install('-e', '.[docs]')
session.chdir('docs')
session.run('sphinx-build', '-M', 'html', '.', '_build')
if session.posargs:
if ('serve' in session.posargs):
print('Launching docs at http://localhost:8000/ - use Ctrl-C to quit')
session.run('python', '-m', 'http.server', '8000', '-d', '_build/html')
else:
print('Unsupported argument to docs')<|docstring|>Build the docs. Pass "serve" to serve.<|endoftext|> |
e87b1cf7070d84a0ddc7943a9463003aab0f9a57b521065ed94764785d126bb0 | @nox.session
def build(session):
'\n Build an SDist and wheel.\n '
session.install('build')
session.run('python', '-m', 'build') | Build an SDist and wheel. | noxfile.py | build | ink-splatters/plumbum | 1,918 | python | @nox.session
def build(session):
'\n \n '
session.install('build')
session.run('python', '-m', 'build') | @nox.session
def build(session):
'\n \n '
session.install('build')
session.run('python', '-m', 'build')<|docstring|>Build an SDist and wheel.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.