title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: Fix MultiIndex names handling in pd.concat | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 7664688ffa4f4..94e32da7eb191 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1170,6 +1170,7 @@ Indexing
- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
- Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`)
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
+- Bug in ``pd.concat()`` where the names of ``MultiIndex`` of resulting ``DataFrame`` are not handled correctly when ``None`` is presented in the names of ``MultiIndex`` of input ``DataFrame`` (:issue:`15787`)
I/O
^^^
diff --git a/pandas/indexes/api.py b/pandas/indexes/api.py
index a38453e0d2ccc..a3cb54ca97071 100644
--- a/pandas/indexes/api.py
+++ b/pandas/indexes/api.py
@@ -107,7 +107,7 @@ def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = set([tuple(i.names) for i in indexes
- if all(n is not None for n in i.names)])
+ if any(n is not None for n in i.names)])
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py
index 623c5fa02fcb2..c61f2a3dc8066 100644
--- a/pandas/tests/tools/test_concat.py
+++ b/pandas/tests/tools/test_concat.py
@@ -1048,6 +1048,30 @@ def test_concat_multiindex_with_tz(self):
result = concat([df, df])
tm.assert_frame_equal(result, expected)
+ def test_concat_multiindex_with_none_in_index_names(self):
+ # GH 15787
+ index = pd.MultiIndex.from_product([[1], range(5)],
+ names=['level1', None])
+ df = pd.DataFrame({'col': range(5)}, index=index, dtype=np.int32)
+
+ result = concat([df, df], keys=[1, 2], names=['level2'])
+ index = pd.MultiIndex.from_product([[1, 2], [1], range(5)],
+ names=['level2', 'level1', None])
+ expected = pd.DataFrame({'col': list(range(5)) * 2},
+ index=index, dtype=np.int32)
+ assert_frame_equal(result, expected)
+
+ result = concat([df, df[:2]], keys=[1, 2], names=['level2'])
+ level2 = [1] * 5 + [2] * 2
+ level1 = [1] * 7
+ no_name = list(range(5)) + list(range(2))
+ tuples = list(zip(level2, level1, no_name))
+ index = pd.MultiIndex.from_tuples(tuples,
+ names=['level2', 'level1', None])
+ expected = pd.DataFrame({'col': no_name}, index=index,
+ dtype=np.int32)
+ assert_frame_equal(result, expected)
+
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
| This is a fix attempt for issue #15787.
The discrepancy between definition and corresponding implementation of so-called non-none names in function _get_consensus_names leads to this bug.
- [x] closes #15787
- [x] tests added / passed
- [ ] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15955 | 2017-04-09T14:20:46Z | 2017-04-10T12:10:24Z | null | 2017-04-10T16:39:01Z |
ENH: Style blocks | diff --git a/.gitignore b/.gitignore
index a509fcf736ea8..c953020f59342 100644
--- a/.gitignore
+++ b/.gitignore
@@ -103,3 +103,4 @@ doc/source/index.rst
doc/build/html/index.html
# Windows specific leftover:
doc/tmp.sv
+doc/source/templates/
diff --git a/MANIFEST.in b/MANIFEST.in
index b7a7e6039ac9a..31de3466cb357 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -25,3 +25,4 @@ global-exclude *.png
# recursive-include LICENSES *
include versioneer.py
include pandas/_version.py
+include pandas/formats/templates/*.tpl
diff --git a/ci/requirements-3.5_DOC.run b/ci/requirements-3.5_DOC.run
index 7ed60758612bb..9647ab53ab835 100644
--- a/ci/requirements-3.5_DOC.run
+++ b/ci/requirements-3.5_DOC.run
@@ -1,5 +1,6 @@
ipython
ipykernel
+ipywidgets
sphinx
nbconvert
nbformat
diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb
index 2b8bf35a913c1..06763b2a5e741 100644
--- a/doc/source/style.ipynb
+++ b/doc/source/style.ipynb
@@ -54,7 +54,7 @@
},
"outputs": [],
"source": [
- "import matplotlib\n",
+ "import matplotlib.pyplot\n",
"# We have this here to trigger matplotlib's font cache stuff.\n",
"# This cell is hidden from the output"
]
@@ -87,9 +87,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style"
@@ -107,9 +105,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.highlight_null().render().split('\\n')[:10]"
@@ -160,9 +156,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"s = df.style.applymap(color_negative_red)\n",
@@ -208,9 +202,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.apply(highlight_max)"
@@ -234,9 +226,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.\\\n",
@@ -290,9 +280,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.apply(highlight_max, color='darkorange', axis=None)"
@@ -340,9 +328,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.apply(highlight_max, subset=['B', 'C', 'D'])"
@@ -358,9 +344,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.applymap(color_negative_red,\n",
@@ -393,9 +377,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.format(\"{:.2%}\")"
@@ -411,9 +393,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.format({'B': \"{:0<4.0f}\", 'D': '{:+.2f}'})"
@@ -429,9 +409,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.format({\"B\": lambda x: \"±{:.2f}\".format(abs(x))})"
@@ -454,9 +432,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.highlight_null(null_color='red')"
@@ -472,9 +448,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"import seaborn as sns\n",
@@ -495,9 +469,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Uses the full color range\n",
@@ -507,9 +479,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Compress the color range\n",
@@ -529,9 +499,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.bar(subset=['A', 'B'], color='#d65f5f')"
@@ -547,9 +515,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.highlight_max(axis=0)"
@@ -558,9 +524,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.highlight_min(axis=0)"
@@ -576,9 +540,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.set_properties(**{'background-color': 'black',\n",
@@ -603,9 +565,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df2 = -df\n",
@@ -616,9 +576,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"style2 = df2.style\n",
@@ -671,9 +629,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"with pd.option_context('display.precision', 2):\n",
@@ -693,9 +649,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style\\\n",
@@ -728,9 +682,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"df.style.set_caption('Colormaps, with a caption.')\\\n",
@@ -756,9 +708,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"from IPython.display import HTML\n",
@@ -854,9 +804,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"from IPython.html import widgets\n",
@@ -892,16 +840,14 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"np.random.seed(25)\n",
"cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)\n",
- "df = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
+ "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
"\n",
- "df.style.background_gradient(cmap, axis=1)\\\n",
+ "bigdf.style.background_gradient(cmap, axis=1)\\\n",
" .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\\\n",
" .set_caption(\"Hover to magify\")\\\n",
" .set_precision(2)\\\n",
@@ -924,29 +870,157 @@
"\n",
"### Subclassing\n",
"\n",
- "This section contains a bit of information about the implementation of `Styler`.\n",
- "Since the feature is so new all of this is subject to change, even more so than the end-use API.\n",
- "\n",
- "As users apply styles (via `.apply`, `.applymap` or one of the builtins), we don't actually calculate anything.\n",
- "Instead, we append functions and arguments to a list `self._todo`.\n",
- "When asked (typically in `.render` we'll walk through the list and execute each function (this is in `self._compute()`.\n",
- "These functions update an internal `defaultdict(list)`, `self.ctx` which maps DataFrame row / column positions to CSS attribute, value pairs.\n",
- "\n",
- "We take the extra step through `self._todo` so that we can export styles and set them on other `Styler`s.\n",
- "\n",
- "Rendering uses [Jinja](http://jinja.pocoo.org/) templates.\n",
- "The `.translate` method takes `self.ctx` and builds another dictionary ready to be passed into `Styler.template.render`, the Jinja template.\n",
- "\n",
- "\n",
- "### Alternate templates\n",
- "\n",
- "We've used [Jinja](http://jinja.pocoo.org/) templates to build up the HTML.\n",
- "The template is stored as a class variable ``Styler.template.``. Subclasses can override that.\n",
+ "If the default template doesn't quite suit your needs, you can subclass Styler and extend or override the template.\n",
+ "We'll show an example of extending the default template to insert a custom header before each table."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "from jinja2 import Environment, ChoiceLoader, FileSystemLoader\n",
+ "from IPython.display import HTML\n",
+ "from pandas.io.api import Styler"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%mkdir templates"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This next cell writes the custom template.\n",
+ "We extend the template `html.tpl`, which comes with pandas."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%file templates/myhtml.tpl\n",
+ "{% extends \"html.tpl\" %}\n",
+ "{% block table %}\n",
+ "<h1>{{ table_title|default(\"My Table\") }}</h1>\n",
+ "{{ super() }}\n",
+ "{% endblock table %}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now that we've created a template, we need to set up a subclass of ``pd.Styler`` that\n",
+ "knows about it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "class MyStyler(pd.Styler):\n",
+ " env = Environment(\n",
+ " loader=ChoiceLoader([\n",
+ " FileSystemLoader(\"templates\"), # contains ours\n",
+ " pd.Styler.loader, # the default\n",
+ " ])\n",
+ " )\n",
+ " template = env.get_template(\"myhtml.tpl\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that we include the original loader in our environment's loader.\n",
+ "That's because we extend the original template, so the Jinja environment needs\n",
+ "to be able to find it.\n",
"\n",
- "```python\n",
- "class CustomStyle(Styler):\n",
- " template = Template(\"\"\"...\"\"\")\n",
- "```"
+ "Now we can use that custom styler. It's `__init__` takes a DataFrame."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "MyStyler(df)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Our custom template accepts a `table_title` keyword. We can provide the value in the `.render` method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "HTML(MyStyler(df).render(table_title=\"Extending Example\"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For convenience, we provide the `Styler.from_custom_template` method that does the same as the custom subclass."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "EasyStyler = pd.Styler.from_custom_template(\"templates\", \"myhtml.tpl\")\n",
+ "EasyStyler(df)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here's the template structure:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(\"template_structure.html\") as f:\n",
+ " structure = f.read()\n",
+ " \n",
+ "HTML(structure)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "See the template in the [GitHub repo](https://github.com/pandas-dev/pandas) for more details."
]
}
],
diff --git a/doc/source/template_structure.html b/doc/source/template_structure.html
new file mode 100644
index 0000000000000..81dbe2b7d0217
--- /dev/null
+++ b/doc/source/template_structure.html
@@ -0,0 +1,60 @@
+<!--
+ This is an HTML fragment that gets included into a notebook & rst document
+
+ Inspired by nbconvert
+
+ https://github.com/jupyter/nbconvert/blob/8ac591a0b8694147d0f34bf6392594c2811c1395/docs/source/template_structure.html
+
+
+ -->
+<style type="text/css">
+ /* Overrides of notebook CSS for static HTML export */
+ body {
+ font-family: sans;
+ }
+ .template_block {
+ background-color: hsla(120, 60%, 70%, 0.2);
+ margin: 10px;
+ padding: 5px;
+ border: 1px solid hsla(120, 60%, 70%, 0.5);
+ border-left: 2px solid black;
+ }
+ .template_block pre {
+ background: transparent;
+ padding: 0;
+ }
+ .big_vertical_ellipsis {
+ font-size: 24pt;
+ }
+</style>
+
+<div class="template_block">before_style</div>
+<div class="template_block">style
+ <pre><style type="text/css"></pre>
+ <div class="template_block">table_styles</div>
+ <div class="template_block">before_cellstyle</div>
+ <div class="template_block">cellstyle</div>
+ <pre></style></pre>
+</div><!-- /style -->
+
+<div class="template_block" >before_table</div>
+
+<div class="template_block" >table
+ <pre><table ...></pre>
+ <div class="template_block">caption</div>
+
+ <div class="template_block" >thead
+ <div class="template_block" >before_head_rows</div>
+ <div class="template_block">head_tr (loop over headers)</div>
+ <div class="template_block" >after_head_rows</div>
+ </div>
+
+ <div class="template_block" >tbody
+ <div class="template_block" >before_rows</div>
+ <div class="template_block">tr (loop over data rows)</div>
+ <div class="template_block" >after_rows</div>
+ </div>
+ <pre></table></pre>
+</div><!-- /table -->
+
+<div class="template_block" >after_table</div>
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index da32de750e7de..ffbc4d0e9796c 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -482,6 +482,9 @@ Other Enhancements
- ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`)
- ``pd.read_html()`` will parse multiple header rows, creating a multiindex header. (:issue:`13434`).
- HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`)
+- ``pd.io.api.Styler`` template now has blocks for easier extension, :ref:`see the example notebook <style.ipynb#Subclassing>` (:issue:`15649`)
+- ``pd.io.api.Styler.render`` now accepts ``**kwargs`` to allow user-defined variables in the template (:issue:`15649`)
+
- ``pd.TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`)
- ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information.
diff --git a/pandas/formats/style.py b/pandas/formats/style.py
index e712010a8b4f2..af02077bd5b41 100644
--- a/pandas/formats/style.py
+++ b/pandas/formats/style.py
@@ -10,7 +10,9 @@
from collections import defaultdict, MutableMapping
try:
- from jinja2 import Template
+ from jinja2 import (
+ PackageLoader, Environment, ChoiceLoader, FileSystemLoader
+ )
except ImportError:
msg = "pandas.Styler requires jinja2. "\
"Please install with `conda install Jinja2`\n"\
@@ -68,7 +70,9 @@ class Styler(object):
Attributes
----------
- template: Jinja Template
+ env : Jinja2 Environment
+ template: Jinja2 Template
+ loader : Jinja2 Loader
Notes
-----
@@ -103,56 +107,12 @@ class Styler(object):
--------
pandas.DataFrame.style
"""
- template = Template("""
- <style type="text/css" >
- {% for s in table_styles %}
- #T_{{uuid}} {{s.selector}} {
- {% for p,val in s.props %}
- {{p}}: {{val}};
- {% endfor %}
- }
- {% endfor %}
- {% for s in cellstyle %}
- #T_{{uuid}}{{s.selector}} {
- {% for p,val in s.props %}
- {{p}}: {{val}};
- {% endfor %}
- }
- {% endfor %}
- </style>
-
- <table id="T_{{uuid}}" {{ table_attributes }}>
- {% if caption %}
- <caption>{{caption}}</caption>
- {% endif %}
-
- <thead>
- {% for r in head %}
- <tr>
- {% for c in r %}
- {% if c.is_visible != False %}
- <{{c.type}} class="{{c.class}}" {{ c.attributes|join(" ") }}>
- {{c.value}}
- {% endif %}
- {% endfor %}
- </tr>
- {% endfor %}
- </thead>
- <tbody>
- {% for r in body %}
- <tr>
- {% for c in r %}
- {% if c.is_visible != False %}
- <{{c.type}} id="T_{{uuid}}{{c.id}}"
- class="{{c.class}}" {{ c.attributes|join(" ") }}>
- {{ c.display_value }}
- {% endif %}
- {% endfor %}
- </tr>
- {% endfor %}
- </tbody>
- </table>
- """)
+ loader = PackageLoader("pandas", "formats/templates")
+ env = Environment(
+ loader=loader,
+ trim_blocks=True,
+ )
+ template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None):
@@ -400,12 +360,22 @@ def format(self, formatter, subset=None):
self._display_funcs[(i, j)] = formatter
return self
- def render(self):
- """
+ def render(self, **kwargs):
+ r"""
Render the built up styles to HTML
.. versionadded:: 0.17.1
+ Parameters
+ ----------
+ **kwargs:
+ Any additional keyword arguments are passed through
+ to ``self.template.render``. This is useful when you
+ need to provide additional variables for a custom
+ template.
+
+ .. versionadded:: 0.20
+
Returns
-------
rendered: str
@@ -418,8 +388,22 @@ def render(self):
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
+
+ Pandas uses the following keys in render. Arguments passed
+ in ``**kwargs`` take precedence, so think carefuly if you want
+ to override them:
+
+ * head
+ * cellstyle
+ * body
+ * uuid
+ * precision
+ * table_styles
+ * caption
+ * table_attributes
"""
self._compute()
+ # TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
@@ -427,6 +411,7 @@ def render(self):
trimmed = [x for x in d['cellstyle']
if any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
+ d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs):
@@ -961,6 +946,35 @@ def _highlight_extrema(data, color='yellow', max_=True):
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
+ @classmethod
+ def from_custom_template(cls, searchpath, name):
+ """
+ Factory function for creating a subclass of ``Styler``
+ with a custom template and Jinja environment.
+
+ Parameters
+ ----------
+ searchpath : str or list
+ Path or paths of directories containing the templates
+ name : str
+ Name of your custom template to use for rendering
+
+ Returns
+ -------
+ MyStyler : subclass of Styler
+ has the correct ``env`` and ``template`` class attributes set.
+ """
+ loader = ChoiceLoader([
+ FileSystemLoader(searchpath),
+ cls.loader,
+ ])
+
+ class MyStyler(cls):
+ env = Environment(loader=loader)
+ template = env.get_template(name)
+
+ return MyStyler
+
def _is_visible(idx_row, idx_col, lengths):
"""
diff --git a/pandas/formats/templates/html.tpl b/pandas/formats/templates/html.tpl
new file mode 100644
index 0000000000000..706db1ecdd961
--- /dev/null
+++ b/pandas/formats/templates/html.tpl
@@ -0,0 +1,70 @@
+{# Update the template_structure.html document too #}
+{%- block before_style -%}{%- endblock before_style -%}
+{% block style %}
+<style type="text/css" >
+{% block table_styles %}
+{% for s in table_styles %}
+ #T_{{uuid}} {{s.selector}} {
+ {% for p,val in s.props %}
+ {{p}}: {{val}};
+ {% endfor -%}
+ }
+{%- endfor -%}
+{% endblock table_styles %}
+{% block before_cellstyle %}{% endblock before_cellstyle %}
+{% block cellstyle %}
+{%- for s in cellstyle %}
+ #T_{{uuid}}{{s.selector}} {
+ {% for p,val in s.props %}
+ {{p}}: {{val}};
+ {% endfor %}
+ }
+{%- endfor -%}
+{%- endblock cellstyle %}
+</style>
+{%- endblock style %}
+{%- block before_table %}{% endblock before_table %}
+{%- block table %}
+<table id="T_{{uuid}}" {% if table_attributes %}{{ table_attributes }}{% endif %}>
+{%- block caption %}
+{%- if caption -%}
+ <caption>{{caption}}</caption>
+{%- endif -%}
+{%- endblock caption %}
+{%- block thead %}
+<thead>
+ {%- block before_head_rows %}{% endblock %}
+ {%- for r in head %}
+ {%- block head_tr scoped %}
+ <tr>
+ {%- for c in r %}
+ {%- if c.is_visible != False %}
+ <{{ c.type }} class="{{c.class}}" {{ c.attributes|join(" ") }}>{{c.value}}</{{ c.type }}>
+ {%- endif %}
+ {%- endfor %}
+ </tr>
+ {%- endblock head_tr %}
+ {%- endfor %}
+ {%- block after_head_rows %}{% endblock %}
+</thead>
+{%- endblock thead %}
+{%- block tbody %}
+<tbody>
+ {%- block before_rows %}{%- endblock before_rows %}
+ {%- for r in body %}
+ {%- block tr scoped %}
+ <tr>
+ {%- for c in r %}
+ {%- if c.is_visible != False %}
+ <{{ c.type }} id="T_{{ uuid }}{{ c.id }}" class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }}</{{ c.type }}>
+ {%- endif %}
+ {%- endfor %}
+ </tr>
+ {%- endblock tr %}
+ {%- endfor %}
+ {%- block after_rows %}{%- endblock after_rows %}
+</tbody>
+{%- endblock tbody %}
+</table>
+{%- endblock table %}
+{%- block after_table %}{% endblock after_table %}
diff --git a/pandas/io/api.py b/pandas/io/api.py
index e312e7bc2f300..4744d41472ff1 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -17,6 +17,23 @@
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
from pandas.io.gbq import read_gbq
+try:
+ from pandas.formats.style import Styler
+except ImportError:
+ from pandas.compat import add_metaclass as _add_metaclass
+ from pandas.util.importing import _UnSubclassable
+
+ # We want to *not* raise an ImportError upon importing this module
+ # We *do* want to raise an ImportError with a custom message
+ # when the class is instantiated or subclassed.
+ @_add_metaclass(_UnSubclassable)
+ class Styler(object):
+ msg = ("pandas.io.api.Styler requires jinja2. "
+ "Please install with `conda install jinja2` "
+ "or `pip install jinja2`")
+ def __init__(self, *args, **kargs):
+ raise ImportError(self.msg)
+
# deprecation, xref #13790
def Term(*args, **kwargs):
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index a15d7cf26cbea..6d92898042b23 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -49,7 +49,8 @@ class TestPDApi(Base, tm.TestCase):
'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index',
'Series', 'SparseArray', 'SparseDataFrame',
'SparseSeries', 'TimeGrouper', 'Timedelta',
- 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex']
+ 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex',
+ 'Styler']
# these are already deprecated; awaiting removal
deprecated_classes = ['WidePanel', 'Panel4D',
diff --git a/pandas/tests/formats/test_style.py b/pandas/tests/formats/test_style.py
index 44af0b8ebb085..08f8f2f32763d 100644
--- a/pandas/tests/formats/test_style.py
+++ b/pandas/tests/formats/test_style.py
@@ -1,6 +1,7 @@
-import pytest
-
import copy
+import textwrap
+
+import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
@@ -717,3 +718,32 @@ def test_background_gradient(self):
result = (df.style.background_gradient(subset=pd.IndexSlice[1, 'A'])
._compute().ctx)
self.assertEqual(result[(1, 0)], ['background-color: #fff7fb'])
+
+
+def test_block_names():
+ # catch accidental removal of a block
+ expected = {
+ 'before_style', 'style', 'table_styles', 'before_cellstyle',
+ 'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody',
+ 'after_table', 'before_head_rows', 'head_tr', 'after_head_rows',
+ 'before_rows', 'tr', 'after_rows',
+ }
+ result = set(Styler.template.blocks)
+ assert result == expected
+
+
+def test_from_custom_template(tmpdir):
+ p = tmpdir.mkdir("templates").join("myhtml.tpl")
+ p.write(textwrap.dedent("""\
+ {% extends "html.tpl" %}
+ {% block table %}
+ <h1>{{ table_title|default("My Table") }}</h1>
+ {{ super() }}
+ {% endblock table %}"""))
+ result = Styler.from_custom_template(str(tmpdir.join('templates')),
+ 'myhtml.tpl')
+ assert issubclass(result, Styler)
+ assert result.env is not Styler.env
+ assert result.template is not Styler.template
+ styler = result(pd.DataFrame({"A": [1, 2]}))
+ assert styler.render()
diff --git a/pandas/util/importing.py b/pandas/util/importing.py
new file mode 100644
index 0000000000000..9323fb97baac0
--- /dev/null
+++ b/pandas/util/importing.py
@@ -0,0 +1,10 @@
+class _UnSubclassable(type):
+ """
+ Metaclass to raise an ImportError when subclassed
+ """
+ msg = ""
+
+ def __init__(cls, name, bases, clsdict):
+ if len(cls.mro()) > 2:
+ raise ImportError(cls.msg)
+ super(_UnSubclassable, cls).__init__(name, bases, clsdict)
diff --git a/setup.py b/setup.py
index 6707af7eb0908..d8ee52f9b4f43 100755
--- a/setup.py
+++ b/setup.py
@@ -704,7 +704,8 @@ def pxd(name):
'data/html_encoding/*.html',
'json/data/*.json'],
'pandas.tests.tools': ['data/*.csv'],
- 'pandas.tests.tseries': ['data/*.pickle']
+ 'pandas.tests.tseries': ['data/*.pickle'],
+ 'pandas.formats': ['templates/*.tpl']
},
ext_modules=extensions,
maintainer_email=EMAIL,
| ENH: Add blocks to Styler template
This will make subclassing the Styler and extending
the templates easier.
REF: Move template to its own file
Use Environment and PackageLoader to load it | https://api.github.com/repos/pandas-dev/pandas/pulls/15954 | 2017-04-08T22:25:20Z | 2017-04-15T13:59:00Z | 2017-04-15T13:59:00Z | 2017-05-29T20:44:01Z |
TST: Add test decorators for redirecting stdout and stderr | diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 024e11e63a924..918938c1758ed 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -191,6 +191,7 @@ def test_latex_repr(self):
# GH 12182
self.assertIsNone(df._repr_latex_())
+ @tm.capture_stdout
def test_info(self):
io = StringIO()
self.frame.info(buf=io)
@@ -198,11 +199,8 @@ def test_info(self):
frame = DataFrame(np.random.randn(5, 3))
- import sys
- sys.stdout = StringIO()
frame.info()
frame.info(verbose=False)
- sys.stdout = sys.__stdout__
def test_info_wide(self):
from pandas import set_option, reset_option
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index ab30301e710a6..6eadf2c61c974 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -1254,6 +1254,7 @@ def test_regex_separator(self):
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
+ @tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
@@ -1265,22 +1266,18 @@ def test_verbose_import(self):
one,1,2,3
two,1,2,3"""
- buf = StringIO()
- sys.stdout = buf
+ # Engines are verbose in different ways.
+ self.read_csv(StringIO(text), verbose=True)
+ output = sys.stdout.getvalue()
- try: # engines are verbose in different ways
- self.read_csv(StringIO(text), verbose=True)
- if self.engine == 'c':
- self.assertIn('Tokenization took:', buf.getvalue())
- self.assertIn('Parser memory cleanup took:', buf.getvalue())
- else: # Python engine
- self.assertEqual(buf.getvalue(),
- 'Filled 3 NA values in column a\n')
- finally:
- sys.stdout = sys.__stdout__
+ if self.engine == 'c':
+ assert 'Tokenization took:' in output
+ assert 'Parser memory cleanup took:' in output
+ else: # Python engine
+ assert output == 'Filled 3 NA values in column a\n'
- buf = StringIO()
- sys.stdout = buf
+ # Reset the stdout buffer.
+ sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
@@ -1292,16 +1289,15 @@ def test_verbose_import(self):
seven,1,2,3
eight,1,2,3"""
- try: # engines are verbose in different ways
- self.read_csv(StringIO(text), verbose=True, index_col=0)
- if self.engine == 'c':
- self.assertIn('Tokenization took:', buf.getvalue())
- self.assertIn('Parser memory cleanup took:', buf.getvalue())
- else: # Python engine
- self.assertEqual(buf.getvalue(),
- 'Filled 1 NA values in column a\n')
- finally:
- sys.stdout = sys.__stdout__
+ self.read_csv(StringIO(text), verbose=True, index_col=0)
+ output = sys.stdout.getvalue()
+
+ # Engines are verbose in different ways.
+ if self.engine == 'c':
+ assert 'Tokenization took:' in output
+ assert 'Parser memory cleanup took:' in output
+ else: # Python engine
+ assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
@@ -1696,6 +1692,7 @@ class InvalidBuffer(object):
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(mock.Mock())
+ @tm.capture_stderr
def test_skip_bad_lines(self):
# see gh-15925
data = 'a\n1\n1,2,3\n4\n5,6,7'
@@ -1706,30 +1703,24 @@ def test_skip_bad_lines(self):
with tm.assertRaises(ParserError):
self.read_csv(StringIO(data), error_bad_lines=True)
- stderr = sys.stderr
expected = DataFrame({'a': [1, 4]})
- sys.stderr = StringIO()
- try:
- out = self.read_csv(StringIO(data),
- error_bad_lines=False,
- warn_bad_lines=False)
- tm.assert_frame_equal(out, expected)
+ out = self.read_csv(StringIO(data),
+ error_bad_lines=False,
+ warn_bad_lines=False)
+ tm.assert_frame_equal(out, expected)
- val = sys.stderr.getvalue()
- self.assertEqual(val, '')
- finally:
- sys.stderr = stderr
+ val = sys.stderr.getvalue()
+ assert val == ''
+ # Reset the stderr buffer.
sys.stderr = StringIO()
- try:
- out = self.read_csv(StringIO(data),
- error_bad_lines=False,
- warn_bad_lines=True)
- tm.assert_frame_equal(out, expected)
- val = sys.stderr.getvalue()
- self.assertTrue('Skipping line 3' in val)
- self.assertTrue('Skipping line 5' in val)
- finally:
- sys.stderr = stderr
+ out = self.read_csv(StringIO(data),
+ error_bad_lines=False,
+ warn_bad_lines=True)
+ tm.assert_frame_equal(out, expected)
+
+ val = sys.stderr.getvalue()
+ assert 'Skipping line 3' in val
+ assert 'Skipping line 5' in val
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index 510e3c689649c..2949254257d15 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -8,7 +8,6 @@
"""
import csv
-import sys
import pytest
import pandas.util.testing as tm
@@ -92,16 +91,9 @@ def test_BytesIO_input(self):
def test_single_line(self):
# see gh-6607: sniff separator
-
- buf = StringIO()
- sys.stdout = buf
-
- try:
- df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
- header=None, sep=None)
- tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
- finally:
- sys.stdout = sys.__stdout__
+ df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
+ header=None, sep=None)
+ tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
def test_skipfooter(self):
# see gh-6607
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index b6a9900b0b087..505dc16942f31 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -142,6 +142,7 @@ def test_integer_thousands_alt(self):
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
+ @tm.capture_stderr
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
@@ -165,19 +166,15 @@ def test_skip_bad_lines(self):
2: ['c', 'f', 'i', 'n']}
assert_array_dicts_equal(result, expected)
- stderr = sys.stderr
- sys.stderr = StringIO()
- try:
- reader = TextReader(StringIO(data), delimiter=':',
- header=None,
- error_bad_lines=False,
- warn_bad_lines=True)
- reader.read()
- val = sys.stderr.getvalue()
- self.assertTrue('Skipping line 4' in val)
- self.assertTrue('Skipping line 6' in val)
- finally:
- sys.stderr = stderr
+ reader = TextReader(StringIO(data), delimiter=':',
+ header=None,
+ error_bad_lines=False,
+ warn_bad_lines=True)
+ reader.read()
+ val = sys.stderr.getvalue()
+
+ assert 'Skipping line 4' in val
+ assert 'Skipping line 6' in val
def test_header_not_enough_lines(self):
data = ('skip this\n'
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 890f52e8c65e9..5318e8532c58e 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -23,7 +23,6 @@
import sqlite3
import csv
import os
-import sys
import warnings
import numpy as np
@@ -36,7 +35,7 @@
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
-from pandas.compat import StringIO, range, lrange, string_types, PY36
+from pandas.compat import range, lrange, string_types, PY36
from pandas.tseries.tools import format as date_format
import pandas.io.sql as sql
@@ -2220,6 +2219,7 @@ def test_schema(self):
cur = self.conn.cursor()
cur.execute(create_sql)
+ @tm.capture_stdout
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
@@ -2236,14 +2236,10 @@ def test_execute_fail(self):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
- try:
- sys.stdout = StringIO()
- self.assertRaises(Exception, sql.execute,
- 'INSERT INTO test VALUES("foo", "bar", 7)',
- self.conn)
- finally:
- sys.stdout = sys.__stdout__
+ with pytest.raises(Exception):
+ sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
+ @tm.capture_stdout
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
@@ -2259,12 +2255,9 @@ def test_execute_closed_connection(self):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
- try:
- sys.stdout = StringIO()
- self.assertRaises(Exception, tquery, "select * from test",
- con=self.conn)
- finally:
- sys.stdout = sys.__stdout__
+
+ with pytest.raises(Exception):
+ tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setUp()
@@ -2534,6 +2527,7 @@ def test_schema(self):
cur.execute(drop_sql)
cur.execute(create_sql)
+ @tm.capture_stdout
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
@@ -2553,14 +2547,10 @@ def test_execute_fail(self):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
- try:
- sys.stdout = StringIO()
- self.assertRaises(Exception, sql.execute,
- 'INSERT INTO test VALUES("foo", "bar", 7)',
- self.conn)
- finally:
- sys.stdout = sys.__stdout__
+ with pytest.raises(Exception):
+ sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
+ @tm.capture_stdout
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
@@ -2579,12 +2569,9 @@ def test_execute_closed_connection(self):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
- try:
- sys.stdout = StringIO()
- self.assertRaises(Exception, tquery, "select * from test",
- con=self.conn)
- finally:
- sys.stdout = sys.__stdout__
+
+ with pytest.raises(Exception):
+ tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setUp()
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 48af366f24ea4..1527637ea3eff 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -12,7 +12,7 @@
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.types.api import is_list_like
-from pandas.compat import (range, lrange, StringIO, lmap, lzip, u, zip, PY3)
+from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3
from pandas.formats.printing import pprint_thing
import pandas.util.testing as tm
from pandas.util.testing import slow
@@ -1558,8 +1558,8 @@ def test_line_label_none(self):
self.assertEqual(ax.get_legend().get_texts()[0].get_text(), 'None')
@slow
+ @tm.capture_stdout
def test_line_colors(self):
- import sys
from matplotlib import cm
custom_colors = 'rgcby'
@@ -1568,16 +1568,13 @@ def test_line_colors(self):
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
- tmp = sys.stderr
- sys.stderr = StringIO()
- try:
- tm.close()
- ax2 = df.plot(colors=custom_colors)
- lines2 = ax2.get_lines()
- for l1, l2 in zip(ax.get_lines(), lines2):
- self.assertEqual(l1.get_color(), l2.get_color())
- finally:
- sys.stderr = tmp
+ tm.close()
+
+ ax2 = df.plot(colors=custom_colors)
+ lines2 = ax2.get_lines()
+
+ for l1, l2 in zip(ax.get_lines(), lines2):
+ self.assertEqual(l1.get_color(), l2.get_color())
tm.close()
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 99a406a71b12b..188b96638344c 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -3,13 +3,15 @@
from datetime import datetime, timedelta
+import sys
+
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, date_range)
from pandas.core.index import MultiIndex
-from pandas.compat import StringIO, lrange, range, u
+from pandas.compat import lrange, range, u
from pandas import compat
import pandas.util.testing as tm
@@ -112,20 +114,15 @@ def test_tidy_repr(self):
a.name = 'title1'
repr(a) # should not raise exception
+ @tm.capture_stderr
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
- import sys
-
- buf = StringIO()
- tmp = sys.stderr
- sys.stderr = buf
- try:
- # it works (with no Cython exception barf)!
- repr(s)
- finally:
- sys.stderr = tmp
- self.assertEqual(buf.getvalue(), '')
+ # It works (with no Cython exception barf)!
+ repr(s)
+
+ output = sys.stderr.getvalue()
+ assert output == ''
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index 4e1719958e8b7..ca588e2a0432e 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -1,7 +1,6 @@
-from pandas.compat import StringIO, callable, signature
+from pandas.compat import callable, signature
from pandas._libs.lib import cache_readonly # noqa
import types
-import sys
import warnings
from textwrap import dedent
from functools import wraps, update_wrapper
@@ -196,17 +195,6 @@ def indent(text, indents=1):
return jointext.join(text.split('\n'))
-def suppress_stdout(f):
- def wrapped(*args, **kwargs):
- try:
- sys.stdout = StringIO()
- f(*args, **kwargs)
- finally:
- sys.stdout = sys.__stdout__
-
- return wrapped
-
-
def make_signature(func):
"""
Returns a string repr of the arg list of a func call, with any defaults
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 9d7b004374318..ef0fa04548cab 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -38,7 +38,7 @@
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, is_platform_windows, is_platform_32bit,
- PY3
+ StringIO, PY3
)
from pandas.computation import expressions as expr
@@ -629,6 +629,105 @@ def _valid_locales(locales, normalize):
return list(filter(_can_set_locale, map(normalizer, locales)))
+# -----------------------------------------------------------------------------
+# Stdout / stderr decorators
+
+
+def capture_stdout(f):
+ """
+ Decorator to capture stdout in a buffer so that it can be checked
+ (or suppressed) during testing.
+
+ Parameters
+ ----------
+ f : callable
+ The test that is capturing stdout.
+
+ Returns
+ -------
+ f : callable
+ The decorated test ``f``, which captures stdout.
+
+ Examples
+ --------
+
+ >>> from pandas.util.testing import capture_stdout
+ >>>
+ >>> import sys
+ >>>
+ >>> @capture_stdout
+ ... def test_print_pass():
+ ... print("foo")
+ ... out = sys.stdout.getvalue()
+ ... assert out == "foo\n"
+ >>>
+ >>> @capture_stdout
+ ... def test_print_fail():
+ ... print("foo")
+ ... out = sys.stdout.getvalue()
+ ... assert out == "bar\n"
+ ...
+ AssertionError: assert 'foo\n' == 'bar\n'
+ """
+
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ try:
+ sys.stdout = StringIO()
+ f(*args, **kwargs)
+ finally:
+ sys.stdout = sys.__stdout__
+
+ return wrapper
+
+
+def capture_stderr(f):
+ """
+ Decorator to capture stderr in a buffer so that it can be checked
+ (or suppressed) during testing.
+
+ Parameters
+ ----------
+ f : callable
+ The test that is capturing stderr.
+
+ Returns
+ -------
+ f : callable
+ The decorated test ``f``, which captures stderr.
+
+ Examples
+ --------
+
+ >>> from pandas.util.testing import capture_stderr
+ >>>
+ >>> import sys
+ >>>
+ >>> @capture_stderr
+ ... def test_stderr_pass():
+ ... sys.stderr.write("foo")
+ ... out = sys.stderr.getvalue()
+ ... assert out == "foo\n"
+ >>>
+ >>> @capture_stderr
+ ... def test_stderr_fail():
+ ... sys.stderr.write("foo")
+ ... out = sys.stderr.getvalue()
+ ... assert out == "bar\n"
+ ...
+ AssertionError: assert 'foo\n' == 'bar\n'
+ """
+
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ try:
+ sys.stderr = StringIO()
+ f(*args, **kwargs)
+ finally:
+ sys.stderr = sys.__stderr__
+
+ return wrapper
+
# -----------------------------------------------------------------------------
# Console debugging tools
| Add testing decorators for redirecting `stdout` and `stderr`.
xref <a href="https://github.com/pandas-dev/pandas/pull/15925#discussion_r110283696">#15925 (comment)</a> | https://api.github.com/repos/pandas-dev/pandas/pulls/15952 | 2017-04-08T20:00:58Z | 2017-04-08T21:58:32Z | 2017-04-08T21:58:32Z | 2017-04-08T22:01:08Z |
DOC: Cleanup for nbsphinx output | diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb
index 7e408f96f6c28..38b39bad8b415 100644
--- a/doc/source/style.ipynb
+++ b/doc/source/style.ipynb
@@ -45,6 +45,20 @@
"Let's see some examples."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true,
+ "nbsphinx": "hidden"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib\n",
+ "# We have this here to trigger matplotlib's font cache stuff.\n",
+ "# This cell is hidden from the output"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
diff --git a/doc/source/themes/nature_with_gtoc/static/nature.css_t b/doc/source/themes/nature_with_gtoc/static/nature.css_t
index 2958678dc8221..1adaaf58d79c5 100644
--- a/doc/source/themes/nature_with_gtoc/static/nature.css_t
+++ b/doc/source/themes/nature_with_gtoc/static/nature.css_t
@@ -330,6 +330,12 @@ tbody tr:nth-child(odd) {
background: #f5f5f5;
}
+table td.data, table th.row_heading table th.col_heading {
+ font-family: monospace;
+ text-align: right;
+}
+
+
/**
* See also
*/
| Followup to https://github.com/pandas-dev/pandas/pull/15581
Using the `nbsphinx: hidden` metadata to hide the ouptut, so
readers don't see matplotlib's fc-list warning.
Make the tables monospaced in CSS. | https://api.github.com/repos/pandas-dev/pandas/pulls/15951 | 2017-04-08T18:47:55Z | 2017-04-08T20:58:17Z | 2017-04-08T20:58:17Z | 2017-05-29T20:43:57Z |
TST: clean up series/frame api tests inheritance a bit | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index e8170b4bf2113..fd1cd3d0022c9 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1219,6 +1219,7 @@ Conversion
- Bug in ``DataFrame.fillna()`` with tz-aware datetimes (:issue:`15855`)
- Bug in ``is_string_dtype``, ``is_timedelta64_ns_dtype``, and ``is_string_like_dtype`` in which an error was raised when ``None`` was passed in (:issue:`15941`)
- Bug in the return type of ``pd.unique`` on a ``Categorical``, which was returning an ndarray and not a ``Categorical`` (:issue:`15903`)
+- Bug in ``Index.to_series()`` where the index was not copied (and so mutating later would change the original), (:issue:`15949`)
Indexing
^^^^^^^^
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index 91e2422873dd4..bf7975bcdb964 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -944,7 +944,9 @@ def to_series(self, **kwargs):
"""
from pandas import Series
- return Series(self._to_embed(), index=self, name=self.name)
+ return Series(self._to_embed(),
+ index=self._shallow_copy(),
+ name=self.name)
def _to_embed(self, keep_tz=False):
"""
diff --git a/pandas/tests/frame/test_misc_api.py b/pandas/tests/frame/test_api.py
similarity index 100%
rename from pandas/tests/frame/test_misc_api.py
rename to pandas/tests/frame/test_api.py
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 647af92b42273..f90b37b66d200 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -484,7 +484,7 @@ def test_date_index_query_with_NaT_duplicates(self):
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
- res = df.query('index < 20130101 < dates3', engine=engine,
+ res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index.to_series() < '20130101') &
('20130101' < df.dates3)]
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index ba76945834aff..08f8f8d48e705 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -38,6 +38,15 @@ def test_pickle_compat_construction(self):
# need an object to create with
self.assertRaises(TypeError, self._holder)
+ def test_to_series(self):
+ # assert that we are creating a copy of the index
+
+ idx = self.create_index()
+ s = idx.to_series()
+ assert s.values is not idx.values
+ assert s.index is not idx
+ assert s.name == idx.name
+
def test_shift(self):
# GH8083 test the base class for shift
diff --git a/pandas/tests/series/test_misc_api.py b/pandas/tests/series/test_api.py
similarity index 100%
rename from pandas/tests/series/test_misc_api.py
rename to pandas/tests/series/test_api.py
diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py
index b8d1b92081858..5aca34fb86576 100644
--- a/pandas/tests/series/test_quantile.py
+++ b/pandas/tests/series/test_quantile.py
@@ -16,17 +16,16 @@
class TestSeriesQuantile(TestData, tm.TestCase):
def test_quantile(self):
- from numpy import percentile
q = self.ts.quantile(0.1)
- self.assertEqual(q, percentile(self.ts.valid(), 10))
+ self.assertEqual(q, np.percentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
- self.assertEqual(q, percentile(self.ts.valid(), 90))
+ self.assertEqual(q, np.percentile(self.ts.valid(), 90))
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
- self.assertEqual(q, percentile(self.ts.valid(), 90))
+ self.assertEqual(q, np.percentile(self.ts.valid(), 90))
# datetime64[ns] dtype
dts = self.ts.index.to_series()
@@ -48,12 +47,11 @@ def test_quantile(self):
self.ts.quantile(invalid)
def test_quantile_multi(self):
- from numpy import percentile
qs = [.1, .9]
result = self.ts.quantile(qs)
- expected = pd.Series([percentile(self.ts.valid(), 10),
- percentile(self.ts.valid(), 90)],
+ expected = pd.Series([np.percentile(self.ts.valid(), 10),
+ np.percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
@@ -70,50 +68,44 @@ def test_quantile_multi(self):
[], dtype=float))
tm.assert_series_equal(result, expected)
+ @pytest.mark.skipif(_np_version_under1p9,
+ reason="Numpy version is under 1.9")
def test_quantile_interpolation(self):
# GH #10174
- if _np_version_under1p9:
- pytest.skip("Numpy version is under 1.9")
-
- from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
- self.assertEqual(q, percentile(self.ts.valid(), 10))
+ self.assertEqual(q, np.percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
- self.assertEqual(q1, percentile(self.ts.valid(), 10))
+ self.assertEqual(q1, np.percentile(self.ts.valid(), 10))
# test with and without interpolation keyword
self.assertEqual(q, q1)
+ @pytest.mark.skipif(_np_version_under1p9,
+ reason="Numpy version is under 1.9")
def test_quantile_interpolation_dtype(self):
# GH #10174
- if _np_version_under1p9:
- pytest.skip("Numpy version is under 1.9")
-
- from numpy import percentile
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
- self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
+ self.assertEqual(q, np.percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
- self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
+ self.assertEqual(q, np.percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
+ @pytest.mark.skipif(not _np_version_under1p9,
+ reason="Numpy version is greater 1.9")
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
- if not _np_version_under1p9:
- pytest.skip("Numpy version is greater than 1.9")
-
- from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
- self.assertEqual(q, percentile(self.ts.valid(), 10))
+ self.assertEqual(q, np.percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
- self.assertEqual(q1, percentile(self.ts.valid(), 10))
+ self.assertEqual(q1, np.percentile(self.ts.valid(), 10))
# interpolation other than linear
expErrMsg = "Interpolation methods other than "
diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py
index ae1a1e35f1859..e6482d70e0ae3 100644
--- a/pandas/tests/sparse/test_frame.py
+++ b/pandas/tests/sparse/test_frame.py
@@ -22,7 +22,7 @@
from pandas.sparse.libsparse import BlockIndex, IntIndex
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparseArray
-from pandas.tests.frame.test_misc_api import SharedWithSparse
+from pandas.tests.frame.test_api import SharedWithSparse
from pandas.tests.sparse.common import spmatrix # noqa: F401
diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py
index 8aa85a5b7f396..83f0237841dbd 100644
--- a/pandas/tests/sparse/test_series.py
+++ b/pandas/tests/sparse/test_series.py
@@ -18,7 +18,7 @@
from pandas.sparse.libsparse import BlockIndex, IntIndex
from pandas.sparse.api import SparseSeries
-from pandas.tests.series.test_misc_api import SharedWithSparse
+from pandas.tests.series.test_api import SharedWithSparse
def _test_data1():
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 8fa842a836051..2c14d4f8ea79e 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -895,7 +895,9 @@ def to_series(self, keep_tz=False):
Series
"""
from pandas import Series
- return Series(self._to_embed(keep_tz), index=self, name=self.name)
+ return Series(self._to_embed(keep_tz),
+ index=self._shallow_copy(),
+ name=self.name)
def _to_embed(self, keep_tz=False):
"""
| https://api.github.com/repos/pandas-dev/pandas/pulls/15949 | 2017-04-08T14:46:33Z | 2017-04-10T12:12:01Z | 2017-04-10T12:12:01Z | 2017-04-10T12:12:13Z | |
COMPAT: 32bit compat on indexing for MI.remove_unused_levels | diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 3f84d8b292980..74c45aac8b620 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -1224,6 +1224,7 @@ def _sort_levels_monotonic(self):
lev = lev.take(indexer)
# indexer to reorder the labels
+ indexer = _ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
lab = algos.take_1d(ri, lab)
| https://api.github.com/repos/pandas-dev/pandas/pulls/15948 | 2017-04-08T13:43:06Z | 2017-04-08T14:32:09Z | 2017-04-08T14:32:09Z | 2017-04-08T15:56:35Z | |
MAINT: Refactor Python engine empty line funcs | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 10f8c53987471..95fbbce9de205 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2441,7 +2441,19 @@ def _check_for_bom(self, first_row):
# return an empty string.
return [""]
- def _empty(self, line):
+ def _is_line_empty(self, line):
+ """
+ Check if a line is empty or not.
+
+ Parameters
+ ----------
+ line : str, array-like
+ The line of data to check.
+
+ Returns
+ -------
+ boolean : Whether or not the line is empty.
+ """
return not line or all(not x for x in line)
def _next_line(self):
@@ -2454,11 +2466,12 @@ def _next_line(self):
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
- if not self.skip_blank_lines and (self._empty(self.data[
- self.pos - 1]) or line):
+ if (not self.skip_blank_lines and
+ (self._is_line_empty(
+ self.data[self.pos - 1]) or line)):
break
elif self.skip_blank_lines:
- ret = self._check_empty([line])
+ ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
@@ -2477,12 +2490,12 @@ def _next_line(self):
line = self._check_comments([orig_line])[0]
if self.skip_blank_lines:
- ret = self._check_empty([line])
+ ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
- elif self._empty(orig_line) or line:
+ elif self._is_line_empty(orig_line) or line:
break
# This was the first line of the file,
@@ -2573,7 +2586,22 @@ def _check_comments(self, lines):
ret.append(rl)
return ret
- def _check_empty(self, lines):
+ def _remove_empty_lines(self, lines):
+ """
+ Iterate through the lines and remove any that are
+ either empty or contain only one whitespace value
+
+ Parameters
+ ----------
+ lines : array-like
+ The array of lines that we are to filter.
+
+ Returns
+ -------
+ filtered_lines : array-like
+ The same array of lines with the "empty" ones removed.
+ """
+
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
@@ -2816,7 +2844,7 @@ def _get_lines(self, rows=None):
lines = self._check_comments(lines)
if self.skip_blank_lines:
- lines = self._check_empty(lines)
+ lines = self._remove_empty_lines(lines)
lines = self._check_thousands(lines)
return self._check_decimal(lines)
| The Python engine's `_empty` and `_check_empty` methods were uninformative and undocumented.
This commit renames them to `_is_line_empty` and `_remove_empty_lines` respectively and provides appropriate documentation.
xref <a href="https://github.com/pandas-dev/pandas/pull/15925#discussion_r110377233">#15925 (comment)</a>
| https://api.github.com/repos/pandas-dev/pandas/pulls/15946 | 2017-04-07T20:33:47Z | 2017-04-08T13:25:24Z | 2017-04-08T13:25:24Z | 2017-04-08T16:35:49Z |
BUG: Validate the skipfooter parameter in read_csv | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index d571c0f2d9620..95f5ed6916936 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1184,6 +1184,7 @@ I/O
- Bug in ``pd.read_csv()`` in which certain invalid file objects caused the Python interpreter to crash (:issue:`15337`)
- Bug in ``pd.read_csv()`` in which invalid values for ``nrows`` and ``chunksize`` were allowed (:issue:`15767`)
- Bug in ``pd.read_csv()`` for the Python engine in which unhelpful error messages were being raised when parsing errors occurred (:issue:`15910`)
+- Bug in ``pd.read_csv()`` in which the ``skipfooter`` parameter was not being properly validated (:issue:`15925`)
- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
- Bug in ``.to_json()`` causing single byte ascii characters to be expanded to four byte unicode (:issue:`15344`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 10f8c53987471..a968a2b9623d9 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1036,6 +1036,37 @@ def _evaluate_usecols(usecols, names):
return usecols
+def _validate_skipfooter_arg(skipfooter):
+ """
+ Validate the 'skipfooter' parameter.
+
+ Checks whether 'skipfooter' is a non-negative integer.
+ Raises a ValueError if that is not the case.
+
+ Parameters
+ ----------
+ skipfooter : non-negative integer
+ The number of rows to skip at the end of the file.
+
+ Returns
+ -------
+ validated_skipfooter : non-negative integer
+ The original input if the validation succeeds.
+
+ Raises
+ ------
+ ValueError : 'skipfooter' was not a non-negative integer.
+ """
+
+ if not is_integer(skipfooter):
+ raise ValueError("skipfooter must be an integer")
+
+ if skipfooter < 0:
+ raise ValueError("skipfooter cannot be negative")
+
+ return skipfooter
+
+
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
@@ -1880,7 +1911,7 @@ def __init__(self, f, **kwds):
else:
self.skipfunc = lambda x: x in self.skiprows
- self.skipfooter = kwds['skipfooter']
+ self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter'])
self.delimiter = kwds['delimiter']
self.quotechar = kwds['quotechar']
@@ -2684,9 +2715,6 @@ def _get_index_name(self, columns):
return index_name, orig_names, columns
def _rows_to_cols(self, content):
- if self.skipfooter < 0:
- raise ValueError('skip footer cannot be negative')
-
col_len = self.num_original_columns
if self._implicit_index:
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index ee0f00506cef3..ab30301e710a6 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -546,7 +546,7 @@ def test_iterator(self):
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
- iterator=True, skipfooter=True)
+ iterator=True, skipfooter=1)
self.assertRaises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index 9a1eb94270e28..510e3c689649c 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -20,20 +20,22 @@
class PythonParserTests(object):
- def test_negative_skipfooter_raises(self):
- text = """#foo,a,b,c
-#foo,a,b,c
-#foo,a,b,c
-#foo,a,b,c
-#foo,a,b,c
-#foo,a,b,c
-1/1/2000,1.,2.,3.
-1/2/2000,4,5,6
-1/3/2000,7,8,9
-"""
+ def test_invalid_skipfooter(self):
+ text = "a\n1\n2"
+
+ # see gh-15925 (comment)
+ msg = "skipfooter must be an integer"
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(text), skipfooter="foo")
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(text), skipfooter=1.5)
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(text), skipfooter=True)
- with tm.assertRaisesRegexp(
- ValueError, 'skip footer cannot be negative'):
+ msg = "skipfooter cannot be negative"
+ with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(text), skipfooter=-1)
def test_sniff_delimiter(self):
diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py
index 14146a3ad1e9a..9637b449de6da 100644
--- a/pandas/tests/io/parser/test_unsupported.py
+++ b/pandas/tests/io/parser/test_unsupported.py
@@ -112,8 +112,8 @@ def test_deprecated_args(self):
'as_recarray': True,
'buffer_lines': True,
'compact_ints': True,
- 'skip_footer': True,
'use_unsigned': True,
+ 'skip_footer': 1,
}
engines = 'c', 'python'
| Previously, the `skipfooter` parameter was assumed to be an integer, but that was not checked.
xref <a href="https://github.com/pandas-dev/pandas/pull/15925#discussion_r110283317">#15925 (comment)</a>
| https://api.github.com/repos/pandas-dev/pandas/pulls/15945 | 2017-04-07T20:21:29Z | 2017-04-08T13:24:44Z | 2017-04-08T13:24:44Z | 2017-04-08T16:35:56Z |
BUG/DOC: Add documentation in types/common.py | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 0b98e57c606a3..436d51da6e873 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1145,6 +1145,7 @@ Conversion
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
- Bug in ``DataFrame`` construction with nulls and datetimes in a list-like (:issue:`15869`)
- Bug in ``DataFrame.fillna()`` with tz-aware datetimes (:issue:`15855`)
+- Bug in ``is_string_dtype``, ``is_timedelta64_ns_dtype``, and ``is_string_like_dtype`` in which an error was raised when ``None`` was passed in (:issue:`15941`)
Indexing
^^^^^^^^
diff --git a/pandas/tests/types/test_common.py b/pandas/tests/types/test_common.py
index c15f219c8fad6..21772bab44d01 100644
--- a/pandas/tests/types/test_common.py
+++ b/pandas/tests/types/test_common.py
@@ -80,3 +80,30 @@ def test_dtype_equal_strict():
assert not is_dtype_equal(
pandas_dtype('datetime64[ns, US/Eastern]'),
pandas_dtype('datetime64[ns, CET]'))
+
+ # see gh-15941: no exception should be raised
+ assert not is_dtype_equal(None, None)
+
+
+def get_is_dtype_funcs():
+ """
+ Get all functions in pandas.types.common that
+ begin with 'is_' and end with 'dtype'
+
+ """
+ import pandas.types.common as com
+
+ fnames = [f for f in dir(com) if (f.startswith('is_') and
+ f.endswith('dtype'))]
+ return [getattr(com, fname) for fname in fnames]
+
+
+@pytest.mark.parametrize('func',
+ get_is_dtype_funcs(),
+ ids=lambda x: x.__name__)
+def test_get_dtype_error_catch(func):
+ # see gh-15941
+ #
+ # No exception should be raised.
+
+ assert not func(None)
diff --git a/pandas/types/common.py b/pandas/types/common.py
index 017805673defe..7ab2e068ac69f 100644
--- a/pandas/types/common.py
+++ b/pandas/types/common.py
@@ -31,6 +31,20 @@
def _ensure_float(arr):
+ """
+ Ensure that an array object has a float dtype if possible.
+
+ Parameters
+ ----------
+ arr : ndarray, Series
+ The array whose data type we want to enforce as float.
+
+ Returns
+ -------
+ float_arr : The original array cast to the float dtype if
+ possible. Otherwise, the original array is returned.
+ """
+
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
@@ -46,6 +60,20 @@ def _ensure_float(arr):
def _ensure_categorical(arr):
+ """
+ Ensure that an array-like object is a Categorical (if not already).
+
+ Parameters
+ ----------
+ arr : array-like
+ The array that we want to convert into a Categorical.
+
+ Returns
+ -------
+ cat_arr : The original array cast as a Categorical. If it already
+ is a Categorical, we return as is.
+ """
+
if not is_categorical(arr):
from pandas import Categorical
arr = Categorical(arr)
@@ -116,8 +144,40 @@ def is_categorical_dtype(arr_or_dtype):
def is_string_dtype(arr_or_dtype):
- dtype = _get_dtype(arr_or_dtype)
- return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)
+ """
+ Check whether the provided array or dtype is of the string dtype.
+
+ Parameters
+ ----------
+ arr_or_dtype : ndarray, dtype, type
+ The array or dtype to check.
+
+ Returns
+ -------
+ boolean : Whether or not the array or dtype is of the string dtype.
+
+ Examples
+ --------
+ >>> is_string_dtype(str)
+ True
+ >>> is_string_dtype(object)
+ True
+ >>> is_string_dtype(int)
+ False
+ >>>
+ >>> is_string_dtype(np.array(['a', 'b']))
+ True
+ >>> is_string_dtype(np.array([1, 2]))
+ False
+ """
+
+ # TODO: gh-15585: consider making the checks stricter.
+
+ try:
+ dtype = _get_dtype(arr_or_dtype)
+ return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)
+ except TypeError:
+ return False
def is_period_arraylike(arr):
@@ -209,8 +269,40 @@ def is_datetime64_ns_dtype(arr_or_dtype):
def is_timedelta64_ns_dtype(arr_or_dtype):
- tipo = _get_dtype(arr_or_dtype)
- return tipo == _TD_DTYPE
+ """
+ Check whether the provided array or dtype is of the timedelta64[ns] dtype.
+
+ This is a very specific dtype, so generic ones like `np.timedelta64`
+ will return False if passed into this function.
+
+ Parameters
+ ----------
+ arr_or_dtype : ndarray, dtype, type
+ The array or dtype to check.
+
+ Returns
+ -------
+ boolean : Whether or not the array or dtype
+ is of the timedelta64[ns] dtype.
+
+ Examples
+ --------
+ >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]')
+ True
+ >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]') # Wrong frequency
+ False
+ >>>
+ >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
+ True
+ >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
+ False
+ """
+
+ try:
+ tipo = _get_dtype(arr_or_dtype)
+ return tipo == _TD_DTYPE
+ except TypeError:
+ return False
def is_datetime_or_timedelta_dtype(arr_or_dtype):
@@ -220,10 +312,21 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype):
def _is_unorderable_exception(e):
"""
- return a boolean if we an unorderable exception error message
+ Check if the exception raised is an unorderable exception.
- These are different error message for PY>=3<=3.5 and PY>=3.6
+ The error message differs for 3 <= PY <= 3.5 and PY >= 3.6, so
+ we need to condition based on Python version.
+
+ Parameters
+ ----------
+ e : Exception or sub-class
+ The exception object to check.
+
+ Returns
+ -------
+ boolean : Whether or not the exception raised is an unorderable exception.
"""
+
if PY36:
return "'>' not supported between instances of" in str(e)
@@ -302,9 +405,39 @@ def is_numeric_dtype(arr_or_dtype):
def is_string_like_dtype(arr_or_dtype):
- # exclude object as its a mixed dtype
- dtype = _get_dtype(arr_or_dtype)
- return dtype.kind in ('S', 'U')
+ """
+ Check whether the provided array or dtype is of a string-like dtype.
+
+ Unlike `is_string_dtype`, the object dtype is excluded because it
+ is a mixed dtype.
+
+ Parameters
+ ----------
+ arr_or_dtype : ndarray, dtype, type
+ The array or dtype to check.
+
+ Returns
+ -------
+ boolean : Whether or not the array or dtype is of the string dtype.
+
+ Examples
+ --------
+ >>> is_string_like_dtype(str)
+ True
+ >>> is_string_like_dtype(object)
+ False
+ >>>
+ >>> is_string_like_dtype(np.array(['a', 'b']))
+ True
+ >>> is_string_like_dtype(np.array([1, 2]))
+ False
+ """
+
+ try:
+ dtype = _get_dtype(arr_or_dtype)
+ return dtype.kind in ('S', 'U')
+ except TypeError:
+ return False
def is_float_dtype(arr_or_dtype):
@@ -346,7 +479,22 @@ def is_complex_dtype(arr_or_dtype):
def _coerce_to_dtype(dtype):
- """ coerce a string / np.dtype to a dtype """
+ """
+ Coerce a string or np.dtype to a pandas or numpy
+ dtype if possible.
+
+ If we cannot convert to a pandas dtype initially,
+ we convert to a numpy dtype.
+
+ Parameters
+ ----------
+ dtype : The dtype that we want to coerce.
+
+ Returns
+ -------
+ pd_or_np_dtype : The coerced dtype.
+ """
+
if is_categorical_dtype(dtype):
dtype = CategoricalDtype()
elif is_datetime64tz_dtype(dtype):
@@ -359,8 +507,27 @@ def _coerce_to_dtype(dtype):
def _get_dtype(arr_or_dtype):
+ """
+ Get the dtype instance associated with an array
+ or dtype object.
+
+ Parameters
+ ----------
+ arr_or_dtype : ndarray, Series, dtype, type
+ The array-like or dtype object whose dtype we want to extract.
+
+ Returns
+ -------
+ obj_dtype : The extract dtype instance from the
+ passed in array or dtype object.
+
+ Raises
+ ------
+ TypeError : The passed in object is None.
+ """
+
if arr_or_dtype is None:
- raise TypeError
+ raise TypeError("Cannot deduce dtype from null object")
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
@@ -385,6 +552,21 @@ def _get_dtype(arr_or_dtype):
def _get_dtype_type(arr_or_dtype):
+ """
+ Get the type (NOT dtype) instance associated with
+ an array or dtype object.
+
+ Parameters
+ ----------
+ arr_or_dtype : ndarray, Series, dtype, type
+ The array-like or dtype object whose type we want to extract.
+
+ Returns
+ -------
+ obj_type : The extract type instance from the
+ passed in array or dtype object.
+ """
+
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
elif isinstance(arr_or_dtype, type):
@@ -410,16 +592,27 @@ def _get_dtype_type(arr_or_dtype):
def _get_dtype_from_object(dtype):
- """Get a numpy dtype.type-style object. This handles the datetime64[ns]
- and datetime64[ns, TZ] compat
+ """
+ Get a numpy dtype.type-style object for a dtype object.
- Notes
- -----
- If nothing can be found, returns ``object``.
+ This methods also includes handling of the datetime64[ns] and
+ datetime64[ns, TZ] objects.
+
+ If no dtype can be found, we return ``object``.
+
+ Parameters
+ ----------
+ dtype : dtype, type
+ The dtype object whose numpy dtype.type-style
+ object we want to extract.
+
+ Returns
+ -------
+ dtype_object : The extracted numpy dtype.type-style object.
"""
- # type object from a dtype
if isinstance(dtype, type) and issubclass(dtype, np.generic):
+ # Type object from a dtype
return dtype
elif is_categorical(dtype):
return CategoricalDtype().type
@@ -429,7 +622,7 @@ def _get_dtype_from_object(dtype):
try:
_validate_date_like_dtype(dtype)
except TypeError:
- # should still pass if we don't have a datelike
+ # Should still pass if we don't have a date-like
pass
return dtype.type
elif isinstance(dtype, string_types):
@@ -444,10 +637,11 @@ def _get_dtype_from_object(dtype):
try:
return _get_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
- # handles cases like _get_dtype(int)
- # i.e., python objects that are valid dtypes (unlike user-defined
- # types, in general)
- # TypeError handles the float16 typecode of 'e'
+ # Handles cases like _get_dtype(int) i.e.,
+ # Python objects that are valid dtypes
+ # (unlike user-defined types, in general)
+ #
+ # TypeError handles the float16 type code of 'e'
# further handle internal types
pass
@@ -455,6 +649,21 @@ def _get_dtype_from_object(dtype):
def _validate_date_like_dtype(dtype):
+ """
+ Check whether the dtype is a date-like dtype. Raises an error if invalid.
+
+ Parameters
+ ----------
+ dtype : dtype, type
+ The dtype to check.
+
+ Raises
+ ------
+ TypeError : The dtype could not be casted to a date-like dtype.
+ ValueError : The dtype is an illegal date-like dtype (e.g. the
+ the frequency provided is too specific)
+ """
+
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
| Adds documentation for all internal functions in `types/common.py`
In addition, caught a bug in which some functions calling `_get_dtype` were not catching the `TypeError`. Documented those functions along the way too.
Partially addresses #15895. | https://api.github.com/repos/pandas-dev/pandas/pulls/15941 | 2017-04-07T17:31:33Z | 2017-04-07T22:42:30Z | 2017-04-07T22:42:30Z | 2017-04-08T00:40:39Z |
DEPR: deprecate pd.get_store as not api consistent and cluttering | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index cb9e2496757ef..d607d070bf2c6 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -913,13 +913,14 @@ Deprecations
- importing ``concat`` from ``pandas.tools.merge`` has been deprecated in favor of imports from the ``pandas`` namespace. This should only affect explict imports (:issue:`15358`)
- ``Series/DataFrame/Panel.consolidate()`` been deprecated as a public method. (:issue:`15483`)
- The ``as_indexer`` keyword of ``Series.str.match()`` has been deprecated (ignored keyword) (:issue:`15257`).
-- The following top-level pandas functions have been deprecated and will be removed in a future version (:issue:`13790`)
+- The following top-level pandas functions have been deprecated and will be removed in a future version (:issue:`13790`, :issue:`15940`)
* ``pd.pnow()``, replaced by ``Period.now()``
* ``pd.Term``, is removed, as it is not applicable to user code. Instead use in-line string expressions in the where clause when searching in HDFStore
* ``pd.Expr``, is removed, as it is not applicable to user code.
* ``pd.match()``, is removed.
* ``pd.groupby()``, replaced by using the ``.groupby()`` method directly on a ``Series/DataFrame``
+ * ``pd.get_store()``, replaced by a direct call to ``pd.HDFStore(...)``
.. _whatsnew_0200.prior_deprecations:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 9b525b76b0f17..802f460ecba07 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1323,6 +1323,13 @@ def _read_group(self, group, **kwargs):
def get_store(path, **kwargs):
""" Backwards compatible alias for ``HDFStore``
"""
+ warnings.warn(
+ "get_store is deprecated and be "
+ "removed in a future version\n"
+ "HDFStore(path, **kwargs) is the replacement",
+ FutureWarning,
+ stacklevel=6)
+
return HDFStore(path, **kwargs)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 7d1308d67668e..7301c87026114 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -2,6 +2,7 @@
from warnings import catch_warnings
+import pytest
import pandas as pd
from pandas import api
from pandas.util import testing as tm
@@ -63,7 +64,7 @@ class TestPDApi(Base, tm.TestCase):
# top-level functions
funcs = ['bdate_range', 'concat', 'crosstab', 'cut',
'date_range', 'eval',
- 'factorize', 'get_dummies', 'get_store',
+ 'factorize', 'get_dummies',
'infer_freq', 'isnull', 'lreshape',
'melt', 'notnull', 'offsets',
'merge', 'merge_ordered', 'merge_asof',
@@ -102,7 +103,7 @@ class TestPDApi(Base, tm.TestCase):
'rolling_median', 'rolling_min', 'rolling_quantile',
'rolling_skew', 'rolling_std', 'rolling_sum',
'rolling_var', 'rolling_window', 'ordered_merge',
- 'pnow', 'match', 'groupby']
+ 'pnow', 'match', 'groupby', 'get_store']
def test_api(self):
@@ -140,6 +141,7 @@ def test_deprecation_access_obj(self):
class TestTopLevelDeprecations(tm.TestCase):
+
# top-level API deprecations
# GH 13790
@@ -168,6 +170,16 @@ def test_groupby(self):
check_stacklevel=False):
pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1])
+ # GH 15940
+
+ def test_get_store(self):
+ pytest.importorskip('tables')
+ with tm.ensure_clean() as path:
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ s = pd.get_store(path)
+ s.close()
+
class TestJson(tm.TestCase):
| https://api.github.com/repos/pandas-dev/pandas/pulls/15940 | 2017-04-07T14:02:46Z | 2017-04-07T15:21:05Z | 2017-04-07T15:21:05Z | 2017-04-07T15:23:01Z | |
DOC/TST: add pd.unique doc-string & buggy return of Categorical | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 7664688ffa4f4..4c0594c024774 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -593,6 +593,76 @@ result. On the other hand, this might have backward incompatibilities: e.g.
compared to numpy arrays, ``Index`` objects are not mutable. To get the original
ndarray, you can always convert explicitly using ``np.asarray(idx.hour)``.
+.. _whatsnew_0200.api_breaking.unique:
+
+pd.unique will now be consistent with extension types
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In prior versions, using ``Series.unique()`` and ``pd.unique(Series)`` on ``Categorical`` and tz-aware
+datatypes would yield different return types. These are now made consistent. (:issue:`15903`)
+
+- Datetime tz-aware
+
+ Previous behaviour:
+
+ .. code-block:: ipython
+
+ # Series
+ In [5]: pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')]).unique()
+ Out[5]: array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object)
+
+ In [6]: pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')]))
+ Out[6]: array(['2016-01-01T05:00:00.000000000'], dtype='datetime64[ns]')
+
+ # Index
+ In [7]: pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')]).unique()
+ Out[7]: DatetimeIndex(['2016-01-01 00:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq=None)
+
+ In [8]: pd.unique([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')])
+ Out[8]: array(['2016-01-01T05:00:00.000000000'], dtype='datetime64[ns]')
+
+ New Behavior:
+
+ .. ipython:: python
+
+ # Series, returns an array of Timestamp tz-aware
+ pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')]).unique()
+ pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')]))
+
+ # Index, returns a DatetimeIndex
+ pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')]).unique()
+ pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
+ pd.Timestamp('20160101', tz='US/Eastern')]))
+
+- Categoricals
+
+ Previous behaviour:
+
+ .. code-block:: ipython
+
+ In [1]: pd.Series(pd.Categorical(list('baabc'))).unique()
+ Out[1]:
+ [b, a, c]
+ Categories (3, object): [b, a, c]
+
+ In [2]: pd.unique(pd.Series(pd.Categorical(list('baabc'))))
+ Out[2]: array(['b', 'a', 'c'], dtype=object)
+
+ New Behavior:
+
+ .. ipython:: python
+
+ # returns a Categorical
+ pd.Series(pd.Categorical(list('baabc'))).unique()
+ pd.unique(pd.Series(pd.Categorical(list('baabc'))).unique())
+
.. _whatsnew_0200.api_breaking.s3:
S3 File Handling
@@ -1148,6 +1218,7 @@ Conversion
- Bug in ``DataFrame`` construction with nulls and datetimes in a list-like (:issue:`15869`)
- Bug in ``DataFrame.fillna()`` with tz-aware datetimes (:issue:`15855`)
- Bug in ``is_string_dtype``, ``is_timedelta64_ns_dtype``, and ``is_string_like_dtype`` in which an error was raised when ``None`` was passed in (:issue:`15941`)
+- Bug in the return type of ``pd.unique`` on a ``Categorical``, which was returning an ndarray and not a ``Categorical`` (:issue:`15903`)
Indexing
^^^^^^^^
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 9b88ea23483bd..654e38e43b6c0 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -267,11 +267,85 @@ def match(to_match, values, na_sentinel=-1):
return result
-def unique1d(values):
+def unique(values):
"""
- Hash table-based unique
+ Hash table-based unique. Uniques are returned in order
+ of appearance. This does NOT sort.
+
+ Significantly faster than numpy.unique. Includes NA values.
+
+ Parameters
+ ----------
+ values : 1d array-like
+
+ Returns
+ -------
+ unique values.
+ - If the input is an Index, the return is an Index
+ - If the input is a Categorical dtype, the return is a Categorical
+ - If the input is a Series/ndarray, the return will be an ndarray
+
+ Examples
+ --------
+ pd.unique(pd.Series([2, 1, 3, 3]))
+ array([2, 1, 3])
+
+ >>> pd.unique(pd.Series([2] + [1] * 5))
+ array([2, 1])
+
+ >>> pd.unique(Series([pd.Timestamp('20160101'),
+ ... pd.Timestamp('20160101')]))
+ array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
+
+ >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
+ ... pd.Timestamp('20160101', tz='US/Eastern')]))
+ array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
+ dtype=object)
+
+ >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
+ ... pd.Timestamp('20160101', tz='US/Eastern')]))
+ DatetimeIndex(['2016-01-01 00:00:00-05:00'],
+ ... dtype='datetime64[ns, US/Eastern]', freq=None)
+
+ >>> pd.unique(list('baabc'))
+ array(['b', 'a', 'c'], dtype=object)
+
+ An unordered Categorical will return categories in the
+ order of appearance.
+
+ >>> pd.unique(Series(pd.Categorical(list('baabc'))))
+ [b, a, c]
+ Categories (3, object): [b, a, c]
+
+ >>> pd.unique(Series(pd.Categorical(list('baabc'),
+ ... categories=list('abc'))))
+ [b, a, c]
+ Categories (3, object): [b, a, c]
+
+ An ordered Categorical preserves the category ordering.
+
+ >>> pd.unique(Series(pd.Categorical(list('baabc'),
+ ... categories=list('abc'),
+ ... ordered=True)))
+ [b, a, c]
+ Categories (3, object): [a < b < c]
+
+ See Also
+ --------
+ pandas.Index.unique
+ pandas.Series.unique
+
"""
+
values = _ensure_arraylike(values)
+
+ # categorical is a fast-path
+ # this will coerce Categorical, CategoricalIndex,
+ # and category dtypes Series to same return of Category
+ if is_categorical_dtype(values):
+ values = getattr(values, '.values', values)
+ return values.unique()
+
original = values
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
@@ -279,10 +353,17 @@ def unique1d(values):
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, dtype, original)
+ if isinstance(original, ABCSeries) and is_datetime64tz_dtype(dtype):
+ # we are special casing datetime64tz_dtype
+ # to return an object array of tz-aware Timestamps
+
+ # TODO: it must return DatetimeArray with tz in pandas 2.0
+ uniques = uniques.asobject.values
+
return uniques
-unique = unique1d
+unique1d = unique
def isin(comps, values):
@@ -651,7 +732,7 @@ def mode(values):
if is_categorical_dtype(values):
if isinstance(values, Series):
- return Series(values.values.mode())
+ return Series(values.values.mode(), name=values.name)
return values.mode()
values, dtype, ndtype = _ensure_data(values)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 3401c7c59cb56..56bdeee6982d5 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -855,13 +855,24 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
_shared_docs['unique'] = (
"""
- Return %(unique)s of unique values in the object.
- Significantly faster than numpy.unique. Includes NA values.
- The order of the original is preserved.
+ Return unique values in the object. Uniques are returned in order
+ of appearance, this does NOT sort. Hash table-based unique.
+
+ Parameters
+ ----------
+ values : 1d array-like
Returns
-------
- uniques : %(unique)s
+ unique values.
+ - If the input is an Index, the return is an Index
+ - If the input is a Categorical dtype, the return is a Categorical
+ - If the input is a Series/ndarray, the return will be an ndarray
+
+ See Also
+ --------
+ pandas.unique
+ pandas.Categorical.unique
""")
@Appender(_shared_docs['unique'] % _indexops_doc_kwargs)
@@ -873,6 +884,7 @@ def unique(self):
else:
from pandas.core.algorithms import unique1d
result = unique1d(values)
+
return result
def nunique(self, dropna=True):
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 0fcf8664e755d..e3d6792604c4c 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1895,6 +1895,33 @@ def unique(self):
Returns
-------
unique values : ``Categorical``
+
+ Examples
+ --------
+ An unordered Categorical will return categories in the
+ order of appearance.
+
+ >>> pd.Categorical(list('baabc'))
+ [b, a, c]
+ Categories (3, object): [b, a, c]
+
+ >>> pd.Categorical(list('baabc'), categories=list('abc'))
+ [b, a, c]
+ Categories (3, object): [b, a, c]
+
+ An ordered Categorical preserves the category ordering.
+
+ >>> pd.Categorical(list('baabc'),
+ ... categories=list('abc'),
+ ... ordered=True)
+ [b, a, c]
+ Categories (3, object): [a < b < c]
+
+ See Also
+ --------
+ pandas.unique
+ pandas.CategoricalIndex.unique
+
"""
# unlike np.unique, unique1d does not sort
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 760abc20351cf..5ee3ca73742ae 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1204,10 +1204,14 @@ def mode(self):
@Appender(base._shared_docs['unique'] % _shared_doc_kwargs)
def unique(self):
result = super(Series, self).unique()
+
if is_datetime64tz_dtype(self.dtype):
- # to return array of Timestamp with tz
- # ToDo: it must return DatetimeArray with tz in pandas 2.0
- return result.asobject.values
+ # we are special casing datetime64tz_dtype
+ # to return an object array of tz-aware Timestamps
+
+ # TODO: it must return DatetimeArray with tz in pandas 2.0
+ result = result.asobject.values
+
return result
@Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index d893183dae0ed..d9f81968c684d 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -6,7 +6,8 @@
from numpy import nan
from datetime import datetime
from itertools import permutations
-from pandas import Series, Categorical, CategoricalIndex, Index
+from pandas import (Series, Categorical, CategoricalIndex, Index,
+ Timestamp, DatetimeIndex)
import pandas as pd
from pandas import compat
@@ -34,7 +35,7 @@ def test_ints(self):
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
- s = pd.Series(np.arange(5), dtype=np.float32)
+ s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
self.assert_numpy_array_equal(result, expected)
@@ -204,20 +205,20 @@ def test_mixed(self):
def test_datelike(self):
# M8
- v1 = pd.Timestamp('20130101 09:00:00.00004')
- v2 = pd.Timestamp('20130101')
+ v1 = Timestamp('20130101 09:00:00.00004')
+ v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
self.assert_numpy_array_equal(labels, exp)
- exp = pd.DatetimeIndex([v1, v2])
+ exp = DatetimeIndex([v1, v2])
self.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
self.assert_numpy_array_equal(labels, exp)
- exp = pd.DatetimeIndex([v2, v1])
+ exp = DatetimeIndex([v2, v1])
self.assert_index_equal(uniques, exp)
# period
@@ -350,7 +351,7 @@ def test_datetime64_dtype_array_returned(self):
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
- s = pd.Series(dt_index)
+ s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
@@ -369,7 +370,7 @@ def test_timedelta64_dtype_array_returned(self):
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
- s = pd.Series(td_index)
+ s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
@@ -380,10 +381,119 @@ def test_timedelta64_dtype_array_returned(self):
self.assertEqual(result.dtype, expected.dtype)
def test_uint64_overflow(self):
- s = pd.Series([1, 2, 2**63, 2**63], dtype=np.uint64)
+ s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
+ def test_categorical(self):
+
+ # we are expecting to return in the order
+ # of appearance
+ expected = pd.Categorical(list('bac'),
+ categories=list('bac'))
+
+ # we are expecting to return in the order
+ # of the categories
+ expected_o = pd.Categorical(list('bac'),
+ categories=list('abc'),
+ ordered=True)
+
+ # GH 15939
+ c = pd.Categorical(list('baabc'))
+ result = c.unique()
+ tm.assert_categorical_equal(result, expected)
+
+ result = algos.unique(c)
+ tm.assert_categorical_equal(result, expected)
+
+ c = pd.Categorical(list('baabc'), ordered=True)
+ result = c.unique()
+ tm.assert_categorical_equal(result, expected_o)
+
+ result = algos.unique(c)
+ tm.assert_categorical_equal(result, expected_o)
+
+ # Series of categorical dtype
+ s = Series(pd.Categorical(list('baabc')), name='foo')
+ result = s.unique()
+ tm.assert_categorical_equal(result, expected)
+
+ result = pd.unique(s)
+ tm.assert_categorical_equal(result, expected)
+
+ # CI -> return CI
+ ci = pd.CategoricalIndex(pd.Categorical(list('baabc'),
+ categories=list('bac')))
+ expected = pd.CategoricalIndex(expected)
+ result = ci.unique()
+ tm.assert_index_equal(result, expected)
+
+ result = pd.unique(ci)
+ tm.assert_index_equal(result, expected)
+
+ def test_datetime64tz_aware(self):
+ # GH 15939
+
+ result = Series(
+ pd.Index([Timestamp('20160101', tz='US/Eastern'),
+ Timestamp('20160101', tz='US/Eastern')])).unique()
+ expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
+ tz='US/Eastern')], dtype=object)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = pd.Index([Timestamp('20160101', tz='US/Eastern'),
+ Timestamp('20160101', tz='US/Eastern')]).unique()
+ expected = DatetimeIndex(['2016-01-01 00:00:00'],
+ dtype='datetime64[ns, US/Eastern]', freq=None)
+ tm.assert_index_equal(result, expected)
+
+ result = pd.unique(
+ Series(pd.Index([Timestamp('20160101', tz='US/Eastern'),
+ Timestamp('20160101', tz='US/Eastern')])))
+ expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
+ tz='US/Eastern')], dtype=object)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = pd.unique(pd.Index([Timestamp('20160101', tz='US/Eastern'),
+ Timestamp('20160101', tz='US/Eastern')]))
+ expected = DatetimeIndex(['2016-01-01 00:00:00'],
+ dtype='datetime64[ns, US/Eastern]', freq=None)
+ tm.assert_index_equal(result, expected)
+
+ def test_order_of_appearance(self):
+ # 9346
+ # light testing of guarantee of order of appearance
+ # these also are the doc-examples
+ result = pd.unique(Series([2, 1, 3, 3]))
+ tm.assert_numpy_array_equal(result,
+ np.array([2, 1, 3], dtype='int64'))
+
+ result = pd.unique(Series([2] + [1] * 5))
+ tm.assert_numpy_array_equal(result,
+ np.array([2, 1], dtype='int64'))
+
+ result = pd.unique(Series([Timestamp('20160101'),
+ Timestamp('20160101')]))
+ expected = np.array(['2016-01-01T00:00:00.000000000'],
+ dtype='datetime64[ns]')
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = pd.unique(pd.Index(
+ [Timestamp('20160101', tz='US/Eastern'),
+ Timestamp('20160101', tz='US/Eastern')]))
+ expected = DatetimeIndex(['2016-01-01 00:00:00'],
+ dtype='datetime64[ns, US/Eastern]',
+ freq=None)
+ tm.assert_index_equal(result, expected)
+
+ result = pd.unique(list('aabc'))
+ expected = np.array(['a', 'b', 'c'], dtype=object)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = pd.unique(Series(pd.Categorical(list('aabc'))))
+ expected = pd.Categorical(list('abc'))
+ tm.assert_categorical_equal(result, expected)
+
class TestIsin(tm.TestCase):
@@ -403,15 +513,15 @@ def test_basic(self):
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- result = algos.isin(pd.Series([1, 2]), [1])
+ result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- result = algos.isin(pd.Series([1, 2]), pd.Series([1]))
+ result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- result = algos.isin(pd.Series([1, 2]), set([1]))
+ result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@@ -419,11 +529,11 @@ def test_basic(self):
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- result = algos.isin(pd.Series(['a', 'b']), pd.Series(['a']))
+ result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- result = algos.isin(pd.Series(['a', 'b']), set(['a']))
+ result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@@ -520,33 +630,33 @@ def test_value_counts_nat(self):
self.assertEqual(len(vc), 1)
self.assertEqual(len(vc_with_na), 2)
- exp_dt = pd.Series({pd.Timestamp('2014-01-01 00:00:00'): 1})
+ exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
- s = pd.Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
- datetime(5000, 1, 1), datetime(6000, 1, 1),
- datetime(3000, 1, 1), datetime(3000, 1, 1)])
+ s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
+ datetime(5000, 1, 1), datetime(6000, 1, 1),
+ datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = pd.Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
- exp = pd.Series([3, 2, 1], index=exp_index)
+ exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
- res = pd.to_datetime(pd.Series(['2362-01-01', np.nan]),
+ res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
- exp = pd.Series(['2362-01-01', np.nan], dtype=object)
+ exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(pd.Categorical(list('aaabbc')))
result = s.value_counts()
- expected = pd.Series([3, 2, 1],
- index=pd.CategoricalIndex(['a', 'b', 'c']))
+ expected = Series([3, 2, 1],
+ index=pd.CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
@@ -559,11 +669,11 @@ def test_categorical_nans(self):
s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
- expected = pd.Series([4, 3, 2], index=pd.CategoricalIndex(
+ expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
- expected = pd.Series([
+ expected = Series([
4, 3, 2, 1
], index=pd.CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
@@ -573,12 +683,12 @@ def test_categorical_nans(self):
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
- expected = pd.Series([4, 3, 2], index=pd.CategoricalIndex(
+ expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
- expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex(
+ expected = Series([4, 3, 2, 1], index=pd.CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
@@ -595,33 +705,33 @@ def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
- pd.Series([True, True, False]).value_counts(dropna=True),
- pd.Series([2, 1], index=[True, False]))
+ Series([True, True, False]).value_counts(dropna=True),
+ Series([2, 1], index=[True, False]))
tm.assert_series_equal(
- pd.Series([True, True, False]).value_counts(dropna=False),
- pd.Series([2, 1], index=[True, False]))
+ Series([True, True, False]).value_counts(dropna=False),
+ Series([2, 1], index=[True, False]))
tm.assert_series_equal(
- pd.Series([True, True, False, None]).value_counts(dropna=True),
- pd.Series([2, 1], index=[True, False]))
+ Series([True, True, False, None]).value_counts(dropna=True),
+ Series([2, 1], index=[True, False]))
tm.assert_series_equal(
- pd.Series([True, True, False, None]).value_counts(dropna=False),
- pd.Series([2, 1, 1], index=[True, False, np.nan]))
+ Series([True, True, False, None]).value_counts(dropna=False),
+ Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
- pd.Series([10.3, 5., 5.]).value_counts(dropna=True),
- pd.Series([2, 1], index=[5., 10.3]))
+ Series([10.3, 5., 5.]).value_counts(dropna=True),
+ Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
- pd.Series([10.3, 5., 5.]).value_counts(dropna=False),
- pd.Series([2, 1], index=[5., 10.3]))
+ Series([10.3, 5., 5.]).value_counts(dropna=False),
+ Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
- pd.Series([10.3, 5., 5., None]).value_counts(dropna=True),
- pd.Series([2, 1], index=[5., 10.3]))
+ Series([10.3, 5., 5., None]).value_counts(dropna=True),
+ Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
- result = pd.Series([10.3, 5., 5., None]).value_counts(dropna=False)
- expected = pd.Series([2, 1, 1], index=[5., 10.3, np.nan])
+ result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
+ expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
@@ -736,15 +846,15 @@ def test_numeric_object_likes(self):
tm.assert_numpy_array_equal(res_false, exp_false)
# series
- for s in [pd.Series(case), pd.Series(case, dtype='category')]:
+ for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
- tm.assert_series_equal(res_first, pd.Series(exp_first))
+ tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
- tm.assert_series_equal(res_last, pd.Series(exp_last))
+ tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
- tm.assert_series_equal(res_false, pd.Series(exp_false))
+ tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
@@ -753,8 +863,8 @@ def test_datetime_likes(self):
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
- cases = [np.array([pd.Timestamp(d) for d in dt]),
- np.array([pd.Timestamp(d, tz='US/Eastern') for d in dt]),
+ cases = [np.array([Timestamp(d) for d in dt]),
+ np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
@@ -788,16 +898,16 @@ def test_datetime_likes(self):
tm.assert_numpy_array_equal(res_false, exp_false)
# series
- for s in [pd.Series(case), pd.Series(case, dtype='category'),
- pd.Series(case, dtype=object)]:
+ for s in [Series(case), Series(case, dtype='category'),
+ Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
- tm.assert_series_equal(res_first, pd.Series(exp_first))
+ tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
- tm.assert_series_equal(res_last, pd.Series(exp_last))
+ tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
- tm.assert_series_equal(res_false, pd.Series(exp_false))
+ tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [pd.Index([1, 2, 3]), pd.RangeIndex(0, 3)]
@@ -939,7 +1049,7 @@ def test_lookup_overflow(self):
np.arange(len(xs), dtype=np.int64))
def test_get_unique(self):
- s = pd.Series([1, 2, 2**63, 2**63], dtype=np.uint64)
+ s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
self.assert_numpy_array_equal(s.unique(), exp)
| closes #9346
| https://api.github.com/repos/pandas-dev/pandas/pulls/15939 | 2017-04-07T13:21:42Z | 2017-04-09T15:28:52Z | 2017-04-09T15:28:52Z | 2017-04-09T17:22:28Z |
ENH: Minor change to parallel_coordinates (#15908) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index fd1cd3d0022c9..382e57e0421c5 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -371,6 +371,8 @@ Other Enhancements
- :func:`MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels <advanced.shown_levels>`. (:issue:`15694`)
- ``pd.read_csv()`` will now raise a ``ParserError`` error whenever any parsing error occurs (:issue:`15913`, :issue:`15925`)
- ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`)
+- ``pd.read_csv()`` will now raise a ``csv.Error`` error whenever an end-of-file character is encountered in the middle of a data row (:issue:`15913`)
+- ``parallel_coordinates()`` has gained a ``sort_labels`` keyword arg that sorts class labels and the colours assigned to them (:issue:`15908`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 812f039f1a2c7..504c55bcfcfd0 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -241,6 +241,26 @@ def test_parallel_coordinates(self):
with tm.assert_produces_warning(FutureWarning):
parallel_coordinates(df, 'Name', colors=colors)
+ def test_parallel_coordinates_with_sorted_labels(self):
+ """ For #15908 """
+ from pandas.tools.plotting import parallel_coordinates
+
+ df = DataFrame({"feat": [i for i in range(30)],
+ "class": [2 for _ in range(10)] +
+ [3 for _ in range(10)] +
+ [1 for _ in range(10)]})
+ ax = parallel_coordinates(df, 'class', sort_labels=True)
+ polylines, labels = ax.get_legend_handles_labels()
+ color_label_tuples = \
+ zip([polyline.get_color() for polyline in polylines], labels)
+ ordered_color_label_tuples = sorted(color_label_tuples,
+ key=lambda x: x[1])
+ prev_next_tupels = zip([i for i in ordered_color_label_tuples[0:-1]],
+ [i for i in ordered_color_label_tuples[1:]])
+ for prev, nxt in prev_next_tupels:
+ # lables and colors are ordered strictly increasing
+ assert prev[1] < nxt[1] and prev[0] < nxt[0]
+
@slow
def test_radviz(self):
from pandas.tools.plotting import radviz
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index f70a2b0b22140..b15ccdacb6ab7 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -694,7 +694,8 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
- axvlines=True, axvlines_kwds=None, **kwds):
+ axvlines=True, axvlines_kwds=None, sort_labels=False,
+ **kwds):
"""Parallel coordinates plotting.
Parameters
@@ -718,6 +719,11 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
If true, vertical lines will be added at each xtick
axvlines_kwds: keywords, optional
Options to be passed to axvline method for vertical lines
+ sort_labels: bool, False
+ Sort class_column labels, useful when assigning colours
+
+ .. versionadded:: 0.20.0
+
kwds: keywords
Options to pass to matplotlib plotting method
@@ -774,6 +780,9 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
colormap=colormap, color_type='random',
color=color)
+ if sort_labels:
+ classes = sorted(classes)
+ color_values = sorted(color_values)
colors = dict(zip(classes, color_values))
for i in range(n):
| Add option to sort class lables, add to test
- [x] closes #15908
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15935 | 2017-04-07T05:17:44Z | 2017-04-14T13:30:24Z | null | 2017-04-14T13:30:30Z |
BUG: Correct Timestamp localization with tz near DST (#11481) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 4e29e01415ba6..7664688ffa4f4 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1124,6 +1124,7 @@ Conversion
- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously this raised ``ValueError`` (:issue:`15240`)
- Bug in ``Timestamp.replace`` with compat for passing long integers (:issue:`15030`)
- Bug in ``Timestamp`` returning UTC based time/date attributes when a timezone was provided (:issue:`13303`)
+- Bug in ``Timestamp`` incorrectly localizing timezones during construction (:issue:`11481`, :issue:`15777`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
- Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
- Bug in catching an overflow in ``Timestamp`` + ``Timedelta/Offset`` operations (:issue:`15126`)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index cc1439711c1d4..ed0bb263ed6cf 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1569,7 +1569,9 @@ cpdef convert_str_to_tsobject(object ts, object tz, object unit,
ts = obj.value
if tz is not None:
# shift for _localize_tso
- ts = tz_convert_single(ts, tz, 'UTC')
+ ts = tz_localize_to_utc(np.array([ts], dtype='i8'), tz,
+ ambiguous='raise',
+ errors='raise')[0]
except ValueError:
try:
ts = parse_datetime_string(
@@ -4073,7 +4075,23 @@ except:
have_pytz = False
+@cython.boundscheck(False)
+@cython.wraparound(False)
def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
+ """
+ Convert the values (in i8) from timezone1 to timezone2
+
+ Parameters
+ ----------
+ vals : int64 ndarray
+ tz1 : string / timezone object
+ tz2 : string / timezone object
+
+ Returns
+ -------
+ int64 ndarray of converted
+ """
+
cdef:
ndarray[int64_t] utc_dates, tt, result, trans, deltas
Py_ssize_t i, j, pos, n = len(vals)
@@ -4175,6 +4193,23 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
def tz_convert_single(int64_t val, object tz1, object tz2):
+ """
+ Convert the val (in i8) from timezone1 to timezone2
+
+ This is a single timezone versoin of tz_convert
+
+ Parameters
+ ----------
+ val : int64
+ tz1 : string / timezone object
+ tz2 : string / timezone object
+
+ Returns
+ -------
+ int64 converted
+
+ """
+
cdef:
ndarray[int64_t] trans, deltas
Py_ssize_t pos
@@ -4374,7 +4409,7 @@ cpdef ndarray _unbox_utcoffsets(object transinfo):
def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
object errors='raise'):
"""
- Localize tzinfo-naive DateRange to given time zone (using pytz). If
+ Localize tzinfo-naive i8 to given time zone (using pytz). If
there are ambiguities in the values, raise AmbiguousTimeError.
Returns
@@ -4546,6 +4581,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
return result
+
cdef inline bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n):
cdef Py_ssize_t pivot, left = 0, right = n
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 0b6c0c601ac72..48410c1c73479 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -1024,9 +1024,9 @@ def test_setitem_with_tz_dst(self):
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
- exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
- pd.Timestamp('2011-01-01 00:00', tz=tz),
- pd.Timestamp('2016-11-06 02:00', tz=tz)])
+ exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
+ pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
+ pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index 1fc0e1b73df6b..3e1b29f4c282c 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -1,4 +1,5 @@
# pylint: disable-msg=E1101,W0612
+import pytest
import pytz
import numpy as np
from distutils.version import LooseVersion
@@ -159,6 +160,52 @@ def test_timestamp_constructed_by_date_and_tz_explicit(self):
self.assertEqual(result.hour, expected.hour)
self.assertEqual(result, expected)
+ def test_timestamp_constructor_near_dst_boundary(self):
+ # GH 11481 & 15777
+ # Naive string timestamps were being localized incorrectly
+ # with tz_convert_single instead of tz_localize_to_utc
+
+ for tz in ['Europe/Brussels', 'Europe/Prague']:
+ result = Timestamp('2015-10-25 01:00', tz=tz)
+ expected = Timestamp('2015-10-25 01:00').tz_localize(tz)
+ assert result == expected
+
+ with pytest.raises(pytz.AmbiguousTimeError):
+ Timestamp('2015-10-25 02:00', tz=tz)
+
+ result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
+ expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris')
+ assert result == expected
+
+ with pytest.raises(pytz.NonExistentTimeError):
+ Timestamp('2017-03-26 02:00', tz='Europe/Paris')
+
+ # GH 11708
+ result = to_datetime("2015-11-18 15:30:00+05:30").tz_localize(
+ 'UTC').tz_convert('Asia/Kolkata')
+ expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')
+ assert result == expected
+
+ # GH 15823
+ result = Timestamp('2017-03-26 00:00', tz='Europe/Paris')
+ expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris')
+ assert result == expected
+
+ result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
+ expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris')
+ assert result == expected
+
+ with pytest.raises(pytz.NonExistentTimeError):
+ Timestamp('2017-03-26 02:00', tz='Europe/Paris')
+ result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris')
+ expected = Timestamp(result.value).tz_localize(
+ 'UTC').tz_convert('Europe/Paris')
+ assert result == expected
+
+ result = Timestamp('2017-03-26 03:00', tz='Europe/Paris')
+ expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris')
+ assert result == expected
+
def test_timestamp_to_datetime_tzoffset(self):
# tzoffset
from dateutil.tz import tzoffset
@@ -517,8 +564,8 @@ def f():
freq="H"))
if dateutil.__version__ != LooseVersion('2.6.0'):
# GH 14621
- self.assertEqual(times[-1], Timestamp('2013-10-27 01:00', tz=tz,
- freq="H"))
+ self.assertEqual(times[-1], Timestamp('2013-10-27 01:00:00+0000',
+ tz=tz, freq="H"))
def test_ambiguous_nat(self):
tz = self.tz('US/Eastern')
| - [x] closes #11481, #15777
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
Seemed that the `Timestamp` constructor with a string incorrectly localized a naive time with `tz_convert_single` instead of `tz_localize_to_utc` which is similar to how `DatetimeIndex` localizes.
Now the `Timestamp` will raise an error if an ambiguous combination of string and timezone is passed. Should this be the default behavior?
I had to slightly change 2 existing tests. `test_timestamp_to_datetime_tzoffset` used a now ambiguous timestamp and `test_setitem_with_tz_dst` had an incorrect expected timestamp. | https://api.github.com/repos/pandas-dev/pandas/pulls/15934 | 2017-04-07T04:19:44Z | 2017-04-08T21:59:06Z | 2017-04-08T21:59:06Z | 2017-12-20T02:00:29Z |
DOC: Fix a typo in advanced.rst | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index f380070ddac79..0b81bc6d934e1 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -46,7 +46,7 @@ data with an arbitrary number of dimensions in lower dimensional data
structures like Series (1d) and DataFrame (2d).
In this section, we will show what exactly we mean by "hierarchical" indexing
-and how it integrates with the all of the pandas indexing functionality
+and how it integrates with all of the pandas indexing functionality
described above and in prior sections. Later, when discussing :ref:`group by
<groupby>` and :ref:`pivoting and reshaping data <reshaping>`, we'll show
non-trivial applications to illustrate how it aids in structuring data for
| https://api.github.com/repos/pandas-dev/pandas/pulls/15933 | 2017-04-07T04:06:09Z | 2017-04-07T12:08:57Z | 2017-04-07T12:08:57Z | 2017-04-07T12:09:00Z | |
TST: suppress some warnings | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 244f882f2c103..9b88ea23483bd 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -3,7 +3,7 @@
intended for public consumption
"""
from __future__ import division
-from warnings import warn
+from warnings import warn, catch_warnings
import numpy as np
from pandas import compat, _np_version_under1p8
@@ -110,7 +110,11 @@ def _ensure_data(values, dtype=None):
values = _ensure_uint64(values)
ndtype = dtype = 'uint64'
elif is_complex_dtype(values) or is_complex_dtype(dtype):
- values = _ensure_float64(values)
+
+ # ignore the fact that we are casting to float
+ # which discards complex parts
+ with catch_warnings(record=True):
+ values = _ensure_float64(values)
ndtype = dtype = 'float64'
elif is_float_dtype(values) or is_float_dtype(dtype):
values = _ensure_float64(values)
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index aabce7ecb7066..4a0850734e134 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import pytest
+from warnings import catch_warnings
import pandas # noqa
import pandas as pd
@@ -44,7 +45,8 @@ def test_error_rename():
except CParserError:
pass
- try:
- raise ParserError()
- except pd.parser.CParserError:
- pass
+ with catch_warnings(record=True):
+ try:
+ raise ParserError()
+ except pd.parser.CParserError:
+ pass
| https://api.github.com/repos/pandas-dev/pandas/pulls/15932 | 2017-04-07T02:55:59Z | 2017-04-07T12:03:55Z | 2017-04-07T12:03:55Z | 2017-04-07T12:04:54Z | |
DEPR: deprecate relableling dicts in groupby.agg | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index a37cbc96b2d8c..f46a00826a8d9 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -610,14 +610,6 @@ aggregation with, outputting a DataFrame:
r['A'].agg([np.sum, np.mean, np.std])
-If a dict is passed, the keys will be used to name the columns. Otherwise the
-function's name (stored in the function object) will be used.
-
-.. ipython:: python
-
- r['A'].agg({'result1' : np.sum,
- 'result2' : np.mean})
-
On a widowed DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index cbe3588104439..03ee5e0d67913 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -502,7 +502,7 @@ index are the group names and whose values are the sizes of each group.
Applying multiple functions at once
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-With grouped Series you can also pass a list or dict of functions to do
+With grouped ``Series`` you can also pass a list or dict of functions to do
aggregation with, outputting a DataFrame:
.. ipython:: python
@@ -510,23 +510,35 @@ aggregation with, outputting a DataFrame:
grouped = df.groupby('A')
grouped['C'].agg([np.sum, np.mean, np.std])
-If a dict is passed, the keys will be used to name the columns. Otherwise the
-function's name (stored in the function object) will be used.
+On a grouped ``DataFrame``, you can pass a list of functions to apply to each
+column, which produces an aggregated result with a hierarchical index:
.. ipython:: python
- grouped['D'].agg({'result1' : np.sum,
- 'result2' : np.mean})
+ grouped.agg([np.sum, np.mean, np.std])
-On a grouped DataFrame, you can pass a list of functions to apply to each
-column, which produces an aggregated result with a hierarchical index:
+
+The resulting aggregations are named for the functions themselves. If you
+need to rename, then you can add in a chained operation for a ``Series`` like this:
.. ipython:: python
- grouped.agg([np.sum, np.mean, np.std])
+ (grouped['C'].agg([np.sum, np.mean, np.std])
+ .rename(columns={'sum': 'foo',
+ 'mean': 'bar',
+ 'std': 'baz'})
+ )
+
+For a grouped ``DataFrame``, you can rename in a similar manner:
+
+.. ipython:: python
+
+ (grouped.agg([np.sum, np.mean, np.std])
+ .rename(columns={'sum': 'foo',
+ 'mean': 'bar',
+ 'std': 'baz'})
+ )
-Passing a dict of functions has different behavior by default, see the next
-section.
Applying different functions to DataFrame columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 61812684e7648..0a957772d785e 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1549,14 +1549,6 @@ You can pass a list or dict of functions to do aggregation with, outputting a Da
r['A'].agg([np.sum, np.mean, np.std])
-If a dict is passed, the keys will be used to name the columns. Otherwise the
-function's name (stored in the function object) will be used.
-
-.. ipython:: python
-
- r['A'].agg({'result1' : np.sum,
- 'result2' : np.mean})
-
On a resampled DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index defabee3cef8c..c243e4ef81b38 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -456,6 +456,88 @@ Convert to an xarray DataArray
p.to_xarray()
+.. _whatsnew_0200.api_breaking.deprecate_group_agg_dict:
+
+Deprecate groupby.agg() with a dictionary when renaming
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``.groupby(..).agg(..)``, ``.rolling(..).agg(..)``, and ``.resample(..).agg(..)`` syntax can accept a variable of inputs, including scalars,
+list, and a dict of column names to scalars or lists. This provides a useful syntax for constructing multiple
+(potentially different) aggregations.
+
+However, ``.agg(..)`` can *also* accept a dict that allows 'renaming' of the result columns. This is a complicated and confusing syntax, as well as not consistent
+between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionaility.
+
+1) We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed
+one to ``rename`` the resulting aggregation, but this had a completely different
+meaning than passing a dictionary to a grouped ``DataFrame``, which accepts column-to-aggregations.
+2) We are deprecating passing a dict-of-dicts to a grouped/rolled/resampled ``DataFrame`` in a similar manner.
+
+This is an illustrative example:
+
+.. ipython:: python
+
+ df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
+ 'B': range(5),
+ 'C': range(5)})
+ df
+
+Here is a typical useful syntax for computing different aggregations for different columns. This
+is a natural (and useful) syntax. We aggregate from the dict-to-list by taking the specified
+columns and applying the list of functions. This returns a ``MultiIndex`` for the columns.
+
+.. ipython:: python
+
+ df.groupby('A').agg({'B': 'sum', 'C': 'min'})
+
+Here's an example of the first deprecation (1), passing a dict to a grouped ``Series``. This
+is a combination aggregation & renaming:
+
+.. code-block:: ipython
+
+ In [6]: df.groupby('A').B.agg({'foo': 'count'})
+ FutureWarning: using a dict on a Series for aggregation
+ is deprecated and will be removed in a future version
+
+ Out[6]:
+ foo
+ A
+ 1 3
+ 2 2
+
+You can accomplish the same operation, more idiomatically by:
+
+.. ipython:: python
+
+ df.groupby('A').B.agg(['count']).rename({'count': 'foo'})
+
+
+Here's an example of the second deprecation (2), passing a dict-of-dict to a grouped ``DataFrame``:
+
+.. code-block:: python
+
+ In [23]: (df.groupby('A')
+ .agg({'B': {'foo': 'sum'}, 'C': {'bar': 'min'}})
+ )
+ FutureWarning: using a dict with renaming is deprecated and will be removed in a future version
+
+ Out[23]:
+ B C
+ foo bar
+ A
+ 1 3 0
+ 2 7 3
+
+
+You can accomplish nearly the same by:
+
+.. ipython:: python
+
+ (df.groupby('A')
+ .agg({'B': 'sum', 'C': 'min'})
+ .rename(columns={'B': 'foo', 'C': 'bar'})
+ )
+
.. _whatsnew.api_breaking.io_compat:
Possible incompat for HDF5 formats for pandas < 0.13.0
diff --git a/pandas/core/base.py b/pandas/core/base.py
index bdbfb7b949986..6566ee38c1ade 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1,6 +1,7 @@
"""
Base and utility classes for pandas objects.
"""
+import warnings
from pandas import compat
from pandas.compat import builtins
import numpy as np
@@ -290,7 +291,12 @@ class SelectionMixin(object):
}
@property
- def name(self):
+ def _selection_name(self):
+ """
+ return a name for myself; this would ideally be called
+ the 'name' property, but we cannot conflict with the
+ Series.name property which can be set
+ """
if self._selection is None:
return None # 'result'
else:
@@ -405,6 +411,26 @@ def aggregate(self, func, *args, **kwargs):
agg = aggregate
+ def _try_aggregate_string_function(self, arg, *args, **kwargs):
+ """
+ if arg is a string, then try to operate on it:
+ - try to find a function on ourselves
+ - try to find a numpy function
+ - raise
+
+ """
+ assert isinstance(arg, compat.string_types)
+
+ f = getattr(self, arg, None)
+ if f is not None:
+ return f(*args, **kwargs)
+
+ f = getattr(np, arg, None)
+ if f is not None:
+ return f(self, *args, **kwargs)
+
+ raise ValueError("{} is an unknown string function".format(arg))
+
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
@@ -424,18 +450,22 @@ def _aggregate(self, arg, *args, **kwargs):
how can be a string describe the required post-processing, or
None if not required
"""
-
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
+ _axis = kwargs.pop('_axis', None)
+ if _axis is None:
+ _axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
+
if isinstance(arg, compat.string_types):
- return getattr(self, arg)(*args, **kwargs), None
+ return self._try_aggregate_string_function(arg, *args,
+ **kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
- if self.axis != 0: # pragma: no cover
+ if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
@@ -454,7 +484,7 @@ def _aggregate(self, arg, *args, **kwargs):
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
- # ok
+ # ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
@@ -469,8 +499,28 @@ def _aggregate(self, arg, *args, **kwargs):
'for {0} with a nested '
'dictionary'.format(k))
+ # deprecation of nested renaming
+ # GH 15931
+ warnings.warn(
+ ("using a dict with renaming "
+ "is deprecated and will be removed in a future "
+ "version"),
+ FutureWarning, stacklevel=4)
+
arg = new_arg
+ else:
+ # deprecation of renaming keys
+ # GH 15931
+ keys = list(compat.iterkeys(arg))
+ if (isinstance(obj, ABCDataFrame) and
+ len(obj.columns.intersection(keys)) != len(keys)):
+ warnings.warn(
+ ("using a dict with renaming "
+ "is deprecated and will be removed in a future "
+ "version"),
+ FutureWarning, stacklevel=4)
+
from pandas.tools.concat import concat
def _agg_1dim(name, how, subset=None):
@@ -534,7 +584,7 @@ def _agg(arg, func):
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
- elif not len(sl - set(compat.iterkeys(arg))):
+ elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
@@ -555,32 +605,74 @@ def _agg(arg, func):
result = _agg(arg, _agg_2dim)
# combine results
+
+ def is_any_series():
+ # return a boolean if we have *any* nested series
+ return any([isinstance(r, ABCSeries)
+ for r in compat.itervalues(result)])
+
+ def is_any_frame():
+ # return a boolean if we have *any* nested series
+ return any([isinstance(r, ABCDataFrame)
+ for r in compat.itervalues(result)])
+
if isinstance(result, list):
- result = concat(result, keys=keys, axis=1)
- elif isinstance(list(compat.itervalues(result))[0],
- ABCDataFrame):
- result = concat([result[k] for k in keys], keys=keys, axis=1)
- else:
- from pandas import DataFrame
+ return concat(result, keys=keys, axis=1), True
+
+ elif is_any_frame():
+ # we have a dict of DataFrames
+ # return a MI DataFrame
+
+ return concat([result[k] for k in keys],
+ keys=keys, axis=1), True
+
+ elif isinstance(self, ABCSeries) and is_any_series():
+
+ # we have a dict of Series
+ # return a MI Series
+ try:
+ result = concat(result)
+ except TypeError:
+ # we want to give a nice error here if
+ # we have non-same sized objects, so
+ # we don't automatically broadcast
+
+ raise ValueError("cannot perform both aggregation "
+ "and transformation operations "
+ "simultaneously")
+
+ return result, True
+
+ # fall thru
+ from pandas import DataFrame, Series
+ try:
result = DataFrame(result)
+ except ValueError:
+
+ # we have a dict of scalars
+ result = Series(result,
+ name=getattr(self, 'name', None))
return result, True
- elif hasattr(arg, '__iter__'):
- return self._aggregate_multiple_funcs(arg, _level=_level), None
+ elif is_list_like(arg) and arg not in compat.string_types:
+ # we require a list, but not an 'str'
+ return self._aggregate_multiple_funcs(arg,
+ _level=_level,
+ _axis=_axis), None
else:
result = None
- cy_func = self._is_cython_func(arg)
- if cy_func and not args and not kwargs:
- return getattr(self, cy_func)(), None
+ f = self._is_cython_func(arg)
+ if f and not args and not kwargs:
+ return getattr(self, f)(), None
# caller can react
return result, True
- def _aggregate_multiple_funcs(self, arg, _level):
+ def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.tools.concat import concat
- if self.axis != 0:
+ if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
@@ -615,10 +707,30 @@ def _aggregate_multiple_funcs(self, arg, _level):
keys.append(col)
except (TypeError, DataError):
pass
+ except ValueError:
+ # cannot aggregate
+ continue
except SpecificationError:
raise
- return concat(results, keys=keys, axis=1)
+ # if we are empty
+ if not len(results):
+ raise ValueError("no results")
+
+ try:
+ return concat(results, keys=keys, axis=1)
+ except TypeError:
+
+ # we are concatting non-NDFrame objects,
+ # e.g. a list of scalars
+
+ from pandas.types.cast import is_nested_object
+ from pandas import Series
+ result = Series(results, index=keys, name=self.name)
+ if is_nested_object(result):
+ raise ValueError("cannot combine transform and "
+ "aggregation operations")
+ return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index add2987b8f452..5e55196803c22 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -722,7 +722,7 @@ def _python_apply_general(self, f):
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
- yield self.name, self._selected_obj
+ yield self._selection_name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
@@ -921,9 +921,9 @@ def reset_identity(values):
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
- getattr(self, 'name', None) is not None):
+ getattr(self, '_selection_name', None) is not None):
- result.name = self.name
+ result.name = self._selection_name
return result
@@ -1123,7 +1123,7 @@ def size(self):
result = self.grouper.size()
if isinstance(self.obj, Series):
- result.name = getattr(self, 'name', None)
+ result.name = getattr(self.obj, 'name', None)
return result
@classmethod
@@ -2736,7 +2736,7 @@ class SeriesGroupBy(GroupBy):
exec(_def_str)
@property
- def name(self):
+ def _selection_name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
@@ -2834,6 +2834,17 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
+
+ # show the deprecation, but only if we
+ # have not shown a higher level one
+ # GH 15931
+ if isinstance(self._selected_obj, Series) and _level <= 1:
+ warnings.warn(
+ ("using a dict on a Series for aggregation\n"
+ "is deprecated and will be removed in a future "
+ "version"),
+ FutureWarning, stacklevel=4)
+
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
@@ -2879,12 +2890,12 @@ def _aggregate_multiple_funcs(self, arg, _level):
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
- output = output[self.name]
+ output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
- name = self.name
+ name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
@@ -2902,7 +2913,7 @@ def _wrap_transformed_output(self, output, names=None):
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
- return Series([], name=self.name, index=keys)
+ return Series([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
@@ -2915,7 +2926,7 @@ def _get_index():
# GH #823
index = _get_index()
result = DataFrame(values, index=index).stack()
- result.name = self.name
+ result.name = self._selection_name
return result
if isinstance(values[0], (Series, dict)):
@@ -2927,7 +2938,8 @@ def _get_index():
not_indexed_same=not_indexed_same)
else:
# GH #6265
- return Series(values, index=_get_index(), name=self.name)
+ return Series(values, index=_get_index(),
+ name=self._selection_name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
@@ -3098,7 +3110,7 @@ def nunique(self, dropna=True):
return Series(res,
index=ri,
- name=self.name)
+ name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
@@ -3156,7 +3168,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
- names = self.grouper.names + [self.name]
+ names = self.grouper.names + [self._selection_name]
if dropna:
mask = labels[-1] != -1
@@ -3191,7 +3203,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
if is_integer_dtype(out):
out = _ensure_int64(out)
- return Series(out, index=mi, name=self.name)
+ return Series(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
@@ -3222,7 +3234,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
if is_integer_dtype(out):
out = _ensure_int64(out)
- return Series(out, index=mi, name=self.name)
+ return Series(out, index=mi, name=self._selection_name)
def count(self):
""" Compute count of group, excluding missing values """
@@ -3235,7 +3247,7 @@ def count(self):
return Series(out,
index=self.grouper.result_index,
- name=self.name,
+ name=self._selection_name,
dtype='int64')
def _apply_to_column_groupbys(self, func):
@@ -3391,7 +3403,7 @@ def aggregate(self, arg, *args, **kwargs):
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
- [arg], _level=_level)
+ [arg], _level=_level, _axis=self.axis)
result.columns = Index(
result.columns.levels[0],
name=self._selected_obj.columns.name)
@@ -3623,7 +3635,8 @@ def first_non_None_value(values):
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else caluse
- return Series(values, index=key_index, name=self.name)
+ return Series(values, index=key_index,
+ name=self._selection_name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
@@ -3647,8 +3660,9 @@ def first_non_None_value(values):
# only coerce dates if we find at least 1 datetime
coerce = True if any([isinstance(x, Timestamp)
for x in values]) else False
- # self.name not passed through to Series as the result
- # should not take the name of original selection of columns
+ # self._selection_name not passed through to Series as the
+ # result should not take the name of original selection
+ # of columns
return (Series(values, index=key_index)
._convert(datetime=True,
coerce=coerce))
diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py
index 52b35048b6762..c2d6422c50d02 100644
--- a/pandas/tests/groupby/test_aggregate.py
+++ b/pandas/tests/groupby/test_aggregate.py
@@ -14,7 +14,7 @@
import pandas as pd
from pandas import (date_range, MultiIndex, DataFrame,
- Series, Index, bdate_range)
+ Series, Index, bdate_range, concat)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby import SpecificationError, DataError
from pandas.compat import OrderedDict
@@ -291,8 +291,10 @@ def test_aggregate_api_consistency(self):
expected.columns = MultiIndex.from_product([['C', 'D'],
['mean', 'sum']])
- result = grouped[['D', 'C']].agg({'r': np.sum,
- 'r2': np.mean})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = grouped[['D', 'C']].agg({'r': np.sum,
+ 'r2': np.mean})
expected = pd.concat([d_sum,
c_sum,
d_mean,
@@ -302,6 +304,28 @@ def test_aggregate_api_consistency(self):
['D', 'C']])
assert_frame_equal(result, expected, check_like=True)
+ def test_agg_dict_renaming_deprecation(self):
+ # 15931
+ df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
+ 'B': range(5),
+ 'C': range(5)})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False) as w:
+ df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
+ 'C': {'bar': ['count', 'min']}})
+ assert "using a dict with renaming" in str(w[0].message)
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ df.groupby('A')[['B', 'C']].agg({'ma': 'max'})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False) as w:
+ df.groupby('A').B.agg({'foo': 'count'})
+ assert "using a dict on a Series for aggregation" in str(
+ w[0].message)
+
def test_agg_compat(self):
# GH 12334
@@ -320,14 +344,19 @@ def test_agg_compat(self):
axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
- result = g['D'].agg({'C': ['sum', 'std']})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = g['D'].agg({'C': ['sum', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = ['C', 'D']
- result = g['D'].agg({'C': 'sum', 'D': 'std'})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = g['D'].agg({'C': 'sum', 'D': 'std'})
assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts(self):
@@ -348,8 +377,10 @@ def f():
self.assertRaises(SpecificationError, f)
- result = g.agg({'C': {'ra': ['mean', 'std']},
- 'D': {'rb': ['mean', 'std']}})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = g.agg({'C': {'ra': ['mean', 'std']},
+ 'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(),
g['D'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
@@ -358,9 +389,14 @@ def f():
# same name as the original column
# GH9052
- expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
- result = g['D'].agg({'D': np.sum, 'result2': np.mean})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = g['D'].agg({'D': np.sum, 'result2': np.mean})
assert_frame_equal(result, expected, check_like=True)
def test_agg_python_multiindex(self):
@@ -627,7 +663,6 @@ def test_agg_multiple_functions_too_many_lambdas(self):
self.assertRaises(SpecificationError, grouped.agg, funcs)
def test_more_flexible_frame_multi_function(self):
- from pandas import concat
grouped = self.df.groupby('A')
@@ -655,9 +690,12 @@ def foo(x):
def bar(x):
return np.std(x, ddof=1)
- d = OrderedDict([['C', np.mean], ['D', OrderedDict(
- [['foo', np.mean], ['bar', np.std]])]])
- result = grouped.aggregate(d)
+ # this uses column selection & renaming
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ d = OrderedDict([['C', np.mean], ['D', OrderedDict(
+ [['foo', np.mean], ['bar', np.std]])]])
+ result = grouped.aggregate(d)
d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]])
expected = grouped.aggregate(d)
@@ -671,16 +709,29 @@ def test_multi_function_flexible_mix(self):
d = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', 'sum']])
- result = grouped.aggregate(d)
+
+ # this uses column selection & renaming
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = grouped.aggregate(d)
+
d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', ['sum']]])
- result2 = grouped.aggregate(d2)
+
+ # this uses column selection & renaming
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result2 = grouped.aggregate(d2)
d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', {'sum': 'sum'}]])
- expected = grouped.aggregate(d3)
+
+ # this uses column selection & renaming
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ expected = grouped.aggregate(d3)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 68955c954206e..8f3d8e2307f45 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -59,7 +59,10 @@ def checkit(dtype):
# complex agg
agged = grouped.aggregate([np.mean, np.std])
- agged = grouped.aggregate({'one': np.mean, 'two': np.std})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ agged = grouped.aggregate({'one': np.mean, 'two': np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
@@ -1262,7 +1265,9 @@ def test_frame_set_name_single(self):
result = grouped['C'].agg([np.mean, np.std])
self.assertEqual(result.index.name, 'A')
- result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
self.assertEqual(result.index.name, 'A')
def test_multi_iter(self):
@@ -1438,7 +1443,10 @@ def test_groupby_as_index_agg(self):
grouped = self.df.groupby('A', as_index=True)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
- result3 = grouped['C'].agg({'Q': np.sum})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# multi-key
diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py
index d566f34b7eae8..5a4f282789eeb 100644
--- a/pandas/tests/groupby/test_whitelist.py
+++ b/pandas/tests/groupby/test_whitelist.py
@@ -233,7 +233,7 @@ def test_tab_completion(mframe):
expected = set(
['A', 'B', 'C', 'agg', 'aggregate', 'apply', 'boxplot', 'filter',
'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max',
- 'mean', 'median', 'min', 'name', 'ngroups', 'nth', 'ohlc', 'plot',
+ 'mean', 'median', 'min', 'ngroups', 'nth', 'ohlc', 'plot',
'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count',
'nunique', 'head', 'describe', 'cummax', 'quantile',
'rank', 'cumprod', 'tail', 'resample', 'cummin', 'fillna',
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 5fc31e9321f31..9cd3b8b839a9b 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -134,16 +134,18 @@ def test_agg(self):
expected.columns = ['mean', 'sum']
tm.assert_frame_equal(result, expected)
- result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
+ with catch_warnings(record=True):
+ result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
expected = pd.concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
tm.assert_frame_equal(result, expected, check_like=True)
- result = r.aggregate({'A': {'mean': 'mean',
- 'sum': 'sum'},
- 'B': {'mean2': 'mean',
- 'sum2': 'sum'}})
+ with catch_warnings(record=True):
+ result = r.aggregate({'A': {'mean': 'mean',
+ 'sum': 'sum'},
+ 'B': {'mean2': 'mean',
+ 'sum2': 'sum'}})
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
exp_cols = [('A', 'mean'), ('A', 'sum'), ('B', 'mean2'), ('B', 'sum2')]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
@@ -195,12 +197,14 @@ def f():
r['B'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
- result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
- 'B': {'rb': ['mean', 'std']}})
+ with catch_warnings(record=True):
+ result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
+ 'B': {'rb': ['mean', 'std']}})
tm.assert_frame_equal(result, expected, check_like=True)
- result = r.agg({'A': {'ra': ['mean', 'std']},
- 'B': {'rb': ['mean', 'std']}})
+ with catch_warnings(record=True):
+ result = r.agg({'A': {'ra': ['mean', 'std']},
+ 'B': {'rb': ['mean', 'std']}})
expected.columns = pd.MultiIndex.from_tuples([('A', 'ra', 'mean'), (
'A', 'ra', 'std'), ('B', 'rb', 'mean'), ('B', 'rb', 'std')])
tm.assert_frame_equal(result, expected, check_like=True)
diff --git a/pandas/tests/tseries/test_resample.py b/pandas/tests/tseries/test_resample.py
index 9c66cae292c4e..98664c1ec118c 100755
--- a/pandas/tests/tseries/test_resample.py
+++ b/pandas/tests/tseries/test_resample.py
@@ -394,8 +394,10 @@ def test_agg_consistency(self):
r = df.resample('3T')
- expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})
- result = r.agg({'r1': 'mean', 'r2': 'sum'})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})
+ result = r.agg({'r1': 'mean', 'r2': 'sum'})
assert_frame_equal(result, expected)
# TODO: once GH 14008 is fixed, move these tests into
@@ -459,7 +461,9 @@ def test_agg(self):
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
for t in cases:
- result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
@@ -468,8 +472,10 @@ def test_agg(self):
('B', 'mean2'),
('B', 'sum2')])
for t in cases:
- result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},
- 'B': {'mean2': 'mean', 'sum2': 'sum'}})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},
+ 'B': {'mean2': 'mean', 'sum2': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
@@ -529,9 +535,12 @@ def test_agg_misc(self):
('result1', 'B'),
('result2', 'A'),
('result2', 'B')])
+
for t in cases:
- result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),
- ('result2', np.mean)]))
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),
+ ('result2', np.mean)]))
assert_frame_equal(result, expected, check_like=True)
# agg with different hows
@@ -557,7 +566,9 @@ def test_agg_misc(self):
# series like aggs
for t in cases:
- result = t['A'].agg({'A': ['sum', 'std']})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = t['A'].agg({'A': ['sum', 'std']})
expected = pd.concat([t['A'].sum(),
t['A'].std()],
axis=1)
@@ -572,15 +583,20 @@ def test_agg_misc(self):
('A', 'std'),
('B', 'mean'),
('B', 'std')])
- result = t['A'].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = t['A'].agg({'A': ['sum', 'std'],
+ 'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
# errors
# invalid names in the agg specification
for t in cases:
def f():
- t[['A']].agg({'A': ['sum', 'std'],
- 'B': ['mean', 'std']})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ t[['A']].agg({'A': ['sum', 'std'],
+ 'B': ['mean', 'std']})
self.assertRaises(SpecificationError, f)
@@ -617,12 +633,16 @@ def f():
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
- result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
- 'B': {'rb': ['mean', 'std']}})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
+ 'B': {'rb': ['mean', 'std']}})
assert_frame_equal(result, expected, check_like=True)
- result = t.agg({'A': {'ra': ['mean', 'std']},
- 'B': {'rb': ['mean', 'std']}})
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = t.agg({'A': {'ra': ['mean', 'std']},
+ 'B': {'rb': ['mean', 'std']}})
assert_frame_equal(result, expected, check_like=True)
def test_selection_api_validation(self):
@@ -752,16 +772,7 @@ def test_resample_empty_series(self):
expected.index = s.index._shallow_copy(freq=freq)
assert_index_equal(result.index, expected.index)
self.assertEqual(result.index.freq, expected.index.freq)
-
- if (method == 'size' and
- isinstance(result.index, PeriodIndex) and
- freq in ['M', 'D']):
- # GH12871 - TODO: name should propagate, but currently
- # doesn't on lower / same frequency with PeriodIndex
- assert_series_equal(result, expected, check_dtype=False)
-
- else:
- assert_series_equal(result, expected, check_dtype=False)
+ assert_series_equal(result, expected, check_dtype=False)
def test_resample_empty_dataframe(self):
# GH13212
@@ -1846,10 +1857,12 @@ def test_how_lambda_functions(self):
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
+ # this is a MI Series, so comparing the names of the results
+ # doesn't make sense
result = ts.resample('M').aggregate({'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
- tm.assert_series_equal(result['foo'], foo_exp)
- tm.assert_series_equal(result['bar'], bar_exp)
+ tm.assert_series_equal(result['foo'], foo_exp, check_names=False)
+ tm.assert_series_equal(result['bar'], bar_exp, check_names=False)
def test_resample_unequal_times(self):
# #1772
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 580ce12de3333..85053dba0c18b 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -45,6 +45,23 @@ def maybe_convert_platform(values):
return values
+def is_nested_object(obj):
+ """
+ return a boolean if we have a nested object, e.g. a Series with 1 or
+ more Series elements
+
+ This may not be necessarily be performant.
+
+ """
+
+ if isinstance(obj, ABCSeries) and is_object_dtype(obj):
+
+ if any(isinstance(v, ABCSeries) for v in obj.values):
+ return True
+
+ return False
+
+
def maybe_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
| pre-curser to #14668
This is basically in the whatsnew, but:
```
In [1]: df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
...: 'B': range(5),
...: 'C':range(5)})
...: df
...:
Out[1]:
A B C
0 1 0 0
1 1 1 1
2 1 2 2
3 2 3 3
4 2 4 4
```
This is good; multiple aggregations on a dataframe with a dict-of-lists
```
In [2]: df.groupby('A').agg({'B': ['sum', 'max'],
...: 'C': ['count', 'min']})
...:
Out[2]:
B C
sum max count min
A
1 3 2 3 0
2 7 4 2 3
```
This is a dict on a grouped Series -> deprecated
```
In [3]: df.groupby('A').B.agg({'foo': 'count'})
FutureWarning: using a dictionary on a Series for aggregation
is deprecated and will be removed in a future version
Out[3]:
foo
A
1 3
2 2
```
Further this has to go as well, a nested dict that does renaming.
Note once we fix https://github.com/pandas-dev/pandas/issues/4160 (renaming with a level); the following becomes almost trivial to rename in-line.
```
In [4]: df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
'C': {'bar': ['count', 'min']}})
FutureWarning: using a dictionary on a Series for aggregation
is deprecated and will be removed in a future version
Out[4]:
foo bar
sum max count min
A
1 3 2 3 0
2 7 4 2 3
```
Note: I will fix this message (as it doesn't actually apply here)
| https://api.github.com/repos/pandas-dev/pandas/pulls/15931 | 2017-04-07T02:44:53Z | 2017-04-13T10:18:04Z | 2017-04-13T10:18:04Z | 2017-11-30T20:22:28Z |
CLN: algos | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 80664a9ba3019..244f882f2c103 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -8,30 +8,22 @@
from pandas import compat, _np_version_under1p8
from pandas.types.cast import maybe_promote
-from pandas.types.generic import ABCSeries, ABCIndex
-from pandas.types.common import (is_unsigned_integer_dtype,
- is_signed_integer_dtype,
- is_integer_dtype,
- is_complex_dtype,
- is_categorical_dtype,
- is_extension_type,
- is_datetimetz,
- is_period_dtype,
- is_period_arraylike,
- is_numeric_dtype,
- is_float_dtype,
- is_bool_dtype,
- needs_i8_conversion,
- is_categorical,
- is_datetime64_dtype,
- is_timedelta64_dtype,
- is_scalar,
- _ensure_platform_int,
- _ensure_object,
- _ensure_float64,
- _ensure_uint64,
- _ensure_int64,
- is_list_like)
+from pandas.types.generic import (ABCSeries, ABCIndex,
+ ABCIndexClass, ABCCategorical)
+from pandas.types.common import (
+ is_unsigned_integer_dtype, is_signed_integer_dtype,
+ is_integer_dtype, is_complex_dtype,
+ is_categorical_dtype, is_sparse,
+ is_period_dtype,
+ is_numeric_dtype, is_float_dtype,
+ is_bool_dtype, needs_i8_conversion,
+ is_categorical, is_datetimetz,
+ is_datetime64_any_dtype, is_datetime64tz_dtype,
+ is_timedelta64_dtype,
+ is_scalar, is_list_like,
+ _ensure_platform_int, _ensure_object,
+ _ensure_float64, _ensure_uint64,
+ _ensure_int64)
from pandas.compat.numpy import _np_version_under1p10
from pandas.types.missing import isnull
@@ -45,40 +37,190 @@
# dtype access #
# --------------- #
-def _ensure_data_view(values):
+def _ensure_data(values, dtype=None):
"""
- helper routine to ensure that our data is of the correct
+ routine to ensure that our data is of the correct
input dtype for lower-level routines
+ This will coerce:
+ - ints -> int64
+ - uint -> uint64
+ - bool -> uint64 (TODO this should be uint8)
+ - datetimelike -> i8
+ - datetime64tz -> i8 (in local tz)
+ - categorical -> codes
+
Parameters
----------
values : array-like
+ dtype : pandas_dtype, optional
+ coerce to this dtype
+
+ Returns
+ -------
+ (ndarray, pandas_dtype, algo dtype as a string)
+
"""
- if needs_i8_conversion(values):
- values = values.view(np.int64)
- elif is_period_arraylike(values):
- from pandas.tseries.period import PeriodIndex
- values = PeriodIndex(values).asi8
- elif is_categorical_dtype(values):
- values = values.values.codes
- elif isinstance(values, (ABCSeries, ABCIndex)):
- values = values.values
-
- if is_signed_integer_dtype(values):
+ if (needs_i8_conversion(values) or
+ is_period_dtype(dtype) or
+ is_datetime64_any_dtype(dtype) or
+ is_timedelta64_dtype(dtype)):
+ if is_period_dtype(values) or is_period_dtype(dtype):
+ from pandas import PeriodIndex
+ values = PeriodIndex(values)
+ dtype = values.dtype
+ elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
+ from pandas import TimedeltaIndex
+ values = TimedeltaIndex(values)
+ dtype = values.dtype
+ else:
+ # Datetime
+ from pandas import DatetimeIndex
+ values = DatetimeIndex(values)
+ dtype = values.dtype
+
+ return values.asi8, dtype, 'int64'
+
+ elif is_categorical_dtype(values) or is_categorical_dtype(dtype):
+ values = getattr(values, 'values', values)
+ values = values.codes
+ dtype = 'category'
+
+ # we are actually coercing to int64
+ # until our algos suppport int* directly (not all do)
values = _ensure_int64(values)
- elif is_unsigned_integer_dtype(values):
- values = _ensure_uint64(values)
- elif is_complex_dtype(values):
- values = _ensure_float64(values)
- elif is_float_dtype(values):
- values = _ensure_float64(values)
- else:
+
+ return values, dtype, 'int64'
+
+ values = np.asarray(values)
+
+ try:
+ if is_bool_dtype(values) or is_bool_dtype(dtype):
+ # we are actually coercing to uint64
+ # until our algos suppport uint8 directly (see TODO)
+ values = values.astype('uint64')
+ dtype = 'bool'
+ ndtype = 'uint64'
+ elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
+ values = _ensure_int64(values)
+ ndtype = dtype = 'int64'
+ elif (is_unsigned_integer_dtype(values) or
+ is_unsigned_integer_dtype(dtype)):
+ values = _ensure_uint64(values)
+ ndtype = dtype = 'uint64'
+ elif is_complex_dtype(values) or is_complex_dtype(dtype):
+ values = _ensure_float64(values)
+ ndtype = dtype = 'float64'
+ elif is_float_dtype(values) or is_float_dtype(dtype):
+ values = _ensure_float64(values)
+ ndtype = dtype = 'float64'
+ else:
+ values = _ensure_object(values)
+ ndtype = dtype = 'object'
+
+ except (TypeError, ValueError):
+ # if we are trying to coerce to a dtype
+ # and it is incompat this will fall thru to here
values = _ensure_object(values)
+ ndtype = dtype = 'object'
+
+ return values, dtype, ndtype
+
+
+def _reconstruct_data(values, dtype, original):
+ """
+ reverse of _ensure_data
+
+ Parameters
+ ----------
+ values : ndarray
+ dtype : pandas_dtype
+ original : ndarray-like
+
+ Returns
+ -------
+ Index for extension types, otherwise ndarray casted to dtype
+
+ """
+ from pandas import Index
+ if is_categorical_dtype(dtype):
+ pass
+ elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
+ values = Index(original)._shallow_copy(values, name=None)
+ elif dtype is not None:
+ values = values.astype(dtype)
return values
+def _ensure_arraylike(values):
+ """
+ ensure that we are arraylike if not already
+ """
+ if not isinstance(values, (np.ndarray, ABCCategorical,
+ ABCIndexClass, ABCSeries)):
+ values = np.array(values)
+ return values
+
+
+_hashtables = {
+ 'float64': (htable.Float64HashTable, htable.Float64Vector),
+ 'uint64': (htable.UInt64HashTable, htable.UInt64Vector),
+ 'int64': (htable.Int64HashTable, htable.Int64Vector),
+ 'string': (htable.StringHashTable, htable.ObjectVector),
+ 'object': (htable.PyObjectHashTable, htable.ObjectVector)
+}
+
+
+def _get_hashtable_algo(values):
+ """
+ Parameters
+ ----------
+ values : arraylike
+
+ Returns
+ -------
+ tuples(hashtable class,
+ vector class,
+ values,
+ dtype,
+ ndtype)
+ """
+ values, dtype, ndtype = _ensure_data(values)
+
+ if ndtype == 'object':
+
+ # its cheaper to use a String Hash Table than Object
+ if lib.infer_dtype(values) in ['string']:
+ ndtype = 'string'
+ else:
+ ndtype = 'object'
+
+ htable, table = _hashtables[ndtype]
+ return (htable, table, values, dtype, ndtype)
+
+
+def _get_data_algo(values, func_map):
+
+ if is_categorical_dtype(values):
+ values = values._values_for_rank()
+
+ values, dtype, ndtype = _ensure_data(values)
+ if ndtype == 'object':
+
+ # its cheaper to use a String Hash Table than Object
+ if lib.infer_dtype(values) in ['string']:
+ try:
+ f = func_map['string']
+ except KeyError:
+ pass
+
+ f = func_map.get(ndtype, func_map['object'])
+
+ return f, values
+
+
# --------------- #
# top-level algos #
# --------------- #
@@ -104,92 +246,41 @@ def match(to_match, values, na_sentinel=-1):
match : ndarray of integers
"""
values = com._asarray_tuplesafe(values)
- if issubclass(values.dtype.type, string_types):
- values = np.array(values, dtype='O')
-
- f = lambda htype, caster: _match_object(to_match, values, htype, caster)
- result = _hashtable_algo(f, values, np.int64)
+ htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
+ to_match, _, _ = _ensure_data(to_match, dtype)
+ table = htable(min(len(to_match), 1000000))
+ table.map_locations(values)
+ result = table.lookup(to_match)
if na_sentinel != -1:
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
- from pandas.core.series import Series
+ from pandas import Series
result = Series(result.ravel()).replace(-1, na_sentinel).values.\
reshape(result.shape)
return result
-def _match_object(values, index, table_type, type_caster):
- values = type_caster(values)
- index = type_caster(index)
- table = table_type(min(len(index), 1000000))
- table.map_locations(index)
- return table.lookup(values)
-
-
-def unique(values):
- """
- Compute unique values (not necessarily sorted) efficiently from input array
- of values
-
- Parameters
- ----------
- values : array-like
-
- Returns
- -------
- uniques
- """
- values = com._asarray_tuplesafe(values)
-
- f = lambda htype, caster: _unique_object(values, htype, caster)
- return _hashtable_algo(f, values)
-
-
-def _unique_object(values, table_type, type_caster):
- values = type_caster(values)
- table = table_type(min(len(values), 1000000))
- uniques = table.unique(values)
- return type_caster(uniques)
-
-
def unique1d(values):
"""
Hash table-based unique
"""
- if np.issubdtype(values.dtype, np.floating):
- table = htable.Float64HashTable(len(values))
- uniques = np.array(table.unique(_ensure_float64(values)),
- dtype=np.float64)
- elif np.issubdtype(values.dtype, np.datetime64):
- table = htable.Int64HashTable(len(values))
- uniques = table.unique(_ensure_int64(values))
- uniques = uniques.view('M8[ns]')
- elif np.issubdtype(values.dtype, np.timedelta64):
- table = htable.Int64HashTable(len(values))
- uniques = table.unique(_ensure_int64(values))
- uniques = uniques.view('m8[ns]')
- elif np.issubdtype(values.dtype, np.signedinteger):
- table = htable.Int64HashTable(len(values))
- uniques = table.unique(_ensure_int64(values))
- elif np.issubdtype(values.dtype, np.unsignedinteger):
- table = htable.UInt64HashTable(len(values))
- uniques = table.unique(_ensure_uint64(values))
- else:
-
- # its cheaper to use a String Hash Table than Object
- if lib.infer_dtype(values) in ['string']:
- table = htable.StringHashTable(len(values))
- else:
- table = htable.PyObjectHashTable(len(values))
+ values = _ensure_arraylike(values)
+ original = values
+ htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
- uniques = table.unique(_ensure_object(values))
+ table = htable(len(values))
+ uniques = table.unique(values)
+ uniques = _reconstruct_data(uniques, dtype, original)
return uniques
+unique = unique1d
+
+
def isin(comps, values):
"""
Compute the isin boolean array
@@ -213,38 +304,11 @@ def isin(comps, values):
" to isin(), you passed a "
"[{0}]".format(type(values).__name__))
- from pandas import DatetimeIndex, TimedeltaIndex, PeriodIndex
-
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = np.array(list(values), dtype='object')
- if needs_i8_conversion(comps):
- if is_period_dtype(values):
- comps = PeriodIndex(comps)
- values = PeriodIndex(values)
- elif is_timedelta64_dtype(comps):
- comps = TimedeltaIndex(comps)
- values = TimedeltaIndex(values)
- else:
- comps = DatetimeIndex(comps)
- values = DatetimeIndex(values)
-
- values = values.asi8
- comps = comps.asi8
- elif is_bool_dtype(comps):
-
- try:
- comps = np.asarray(comps).view('uint8')
- values = np.asarray(values).view('uint8')
- except TypeError:
- # object array conversion will fail
- pass
- elif is_numeric_dtype(comps):
- comps = np.asarray(comps)
- values = np.asarray(values)
- else:
- comps = np.asarray(comps).astype(object)
- values = np.asarray(values).astype(object)
+ comps, dtype, _ = _ensure_data(comps)
+ values, _, _ = _ensure_data(values, dtype=dtype)
# GH11232
# work-around for numpy < 1.8 and comparisions on py3
@@ -396,53 +460,32 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
note: an array of Periods will ignore sort as it returns an always sorted
PeriodIndex
"""
- from pandas import Index, Series, DatetimeIndex, PeriodIndex
-
- # handling possibilities here
- # - for a numpy datetimelike simply view as i8 then cast back
- # - bool handled as uint8 then cast back
- # - for an extension datetimelike view as i8 then
- # reconstruct from boxed values to transfer metadata
- dtype = None
- if needs_i8_conversion(values):
- if is_period_dtype(values):
- values = PeriodIndex(values)
- vals = values.asi8
- elif is_datetimetz(values):
- values = DatetimeIndex(values)
- vals = values.asi8
- else:
- # numpy dtype
- dtype = values.dtype
- vals = values.view(np.int64)
- elif is_bool_dtype(values):
- dtype = bool
- vals = np.asarray(values).view('uint8')
- else:
- vals = np.asarray(values)
- (hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)
+ original = values
+ values, dtype, _ = _ensure_data(values)
+ (hash_klass, vec_klass), values = _get_data_algo(values, _hashtables)
- table = hash_klass(size_hint or len(vals))
+ table = hash_klass(size_hint or len(values))
uniques = vec_klass()
- check_nulls = not is_integer_dtype(values)
- labels = table.get_labels(vals, uniques, 0, na_sentinel, check_nulls)
+ check_nulls = not is_integer_dtype(original)
+ labels = table.get_labels(values, uniques, 0, na_sentinel, check_nulls)
labels = _ensure_platform_int(labels)
-
uniques = uniques.to_array()
if sort and len(uniques) > 0:
uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
assume_unique=True)
- if dtype is not None:
- uniques = uniques.astype(dtype)
+ uniques = _reconstruct_data(uniques, dtype, original)
- if isinstance(values, Index):
- uniques = values._shallow_copy(uniques, name=None)
- elif isinstance(values, Series):
+ # return original tenor
+ if isinstance(original, ABCIndexClass):
+ uniques = original._shallow_copy(uniques, name=None)
+ elif isinstance(original, ABCSeries):
+ from pandas import Index
uniques = Index(uniques)
+
return labels, uniques
@@ -471,7 +514,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
value_counts : Series
"""
- from pandas.core.series import Series
+ from pandas.core.series import Series, Index
name = getattr(values, 'name', None)
if bins is not None:
@@ -483,17 +526,16 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
raise TypeError("bins argument only works with numeric data.")
values = cat.codes
- if is_extension_type(values) and not is_datetimetz(values):
+ if is_categorical_dtype(values) or is_sparse(values):
+
# handle Categorical and sparse,
- # datetime tz can be handeled in ndarray path
result = Series(values).values.value_counts(dropna=dropna)
result.name = name
counts = result.values
+
else:
- # ndarray path. pass original to handle DatetimeTzBlock
- keys, counts = _value_counts_arraylike(values, dropna=dropna)
+ keys, counts = _value_counts_arraylike(values, dropna)
- from pandas import Index, Series
if not isinstance(keys, Index):
keys = Index(keys)
result = Series(counts, index=keys, name=name)
@@ -513,60 +555,45 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
return result
-def _value_counts_arraylike(values, dropna=True):
- is_datetimetz_type = is_datetimetz(values)
- is_period_type = (is_period_dtype(values) or
- is_period_arraylike(values))
-
- orig = values
-
- from pandas.core.series import Series
- values = Series(values).values
- dtype = values.dtype
+def _value_counts_arraylike(values, dropna):
+ """
+ Parameters
+ ----------
+ values : arraylike
+ dropna : boolean
- if needs_i8_conversion(dtype) or is_period_type:
+ Returns
+ -------
+ (uniques, counts)
- from pandas.tseries.index import DatetimeIndex
- from pandas.tseries.period import PeriodIndex
+ """
+ values = _ensure_arraylike(values)
+ original = values
+ values, dtype, ndtype = _ensure_data(values)
- if is_period_type:
- # values may be an object
- values = PeriodIndex(values)
- freq = values.freq
+ if needs_i8_conversion(dtype):
+ # i8
- values = values.view(np.int64)
keys, counts = htable.value_count_int64(values, dropna)
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
- # convert the keys back to the dtype we came in
- keys = keys.astype(dtype)
-
- # dtype handling
- if is_datetimetz_type:
- keys = DatetimeIndex._simple_new(keys, tz=orig.dtype.tz)
- elif is_period_type:
- keys = PeriodIndex._from_ordinals(keys, freq=freq)
-
- elif is_signed_integer_dtype(dtype):
- values = _ensure_int64(values)
- keys, counts = htable.value_count_int64(values, dropna)
- elif is_unsigned_integer_dtype(dtype):
- values = _ensure_uint64(values)
- keys, counts = htable.value_count_uint64(values, dropna)
- elif is_float_dtype(dtype):
- values = _ensure_float64(values)
- keys, counts = htable.value_count_float64(values, dropna)
else:
- values = _ensure_object(values)
- keys, counts = htable.value_count_object(values, dropna)
+ # ndarray like
+
+ # TODO: handle uint8
+ f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype))
+ keys, counts = f(values, dropna)
mask = isnull(values)
if not dropna and mask.any():
- keys = np.insert(keys, 0, np.NaN)
- counts = np.insert(counts, 0, mask.sum())
+ if not isnull(keys).any():
+ keys = np.insert(keys, 0, np.NaN)
+ counts = np.insert(counts, 0, mask.sum())
+
+ keys = _reconstruct_data(keys, original.dtype, original)
return keys, counts
@@ -593,33 +620,9 @@ def duplicated(values, keep='first'):
duplicated : ndarray
"""
- dtype = values.dtype
-
- # no need to revert to original type
- if needs_i8_conversion(dtype):
- values = values.view(np.int64)
- elif is_period_arraylike(values):
- from pandas.tseries.period import PeriodIndex
- values = PeriodIndex(values).asi8
- elif is_categorical_dtype(dtype):
- values = values.values.codes
- elif isinstance(values, (ABCSeries, ABCIndex)):
- values = values.values
-
- if is_signed_integer_dtype(dtype):
- values = _ensure_int64(values)
- duplicated = htable.duplicated_int64(values, keep=keep)
- elif is_unsigned_integer_dtype(dtype):
- values = _ensure_uint64(values)
- duplicated = htable.duplicated_uint64(values, keep=keep)
- elif is_float_dtype(dtype):
- values = _ensure_float64(values)
- duplicated = htable.duplicated_float64(values, keep=keep)
- else:
- values = _ensure_object(values)
- duplicated = htable.duplicated_object(values, keep=keep)
-
- return duplicated
+ values, dtype, ndtype = _ensure_data(values)
+ f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype))
+ return f(values, keep=keep)
def mode(values):
@@ -635,40 +638,34 @@ def mode(values):
-------
mode : Series
"""
+ from pandas import Series
- # must sort because hash order isn't necessarily defined.
- from pandas.core.series import Series
+ values = _ensure_arraylike(values)
+ original = values
- if isinstance(values, Series):
- constructor = values._constructor
- values = values.values
- else:
- values = np.asanyarray(values)
- constructor = Series
+ # categorical is a fast-path
+ if is_categorical_dtype(values):
- dtype = values.dtype
- if is_signed_integer_dtype(values):
- values = _ensure_int64(values)
- result = constructor(np.sort(htable.mode_int64(values)), dtype=dtype)
- elif is_unsigned_integer_dtype(values):
- values = _ensure_uint64(values)
- result = constructor(np.sort(htable.mode_uint64(values)), dtype=dtype)
- elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
- dtype = values.dtype
- values = values.view(np.int64)
- result = constructor(np.sort(htable.mode_int64(values)), dtype=dtype)
- elif is_categorical_dtype(values):
- result = constructor(values.mode())
- else:
+ if isinstance(values, Series):
+ return Series(values.values.mode())
+ return values.mode()
+
+ values, dtype, ndtype = _ensure_data(values)
+
+ # TODO: this should support float64
+ if ndtype not in ['int64', 'uint64', 'object']:
+ ndtype = 'object'
values = _ensure_object(values)
- res = htable.mode_object(values)
- try:
- res = np.sort(res)
- except TypeError as e:
- warn("Unable to sort modes: %s" % e)
- result = constructor(res, dtype=dtype)
- return result
+ f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
+ result = f(values)
+ try:
+ result = np.sort(result)
+ except TypeError as e:
+ warn("Unable to sort modes: %s" % e)
+
+ result = _reconstruct_data(result, original.dtype, original)
+ return Series(result)
def rank(values, axis=0, method='average', na_option='keep',
@@ -859,6 +856,12 @@ def quantile(x, q, interpolation_method='fraction'):
values = np.sort(x)
+ def _interpolate(a, b, fraction):
+ """Returns the point at the given fraction between a and b, where
+ 'fraction' must be between 0 and 1.
+ """
+ return a + (b - a) * fraction
+
def _get_score(at):
if len(values) == 0:
return np.nan
@@ -887,261 +890,186 @@ def _get_score(at):
return algos.arrmap_float64(q, _get_score)
-def _interpolate(a, b, fraction):
- """Returns the point at the given fraction between a and b, where
- 'fraction' must be between 0 and 1.
- """
- return a + (b - a) * fraction
-
-
-def nsmallest(arr, n, keep='first'):
- """
- Find the indices of the n smallest values of a numpy array.
-
- Note: Fails silently with NaN.
- """
- if keep == 'last':
- arr = arr[::-1]
-
- narr = len(arr)
- n = min(n, narr)
-
- arr = _ensure_data_view(arr)
- kth_val = algos.kth_smallest(arr.copy(), n - 1)
- return _finalize_nsmallest(arr, kth_val, n, keep, narr)
-
+# --------------- #
+# select n #
+# --------------- #
-def nlargest(arr, n, keep='first'):
- """
- Find the indices of the n largest values of a numpy array.
+class SelectN(object):
- Note: Fails silently with NaN.
- """
- arr = _ensure_data_view(arr)
- return nsmallest(-arr, n, keep=keep)
+ def __init__(self, obj, n, keep):
+ self.obj = obj
+ self.n = n
+ self.keep = keep
+ if self.keep not in ('first', 'last'):
+ raise ValueError('keep must be either "first", "last"')
-def select_n_slow(dropped, n, keep, method):
- reverse_it = (keep == 'last' or method == 'nlargest')
- ascending = method == 'nsmallest'
- slc = np.s_[::-1] if reverse_it else np.s_[:]
- return dropped[slc].sort_values(ascending=ascending).head(n)
+ def nlargest(self):
+ return self.compute('nlargest')
+ def nsmallest(self):
+ return self.compute('nsmallest')
-_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}
+ @staticmethod
+ def is_valid_dtype_n_method(dtype):
+ """
+ Helper function to determine if dtype is valid for
+ nsmallest/nlargest methods
+ """
+ return ((is_numeric_dtype(dtype) and not is_complex_dtype(dtype)) or
+ needs_i8_conversion(dtype))
-def _is_valid_dtype_n_method(dtype):
- """
- Helper function to determine if dtype is valid for
- nsmallest/nlargest methods
+class SelectNSeries(SelectN):
"""
- return ((is_numeric_dtype(dtype) and not is_complex_dtype(dtype)) or
- needs_i8_conversion(dtype))
-
-
-def select_n_series(series, n, keep, method):
- """Implement n largest/smallest for pandas Series
+ Implement n largest/smallest for Series
Parameters
----------
- series : pandas.Series object
+ obj : Series
n : int
keep : {'first', 'last'}, default 'first'
- method : str, {'nlargest', 'nsmallest'}
Returns
-------
nordered : Series
"""
- dtype = series.dtype
- if not _is_valid_dtype_n_method(dtype):
- raise TypeError("Cannot use method '{method}' with "
- "dtype {dtype}".format(method=method, dtype=dtype))
- if keep not in ('first', 'last'):
- raise ValueError('keep must be either "first", "last"')
+ def compute(self, method):
+
+ n = self.n
+ dtype = self.obj.dtype
+ if not self.is_valid_dtype_n_method(dtype):
+ raise TypeError("Cannot use method '{method}' with "
+ "dtype {dtype}".format(method=method,
+ dtype=dtype))
+
+ if n <= 0:
+ return self.obj[[]]
+
+ dropped = self.obj.dropna()
+
+ # slow method
+ if n >= len(self.obj):
- if n <= 0:
- return series[[]]
+ reverse_it = (self.keep == 'last' or method == 'nlargest')
+ ascending = method == 'nsmallest'
+ slc = np.s_[::-1] if reverse_it else np.s_[:]
+ return dropped[slc].sort_values(ascending=ascending).head(n)
- dropped = series.dropna()
+ # fast method
+ arr, _, _ = _ensure_data(dropped.values)
+ if method == 'nlargest':
+ arr = -arr
- if n >= len(series):
- return select_n_slow(dropped, n, keep, method)
+ if self.keep == 'last':
+ arr = arr[::-1]
- inds = _select_methods[method](dropped.values, n, keep)
- return dropped.iloc[inds]
+ narr = len(arr)
+ n = min(n, narr)
+ kth_val = algos.kth_smallest(arr.copy(), n - 1)
+ ns, = np.nonzero(arr <= kth_val)
+ inds = ns[arr[ns].argsort(kind='mergesort')][:n]
+ if self.keep == 'last':
+ # reverse indices
+ inds = narr - 1 - inds
-def select_n_frame(frame, columns, n, method, keep):
- """Implement n largest/smallest for pandas DataFrame
+ return dropped.iloc[inds]
+
+
+class SelectNFrame(SelectN):
+ """
+ Implement n largest/smallest for DataFrame
Parameters
----------
- frame : pandas.DataFrame object
- columns : list or str
+ obj : DataFrame
n : int
keep : {'first', 'last'}, default 'first'
- method : str, {'nlargest', 'nsmallest'}
+ columns : list or str
Returns
-------
nordered : DataFrame
"""
- from pandas import Int64Index
- if not is_list_like(columns):
- columns = [columns]
- columns = list(columns)
- for column in columns:
- dtype = frame[column].dtype
- if not _is_valid_dtype_n_method(dtype):
- raise TypeError((
- "Column {column!r} has dtype {dtype}, cannot use method "
- "{method!r} with this dtype"
- ).format(column=column, dtype=dtype, method=method))
-
- def get_indexer(current_indexer, other_indexer):
- """Helper function to concat `current_indexer` and `other_indexer`
- depending on `method`
- """
- if method == 'nsmallest':
- return current_indexer.append(other_indexer)
- else:
- return other_indexer.append(current_indexer)
-
- # Below we save and reset the index in case index contains duplicates
- original_index = frame.index
- cur_frame = frame = frame.reset_index(drop=True)
- cur_n = n
- indexer = Int64Index([])
-
- for i, column in enumerate(columns):
-
- # For each column we apply method to cur_frame[column]. If it is the
- # last column in columns, or if the values returned are unique in
- # frame[column] we save this index and break
- # Otherwise we must save the index of the non duplicated values
- # and set the next cur_frame to cur_frame filtered on all duplcicated
- # values (#GH15297)
- series = cur_frame[column]
- values = getattr(series, method)(cur_n, keep=keep)
- is_last_column = len(columns) - 1 == i
- if is_last_column or values.nunique() == series.isin(values).sum():
-
- # Last column in columns or values are unique in series => values
- # is all that matters
- indexer = get_indexer(indexer, values.index)
- break
-
- duplicated_filter = series.duplicated(keep=False)
- duplicated = values[duplicated_filter]
- non_duplicated = values[~duplicated_filter]
- indexer = get_indexer(indexer, non_duplicated.index)
-
- # Must set cur frame to include all duplicated values to consider for
- # the next column, we also can reduce cur_n by the current length of
- # the indexer
- cur_frame = cur_frame[series.isin(duplicated)]
- cur_n = n - len(indexer)
-
- frame = frame.take(indexer)
-
- # Restore the index on frame
- frame.index = original_index.take(indexer)
- return frame
-
-
-def _finalize_nsmallest(arr, kth_val, n, keep, narr):
- ns, = np.nonzero(arr <= kth_val)
- inds = ns[arr[ns].argsort(kind='mergesort')][:n]
- if keep == 'last':
- # reverse indices
- return narr - 1 - inds
- else:
- return inds
-
-
-# ------- #
-# helpers #
-# ------- #
-
-def _hashtable_algo(f, values, return_dtype=None):
- """
- f(HashTable, type_caster) -> result
- """
-
- dtype = values.dtype
- if is_float_dtype(dtype):
- return f(htable.Float64HashTable, _ensure_float64)
- elif is_signed_integer_dtype(dtype):
- return f(htable.Int64HashTable, _ensure_int64)
- elif is_unsigned_integer_dtype(dtype):
- return f(htable.UInt64HashTable, _ensure_uint64)
- elif is_datetime64_dtype(dtype):
- return_dtype = return_dtype or 'M8[ns]'
- return f(htable.Int64HashTable, _ensure_int64).view(return_dtype)
- elif is_timedelta64_dtype(dtype):
- return_dtype = return_dtype or 'm8[ns]'
- return f(htable.Int64HashTable, _ensure_int64).view(return_dtype)
-
- # its cheaper to use a String Hash Table than Object
- if lib.infer_dtype(values) in ['string']:
- return f(htable.StringHashTable, _ensure_object)
-
- # use Object
- return f(htable.PyObjectHashTable, _ensure_object)
-
-
-_hashtables = {
- 'float64': (htable.Float64HashTable, htable.Float64Vector),
- 'uint64': (htable.UInt64HashTable, htable.UInt64Vector),
- 'int64': (htable.Int64HashTable, htable.Int64Vector),
- 'string': (htable.StringHashTable, htable.ObjectVector),
- 'object': (htable.PyObjectHashTable, htable.ObjectVector)
-}
-
-
-def _get_data_algo(values, func_map):
-
- f = None
-
- if is_categorical_dtype(values):
- values = values._values_for_rank()
-
- if is_float_dtype(values):
- f = func_map['float64']
- values = _ensure_float64(values)
-
- elif needs_i8_conversion(values):
- f = func_map['int64']
- values = values.view('i8')
-
- elif is_signed_integer_dtype(values):
- f = func_map['int64']
- values = _ensure_int64(values)
-
- elif is_unsigned_integer_dtype(values):
- f = func_map['uint64']
- values = _ensure_uint64(values)
-
- else:
- values = _ensure_object(values)
-
- # its cheaper to use a String Hash Table than Object
- if lib.infer_dtype(values) in ['string']:
- try:
- f = func_map['string']
- except KeyError:
- pass
-
- if f is None:
- f = func_map['object']
-
- return f, values
-
-# ---- #
+ def __init__(self, obj, n, keep, columns):
+ super(SelectNFrame, self).__init__(obj, n, keep)
+ if not is_list_like(columns):
+ columns = [columns]
+ columns = list(columns)
+ self.columns = columns
+
+ def compute(self, method):
+
+ from pandas import Int64Index
+ n = self.n
+ frame = self.obj
+ columns = self.columns
+
+ for column in columns:
+ dtype = frame[column].dtype
+ if not self.is_valid_dtype_n_method(dtype):
+ raise TypeError((
+ "Column {column!r} has dtype {dtype}, cannot use method "
+ "{method!r} with this dtype"
+ ).format(column=column, dtype=dtype, method=method))
+
+ def get_indexer(current_indexer, other_indexer):
+ """Helper function to concat `current_indexer` and `other_indexer`
+ depending on `method`
+ """
+ if method == 'nsmallest':
+ return current_indexer.append(other_indexer)
+ else:
+ return other_indexer.append(current_indexer)
+
+ # Below we save and reset the index in case index contains duplicates
+ original_index = frame.index
+ cur_frame = frame = frame.reset_index(drop=True)
+ cur_n = n
+ indexer = Int64Index([])
+
+ for i, column in enumerate(columns):
+
+ # For each column we apply method to cur_frame[column].
+ # If it is the last column in columns, or if the values
+ # returned are unique in frame[column] we save this index
+ # and break
+ # Otherwise we must save the index of the non duplicated values
+ # and set the next cur_frame to cur_frame filtered on all
+ # duplcicated values (#GH15297)
+ series = cur_frame[column]
+ values = getattr(series, method)(cur_n, keep=self.keep)
+ is_last_column = len(columns) - 1 == i
+ if is_last_column or values.nunique() == series.isin(values).sum():
+
+ # Last column in columns or values are unique in
+ # series => values
+ # is all that matters
+ indexer = get_indexer(indexer, values.index)
+ break
+
+ duplicated_filter = series.duplicated(keep=False)
+ duplicated = values[duplicated_filter]
+ non_duplicated = values[~duplicated_filter]
+ indexer = get_indexer(indexer, non_duplicated.index)
+
+ # Must set cur frame to include all duplicated values
+ # to consider for the next column, we also can reduce
+ # cur_n by the current length of the indexer
+ cur_frame = cur_frame[series.isin(duplicated)]
+ cur_n = n - len(indexer)
+
+ frame = frame.take(indexer)
+
+ # Restore the index on frame
+ frame.index = original_index.take(indexer)
+ return frame
+
+
+# ------- ## ---- #
# take #
# ---- #
@@ -1534,23 +1462,41 @@ def func(arr, indexer, out, fill_value=np.nan):
def diff(arr, n, axis=0):
- """ difference of n between self,
- analagoust to s-s.shift(n) """
+ """
+ difference of n between self,
+ analagoust to s-s.shift(n)
+
+ Parameters
+ ----------
+ arr : ndarray
+ n : int
+ number of periods
+ axis : int
+ axis to shift on
+
+ Returns
+ -------
+ shifted
+
+ """
n = int(n)
na = np.nan
dtype = arr.dtype
+
is_timedelta = False
if needs_i8_conversion(arr):
dtype = np.float64
arr = arr.view('i8')
na = iNaT
is_timedelta = True
- elif issubclass(dtype.type, np.integer):
- dtype = np.float64
- elif issubclass(dtype.type, np.bool_):
+
+ elif is_bool_dtype(dtype):
dtype = np.object_
+ elif is_integer_dtype(dtype):
+ dtype = np.float64
+
dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3980bf6cdbc09..f6199be2d1fc9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3441,7 +3441,10 @@ def nlargest(self, n, columns, keep='first'):
1 10 b 2
2 8 d NaN
"""
- return algorithms.select_n_frame(self, columns, n, 'nlargest', keep)
+ return algorithms.SelectNFrame(self,
+ n=n,
+ keep=keep,
+ columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
@@ -3475,7 +3478,10 @@ def nsmallest(self, n, columns, keep='first'):
0 1 a 1
2 8 d NaN
"""
- return algorithms.select_n_frame(self, columns, n, 'nsmallest', keep)
+ return algorithms.SelectNFrame(self,
+ n=n,
+ keep=keep,
+ columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1aaa106d2c68f..d6a1a9d98faf4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1856,8 +1856,7 @@ def nlargest(self, n=5, keep='first'):
121637 4.240952
dtype: float64
"""
- return algorithms.select_n_series(self, n=n, keep=keep,
- method='nlargest')
+ return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n=5, keep='first'):
"""Return the smallest `n` elements.
@@ -1903,8 +1902,7 @@ def nsmallest(self, n=5, keep='first'):
359919 -4.331927
dtype: float64
"""
- return algorithms.select_n_series(self, n=n, keep=keep,
- method='nsmallest')
+ return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index ac3a42c3cf122..d893183dae0ed 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -620,9 +620,9 @@ def test_dropna(self):
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
- tm.assert_series_equal(
- pd.Series([10.3, 5., 5., None]).value_counts(dropna=False),
- pd.Series([2, 1, 1], index=[5., 10.3, np.nan]))
+ result = pd.Series([10.3, 5., 5., None]).value_counts(dropna=False)
+ expected = pd.Series([2, 1, 1], index=[5., 10.3, np.nan])
+ tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
@@ -1356,16 +1356,19 @@ def test_uint64_overflow(self):
def test_categorical(self):
c = Categorical([1, 2])
- exp = Series([1, 2], dtype=np.int64)
- tm.assert_series_equal(algos.mode(c), exp)
+ exp = c
+ tm.assert_categorical_equal(algos.mode(c), exp)
+ tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 'a', 'a'])
- exp = Series(['a'], dtype=object)
- tm.assert_series_equal(algos.mode(c), exp)
+ exp = Categorical(['a'], categories=[1, 'a'])
+ tm.assert_categorical_equal(algos.mode(c), exp)
+ tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 1, 2, 3, 3])
- exp = Series([1, 3], dtype=np.int64)
- tm.assert_series_equal(algos.mode(c), exp)
+ exp = Categorical([1, 3], categories=[1, 2, 3])
+ tm.assert_categorical_equal(algos.mode(c), exp)
+ tm.assert_categorical_equal(c.mode(), exp)
def test_index(self):
idx = Index([1, 2, 3])
diff --git a/pandas/tests/types/test_dtypes.py b/pandas/tests/types/test_dtypes.py
index 8ef2868ae324f..e7b2edeb57714 100644
--- a/pandas/tests/types/test_dtypes.py
+++ b/pandas/tests/types/test_dtypes.py
@@ -149,6 +149,7 @@ def test_construction_from_string(self):
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_is_dtype(self):
+ self.assertFalse(DatetimeTZDtype.is_dtype(None))
self.assertTrue(DatetimeTZDtype.is_dtype(self.dtype))
self.assertTrue(DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]'))
self.assertFalse(DatetimeTZDtype.is_dtype('foo'))
diff --git a/pandas/types/common.py b/pandas/types/common.py
index a1f03e59a5e6e..017805673defe 100644
--- a/pandas/types/common.py
+++ b/pandas/types/common.py
@@ -359,6 +359,8 @@ def _coerce_to_dtype(dtype):
def _get_dtype(arr_or_dtype):
+ if arr_or_dtype is None:
+ raise TypeError
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
diff --git a/pandas/types/dtypes.py b/pandas/types/dtypes.py
index 43135ba94ab46..c3494df93476b 100644
--- a/pandas/types/dtypes.py
+++ b/pandas/types/dtypes.py
@@ -82,6 +82,8 @@ def is_dtype(cls, dtype):
return True
elif isinstance(dtype, np.dtype):
return False
+ elif dtype is None:
+ return False
try:
return cls.construct_from_string(dtype) is not None
except:
| - clean up select_n algos
- clean ensure_data
closes #15903
should make this much simpler going forward. All dtype conversions are now centralized.
Added some doc-strings as well. | https://api.github.com/repos/pandas-dev/pandas/pulls/15929 | 2017-04-06T21:09:42Z | 2017-04-07T00:16:56Z | 2017-04-07T00:16:56Z | 2017-04-25T10:47:05Z |
ENH add fill_value feature to pd.get_dummies | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 0b98e57c606a3..4db70ef7825ea 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -372,6 +372,8 @@ Other Enhancements
- :func:`MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels <advanced.shown_levels>`. (:issue:`15694`)
- ``pd.read_csv()`` will now raise a ``ParserError`` error whenever any parsing error occurs (:issue:`15913`, :issue:`15925`)
- ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`)
+- ``pd.get_dummies()`` now accepts the ``fill_value`` keyword which specifies how to fill NaN values in the dummy variables. (:issue:`15923`)
+- ``pd.types.cast`` has a new function ``maybe_downcast_itemsize`` which can be used to reduce the width of numeric types. (:issue:`15923`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
@@ -382,6 +384,19 @@ Other Enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Deprecate Automatic Zero Filling of Missing Values in ``pd.get_dummies``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`get_dummies` function currently fills NaN values with zero by default. This behavior is in conflict with the rest of the pandas API since NaN values should be filled with ``fillna`` or a ``fill_value`` keyword, and NaN values should be propagated through pandas transformations. In the future, :func:`get_dummies` will propagate NaN values by default. (:issue:`15923`)
+
+
+The recommended way to reproduce the current behavior of filling NaN values with zeros with the new, upcoming API is
+
+.. ipython: python
+
+ df = pd.get_dummies(df, fill_value=0)
+
+
.. _whatsnew_0200.api_breaking.deprecate_ix:
Deprecate .ix
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index b03c3d77928c7..b9090eb98e754 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -4,13 +4,15 @@
from pandas import compat
import itertools
import re
+import warnings
import numpy as np
from pandas.types.common import (_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion)
-from pandas.types.cast import maybe_promote
+from pandas.types.cast import (maybe_promote, infer_dtype_from_scalar,
+ maybe_downcast_itemsize)
from pandas.types.missing import notnull
import pandas.types.concat as _concat
@@ -1059,7 +1061,8 @@ def melt_stub(df, stub, i, j, value_vars, sep):
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
- columns=None, sparse=False, drop_first=False):
+ columns=None, sparse=False, drop_first=False,
+ fill_value=None):
"""
Convert categorical variable into dummy/indicator variables
@@ -1075,7 +1078,8 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
- Add a column to indicate NaNs, if False NaNs are ignored.
+ If True, add an extra dummy column to indicate NaNs, otherwise
+ no extra column is added.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
@@ -1091,6 +1095,19 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
first level.
.. versionadded:: 0.18.0
+ fill_value : scalar, default None
+ Value to fill NaNs with. If no missing values are found or NaN is not
+ used to fill them, the returned data type will be the smallest
+ width type that can represent the returned values. See
+ pandas.types.cast.maybe_downcast_itemsize for details. If NaNs are
+ present and NaN is used to fill them, then the smallest floating
+ point type (typically `np.float32`) will be used. Currently, the
+ default of `None` will fill with zeros. To do no filling of NaNs,
+ specify `fill_value=np.nan`. The default behavior of filling with
+        zeros is deprecated; using this default currently raises a
+        `FutureWarning`.
+
+ .. versionadded:: 0.20.0
Returns
-------
dummies : DataFrame or SparseDataFrame
@@ -1121,6 +1138,18 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
1 0 1 0
2 0 0 1
+ >>> pd.get_dummies(s1, fill_value=np.nan)
+ a b
+ 0 1 0
+ 1 0 1
+ 2 NaN NaN
+
+ >>> pd.get_dummies(s1, fill_value=np.nan, dummy_na=True)
+ a b NaN
+ 0 1 0 0
+ 1 0 1 0
+ 2 NaN NaN 1
+
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
'C': [1, 2, 3]})
@@ -1153,6 +1182,44 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
from pandas.tools.concat import concat
from itertools import cycle
+ # Deprecate filling NaN values with zeros, GH15926
+ # When this is finally deprecated, simply remove this block
+ # of code and change the default to np.nan in the function signature
+ # of `get_dummies`.
+ if fill_value is None:
+ warnings.warn('The default behavior of filling NaN values '
+ 'with zeros will be deprecated. Use '
+ '`df = pd.get_dummies(df, fill_value=0)` to reproduce '
+ 'this behavior', FutureWarning, 3)
+ fill_value = 0
+
+ # Infer the proper output dtype.
+ # GH15926
+ vals = data.values.ravel() if hasattr(data, 'values') else data
+ isnotfinite = []
+ for v in vals:
+ try:
+ isnotfinite.append(~np.isfinite(v))
+ except TypeError:
+ isnotfinite.append(False)
+ if np.any(isnotfinite):
+ output_dtype, fill_value = infer_dtype_from_scalar(fill_value)
+ # `maybe_downcast_itemsize` only accepts arrays, so make a one
+ # element array and then extract the value back out. GH15926
+ if 'float' in str(output_dtype) or fill_value is np.nan:
+ output_dtype, fill_value = maybe_downcast_itemsize(
+ np.array([np.float64(fill_value)]), 'float')
+ elif 'int' in str(output_dtype):
+ if fill_value >= 0:
+ fill_value = np.uint64(fill_value)
+ else:
+ fill_value = np.int64(fill_value)
+ output_dtype, fill_value \
+ = maybe_downcast_itemsize(np.array([fill_value]), 'unsigned')
+ fill_value = output_dtype(fill_value[0])
+ else:
+ output_dtype = np.uint8
+
if isinstance(data, DataFrame):
# determine columns being encoded
@@ -1197,17 +1264,22 @@ def check_len(item, name):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
- drop_first=drop_first)
+ drop_first=drop_first,
+ fill_value=fill_value,
+ output_dtype=output_dtype)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
- sparse=sparse, drop_first=drop_first)
+ sparse=sparse, drop_first=drop_first,
+ fill_value=fill_value,
+ output_dtype=output_dtype)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
- sparse=False, drop_first=False):
+ fill_value=np.nan, sparse=False, drop_first=False,
+ output_dtype=np.uint8):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
@@ -1221,17 +1293,23 @@ def get_empty_Frame(data, sparse):
else:
return SparseDataFrame(index=index, default_fill_value=0)
- # if all NaN
- if not dummy_na and len(levels) == 0:
+ # If we get all NaN and are not making a dummy col, then just return.
+    # GH15926
+ if len(levels) == 0 and not dummy_na:
return get_empty_Frame(data, sparse)
+    # Record NaN values before we munge the codes, GH15926
+ nan_codes_msk = codes == -1
+ num_orig_levels = len(levels)
codes = codes.copy()
if dummy_na:
- codes[codes == -1] = len(levels)
+ codes[nan_codes_msk] = num_orig_levels
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
- if drop_first and len(levels) == 1:
+ # test for length of levels was changed to `<=` from `==` to cover
+    # all NaN inputs, GH15926
+ if drop_first and len(levels) <= 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
@@ -1249,41 +1327,66 @@ def get_empty_Frame(data, sparse):
if sparse:
sparse_series = {}
N = len(data)
- sp_indices = [[] for _ in range(len(dummy_cols))]
- for ndx, code in enumerate(codes):
- if code == -1:
- # Blank entries if not dummy_na and code == -1, #GH4446
- continue
- sp_indices[code].append(ndx)
+ # Construct lists of inds and if the value is NaN.
+ # GH15926
+ sp_indices = [None] * len(dummy_cols)
+ sp_fill = [None] * len(dummy_cols)
+ for code in np.unique(codes[codes != -1]):
+ # Non-zero value in sparse array if value is of the level
+ # or the value is NaN and it is filled non-zero and
+ # and it is not the dummy column for NaNs.
+ # GH15926
+ sp_indices[code] = sorted(
+ np.where((codes == code) |
+ ((fill_value != 0) &
+ (code < num_orig_levels) &
+ nan_codes_msk))[0].tolist())
+
+ # Value is filled with `fill_value` if it is NaN
+ # and not in dummy col and fill value is non-zero.
+ # GH15926
+ sp_fill[code] = (nan_codes_msk[sp_indices[code]] &
+ (fill_value != 0) &
+ (code < num_orig_levels))
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
- for col, ixs in zip(dummy_cols, sp_indices):
- sarr = SparseArray(np.ones(len(ixs), dtype=np.uint8),
- sparse_index=IntIndex(N, ixs), fill_value=0,
- dtype=np.uint8)
+ sp_fill = sp_fill[1:]
+
+ for col, ixs, fill in zip(dummy_cols, sp_indices, sp_fill):
+ sarr = np.ones(len(ixs), dtype=output_dtype)
+ sarr[fill] = fill_value # Fill with `fill_value`, GH15926
+ sarr = SparseArray(
+ sarr,
+ sparse_index=IntIndex(N, ixs),
+ fill_value=0,
+ dtype=output_dtype)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
default_fill_value=0,
- dtype=np.uint8)
+ dtype=output_dtype)
return out
else:
- dummy_mat = np.eye(number_of_cols, dtype=np.uint8).take(codes, axis=0)
+ dummy_mat = np.eye(
+ number_of_cols, dtype=output_dtype).take(codes, axis=0)
- if not dummy_na:
- # reset NaN GH4446
- dummy_mat[codes == -1] = 0
+ # user specified fill value via `fill_value` GH15926
+ if dummy_na:
+ dummy_mat[nan_codes_msk, :-1] = fill_value
+ else:
+ dummy_mat[nan_codes_msk] = fill_value
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
- return DataFrame(dummy_mat, index=index, columns=dummy_cols)
+ return DataFrame(
+ dummy_mat, index=index, columns=dummy_cols, dtype=output_dtype)
def make_axis_dummies(frame, axis='minor', transform=None):
diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py
index ee255c1863b41..10ecef6fe5a48 100644
--- a/pandas/tests/test_reshape.py
+++ b/pandas/tests/test_reshape.py
@@ -281,7 +281,8 @@ def test_basic_types(self):
Series({'uint8': 8}))
result = get_dummies(s_df, sparse=self.sparse, columns=['a'])
- expected = Series({'uint8': 3, 'int64': 1, 'object': 1}).sort_values()
+ expected = Series(
+ {'uint8': 3, 'int64': 1, 'object': 1}).sort_values()
tm.assert_series_equal(result.get_dtype_counts().sort_values(),
expected)
@@ -326,6 +327,25 @@ def test_include_na(self):
dtype=np.uint8)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
+ def test_fill_value_na(self):
+ # Add `fill_value` keyword GH15926
+ s = ['a', 'b', np.nan]
+ res_na = get_dummies(
+ s, dummy_na=True, fill_value=np.nan, sparse=self.sparse)
+ exp_na = DataFrame({'a': [1, 0, np.nan],
+ 'b': [0, 1, np.nan],
+ np.nan: [0, 0, 1]},
+ dtype=np.float64)
+ exp_na = exp_na.reindex_axis(['a', 'b', np.nan], 1)
+ assert_frame_equal(res_na, exp_na)
+
+ res_just_na = get_dummies(
+ [nan], dummy_na=True, fill_value=np.nan, sparse=self.sparse)
+ exp_just_na = DataFrame([[1]],
+ columns=[np.nan],
+ dtype=np.float64)
+ assert_frame_equal(res_just_na, exp_just_na)
+
def test_unicode(self
): # See GH 6885 - get_dummies chokes on unicode values
import unicodedata
@@ -450,7 +470,7 @@ def test_dataframe_dummies_prefix_dict(self):
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self):
- df = self.df
+ df = self.df.copy()
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3, np.nan],
@@ -470,6 +490,22 @@ def test_dataframe_dummies_with_na(self):
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
+ result = get_dummies(
+ df, dummy_na=True, fill_value=np.nan, sparse=self.sparse)
+ expected = DataFrame({'C': [1, 2, 3, np.nan],
+ 'A_a': [1, 0, 1, np.nan],
+ 'A_b': [0, 1, 0, np.nan],
+ 'A_nan': [0, 0, 0, 1],
+ 'B_b': [1, 1, 0, np.nan],
+ 'B_c': [0, 0, 1, np.nan],
+ 'B_nan': [0, 0, 0, 1]},
+ dtype=np.float64)
+ cols = ['A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']
+ expected[cols] = expected[cols].astype(np.float64)
+ expected = expected[
+ ['C', 'A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']]
+ assert_frame_equal(result, expected)
+
def test_dataframe_dummies_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
@@ -551,9 +587,19 @@ def test_basic_drop_first_NA(self):
['b', nan], 1)
assert_frame_equal(res_na, exp_na)
+ res_na = get_dummies(s_NA, fill_value=np.nan, sparse=self.sparse,
+ drop_first=True)
+ exp_na = DataFrame({'b': [0, 1, np.nan]}, dtype=np.float64)
+ assert_frame_equal(res_na, exp_na)
+
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse,
drop_first=True)
- exp_just_na = DataFrame(index=np.arange(1))
+ exp_just_na = DataFrame(index=range(1))
+ assert_frame_equal(res_just_na, exp_just_na)
+
+ res_just_na = get_dummies([nan], sparse=self.sparse,
+ drop_first=True)
+ exp_just_na = DataFrame(index=range(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self):
@@ -577,7 +623,7 @@ def test_dataframe_dummies_drop_first_with_categorical(self):
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self):
- df = self.df
+ df = self.df.copy()
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse,
drop_first=True)
@@ -588,12 +634,40 @@ def test_dataframe_dummies_drop_first_with_na(self):
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
+ expected = expected[['C', 'A_b', 'A_nan', 'B_c', 'B_nan']]
+ assert_frame_equal(result, expected)
+ result = get_dummies(df, dummy_na=True, fill_value=np.nan,
+ sparse=self.sparse, drop_first=True)
+ expected = DataFrame({'C': [1, 2, 3, np.nan],
+ 'A_b': [0, 1, 0, np.nan],
+ 'A_nan': [0, 0, 0, 1],
+ 'B_c': [0, 0, 1, np.nan],
+ 'B_nan': [0, 0, 0, 1]}, dtype=np.float64)
+ cols = ['A_b', 'B_c', 'A_nan', 'B_nan']
+ expected[cols] = expected[cols].astype(np.float64)
expected = expected[['C', 'A_b', 'A_nan', 'B_c', 'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse,
drop_first=True)
+ expected = DataFrame({'C': [1, 2, 3, np.nan],
+ 'A_b': [0, 1, 0, 0],
+ 'B_c': [0, 0, 1, 0]},
+ dtype=np.float64)
+ cols = ['A_b', 'B_c']
+ expected[cols] = expected[cols].astype(np.uint8)
+ expected = expected[['C', 'A_b', 'B_c']]
+ assert_frame_equal(result, expected)
+
+ result = get_dummies(df, dummy_na=False, sparse=self.sparse,
+ drop_first=True, fill_value=np.nan)
+ expected = DataFrame({'C': [1, 2, 3, np.nan],
+ 'A_b': [0, 1, 0, np.nan],
+ 'B_c': [0, 0, 1, np.nan]},
+ dtype=np.float64)
+ cols = ['A_b', 'B_c']
+ expected[cols] = expected[cols].astype(np.float64)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
diff --git a/pandas/tests/types/test_cast.py b/pandas/tests/types/test_cast.py
index de6ef7af9d7f9..0ffdac1e878e3 100644
--- a/pandas/tests/types/test_cast.py
+++ b/pandas/tests/types/test_cast.py
@@ -16,7 +16,8 @@
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
- find_common_type)
+ find_common_type,
+ maybe_downcast_itemsize)
from pandas.types.dtypes import (CategoricalDtype,
DatetimeTZDtype, PeriodDtype)
from pandas.util import testing as tm
@@ -84,6 +85,180 @@ def test_datetime_with_timezone(self):
tm.assert_index_equal(res, exp)
+class TestMaybeDowncastItemSize(object):
+
+ @pytest.mark.parametrize(
+ "dtypec",
+ [np.float16, np.float32, np.float64])
+ def test_maybe_downcast_itemsize_float(self, dtypec):
+ # Make sure downcasting works for floats. GH15926
+
+ data = np.array([12], dtype=dtypec)
+ dtype, val = maybe_downcast_itemsize(data, 'float')
+ if np.dtype(dtypec).itemsize >= 4:
+ assert dtype == np.float32
+ else:
+ assert dtype == dtypec
+
+ @pytest.mark.parametrize(
+ "data, dtypec",
+ [(12, np.int8),
+ (12, np.int16),
+ (12, np.int32),
+ (12, np.int64),
+ (12, np.uint8),
+ (12, np.uint16),
+ (12, np.uint32),
+ (12, np.uint64),
+ (-12, np.int8),
+ (-12, np.int16),
+ (-12, np.int32),
+ (-12, np.int64)])
+ def test_maybe_downcast_itemsize_int(self, data, dtypec):
+ # Make sure downcasting works for ints. GH15926
+
+ data = np.array([data], dtype=dtypec)
+ dtype, val = maybe_downcast_itemsize(
+ data, downcast='integer')
+ assert dtype == np.int8
+ dtype, val = maybe_downcast_itemsize(
+ data, downcast='signed')
+ assert dtype == np.int8
+ dtype, val = maybe_downcast_itemsize(
+ data, downcast='unsigned')
+ if val >= 0:
+ assert dtype == np.uint8
+ else:
+ assert dtype == dtypec
+ dtype, val = maybe_downcast_itemsize(
+ data, downcast='float')
+ if np.dtype(dtypec).itemsize >= 4:
+ assert dtype == np.float32
+ else:
+ assert dtype == dtypec
+
+ @pytest.mark.parametrize(
+ "dtypec, dtypec_up",
+ [(np.uint8, np.uint16),
+ (np.uint16, np.uint32),
+ (np.uint32, np.uint64)])
+ def test_maybe_downcast_itemsize_uint_bounds(self, dtypec, dtypec_up):
+ # Make sure downcasting works at bounds for uint. GH15926
+
+ val = np.array([np.iinfo(dtypec).max], dtype=dtypec)
+
+ data = val - 1
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'unsigned')
+ assert dtype == dtypec
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'integer')
+ assert dtype == dtypec
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'signed')
+ assert dtype == dtypec
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'float')
+ if np.dtype(dtypec).itemsize >= 4:
+ assert dtype == np.float32
+ else:
+ assert dtype == dtypec
+
+ data = val.astype(dtypec_up) + 1
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'unsigned')
+ assert dtype == dtypec_up
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'integer')
+ assert dtype \
+ == getattr(np, str(np.dtype(dtypec_up)).replace('uint', 'int'))
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'signed')
+ assert dtype \
+ == getattr(np, str(np.dtype(dtypec_up)).replace('uint', 'int'))
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'float')
+ if np.dtype(dtypec_up).itemsize >= 4:
+ assert dtype == np.float32
+ else:
+ assert dtype == dtypec_up
+
+ @pytest.mark.parametrize(
+ "dtypec, dtypec_up",
+ [(np.float16, np.float32),
+ (np.float32, np.float64)])
+ def test_maybe_downcast_itemsize_float_bounds(self, dtypec, dtypec_up):
+ # Make sure downcasting works at bounds for float. GH15926
+
+ data = np.array(
+ [float(np.finfo(dtypec).min) * 2.0], dtype=dtypec_up)
+ dtype, val = maybe_downcast_itemsize(data, 'float')
+ assert dtype == dtypec_up
+
+ data = np.array(
+ [float(np.finfo(dtypec).max) * 2.0], dtype=dtypec_up)
+ dtype, _ = maybe_downcast_itemsize(data, 'float')
+ assert dtype == dtypec_up
+
+ data = np.array(
+ [float(np.finfo(dtypec).min) * 0.5], dtype=dtypec)
+ dtype, val = maybe_downcast_itemsize(data, 'float')
+ assert dtype == dtypec
+
+ data = np.array(
+ [float(np.finfo(dtypec).max) * 0.5], dtype=dtypec)
+ dtype, _ = maybe_downcast_itemsize(data, 'float')
+ assert dtype == dtypec
+
+ @pytest.mark.parametrize(
+ "dtypec, dtypec_up",
+ [(np.int8, np.int16),
+ (np.int16, np.int32),
+ (np.int32, np.int64)])
+ def test_maybe_downcast_itemsize_int_bounds(self, dtypec, dtypec_up):
+        # Make sure downcasting works at bounds for int. GH15926
+
+ val = np.array([np.iinfo(dtypec).max], dtype=dtypec)
+
+ data = val - 1
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'unsigned')
+ assert dtype \
+ == getattr(np, str(np.dtype(dtypec)).replace('int', 'uint'))
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'integer')
+ assert dtype == dtypec
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'signed')
+ assert dtype == dtypec
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'float')
+ if np.dtype(dtypec).itemsize >= 4:
+ assert dtype == np.float32
+ else:
+ assert dtype == dtypec
+
+ data = val.astype(dtypec_up) + 1
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'unsigned')
+ assert dtype \
+ == getattr(np, str(np.dtype(dtypec)).replace('int', 'uint'))
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'integer')
+ assert dtype \
+ == getattr(np, str(np.dtype(dtypec_up)).replace('uint', 'int'))
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'signed')
+ assert dtype \
+ == getattr(np, str(np.dtype(dtypec_up)).replace('uint', 'int'))
+ dtype, _ = maybe_downcast_itemsize(
+ data, 'float')
+ if np.dtype(dtypec_up).itemsize >= 4:
+ assert dtype == np.float32
+ else:
+ assert dtype == dtypec_up
+
+
class TestInferDtype(object):
def test_infer_dtype_from_scalar(self):
diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index 263d2f16a4216..4f2c6bbd23951 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -9,7 +9,7 @@
is_decimal,
is_scalar as isscalar)
-from pandas.types.cast import maybe_downcast_to_dtype
+from pandas.types.cast import maybe_downcast_itemsize
import pandas as pd
from pandas.compat import reduce
@@ -159,9 +159,6 @@ def to_numeric(arg, errors='raise', downcast=None):
3 -3.0
dtype: float64
"""
- if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'):
- raise ValueError('invalid downcasting method provided')
-
is_series = False
is_index = False
is_scalar = False
@@ -206,31 +203,7 @@ def to_numeric(arg, errors='raise', downcast=None):
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values):
- typecodes = None
-
- if downcast in ('integer', 'signed'):
- typecodes = np.typecodes['Integer']
- elif downcast == 'unsigned' and np.min(values) >= 0:
- typecodes = np.typecodes['UnsignedInteger']
- elif downcast == 'float':
- typecodes = np.typecodes['Float']
-
- # pandas support goes only to np.float32,
- # as float dtypes smaller than that are
- # extremely rare and not well supported
- float_32_char = np.dtype(np.float32).char
- float_32_ind = typecodes.index(float_32_char)
- typecodes = typecodes[float_32_ind:]
-
- if typecodes is not None:
- # from smallest to largest
- for dtype in typecodes:
- if np.dtype(dtype).itemsize <= values.dtype.itemsize:
- values = maybe_downcast_to_dtype(values, dtype)
-
- # successful conversion
- if values.dtype == dtype:
- break
+ _, values = maybe_downcast_itemsize(values, downcast)
if is_series:
return pd.Series(values, index=arg.index, name=arg.name)
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 580ce12de3333..b724a1711f448 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -90,7 +90,11 @@ def trans(x): # noqa
return result
if issubclass(dtype.type, np.floating):
- return result.astype(dtype)
+ if np.allclose(result, trans(result).astype(dtype)):
+ return result.astype(dtype)
+ else:
+ return result
+
elif is_bool_dtype(dtype) or is_integer_dtype(dtype):
# if we don't have any elements, just astype it
@@ -312,6 +316,68 @@ def maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
+def maybe_downcast_itemsize(val, downcast):
+ """maybe downcast an itemsize
+
+ Parameters
+ ----------
+    val : ndarray with a numeric dtype
+        Array whose values may be downcast.
+ downcast : str, one of {'integer', 'signed', 'unsigned', 'float'}
+ Downcast that resulting data to the smallest numerical dtype
+ possible according to the following rules:
+
+ - 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
+ - 'unsigned': smallest unsigned int dtype (min.: np.uint8)
+ - 'float': smallest float dtype (min.: np.float32)
+
+ Downcasting will only occur if the size
+ of the data's dtype is strictly larger than
+ the dtype it is to be cast to, so if none of the dtypes
+ checked satisfy that specification, no downcasting will be
+ performed on the data.
+
+ Values smaller than the minimums above will be returned as is.
+
+ .. versionadded:: 0.20.0
+ Returns
+ -------
+ dtype : a numpy dtype
+ val : the downcasted value
+ """
+
+ if downcast not in ('integer', 'signed', 'unsigned', 'float'):
+ raise ValueError('invalid downcasting method provided')
+
+ typecodes = None
+
+ if downcast in ('integer', 'signed'):
+ typecodes = np.typecodes['Integer']
+ elif downcast == 'unsigned' and np.min(val) >= 0:
+ typecodes = np.typecodes['UnsignedInteger']
+ elif downcast == 'float':
+ typecodes = np.typecodes['Float']
+
+ # pandas support goes only to np.float32,
+ # as float dtypes smaller than that are
+ # extremely rare and not well supported
+ float_32_char = np.dtype(np.float32).char
+ float_32_ind = typecodes.index(float_32_char)
+ typecodes = typecodes[float_32_ind:]
+
+ if typecodes is not None:
+ # from smallest to largest
+ for dtype in typecodes:
+ if np.dtype(dtype).itemsize <= val.dtype.itemsize:
+ val = maybe_downcast_to_dtype(val, dtype)
+
+ # successful conversion
+ if val.dtype == dtype:
+ break
+
+ return val.dtype.type, val
+
+
def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
| - [x] closes #15923
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
Closes #15923 | https://api.github.com/repos/pandas-dev/pandas/pulls/15926 | 2017-04-06T20:12:51Z | 2017-08-01T22:54:15Z | null | 2017-08-01T22:54:15Z |
ENH: Support malformed row handling in Python engine | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 5cec27c329a7f..f4676f3ad964e 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -342,11 +342,11 @@ error_bad_lines : boolean, default ``True``
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned. If
``False``, then these "bad lines" will dropped from the DataFrame that is
- returned (only valid with C parser). See :ref:`bad lines <io.bad_lines>`
+ returned. See :ref:`bad lines <io.bad_lines>`
below.
warn_bad_lines : boolean, default ``True``
If error_bad_lines is ``False``, and warn_bad_lines is ``True``, a warning for
- each "bad line" will be output (only valid with C parser).
+ each "bad line" will be output.
.. _io.dtypes:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 21b259e7663ba..28ad5bf6ea60d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -365,9 +365,10 @@ Other Enhancements
- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`)
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
- ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`)
-- ``pd.read_csv()`` will now raise a ``csv.Error`` error whenever an end-of-file character is encountered in the middle of a data row (:issue:`15913`)
- A new function has been added to a ``MultiIndex`` to facilitate :ref:`Removing Unused Levels <advanced.shown_levels>`. (:issue:`15694`)
- :func:`MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels <advanced.shown_levels>`. (:issue:`15694`)
+- ``pd.read_csv()`` will now raise a ``ParserError`` whenever any parsing error occurs (:issue:`15913`, :issue:`15925`)
+- ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a85f9cda50879..10f8c53987471 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -263,10 +263,10 @@
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will dropped from the DataFrame that is
- returned. (Only valid with C parser)
+ returned.
warn_bad_lines : boolean, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
- "bad line" will be output. (Only valid with C parser).
+ "bad line" will be output.
low_memory : boolean, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
@@ -485,8 +485,6 @@ def _read(filepath_or_buffer, kwds):
_python_unsupported = set([
'low_memory',
'buffer_lines',
- 'error_bad_lines',
- 'warn_bad_lines',
'float_precision',
])
_deprecated_args = set([
@@ -1897,6 +1895,9 @@ def __init__(self, f, **kwds):
self.usecols, _ = _validate_usecols_arg(kwds['usecols'])
self.skip_blank_lines = kwds['skip_blank_lines']
+ self.warn_bad_lines = kwds['warn_bad_lines']
+ self.error_bad_lines = kwds['error_bad_lines']
+
self.names_passed = kwds['names'] or None
self.na_filter = kwds['na_filter']
@@ -2469,16 +2470,19 @@ def _next_line(self):
next(self.data)
while True:
- orig_line = self._next_iter_line()
- line = self._check_comments([orig_line])[0]
+ orig_line = self._next_iter_line(row_num=self.pos + 1)
self.pos += 1
- if (not self.skip_blank_lines and
- (self._empty(orig_line) or line)):
- break
- elif self.skip_blank_lines:
- ret = self._check_empty([line])
- if ret:
- line = ret[0]
+
+ if orig_line is not None:
+ line = self._check_comments([orig_line])[0]
+
+ if self.skip_blank_lines:
+ ret = self._check_empty([line])
+
+ if ret:
+ line = ret[0]
+ break
+ elif self._empty(orig_line) or line:
break
# This was the first line of the file,
@@ -2491,7 +2495,28 @@ def _next_line(self):
self.buf.append(line)
return line
- def _next_iter_line(self, **kwargs):
+ def _alert_malformed(self, msg, row_num):
+ """
+ Alert a user about a malformed row.
+
+ If `self.error_bad_lines` is True, the alert will be `ParserError`.
+ If `self.warn_bad_lines` is True, the alert will be printed out.
+
+ Parameters
+ ----------
+ msg : The error message to display.
+ row_num : The row number where the parsing error occurred.
+ Because this row number is displayed, we 1-index,
+ even though we 0-index internally.
+ """
+
+ if self.error_bad_lines:
+ raise ParserError(msg)
+ elif self.warn_bad_lines:
+ base = 'Skipping line {row_num}: '.format(row_num=row_num)
+ sys.stderr.write(base + msg + '\n')
+
+ def _next_iter_line(self, row_num):
"""
Wrapper around iterating through `self.data` (CSV source).
@@ -2501,32 +2526,34 @@ def _next_iter_line(self, **kwargs):
Parameters
----------
- kwargs : Keyword arguments used to customize the error message.
+ row_num : The row number of the line being parsed.
"""
try:
return next(self.data)
except csv.Error as e:
- msg = str(e)
-
- if 'NULL byte' in msg:
- msg = ('NULL byte detected. This byte '
- 'cannot be processed in Python\'s '
- 'native csv library at the moment, '
- 'so please pass in engine=\'c\' instead')
- elif 'newline inside string' in msg:
- msg = ('EOF inside string starting with '
- 'line ' + str(kwargs['row_num']))
-
- if self.skipfooter > 0:
- reason = ('Error could possibly be due to '
- 'parsing errors in the skipped footer rows '
- '(the skipfooter keyword is only applied '
- 'after Python\'s csv library has parsed '
- 'all rows).')
- msg += '. ' + reason
-
- raise csv.Error(msg)
+ if self.warn_bad_lines or self.error_bad_lines:
+ msg = str(e)
+
+ if 'NULL byte' in msg:
+ msg = ('NULL byte detected. This byte '
+ 'cannot be processed in Python\'s '
+ 'native csv library at the moment, '
+ 'so please pass in engine=\'c\' instead')
+ elif 'newline inside string' in msg:
+ msg = ('EOF inside string starting with '
+ 'line ' + str(row_num))
+
+ if self.skipfooter > 0:
+ reason = ('Error could possibly be due to '
+ 'parsing errors in the skipped footer rows '
+ '(the skipfooter keyword is only applied '
+ 'after Python\'s csv library has parsed '
+ 'all rows).')
+ msg += '. ' + reason
+
+ self._alert_malformed(msg, row_num)
+ return None
def _check_comments(self, lines):
if self.comment is None:
@@ -2657,42 +2684,57 @@ def _get_index_name(self, columns):
return index_name, orig_names, columns
def _rows_to_cols(self, content):
+ if self.skipfooter < 0:
+ raise ValueError('skip footer cannot be negative')
+
col_len = self.num_original_columns
if self._implicit_index:
col_len += len(self.index_col)
- # see gh-13320
- zipped_content = list(lib.to_object_array(
- content, min_width=col_len).T)
- zip_len = len(zipped_content)
-
- if self.skipfooter < 0:
- raise ValueError('skip footer cannot be negative')
+ max_len = max([len(row) for row in content])
- # Loop through rows to verify lengths are correct.
- if (col_len != zip_len and
+ # Check that there are no rows with too many
+ # elements in their row (rows with too few
+ # elements are padded with NaN).
+ if (max_len > col_len and
self.index_col is not False and
self.usecols is None):
- i = 0
- for (i, l) in enumerate(content):
- if len(l) != col_len:
- break
- footers = 0
- if self.skipfooter:
- footers = self.skipfooter
+ footers = self.skipfooter if self.skipfooter else 0
+ bad_lines = []
- row_num = self.pos - (len(content) - i + footers)
+ iter_content = enumerate(content)
+ content_len = len(content)
+ content = []
- msg = ('Expected %d fields in line %d, saw %d' %
- (col_len, row_num + 1, zip_len))
- if len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE:
- # see gh-13374
- reason = ('Error could possibly be due to quotes being '
- 'ignored when a multi-char delimiter is used.')
- msg += '. ' + reason
- raise ValueError(msg)
+ for (i, l) in iter_content:
+ actual_len = len(l)
+
+ if actual_len > col_len:
+ if self.error_bad_lines or self.warn_bad_lines:
+ row_num = self.pos - (content_len - i + footers)
+ bad_lines.append((row_num, actual_len))
+
+ if self.error_bad_lines:
+ break
+ else:
+ content.append(l)
+
+ for row_num, actual_len in bad_lines:
+ msg = ('Expected %d fields in line %d, saw %d' %
+ (col_len, row_num + 1, actual_len))
+ if len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE:
+ # see gh-13374
+ reason = ('Error could possibly be due to quotes being '
+ 'ignored when a multi-char delimiter is used.')
+ msg += '. ' + reason
+
+ self._alert_malformed(msg, row_num + 1)
+
+ # see gh-13320
+ zipped_content = list(lib.to_object_array(
+ content, min_width=col_len).T)
if self.usecols:
if self._implicit_index:
@@ -2750,10 +2792,12 @@ def _get_lines(self, rows=None):
while True:
new_row = self._next_iter_line(
- row_num=self.pos + rows)
- new_rows.append(new_row)
+ row_num=self.pos + rows + 1)
rows += 1
+ if new_row is not None:
+ new_rows.append(new_row)
+
except StopIteration:
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 36d5f2dd5274b..ee0f00506cef3 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -19,7 +19,7 @@
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
-from pandas.errors import DtypeWarning, EmptyDataError
+from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
@@ -1569,7 +1569,7 @@ def test_null_byte_char(self):
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
- with tm.assertRaisesRegexp(csv.Error, msg):
+ with tm.assertRaisesRegexp(ParserError, msg):
self.read_csv(StringIO(data), names=cols)
def test_utf8_bom(self):
@@ -1695,3 +1695,41 @@ class InvalidBuffer(object):
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(mock.Mock())
+
+ def test_skip_bad_lines(self):
+ # see gh-15925
+ data = 'a\n1\n1,2,3\n4\n5,6,7'
+
+ with tm.assertRaises(ParserError):
+ self.read_csv(StringIO(data))
+
+ with tm.assertRaises(ParserError):
+ self.read_csv(StringIO(data), error_bad_lines=True)
+
+ stderr = sys.stderr
+ expected = DataFrame({'a': [1, 4]})
+
+ sys.stderr = StringIO()
+ try:
+ out = self.read_csv(StringIO(data),
+ error_bad_lines=False,
+ warn_bad_lines=False)
+ tm.assert_frame_equal(out, expected)
+
+ val = sys.stderr.getvalue()
+ self.assertEqual(val, '')
+ finally:
+ sys.stderr = stderr
+
+ sys.stderr = StringIO()
+ try:
+ out = self.read_csv(StringIO(data),
+ error_bad_lines=False,
+ warn_bad_lines=True)
+ tm.assert_frame_equal(out, expected)
+
+ val = sys.stderr.getvalue()
+ self.assertTrue('Skipping line 3' in val)
+ self.assertTrue('Skipping line 5' in val)
+ finally:
+ sys.stderr = stderr
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index 36356315419c4..9a1eb94270e28 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -14,6 +14,7 @@
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas import compat
+from pandas.errors import ParserError
from pandas.compat import StringIO, BytesIO, u
@@ -213,13 +214,13 @@ def test_multi_char_sep_quotes(self):
data = 'a,,b\n1,,a\n2,,"2,,b"'
msg = 'ignored when a multi-char delimiter is used'
- with tm.assertRaisesRegexp(ValueError, msg):
+ with tm.assertRaisesRegexp(ParserError, msg):
self.read_csv(StringIO(data), sep=',,')
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with tm.assertRaises(AssertionError):
- with tm.assertRaisesRegexp(ValueError, msg):
+ with tm.assertRaisesRegexp(ParserError, msg):
self.read_csv(StringIO(data), sep=',,',
quoting=csv.QUOTE_NONE)
@@ -231,11 +232,11 @@ def test_skipfooter_bad_row(self):
for data in ('a\n1\n"b"a',
'a,b,c\ncat,foo,bar\ndog,foo,"baz'):
- with tm.assertRaisesRegexp(csv.Error, msg):
+ with tm.assertRaisesRegexp(ParserError, msg):
self.read_csv(StringIO(data), skipfooter=1)
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with tm.assertRaises(AssertionError):
- with tm.assertRaisesRegexp(csv.Error, msg):
+ with tm.assertRaisesRegexp(ParserError, msg):
self.read_csv(StringIO(data))
| Support `warn_bad_lines` and `error_bad_lines` for the Python engine.
xref #12686 (master tracker)
Inspired by <a href="https://github.com/pandas-dev/pandas/issues/15910#issuecomment-291998838">#15910 (comment)</a>
In addition, the Python parser now raises `pandas.error.ParserError`, which is in line with what the C engine would do. | https://api.github.com/repos/pandas-dev/pandas/pulls/15925 | 2017-04-06T18:28:34Z | 2017-04-07T19:47:30Z | 2017-04-07T19:47:30Z | 2017-04-07T19:58:05Z |
BUG: use entire size of DatetimeTZBlock when coercing result (#15855) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index cb9e2496757ef..f9b6cebb26693 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -999,6 +999,7 @@ Conversion
- Bug in ``DataFrame.fillna()`` where the argument ``downcast`` was ignored when fillna value was of type ``dict`` (:issue:`15277`)
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
- Bug in ``DataFrame`` construction with nulls and datetimes in a list-like (:issue:`15869`)
+- Bug in ``DataFrame.fillna()`` with tz-aware datetimes (:issue:`15855`)
Indexing
^^^^^^^^
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 8db801f8e7212..57361886eab8c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2475,7 +2475,7 @@ def _try_coerce_result(self, result):
if isinstance(result, np.ndarray):
# allow passing of > 1dim if its trivial
if result.ndim > 1:
- result = result.reshape(len(result))
+ result = result.reshape(np.prod(result.shape))
result = self.values._shallow_copy(result)
return result
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 93c3ba78a0abf..eacf032bbcc85 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -257,6 +257,20 @@ def test_fillna(self):
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
+ # with timezone
+ # GH 15855
+ df = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
+ pd.NaT]})
+ exp = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
+ pd.Timestamp('2012-11-11 00:00:00+01:00')]})
+ assert_frame_equal(df.fillna(method='pad'), exp)
+
+ df = pd.DataFrame({'A': [pd.NaT,
+ pd.Timestamp('2012-11-11 00:00:00+01:00')]})
+ exp = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
+ pd.Timestamp('2012-11-11 00:00:00+01:00')]})
+ assert_frame_equal(df.fillna(method='bfill'), exp)
+
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 7174283494fe7..ea49abeee21c5 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -258,6 +258,18 @@ def test_datetime64_tz_fillna(self):
self.assert_series_equal(expected, result)
self.assert_series_equal(pd.isnull(s), null_loc)
+ # with timezone
+ # GH 15855
+ df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
+ exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
+ pd.Timestamp('2012-11-11 00:00:00+01:00')])
+ assert_series_equal(df.fillna(method='pad'), exp)
+
+ df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
+ exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
+ pd.Timestamp('2012-11-11 00:00:00+01:00')])
+ assert_series_equal(df.fillna(method='bfill'), exp)
+
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
| - [x] closes #15855
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15924 | 2017-04-06T17:41:46Z | 2017-04-07T20:37:41Z | 2017-04-07T20:37:41Z | 2017-04-07T21:36:37Z |
TST: skip decimal conversion tests on 32-bit | diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 8fc8ecbdf8abc..a24e8cdaf0273 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
-from pandas.compat import range, lrange, StringIO, OrderedDict
+from pandas.compat import (range, lrange, StringIO,
+ OrderedDict, is_platform_32bit)
import os
import numpy as np
@@ -380,6 +381,8 @@ def test_frame_from_json_nones(self):
unser = read_json(df.to_json(), dtype=False)
self.assertTrue(np.isnan(unser[2][0]))
+ @pytest.mark.skipif(is_platform_32bit(),
+ reason="not compliant on 32-bit, xref #15865")
def test_frame_to_json_float_precision(self):
df = pd.DataFrame([dict(a_float=0.95)])
encoded = df.to_json(double_precision=1)
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index c2cbbe1ca65ab..dcfa939f84d7e 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -8,8 +8,6 @@
import simplejson as json
import math
import pytest
-import platform
-import sys
import time
import datetime
import calendar
@@ -25,18 +23,14 @@
import pandas.util.testing as tm
-def _skip_if_python_ver(skip_major, skip_minor=None):
- major, minor = sys.version_info[:2]
- if major == skip_major and (skip_minor is None or minor == skip_minor):
- pytest.skip("skipping Python version %d.%d" % (major, minor))
-
-
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
class UltraJSONTests(TestCase):
+ @pytest.mark.skipif(compat.is_platform_32bit(),
+ reason="not compliant on 32-bit, xref #15865")
def test_encodeDecimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
@@ -153,10 +147,9 @@ def test_decimalDecodeTestPrecise(self):
decoded = ujson.decode(encoded, precise_float=True)
self.assertEqual(sut, decoded)
+ @pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
+ reason="buggy on win-64 for py2")
def test_encodeDoubleTinyExponential(self):
- if compat.is_platform_windows() and not compat.PY3:
- pytest.skip("buggy on win-64 for py2")
-
num = 1e-40
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = 1e-100
@@ -275,8 +268,6 @@ def test_encodeUnicodeConversion2(self):
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicodeSurrogatePair(self):
- _skip_if_python_ver(2, 5)
- _skip_if_python_ver(2, 6)
input = "\xf0\x90\x8d\x86"
enc = ujson.encode(input)
dec = ujson.decode(enc)
@@ -285,8 +276,6 @@ def test_encodeUnicodeSurrogatePair(self):
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8(self):
- _skip_if_python_ver(2, 5)
- _skip_if_python_ver(2, 6)
input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
@@ -295,8 +284,6 @@ def test_encodeUnicode4BytesUTF8(self):
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8Highest(self):
- _skip_if_python_ver(2, 5)
- _skip_if_python_ver(2, 6)
input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(input)
@@ -462,7 +449,6 @@ def test_datetime_units(self):
self.assertRaises(ValueError, ujson.encode, val, date_unit='foo')
def test_encodeToUTF8(self):
- _skip_if_python_ver(2, 5)
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input, ensure_ascii=False)
dec = ujson.decode(enc)
@@ -696,8 +682,8 @@ def test_decodeNumericIntNeg(self):
input = "-31337"
self.assertEqual(-31337, ujson.decode(input))
+ @pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encodeUnicode4BytesUTF8Fail(self):
- _skip_if_python_ver(3)
input = "\xfd\xbf\xbf\xbf\xbf\xbf"
try:
enc = ujson.encode(input) # noqa
@@ -1029,7 +1015,7 @@ def testIntMax(self):
num = np.uint32(np.iinfo(np.uint32).max)
self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
- if platform.architecture()[0] != '32bit':
+ if not compat.is_platform_32bit():
num = np.int64(np.iinfo(np.int64).max)
self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
| xref #15865
| https://api.github.com/repos/pandas-dev/pandas/pulls/15922 | 2017-04-06T15:01:58Z | 2017-04-06T16:41:07Z | 2017-04-06T16:41:07Z | 2017-04-06T16:41:08Z |
DOC: Added statsmodels to show_versions | diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
index ca75d4d02e927..adc97f4780b87 100644
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -69,6 +69,7 @@ def show_versions(as_json=False):
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
+ ("statsmodels", lambda mod: mod.version.version),
("xarray", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
| xref https://github.com/statsmodels/statsmodels/issues/3580 | https://api.github.com/repos/pandas-dev/pandas/pulls/15921 | 2017-04-06T12:10:35Z | 2017-04-06T13:51:17Z | null | 2017-05-29T20:30:57Z |
DOC: timeseries.rst floating point precision (#15817) | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 44c200e13b877..45fe271e9de9d 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -265,17 +265,23 @@ Typical epoch stored units
pd.to_datetime([1349720105100, 1349720105200, 1349720105300,
1349720105400, 1349720105500 ], unit='ms')
-These *work*, but the results may be unexpected.
+.. note::
-.. ipython:: python
+ Epoch times will be rounded to the nearest nanosecond.
- pd.to_datetime([1])
+.. warning::
- pd.to_datetime([1, 3.14], unit='s')
+ Conversion of float epoch times can lead to inaccurate and unexpected results.
+ :ref:`Python floats <python:tut-fp-issues>` have about 15 digits precision in
+ decimal. Rounding during conversion from float to high precision ``Timestamp`` is
+ unavoidable. The only way to achieve exact precision is to use a fixed-width
+ types (e.g. an int64).
-.. note::
+ .. ipython:: python
- Epoch times will be rounded to the nearest nanosecond.
+ 1490195805.433502912
+ pd.to_datetime([1490195805.433, 1490195805.433502912], unit='s')
+ pd.to_datetime(1490195805433502912, unit='ns')
.. _timeseries.origin:
@@ -300,6 +306,16 @@ Commonly called 'unix epoch' or POSIX time.
pd.to_datetime([1, 2, 3], unit='D')
+.. note::
+
+ Without specifying origin the following examples still evaluate, but the results
+ may be unexpected.
+
+ .. ipython:: python
+
+ pd.to_datetime([1])
+ pd.to_datetime([1, 3.14], unit='s')
+
.. _timeseries.daterange:
Generating Ranges of Timestamps
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index d0f1671f9e309..9d5821d859187 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -315,6 +315,16 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
+ Using a unix epoch time
+
+ >>> pd.to_datetime(1490195805, unit='s')
+ Timestamp('2017-03-22 15:16:45')
+ >>> pd.to_datetime(1490195805433502912, unit='ns')
+ Timestamp('2017-03-22 15:16:45.433502912')
+
+ .. warning:: For float arg, precision rounding might happen. To prevent
+ unexpected behavior use a fixed-width exact type.
+
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
| - [x] closes #15817 | https://api.github.com/repos/pandas-dev/pandas/pulls/15919 | 2017-04-06T09:48:30Z | 2017-04-08T21:54:05Z | null | 2017-04-10T06:45:22Z |
Fix a typo | diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 737141f11d7d1..7f2f0cf4943b8 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -571,7 +571,7 @@ def _fill_mi_header(row, control_row):
----------
row : list
List of items in a single row.
- constrol_row : list of boolean
+ control_row : list of boolean
Helps to determine if particular column is in same parent index as the
previous value. Used to stop propagation of empty cells between
different indexes.
| Simple fix. | https://api.github.com/repos/pandas-dev/pandas/pulls/15918 | 2017-04-06T09:36:14Z | 2017-04-06T11:34:25Z | 2017-04-06T11:34:25Z | 2017-04-06T11:34:40Z |
BLD: update merge script to update on github | diff --git a/scripts/merge-py.py b/scripts/merge-pr.py
similarity index 83%
rename from scripts/merge-py.py
rename to scripts/merge-pr.py
index b9350f8feceb8..1fc4eef3d0583 100755
--- a/scripts/merge-py.py
+++ b/scripts/merge-pr.py
@@ -99,6 +99,14 @@ def continue_maybe(prompt):
fail("Okay, exiting")
+def continue_maybe2(prompt):
+ result = input("\n%s (y/n): " % prompt)
+ if result.lower() != "y":
+ return False
+ else:
+ return True
+
+
original_head = run_cmd("git rev-parse HEAD")[:8]
@@ -193,6 +201,40 @@ def merge_pr(pr_num, target_ref):
return merge_hash
+def update_pr(pr_num, user_login, base_ref):
+
+ pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
+
+ run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num,
+ pr_branch_name))
+ run_cmd("git checkout %s" % pr_branch_name)
+
+ continue_maybe("Update ready (local ref %s)? Push to %s/%s?" % (
+ pr_branch_name, user_login, base_ref))
+
+ push_user_remote = "https://github.com/%s/pandas.git" % user_login
+
+ try:
+ run_cmd('git push %s %s:%s' % (push_user_remote, pr_branch_name,
+ base_ref))
+ except Exception as e:
+
+ if continue_maybe2("Force push?"):
+ try:
+ run_cmd(
+ 'git push -f %s %s:%s' % (push_user_remote, pr_branch_name,
+ base_ref))
+ except Exception as e:
+ fail("Exception while pushing: %s" % e)
+ clean_up()
+ else:
+ fail("Exception while pushing: %s" % e)
+ clean_up()
+
+ clean_up()
+ print("Pull request #%s updated!" % pr_num)
+
+
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
@@ -257,8 +299,17 @@ def fix_version_from_branch(branch, versions):
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s"
% (title, pr_repo_desc, target_ref, url))
-continue_maybe("Proceed with merging pull request #%s?" % pr_num)
+
+
merged_refs = [target_ref]
-merge_hash = merge_pr(pr_num, target_ref)
+print("\nProceed with updating or merging pull request #%s?" % pr_num)
+update = input("Update PR and push to remote (r), merge locally (l), "
+ "or do nothing (n) ?")
+update = update.lower()
+
+if update == 'r':
+ merge_hash = update_pr(pr_num, user_login, base_ref)
+elif update == 'l':
+ merge_hash = merge_pr(pr_num, target_ref)
| @jreback @TomAugspurger This adds the ability to checkout a PR, do a small fixup or rebase, and push changes to github.
I now added it as an additional question whether to update or merge, but can also make to separate scripts of it for convenience. | https://api.github.com/repos/pandas-dev/pandas/pulls/15917 | 2017-04-06T08:37:15Z | 2017-04-07T18:58:21Z | 2017-04-07T18:58:21Z | 2017-04-07T18:58:29Z |
DOC: Fix a typo in indexing.rst | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index bc8997b313053..f988fb7cd6806 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -69,7 +69,7 @@ Different Choices for Indexing
.. versionadded:: 0.11.0
Object selection has had a number of user-requested additions in order to
-support more explicit location based indexing. pandas now supports three types
+support more explicit location based indexing. Pandas now supports three types
of multi-axis indexing.
- ``.loc`` is primarily label based, but may also be used with a boolean array. ``.loc`` will raise ``KeyError`` when the items are not found. Allowed inputs are:
@@ -401,7 +401,7 @@ Selection By Position
This is sometimes called ``chained assignment`` and should be avoided.
See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`
-pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely python and numpy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise a ``IndexError``.
+Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely python and numpy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``.
The ``.iloc`` attribute is the primary access method. The following are valid inputs:
| https://api.github.com/repos/pandas-dev/pandas/pulls/15916 | 2017-04-06T07:40:54Z | 2017-04-06T11:35:59Z | 2017-04-06T11:35:59Z | 2017-04-06T12:52:41Z | |
DOC: Fix a typo in travis.yml | diff --git a/.travis.yml b/.travis.yml
index d864b755541de..e5e05ed26da56 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,7 +7,7 @@ python: 3.5
# set NOCACHE-true
# To delete caches go to https://travis-ci.org/OWNER/REPOSITORY/caches or run
# travis cache --delete inside the project directory from the travis command line client
-# The cash directories will be deleted if anything in ci/ changes in a commit
+# The cache directories will be deleted if anything in ci/ changes in a commit
cache:
ccache: true
directories:
| https://api.github.com/repos/pandas-dev/pandas/pulls/15915 | 2017-04-06T06:55:40Z | 2017-04-06T07:07:58Z | 2017-04-06T07:07:58Z | 2017-04-06T07:29:15Z | |
BUG: Standardize malformed row handling in Python engine | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index ad190671cbbdc..462341d3d692d 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -365,6 +365,7 @@ Other Enhancements
- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`)
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
- ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`)
+- ``pd.read_csv()`` will now raise a ``csv.Error`` error whenever an end-of-file character is encountered in the middle of a data row (:issue:`15913`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
@@ -1034,7 +1035,8 @@ I/O
- Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`)
- Bug in ``pd.read_csv()`` when an index was specified and no values were specified as null values (:issue:`15835`)
- Bug in ``pd.read_csv()`` in which certain invalid file objects caused the Python interpreter to crash (:issue:`15337`)
-- Added checks in ``pd.read_csv()`` ensuring that values for ``nrows`` and ``chunksize`` are valid (:issue:`15767`)
+- Bug in ``pd.read_csv()`` in which invalid values for ``nrows`` and ``chunksize`` were allowed (:issue:`15767`)
+- Bug in ``pd.read_csv()`` for the Python engine in which unhelpful error messages were being raised when parsing errors occurred (:issue:`15910`)
- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
- Bug in ``.to_json()`` causing single byte ascii characters to be expanded to four byte unicode (:issue:`15344`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index b624d2cc0c7ad..a85f9cda50879 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2469,26 +2469,7 @@ def _next_line(self):
next(self.data)
while True:
- try:
- orig_line = next(self.data)
- except csv.Error as e:
- msg = str(e)
-
- if 'NULL byte' in str(e):
- msg = ('NULL byte detected. This byte '
- 'cannot be processed in Python\'s '
- 'native csv library at the moment, '
- 'so please pass in engine=\'c\' instead')
-
- if self.skipfooter > 0:
- reason = ('Error could possibly be due to '
- 'parsing errors in the skipped footer rows '
- '(the skipfooter keyword is only applied '
- 'after Python\'s csv library has parsed '
- 'all rows).')
- msg += '. ' + reason
-
- raise csv.Error(msg)
+ orig_line = self._next_iter_line()
line = self._check_comments([orig_line])[0]
self.pos += 1
if (not self.skip_blank_lines and
@@ -2510,6 +2491,43 @@ def _next_line(self):
self.buf.append(line)
return line
+ def _next_iter_line(self, **kwargs):
+ """
+ Wrapper around iterating through `self.data` (CSV source).
+
+ When a CSV error is raised, we check for specific
+ error messages that allow us to customize the
+ error message displayed to the user.
+
+ Parameters
+ ----------
+ kwargs : Keyword arguments used to customize the error message.
+ """
+
+ try:
+ return next(self.data)
+ except csv.Error as e:
+ msg = str(e)
+
+ if 'NULL byte' in msg:
+ msg = ('NULL byte detected. This byte '
+ 'cannot be processed in Python\'s '
+ 'native csv library at the moment, '
+ 'so please pass in engine=\'c\' instead')
+ elif 'newline inside string' in msg:
+ msg = ('EOF inside string starting with '
+ 'line ' + str(kwargs['row_num']))
+
+ if self.skipfooter > 0:
+ reason = ('Error could possibly be due to '
+ 'parsing errors in the skipped footer rows '
+ '(the skipfooter keyword is only applied '
+ 'after Python\'s csv library has parsed '
+ 'all rows).')
+ msg += '. ' + reason
+
+ raise csv.Error(msg)
+
def _check_comments(self, lines):
if self.comment is None:
return lines
@@ -2688,7 +2706,6 @@ def _rows_to_cols(self, content):
return zipped_content
def _get_lines(self, rows=None):
- source = self.data
lines = self.buf
new_rows = None
@@ -2703,14 +2720,14 @@ def _get_lines(self, rows=None):
rows -= len(self.buf)
if new_rows is None:
- if isinstance(source, list):
- if self.pos > len(source):
+ if isinstance(self.data, list):
+ if self.pos > len(self.data):
raise StopIteration
if rows is None:
- new_rows = source[self.pos:]
- new_pos = len(source)
+ new_rows = self.data[self.pos:]
+ new_pos = len(self.data)
else:
- new_rows = source[self.pos:self.pos + rows]
+ new_rows = self.data[self.pos:self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
@@ -2726,21 +2743,17 @@ def _get_lines(self, rows=None):
try:
if rows is not None:
for _ in range(rows):
- new_rows.append(next(source))
+ new_rows.append(next(self.data))
lines.extend(new_rows)
else:
rows = 0
+
while True:
- try:
- new_rows.append(next(source))
- rows += 1
- except csv.Error as inst:
- if 'newline inside string' in str(inst):
- row_num = str(self.pos + rows)
- msg = ('EOF inside string starting with '
- 'line ' + row_num)
- raise Exception(msg)
- raise
+ new_row = self._next_iter_line(
+ row_num=self.pos + rows)
+ new_rows.append(new_row)
+ rows += 1
+
except StopIteration:
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py
index ffbd904843bfc..837b7a7922d75 100644
--- a/pandas/tests/io/parser/c_parser_only.py
+++ b/pandas/tests/io/parser/c_parser_only.py
@@ -408,3 +408,12 @@ def test_large_difference_in_columns(self):
expected = DataFrame([row.split(',')[0] for row in rows])
tm.assert_frame_equal(result, expected)
+
+ def test_data_after_quote(self):
+ # see gh-15910
+
+ data = 'a\n1\n"b"a'
+ result = self.read_csv(StringIO(data))
+ expected = DataFrame({'a': ['1', 'ba']})
+
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index bd76070933c47..36356315419c4 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -225,15 +225,17 @@ def test_multi_char_sep_quotes(self):
def test_skipfooter_bad_row(self):
# see gh-13879
+ # see gh-15910
- data = 'a,b,c\ncat,foo,bar\ndog,foo,"baz'
msg = 'parsing errors in the skipped footer rows'
- with tm.assertRaisesRegexp(csv.Error, msg):
- self.read_csv(StringIO(data), skipfooter=1)
-
- # We expect no match, so there should be an assertion
- # error out of the inner context manager.
- with tm.assertRaises(AssertionError):
+ for data in ('a\n1\n"b"a',
+ 'a,b,c\ncat,foo,bar\ndog,foo,"baz'):
with tm.assertRaisesRegexp(csv.Error, msg):
- self.read_csv(StringIO(data))
+ self.read_csv(StringIO(data), skipfooter=1)
+
+ # We expect no match, so there should be an assertion
+ # error out of the inner context manager.
+ with tm.assertRaises(AssertionError):
+ with tm.assertRaisesRegexp(csv.Error, msg):
+ self.read_csv(StringIO(data))
| Wrap `next(source)` when `source` is a file-buffer object to allow for more uniform error message displaying in the Python engine. Closes #15910. | https://api.github.com/repos/pandas-dev/pandas/pulls/15913 | 2017-04-06T01:53:36Z | 2017-04-06T13:31:32Z | 2017-04-06T13:31:32Z | 2017-04-06T14:53:25Z |
DEPR: correct locations to access public json/parser objects in depr message | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 83ad85e3e292b..529750cd97076 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -60,11 +60,15 @@
# extension module deprecations
from pandas.util.depr_module import _DeprecatedModule
-json = _DeprecatedModule(deprmod='pandas.json', deprmodto='pandas.io.json.libjson')
-parser = _DeprecatedModule(deprmod='pandas.parser', deprmodto='pandas.io.libparsers')
+json = _DeprecatedModule(deprmod='pandas.json',
+ moved={'dumps': 'pandas.io.json.dumps',
+ 'loads': 'pandas.io.json.loads'})
+parser = _DeprecatedModule(deprmod='pandas.parser',
+ removals=['na_values'],
+ moved={'CParserError': 'pandas.errors.ParserError'})
lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto='pandas._libs.lib',
moved={'infer_dtype': 'pandas.api.lib.infer_dtype'})
-tslib = _DeprecatedModule(deprmod='pandas.tslib', deprmodto='pandas._libs.tslib',
+tslib = _DeprecatedModule(deprmod='pandas.tslib',
moved={'Timestamp': 'pandas.Timestamp',
'Timedelta': 'pandas.Timedelta',
'NaT': 'pandas.NaT',
diff --git a/pandas/tslib.py b/pandas/tslib.py
index 3d96dc496c0de..f7d99538c2ea2 100644
--- a/pandas/tslib.py
+++ b/pandas/tslib.py
@@ -2,7 +2,6 @@
import warnings
warnings.warn("The pandas.tslib module is deprecated and will be "
- "removed in a future version. Please import from "
- "the pandas or pandas.errors instead", FutureWarning, stacklevel=2)
+ "removed in a future version.", FutureWarning, stacklevel=2)
from pandas._libs.tslib import (Timestamp, Timedelta,
NaT, OutOfBoundsDatetime)
diff --git a/pandas/util/depr_module.py b/pandas/util/depr_module.py
index 0885c81ce2757..1f428198c19f3 100644
--- a/pandas/util/depr_module.py
+++ b/pandas/util/depr_module.py
@@ -68,7 +68,7 @@ def __getattr__(self, name):
elif self.moved is not None and name in self.moved:
warnings.warn(
"{deprmod} is deprecated and will be removed in "
- "a future version.\nYou can access {name} in {moved}".format(
+ "a future version.\nYou can access {name} as {moved}".format(
deprmod=self.deprmod,
name=name,
moved=self.moved[name]),
|
Another one remaining is `pd.parser.na_values`
```
In [2]: pd.parser.na_values
/home/joris/miniconda3/envs/dev/bin/ipython:1: FutureWarning: pandas.parser.na_values is deprecated.
Please use pandas.io.libparsers.na_values instead.
#!/home/joris/miniconda3/envs/dev/bin/python
Out[2]:
{numpy.bool_: 255,
dtype('bool'): 255,
numpy.uint64: 18446744073709551615,
numpy.int64: -9223372036854775808,
dtype('int16'): -32768,
dtype('uint64'): 18446744073709551615,
numpy.int8: -128,
dtype('int32'): -2147483648,
numpy.uint8: 255,
dtype('int8'): -128,
numpy.object_: nan,
numpy.float64: nan,
numpy.uint32: 4294967295,
dtype('O'): nan,
numpy.int32: -2147483648,
dtype('uint32'): 4294967295,
dtype('uint16'): 65535,
dtype('uint8'): 255,
dtype('int64'): -9223372036854775808,
dtype('float64'): nan,
numpy.uint16: 65535,
numpy.int16: -32768}
```
I don't think we should refer to `libparsers`, but for now it is not imported in `pandas.io.parsers` as alternative. I could add it there, but the question is maybe whether this is actually useful for users? Maybe we should say that it will be removed without giving alternative? | https://api.github.com/repos/pandas-dev/pandas/pulls/15909 | 2017-04-05T19:44:53Z | 2017-04-07T19:04:32Z | 2017-04-07T19:04:32Z | 2017-04-09T08:50:21Z |
WIP: add pd.read_ipc and DataFrame.to_ipc to provide efficient serialization to/from memory | diff --git a/pandas/io/ipc.py b/pandas/io/ipc.py
new file mode 100644
index 0000000000000..3aeee40ba8a3e
--- /dev/null
+++ b/pandas/io/ipc.py
@@ -0,0 +1,129 @@
+""" ipc format compat """
+
+from pandas.types.generic import ABCIndexClass, ABCSeries, ABCDataFrame
+from pandas.compat import string_types, cPickle
+from pandas._libs.lib import is_string_array, is_unicode_array
+from pandas.types.common import is_object_dtype
+
+
+def _try_import():
+ # since pandas
+ # we need to import on first use
+
+ try:
+ import pyarrow
+ except ImportError:
+
+ # give a nice error message
+ raise ImportError("the pyarrow is not installed\n"
+ "you can install via conda\n"
+ "conda install pyarrow -c conda-forge")
+
+ return pyarrow
+
+
+def to_ipc(obj, engine='infer'):
+ """
+ Write a DataFrame to the ipc format
+
+ Parameters
+ ----------
+ obj : Index, Series, DataFrame
+ engine : string, optional
+ string to indicate the engine {'infer', 'pickle', 'pyarrow'}
+ 'infer' will pick an engine based upon performance considerations
+
+ Returns
+ -------
+ dict-of-metadata and bytes
+
+ """
+ if engine == 'pickle':
+ return _to_pickle(obj)
+ elif engine == 'pyarrow':
+ try:
+ return _to_pyarrow(obj)
+ except: # pragma
+ pass
+
+ if isinstance(obj, (ABCIndexClass, ABCSeries)):
+ return _to_pickle(obj)
+ elif isinstance(obj, ABCDataFrame):
+
+ # decide quickly if we can serialize using
+ # pyarrow or pickle
+
+ # smallish, just pickle
+ if len(obj) <= 100000:
+ return _to_pickle(obj)
+
+ # check our object columns
+ for c, col in obj.iteritems():
+ if not is_object_dtype(col):
+ continue
+
+ # if we discover we have actual python objects
+ # embedded with strings/unicode, then pickle
+ values = col.values
+ if isinstance(values[0], string_types):
+ if not is_string_array(values):
+ return _to_pickle(obj)
+ else:
+ if not is_unicode_array(values):
+ return _to_pickle(obj)
+
+ return _to_pyarrow(obj)
+
+ raise ValueError("ipc only supports IO with Index,"
+ "Series, DataFrames, a {} was "
+ "passed".format(type(obj)))
+
+
+def _to_pyarrow(df):
+ """ helper routine to return via pyarrow """
+ pyarrow = _try_import()
+ d = pyarrow.write_ipc(df)
+ d['engine'] = 'pyarrow'
+ return d
+
+
+def _to_pickle(obj):
+ """ helper routine to return a pickle of an object """
+ d = {'engine': 'pickle', 'data': cPickle.dumps(obj)}
+ return d
+
+
+def read_ipc(db):
+ """
+ Load a pyarrow ipc format object from the file dict-of-bytes
+
+ .. versionadded 0.20.0
+
+ Parameters
+ ----------
+ dict-of-meta-and-bytes : a dictionary of meta data & bytes
+
+ Returns
+ -------
+ Pandas Object
+
+ """
+ engine = db['engine']
+
+ if engine == 'pickle':
+ return _read_pickle(db['data'])
+ try:
+ return _read_pyarrow(db['data'])
+ except: # pragma
+ return _read_pickle(db['data'])
+
+
+def _read_pyarrow(db):
+ """ helper to return via pyarrow """
+ pyarrow = _try_import()
+ return pyarrow.read_ipc(db)
+
+
+def _read_pickle(db):
+ """ helper to return via pickle """
+ return cPickle.loads(db)
diff --git a/pandas/tests/io/test_ipc.py b/pandas/tests/io/test_ipc.py
new file mode 100644
index 0000000000000..bbe9c3155dea3
--- /dev/null
+++ b/pandas/tests/io/test_ipc.py
@@ -0,0 +1,146 @@
+""" test ipc compat """
+
+import pytest
+pyarrow = pytest.importorskip('pyarrow')
+
+from distutils.version import LooseVersion
+import numpy as np
+import pandas as pd
+from pandas import Series, Index, DataFrame
+from pandas.io.ipc import (to_ipc, read_ipc,
+ _to_pickle, _to_pyarrow,
+ _read_pickle, _read_pyarrow)
+
+import pandas.util.testing as tm
+
+_HAVE_LATEST_PYARROW = LooseVersion(pyarrow.__version__) > '0.2.0'
+
+
+@pytest.fixture(
+ params=[('pickle', _to_pickle, _read_pickle),
+ pytest.mark.skipif(not _HAVE_LATEST_PYARROW,
+ reason='need newer pyarrow version')(
+ 'pyarrow', _to_pyarrow, _read_pyarrow)],
+ ids=lambda x: x[0])
+def engine(request):
+ return request.param
+
+
+@pytest.fixture
+def pa():
+ if not _HAVE_LATEST_PYARROW:
+ pytest.skip("need newer pyarrow")
+
+
+def make_mixed_frame(N):
+ return DataFrame(
+ {'A': np.arange(N),
+ 'B': np.random.randn(N),
+ 'C': 'foo',
+ 'D': tm.makeStringIndex(N),
+ 'E': pd.Categorical.from_codes(np.repeat([0, 1], N // 2),
+ categories=['foo', 'bar']),
+ 'F': pd.date_range('20130101', freq='s', periods=N)})
+
+
+class TestIPC(object):
+
+ def check_error_on_write(self, df, exc):
+ # check that we are raising the exception
+ # on writing
+
+ with pytest.raises(exc):
+ to_ipc(df)
+
+ def check_round_trip(self, df, engine=None):
+
+ if engine is None:
+ writer = to_ipc
+ reader = read_ipc
+ b = writer(df)
+ else:
+ _, writer, reader = engine
+ b = writer(df)
+
+ # we are calling a lower-level routine
+ b = b['data']
+
+ result = reader(b)
+ tm.assert_frame_equal(result, df)
+
+ def test_error(self):
+ for obj in [1, 'foo', pd.Timestamp('20130101'),
+ np.array([1, 2, 3])]:
+ self.check_error_on_write(obj, ValueError)
+
+ def test_with_small_size(self, engine):
+
+ N = 100
+ df = make_mixed_frame(N)
+ self.check_round_trip(df, engine)
+
+ def test_with_med_size(self, engine):
+
+ # large size
+ N = 10000
+ df = make_mixed_frame(N)
+ self.check_round_trip(df, engine)
+
+ def test_with_large_size(self, engine):
+
+ # large size
+ N = 1000000
+ df = make_mixed_frame(N)
+ self.check_round_trip(df, engine)
+
+ def test_non_dataframe(self):
+
+ i = Index(['foo', 'bar'])
+ b = to_ipc(i)
+ result = read_ipc(b)
+ tm.assert_index_equal(result, i)
+
+ s = Series(['foo', 'bar'])
+ b = to_ipc(s)
+ result = read_ipc(b)
+ tm.assert_series_equal(result, s)
+
+ def test_basic(self, pa):
+
+ df = pd.DataFrame({
+ 'string': list('abc'),
+ 'int': list(range(1, 4)),
+ 'uint': np.arange(3, 6).astype('u1'),
+ 'float': np.arange(4.0, 7.0, dtype='float64'),
+ 'bool': [True, False, True],
+ 'bool_with_nan': [True, None, True],
+ 'cat': pd.Categorical(list('abc')),
+ 'date_range': pd.date_range('20130101', periods=3),
+ 'date_range_tz': pd.date_range('20130101', periods=3,
+ tz='US/Eastern'),
+ 'timedelta': pd.timedelta_range('1 day', periods=3)})
+
+ # should work both on pickle & pyarrow
+ # TODO: how to assure this?
+ self.check_round_trip(df)
+
+ def test_pickle_only(self):
+
+ # period
+ df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
+ self.check_round_trip(df)
+
+ # non-strings
+ df = pd.DataFrame({'a': ['a', 1, 2.0]})
+ self.check_round_trip(df)
+
+ def test_duplicate_columns(self, pa):
+
+ df = pd.DataFrame(np.arange(12).reshape(4, 3),
+ columns=list('aaa')).copy()
+ self.check_round_trip(df)
+
+ def test_stringify_columns(self, pa):
+
+ df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
+ self.check_round_trip(df)
| https://api.github.com/repos/pandas-dev/pandas/pulls/15907 | 2017-04-05T18:40:33Z | 2017-06-10T19:02:52Z | null | 2017-09-12T13:15:09Z | |
TST: better testing of Series.nlargest/nsmallest | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index a62d290277443..99ef76e0f4812 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -12,6 +12,7 @@
from pandas.types.common import (is_unsigned_integer_dtype,
is_signed_integer_dtype,
is_integer_dtype,
+ is_complex_dtype,
is_categorical_dtype,
is_extension_type,
is_datetimetz,
@@ -40,6 +41,44 @@
from pandas._libs.tslib import iNaT
+# --------------- #
+# dtype access #
+# --------------- #
+
+def _ensure_data_view(values):
+ """
+ helper routine to ensure that our data is of the correct
+ input dtype for lower-level routines
+
+ Parameters
+ ----------
+ values : array-like
+ """
+
+ if needs_i8_conversion(values):
+ values = values.view(np.int64)
+ elif is_period_arraylike(values):
+ from pandas.tseries.period import PeriodIndex
+ values = PeriodIndex(values).asi8
+ elif is_categorical_dtype(values):
+ values = values.values.codes
+ elif isinstance(values, (ABCSeries, ABCIndex)):
+ values = values.values
+
+ if is_signed_integer_dtype(values):
+ values = _ensure_int64(values)
+ elif is_unsigned_integer_dtype(values):
+ values = _ensure_uint64(values)
+ elif is_complex_dtype(values):
+ values = _ensure_float64(values)
+ elif is_float_dtype(values):
+ values = _ensure_float64(values)
+ else:
+ values = _ensure_object(values)
+
+ return values
+
+
# --------------- #
# top-level algos #
# --------------- #
@@ -867,9 +906,7 @@ def nsmallest(arr, n, keep='first'):
narr = len(arr)
n = min(n, narr)
- sdtype = str(arr.dtype)
- arr = arr.view(_dtype_map.get(sdtype, sdtype))
-
+ arr = _ensure_data_view(arr)
kth_val = algos.kth_smallest(arr.copy(), n - 1)
return _finalize_nsmallest(arr, kth_val, n, keep, narr)
@@ -880,8 +917,7 @@ def nlargest(arr, n, keep='first'):
Note: Fails silently with NaN.
"""
- sdtype = str(arr.dtype)
- arr = arr.view(_dtype_map.get(sdtype, sdtype))
+ arr = _ensure_data_view(arr)
return nsmallest(-arr, n, keep=keep)
@@ -910,9 +946,10 @@ def select_n_series(series, n, keep, method):
nordered : Series
"""
dtype = series.dtype
- if not issubclass(dtype.type, (np.integer, np.floating, np.datetime64,
- np.timedelta64)):
- raise TypeError("Cannot use method %r with dtype %s" % (method, dtype))
+ if not ((is_numeric_dtype(dtype) and not is_complex_dtype(dtype)) or
+ needs_i8_conversion(dtype)):
+ raise TypeError("Cannot use method '{method}' with "
+ "dtype {dtype}".format(method=method, dtype=dtype))
if keep not in ('first', 'last'):
raise ValueError('keep must be either "first", "last"')
@@ -964,9 +1001,6 @@ def _finalize_nsmallest(arr, kth_val, n, keep, narr):
return inds
-_dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'}
-
-
# ------- #
# helpers #
# ------- #
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index b747a680c17dd..732142f1bce9a 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1381,80 +1381,6 @@ def test_is_monotonic(self):
self.assertFalse(s.is_monotonic)
self.assertTrue(s.is_monotonic_decreasing)
- def test_nsmallest_nlargest(self):
- # float, int, datetime64 (use i8), timedelts64 (same),
- # object that are numbers, object that are strings
-
- base = [3, 2, 1, 2, 5]
-
- s_list = [
- Series(base, dtype='int8'),
- Series(base, dtype='int16'),
- Series(base, dtype='int32'),
- Series(base, dtype='int64'),
- Series(base, dtype='float32'),
- Series(base, dtype='float64'),
- Series(base, dtype='uint8'),
- Series(base, dtype='uint16'),
- Series(base, dtype='uint32'),
- Series(base, dtype='uint64'),
- Series(base).astype('timedelta64[ns]'),
- Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),
- ]
-
- raising = [
- Series([3., 2, 1, 2, '5'], dtype='object'),
- Series([3., 2, 1, 2, 5], dtype='object'),
- # not supported on some archs
- # Series([3., 2, 1, 2, 5], dtype='complex256'),
- Series([3., 2, 1, 2, 5], dtype='complex128'),
- ]
-
- for r in raising:
- dt = r.dtype
- msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt
- args = 2, len(r), 0, -1
- methods = r.nlargest, r.nsmallest
- for method, arg in product(methods, args):
- with tm.assertRaisesRegexp(TypeError, msg):
- method(arg)
-
- for s in s_list:
-
- assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
- assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
-
- empty = s.iloc[0:0]
- assert_series_equal(s.nsmallest(0), empty)
- assert_series_equal(s.nsmallest(-1), empty)
- assert_series_equal(s.nlargest(0), empty)
- assert_series_equal(s.nlargest(-1), empty)
-
- assert_series_equal(s.nsmallest(len(s)), s.sort_values())
- assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
- assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
- assert_series_equal(s.nlargest(len(s) + 1),
- s.iloc[[4, 0, 1, 3, 2]])
-
- s = Series([3., np.nan, 1, 2, 5])
- assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
- assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
-
- msg = 'keep must be either "first", "last"'
- with tm.assertRaisesRegexp(ValueError, msg):
- s.nsmallest(keep='invalid')
- with tm.assertRaisesRegexp(ValueError, msg):
- s.nlargest(keep='invalid')
-
- # GH 13412
- s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
- result = s.nlargest(3)
- expected = s.sort_values(ascending=False).head(3)
- assert_series_equal(result, expected)
- result = s.nsmallest(3)
- expected = s.sort_values().head(3)
- assert_series_equal(result, expected)
-
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
@@ -1729,3 +1655,109 @@ def test_value_counts_categorical_not_ordered(self):
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
+
+
+@pytest.fixture
+def s_main_dtypes():
+ df = pd.DataFrame(
+ {'datetime': pd.to_datetime(['2003', '2002',
+ '2001', '2002',
+ '2005']),
+ 'datetimetz': pd.to_datetime(
+ ['2003', '2002',
+ '2001', '2002',
+ '2005']).tz_localize('US/Eastern'),
+ 'timedelta': pd.to_timedelta(['3d', '2d', '1d',
+ '2d', '5d'])})
+
+ for dtype in ['int8', 'int16', 'int32', 'int64',
+ 'float32', 'float64',
+ 'uint8', 'uint16', 'uint32', 'uint64']:
+ df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
+
+ return df
+
+
+class TestNLargestNSmallest(object):
+
+ @pytest.mark.parametrize(
+ "r", [Series([3., 2, 1, 2, '5'], dtype='object'),
+ Series([3., 2, 1, 2, 5], dtype='object'),
+ # not supported on some archs
+ # Series([3., 2, 1, 2, 5], dtype='complex256'),
+ Series([3., 2, 1, 2, 5], dtype='complex128'),
+ Series(list('abcde'), dtype='category'),
+ Series(list('abcde'))])
+ def test_error(self, r):
+ dt = r.dtype
+ msg = ("Cannot use method 'n(larg|small)est' with "
+ "dtype {dt}".format(dt=dt))
+ args = 2, len(r), 0, -1
+ methods = r.nlargest, r.nsmallest
+ for method, arg in product(methods, args):
+ with tm.assertRaisesRegexp(TypeError, msg):
+ method(arg)
+
+ @pytest.mark.parametrize(
+ "s",
+ [v for k, v in s_main_dtypes().iteritems()])
+ def test_nsmallest_nlargest(self, s):
+ # float, int, datetime64 (use i8), timedelts64 (same),
+ # object that are numbers, object that are strings
+
+ assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
+ assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
+
+ empty = s.iloc[0:0]
+ assert_series_equal(s.nsmallest(0), empty)
+ assert_series_equal(s.nsmallest(-1), empty)
+ assert_series_equal(s.nlargest(0), empty)
+ assert_series_equal(s.nlargest(-1), empty)
+
+ assert_series_equal(s.nsmallest(len(s)), s.sort_values())
+ assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
+ assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
+ assert_series_equal(s.nlargest(len(s) + 1),
+ s.iloc[[4, 0, 1, 3, 2]])
+
+ def test_misc(self):
+
+ s = Series([3., np.nan, 1, 2, 5])
+ assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
+ assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
+
+ msg = 'keep must be either "first", "last"'
+ with tm.assertRaisesRegexp(ValueError, msg):
+ s.nsmallest(keep='invalid')
+ with tm.assertRaisesRegexp(ValueError, msg):
+ s.nlargest(keep='invalid')
+
+ # GH 15297
+ s = Series([1] * 5, index=[1, 2, 3, 4, 5])
+ expected_first = Series([1] * 3, index=[1, 2, 3])
+ expected_last = Series([1] * 3, index=[5, 4, 3])
+
+ result = s.nsmallest(3)
+ assert_series_equal(result, expected_first)
+
+ result = s.nsmallest(3, keep='last')
+ assert_series_equal(result, expected_last)
+
+ result = s.nlargest(3)
+ assert_series_equal(result, expected_first)
+
+ result = s.nlargest(3, keep='last')
+ assert_series_equal(result, expected_last)
+
+ @pytest.mark.parametrize('n', range(1, 5))
+ def test_n(self, n):
+
+ # GH 13412
+ s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
+ result = s.nlargest(n)
+ expected = s.sort_values(ascending=False).head(n)
+ assert_series_equal(result, expected)
+
+ result = s.nsmallest(n)
+ expected = s.sort_values().head(n)
+ assert_series_equal(result, expected)
| xref #15299 | https://api.github.com/repos/pandas-dev/pandas/pulls/15902 | 2017-04-05T14:03:55Z | 2017-04-05T19:16:06Z | null | 2017-04-05T19:17:48Z |
(rebased) ENH: Added more options for formats.style.bar | diff --git a/doc/source/html-styling.ipynb b/doc/source/html-styling.ipynb
index 1a97378fd30b1..841dc34f5cb04 100644
--- a/doc/source/html-styling.ipynb
+++ b/doc/source/html-styling.ipynb
@@ -2,7 +2,9 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"source": [
"*New in version 0.17.1*\n",
"\n",
@@ -518,7 +520,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "You can include \"bar charts\" in your DataFrame."
+ "There's also `.highlight_min` and `.highlight_max`."
]
},
{
@@ -529,14 +531,25 @@
},
"outputs": [],
"source": [
- "df.style.bar(subset=['A', 'B'], color='#d65f5f')"
+ "df.style.highlight_max(axis=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "df.style.highlight_min(axis=0)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "There's also `.highlight_min` and `.highlight_max`."
+ "Use `Styler.set_properties` when the style doesn't actually depend on the values."
]
},
{
@@ -547,7 +560,23 @@
},
"outputs": [],
"source": [
- "df.style.highlight_max(axis=0)"
+ "df.style.set_properties(**{'background-color': 'black',\n",
+ " 'color': 'lawngreen',\n",
+ " 'border-color': 'white'})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Bar charts"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can include \"bar charts\" in your DataFrame."
]
},
{
@@ -558,14 +587,16 @@
},
"outputs": [],
"source": [
- "df.style.highlight_min(axis=0)"
+ "df.style.bar(subset=['A', 'B'], color='#d65f5f')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Use `Styler.set_properties` when the style doesn't actually depend on the values."
+ "New in version 0.20.0 is the ability to customize further the bar chart: You can now have the `df.style.bar` be centered on zero or midpoint value (in addition to the already existing way of having the min value at the left side of the cell), and you can pass a list of `[color_negative, color_positive]`.\n",
+ "\n",
+ "Here's how you can change the above with the new `align='mid'` option:"
]
},
{
@@ -576,9 +607,62 @@
},
"outputs": [],
"source": [
- "df.style.set_properties(**{'background-color': 'black',\n",
- " 'color': 'lawngreen',\n",
- " 'border-color': 'white'})"
+ "df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The following example aims to give a highlight of the behavior of the new align options:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "from IPython.display import HTML\n",
+ "\n",
+ "# Test series\n",
+ "test1 = pd.Series([-100,-60,-30,-20], name='All Negative')\n",
+ "test2 = pd.Series([10,20,50,100], name='All Positive')\n",
+ "test3 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')\n",
+ "\n",
+ "head = \"\"\"\n",
+ "<table>\n",
+ " <thead>\n",
+ " <th>Align</th>\n",
+ " <th>All Negative</th>\n",
+ " <th>All Positive</th>\n",
+ " <th>Both Neg and Pos</th>\n",
+ " </thead>\n",
+ " </tbody>\n",
+ "\n",
+ "\"\"\"\n",
+ "\n",
+ "aligns = ['left','zero','mid']\n",
+ "for align in aligns:\n",
+ " row = \"<tr><th>{}</th>\".format(align)\n",
+ " for serie in [test1,test2,test3]:\n",
+ " s = serie.copy()\n",
+ " s.name=''\n",
+ " row += \"<td>{}</td>\".format(s.to_frame().style.bar(align=align, \n",
+ " color=['#d65f5f', '#5fba7d'], \n",
+ " width=100).render()) #testn['width']\n",
+ " row += '</tr>'\n",
+ " head += row\n",
+ " \n",
+ "head+= \"\"\"\n",
+ "</tbody>\n",
+ "</table>\"\"\"\n",
+ " \n",
+ "\n",
+ "HTML(head)"
]
},
{
@@ -961,7 +1045,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.1"
+ "version": "3.5.2"
}
},
"nbformat": 4,
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 2e1cc396287ce..f3954688ce126 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -366,6 +366,8 @@ Other Enhancements
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
- ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`)
+- ``DataFrame.style.bar()`` now accepts two more options to further customize the bar chart. Bar alignment is set with ``align='left'|'mid'|'zero'``, the default is "left", which is backward compatible; You can now pass a list of ``color=[color_negative, color_positive]``. (:issue:`14757`)
+
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
diff --git a/pandas/formats/style.py b/pandas/formats/style.py
index e712010a8b4f2..f8c61236a6eff 100644
--- a/pandas/formats/style.py
+++ b/pandas/formats/style.py
@@ -17,7 +17,7 @@
"or `pip install Jinja2`"
raise ImportError(msg)
-from pandas.types.common import is_float, is_string_like
+from pandas.types.common import is_float, is_string_like, is_list_like
import numpy as np
import pandas as pd
@@ -857,19 +857,125 @@ def set_properties(self, subset=None, **kwargs):
return self.applymap(f, subset=subset)
@staticmethod
- def _bar(s, color, width):
- normed = width * (s - s.min()) / (s.max() - s.min())
+ def _bar_left(s, color, width, base):
+ """
+ The minimum value is aligned at the left of the cell
- base = 'width: 10em; height: 80%;'
- attrs = (base + 'background: linear-gradient(90deg,{c} {w}%, '
+ Parameters
+ ----------
+ color: 2-tuple/list, of [``color_negative``, ``color_positive``]
+ width: float
+ A number between 0 or 100. The largest value will cover ``width``
+ percent of the cell's width
+ base: str
+ The base css format of the cell, e.g.:
+ ``base = 'width: 10em; height: 80%;'``
+
+ Returns
+ -------
+ self : Styler
+ """
+ normed = width * (s - s.min()) / (s.max() - s.min())
+ zero_normed = width * (0 - s.min()) / (s.max() - s.min())
+ attrs = (base + 'background: linear-gradient(90deg,{c} {w:.1f}%, '
'transparent 0%)')
- return [attrs.format(c=color, w=x) if x != 0 else base for x in normed]
- def bar(self, subset=None, axis=0, color='#d65f5f', width=100):
+ return [base if x == 0 else attrs.format(c=color[0], w=x)
+ if x < zero_normed
+ else attrs.format(c=color[1], w=x) if x >= zero_normed
+ else base for x in normed]
+
+ @staticmethod
+ def _bar_center_zero(s, color, width, base):
+ """
+ Creates a bar chart where the zero is centered in the cell
+
+ Parameters
+ ----------
+ color: 2-tuple/list, of [``color_negative``, ``color_positive``]
+ width: float
+ A number between 0 or 100. The largest value will cover ``width``
+ percent of the cell's width
+ base: str
+ The base css format of the cell, e.g.:
+ ``base = 'width: 10em; height: 80%;'``
+
+ Returns
+ -------
+ self : Styler
+ """
+
+ # Either the min or the max should reach the edge
+ # (50%, centered on zero)
+ m = max(abs(s.min()), abs(s.max()))
+
+ normed = s * 50 * width / (100.0 * m)
+
+ attrs_neg = (base + 'background: linear-gradient(90deg, transparent 0%'
+ ', transparent {w:.1f}%, {c} {w:.1f}%, '
+ '{c} 50%, transparent 50%)')
+
+ attrs_pos = (base + 'background: linear-gradient(90deg, transparent 0%'
+ ', transparent 50%, {c} 50%, {c} {w:.1f}%, '
+ 'transparent {w:.1f}%)')
+
+ return [attrs_pos.format(c=color[1], w=(50 + x)) if x >= 0
+ else attrs_neg.format(c=color[0], w=(50 + x))
+ for x in normed]
+
+ @staticmethod
+ def _bar_center_mid(s, color, width, base):
+ """
+ Creates a bar chart where the midpoint is centered in the cell
+
+ Parameters
+ ----------
+ color: 2-tuple/list, of [``color_negative``, ``color_positive``]
+ width: float
+ A number between 0 or 100. The largest value will cover ``width``
+ percent of the cell's width
+ base: str
+ The base css format of the cell, e.g.:
+ ``base = 'width: 10em; height: 80%;'``
+
+ Returns
+ -------
+ self : Styler
+ """
+
+ if s.min() >= 0:
+ # In this case, we place the zero at the left, and the max() should
+ # be at width
+ zero = 0.0
+ slope = width / s.max()
+ elif s.max() <= 0:
+ # In this case, we place the zero at the right, and the min()
+ # should be at 100-width
+ zero = 100.0
+ slope = width / -s.min()
+ else:
+ slope = width / (s.max() - s.min())
+ zero = (100.0 + width) / 2.0 - slope * s.max()
+
+ normed = zero + slope * s
+
+ attrs_neg = (base + 'background: linear-gradient(90deg, transparent 0%'
+ ', transparent {w:.1f}%, {c} {w:.1f}%, '
+ '{c} {zero:.1f}%, transparent {zero:.1f}%)')
+
+ attrs_pos = (base + 'background: linear-gradient(90deg, transparent 0%'
+ ', transparent {zero:.1f}%, {c} {zero:.1f}%, '
+ '{c} {w:.1f}%, transparent {w:.1f}%)')
+
+ return [attrs_pos.format(c=color[1], zero=zero, w=x) if x > zero
+ else attrs_neg.format(c=color[0], zero=zero, w=x)
+ for x in normed]
+
+ def bar(self, subset=None, align='left', axis=0,
+ color='#d65f5f', width=100):
"""
Color the background ``color`` proptional to the values in each column.
Excludes non-numeric data by default.
-
.. versionadded:: 0.17.1
Parameters
@@ -877,10 +983,23 @@ def bar(self, subset=None, axis=0, color='#d65f5f', width=100):
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
axis: int
- color: str
+ color: str or 2-tuple/list
+ If a str is passed, the color is the same for both
+ negative and positive numbers. If 2-tuple/list is used, the
+ first element is the color_negative and the second is the
+ color_positive (eg: ['#d65f5f', '#5fba7d'])
width: float
A number between 0 or 100. The largest value will cover ``width``
percent of the cell's width
+ align : {'left', 'zero',' mid'}
+
+ .. versionadded:: 0.20.0
+
+ - 'left' : the min value starts at the left of the cell
+ - 'zero' : a value of zero is located at the center of the cell
+ - 'mid' : the center of the cell is at (max-min)/2, or
+ if values are all negative (positive) the zero is aligned
+ at the right (left) of the cell
Returns
-------
@@ -888,8 +1007,32 @@ def bar(self, subset=None, axis=0, color='#d65f5f', width=100):
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
- self.apply(self._bar, subset=subset, axis=axis, color=color,
- width=width)
+
+ base = 'width: 10em; height: 80%;'
+
+ if not(is_list_like(color)):
+ color = [color, color]
+ elif len(color) == 1:
+ color = [color[0], color[0]]
+ elif len(color) > 2:
+ msg = ("Must pass `color` as string or a list-like"
+ " of length 2: [`color_negative`, `color_positive`]\n"
+ "(eg: color=['#d65f5f', '#5fba7d'])")
+ raise ValueError(msg)
+
+ if align == 'left':
+ self.apply(self._bar_left, subset=subset, axis=axis, color=color,
+ width=width, base=base)
+ elif align == 'zero':
+ self.apply(self._bar_center_zero, subset=subset, axis=axis,
+ color=color, width=width, base=base)
+ elif align == 'mid':
+ self.apply(self._bar_center_mid, subset=subset, axis=axis,
+ color=color, width=width, base=base)
+ else:
+ msg = ("`align` must be one of {'left', 'zero',' mid'}")
+ raise ValueError(msg)
+
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
diff --git a/pandas/tests/formats/test_style.py b/pandas/tests/formats/test_style.py
index 44af0b8ebb085..a1768fac47acc 100644
--- a/pandas/tests/formats/test_style.py
+++ b/pandas/tests/formats/test_style.py
@@ -265,7 +265,7 @@ def test_empty(self):
{'props': [['', '']], 'selector': 'row1_col0'}]
self.assertEqual(result, expected)
- def test_bar(self):
+ def test_bar_align_left(self):
df = pd.DataFrame({'A': [0, 1, 2]})
result = df.style.bar()._compute().ctx
expected = {
@@ -298,7 +298,7 @@ def test_bar(self):
result = df.style.bar(color='red', width=50)._compute().ctx
self.assertEqual(result, expected)
- def test_bar_0points(self):
+ def test_bar_align_left_0points(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.style.bar()._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
@@ -348,6 +348,115 @@ def test_bar_0points(self):
', transparent 0%)']}
self.assertEqual(result, expected)
+ def test_bar_align_zero_pos_and_neg(self):
+ # See https://github.com/pandas-dev/pandas/pull/14757
+ df = pd.DataFrame({'A': [-10, 0, 20, 90]})
+
+ result = df.style.bar(align='zero', color=[
+ '#d65f5f', '#5fba7d'], width=90)._compute().ctx
+
+ expected = {(0, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 45.0%, '
+ '#d65f5f 45.0%, #d65f5f 50%, '
+ 'transparent 50%)'],
+ (1, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 50%, '
+ '#5fba7d 50%, #5fba7d 50.0%, '
+ 'transparent 50.0%)'],
+ (2, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 50%, #5fba7d 50%, '
+ '#5fba7d 60.0%, transparent 60.0%)'],
+ (3, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 50%, #5fba7d 50%, '
+ '#5fba7d 95.0%, transparent 95.0%)']}
+ self.assertEqual(result, expected)
+
+ def test_bar_align_mid_pos_and_neg(self):
+ df = pd.DataFrame({'A': [-10, 0, 20, 90]})
+
+ result = df.style.bar(align='mid', color=[
+ '#d65f5f', '#5fba7d'])._compute().ctx
+
+ expected = {(0, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 0.0%, #d65f5f 0.0%, '
+ '#d65f5f 10.0%, transparent 10.0%)'],
+ (1, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 10.0%, '
+ '#d65f5f 10.0%, #d65f5f 10.0%, '
+ 'transparent 10.0%)'],
+ (2, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 10.0%, #5fba7d 10.0%'
+ ', #5fba7d 30.0%, transparent 30.0%)'],
+ (3, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 10.0%, '
+ '#5fba7d 10.0%, #5fba7d 100.0%, '
+ 'transparent 100.0%)']}
+
+ self.assertEqual(result, expected)
+
+ def test_bar_align_mid_all_pos(self):
+ df = pd.DataFrame({'A': [10, 20, 50, 100]})
+
+ result = df.style.bar(align='mid', color=[
+ '#d65f5f', '#5fba7d'])._compute().ctx
+
+ expected = {(0, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
+ '#5fba7d 10.0%, transparent 10.0%)'],
+ (1, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
+ '#5fba7d 20.0%, transparent 20.0%)'],
+ (2, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
+ '#5fba7d 50.0%, transparent 50.0%)'],
+ (3, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
+ '#5fba7d 100.0%, transparent 100.0%)']}
+
+ self.assertEqual(result, expected)
+
+ def test_bar_align_mid_all_neg(self):
+ df = pd.DataFrame({'A': [-100, -60, -30, -20]})
+
+ result = df.style.bar(align='mid', color=[
+ '#d65f5f', '#5fba7d'])._compute().ctx
+
+ expected = {(0, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 0.0%, '
+ '#d65f5f 0.0%, #d65f5f 100.0%, transparent 100.0%)'],
+ (1, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 40.0%, '
+ '#d65f5f 40.0%, #d65f5f 100.0%, '
+ 'transparent 100.0%)'],
+ (2, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 70.0%, '
+ '#d65f5f 70.0%, #d65f5f 100.0%, transparent 100.0%)'],
+ (3, 0): ['width: 10em', ' height: 80%',
+ 'background: linear-gradient(90deg, '
+ 'transparent 0%, transparent 80.0%, '
+ '#d65f5f 80.0%, #d65f5f 100.0%, transparent 100.0%)']}
+ self.assertEqual(result, expected)
+
+ def test_bar_bad_align_raises(self):
+ df = pd.DataFrame({'A': [-100, -60, -30, -20]})
+ with tm.assertRaises(ValueError):
+ df.style.bar(align='poorly', color=['#d65f5f', '#5fba7d'])
+
def test_highlight_null(self, null_color='red'):
df = pd.DataFrame({'A': [0, np.nan]})
result = df.style.highlight_null()._compute().ctx
| Author: Julien Marrec <julien.marrec@gmail.com>
Closes #14757 from jmarrec/style-bar and squashes the following commits:
dc3cbe8 [Julien Marrec] Added a whatsnew note
af6c9bd [Julien Marrec] Added a simple example before the parametric one
80a3ce0 [Julien Marrec] Check for bad align value and raise. Wrote test for it too
5875eb9 [Julien Marrec] Change docstrings for color and align
5a22ee1 [Julien Marrec] Merge commit '673fb8f828952a4907e5659c1fcf83b771db7280' into style-bar
0e74b4d [Julien Marrec] Fix versionadded
1b7ffa2 [Julien Marrec] Added documentation on the new df.style.bar options for align and Colors in the documentation.
46bee6d [Julien Marrec] Change the tests to match new float formats.
01c200c [Julien Marrec] Format flots to avoid issue with py2.7 / py3.5 compta.
7ac2443 [Julien Marrec] Changes according to @sinhrks: Raise ValueError instead of warnings when color isn’t a str or 2-tuple/list. Passing “base” from bar().
e3f714c [Julien Marrec] Added a check on color argument that will issue a warning. Not sure if need to raise TypeError or issue a UserWarning if a list with more than two elements is passed.
f12faab [Julien Marrec] Fixed line too long `git diff upstream/master | flake8 --diff now passes`
7c89137 [Julien Marrec] ENH: Added more options for formats.style.bar
673fb8f [Julien Marrec] Fix versionadded
506f3d2 [Julien Marrec] Added documentation on the new df.style.bar options for align and Colors in the documentation.
e0563d5 [Julien Marrec] Change the tests to match new float formats.
d210938 [Julien Marrec] Format flots to avoid issue with py2.7 / py3.5 compta.
b22f639 [Julien Marrec] Changes according to @sinhrks: Raise ValueError instead of warnings when color isn’t a str or 2-tuple/list. Passing “base” from bar().
3046626 [Julien Marrec] Added a check on color argument that will issue a warning. Not sure if need to raise TypeError or issue a UserWarning if a list with more than two elements is passed.
524a9ab [Julien Marrec] Fixed line too long `git diff upstream/master | flake8 --diff now passes`
d1eafbb [Julien Marrec] ENH: Added more options for formats.style.bar
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15900 | 2017-04-05T12:22:34Z | 2017-04-06T13:32:04Z | null | 2017-04-06T13:32:04Z |
DEPR: correct locations to access public tslib objects | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 1bc85899fb89f..83ad85e3e292b 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -64,7 +64,11 @@
parser = _DeprecatedModule(deprmod='pandas.parser', deprmodto='pandas.io.libparsers')
lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto='pandas._libs.lib',
moved={'infer_dtype': 'pandas.api.lib.infer_dtype'})
-tslib = _DeprecatedModule(deprmod='pandas.tslib', deprmodto='pandas._libs.tslib')
+tslib = _DeprecatedModule(deprmod='pandas.tslib', deprmodto='pandas._libs.tslib',
+ moved={'Timestamp': 'pandas.Timestamp',
+ 'Timedelta': 'pandas.Timedelta',
+ 'NaT': 'pandas.NaT',
+ 'OutOfBoundsDatetime': 'pandas.errors.OutOfBoundsDatetime'})
# use the closest tagged version if possible
from ._version import get_versions
| You got currently:
```
In [4]: pd.tslib.Timestamp
/home/joris/miniconda3/envs/dev/bin/ipython:1: FutureWarning: pandas.tslib.Timestamp is deprecated.
Please use pandas._libs.tslib.Timestamp instead.
#!/home/joris/miniconda3/envs/dev/bin/python
Out[4]: pandas._libs.tslib.Timestamp
```
and
```
In [2]: tslib.OutOfBoundsDatetime
/home/joris/miniconda3/envs/dev/bin/ipython:1: FutureWarning: pandas.tslib.OutOfBoundsDatetime is deprecated.
Please use pandas._libs.tslib.OutOfBoundsDatetime instead.
#!/home/joris/miniconda3/envs/dev/bin/python
Out[2]: pandas._libs.tslib.OutOfBoundsDatetime
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/15897 | 2017-04-05T08:03:44Z | 2017-04-05T10:59:18Z | 2017-04-05T10:59:18Z | 2017-04-05T10:59:21Z |
API: to_datetime, required unit with numerical (#15836) | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 014f251ffb90a..db1b1ba07088a 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -131,13 +131,89 @@ Other Enhancements
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
-
-
.. _whatsnew_0210.api_breaking:
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. _whatsnew_0210.api_breaking.pandas_to_datetime:
+
+Numerical values need an explicit unit in pd.to_datetime
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- :func:`to_datetime` requires an unit with numerical arg (scalar or iterable), if not provided it raises an error (:issue:`15836`)
+For example:
+
+.. ipython:: python
+
+ # Old behaviour:
+ In [1]: pd.to_datetime(42)
+ Out[1]: Timestamp('1970-01-01 00:00:00.000000042')
+
+ # New behaviour
+ In [1]: pd.to_datetime(42)
+ ---------------------------------------------------------------------------
+ ValueError Traceback (most recent call last)
+ <ipython-input-1-a8ad7fa1924c> in <module>()
+ ----> 1 pd.to_datetime(42)
+
+ /home/anthony/src/pandas/pandas/core/tools/datetimes.py in to_datetime(arg, errors, dayfirst, yearfirst, utc, box, format, exact, unit, infer_datetime_format, origin)
+ 461 elif ((not isinstance(arg, DataFrame)) and
+ 462 (check_numerical_arg() and unit is None and format is None)):
+ --> 463 raise ValueError("a unit is required in case of numerical arg")
+ 464
+ 465 # handle origin
+
+ ValueError: a unit is required in case of numerical arg
+
+ In [2]: pd.to_datetime(42, unit='ns')
+ Out[2]: Timestamp('1970-01-01 00:00:00.000000042')
+
+Furthermore, this change fixes a bug with boolean values.
+
+.. ipython:: python
+ # Old behaviour
+ In [1]: pd.to_datetime(True, unit='ms')
+ Out[1]: Timestamp('1970-01-01 00:00:00.001000')
+
+ # New behaviour
+ In [2]: pd.to_datetime(True, unit='ms')
+ ---------------------------------------------------------------------------
+ TypeError Traceback (most recent call last)
+ <ipython-input-9-d7a95ef3ecc2> in <module>()
+ ----> 1 pd.to_datetime(True, unit='ms')
+
+ /home/anthony/src/pandas/pandas/core/tools/datetimes.py in to_datetime(arg, errors, dayfirst, yearfirst, utc, box, format, exact, unit, infer_datetime_format, origin)
+ 533 result = _convert_listlike(arg, box, format)
+ 534 else:
+ --> 535 result = _convert_listlike(np.array([arg]), box, format)[0]
+ 536
+ 537 return result
+
+ /home/anthony/src/pandas/pandas/core/tools/datetimes.py in _convert_listlike(arg, box, format, name, tz)
+ 374 arg = getattr(arg, 'values', arg)
+ 375 result = tslib.array_with_unit_to_datetime(arg, unit,
+ --> 376 errors=errors)
+ 377 if box:
+ 378 if errors == 'ignore':
+
+ /home/anthony/src/pandas/pandas/_libs/tslib.pyx in pandas._libs.tslib.array_with_unit_to_datetime()
+ 2210
+ 2211
+ -> 2212 cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
+ 2213 """
+ 2214 convert the ndarray according to the unit
+
+ /home/anthony/src/pandas/pandas/_libs/tslib.pyx in pandas._libs.tslib.array_with_unit_to_datetime()
+ 2246 raise TypeError("{0} is not convertible to datetime"
+ 2247 .format(values.dtype))
+ -> 2248
+ 2249 # try a quick conversion to i8
+ 2250 # if we have nulls that are not type-compat
+
+ TypeError: bool is not convertible to datetime
+
+Now boolean values raise an error everytime.
.. _whatsnew_0210.api_breaking.deps:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 5dd30072fb7aa..06c2e4962a3fd 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -31,7 +31,7 @@ cdef extern from "Python.h":
from libc.stdlib cimport free
from util cimport (is_integer_object, is_float_object, is_datetime64_object,
- is_timedelta64_object, INT64_MAX)
+ is_bool_object, is_timedelta64_object, INT64_MAX)
cimport util
# this is our datetime.pxd
@@ -2242,6 +2242,9 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
m = cast_from_unit(None, unit)
if is_raise:
+ if np.issubdtype(values.dtype, np.bool_):
+ raise TypeError("{0} is not convertible to datetime"
+ .format(values.dtype))
# try a quick conversion to i8
# if we have nulls that are not type-compat
@@ -2277,6 +2280,16 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
if _checknull_with_nat(val):
iresult[i] = NPY_NAT
+ elif is_bool_object(val):
+ if is_raise:
+ raise TypeError(
+ "{0} is not convertible to datetime"
+ .format(values.dtype)
+ )
+ elif is_ignore:
+ raise AssertionError
+ iresult[i] = NPY_NAT
+
elif is_integer_object(val) or is_float_object(val):
if val != val or val == NPY_NAT:
@@ -2320,7 +2333,7 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
else:
if is_raise:
- raise ValueError("non convertible value {0}"
+ raise ValueError("non convertible value {0} "
"with the unit '{1}'".format(
val,
unit))
@@ -2344,6 +2357,8 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'):
if _checknull_with_nat(val):
oresult[i] = NaT
+ elif is_bool_object(val):
+ oresult[i] = val
elif is_integer_object(val) or is_float_object(val):
if val != val or val == NPY_NAT:
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index c2cf6afc1a7b5..3e9af1c491741 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -155,7 +155,7 @@ def trans(x): # noqa
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
- result = to_datetime(result).tz_localize('utc')
+ result = to_datetime(result, unit='ns').tz_localize('utc')
result = result.tz_convert(dtype.tz)
except:
@@ -963,11 +963,13 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
dtype):
try:
if is_datetime64:
- value = to_datetime(value, errors=errors)._values
+ value = to_datetime(value, unit='ns',
+ errors=errors)._values
elif is_datetime64tz:
# input has to be UTC at this point, so just
# localize
- value = (to_datetime(value, errors=errors)
+ value = (to_datetime(value, unit='ns',
+ errors=errors)
.tz_localize('UTC')
.tz_convert(dtype.tz)
)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 5a04c550f4502..90d44223d2172 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -277,6 +277,7 @@ def __new__(cls, data=None,
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
+ unit = kwargs.pop('unit', None)
freq_infer = False
if not isinstance(freq, DateOffset):
@@ -333,7 +334,7 @@ def __new__(cls, data=None,
if not (is_datetime64_dtype(data) or is_datetimetz(data) or
is_integer_dtype(data)):
data = tools.to_datetime(data, dayfirst=dayfirst,
- yearfirst=yearfirst)
+ unit=unit, yearfirst=yearfirst)
if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index c0f234a36803d..51d0157167d60 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -235,7 +235,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
- unit : string, default 'ns'
+ unit : string, default None
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
@@ -342,6 +342,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
pandas.to_timedelta : Convert argument to timedelta.
"""
from pandas.core.indexes.datetimes import DatetimeIndex
+ from pandas.core.frame import DataFrame
tz = 'utc' if utc else None
@@ -451,8 +452,15 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
except (ValueError, TypeError):
raise e
+ def check_numerical_arg():
+ return ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
+ (is_numeric_dtype(np.asarray(arg)) and np.asarray(arg).size))
+
if arg is None:
return None
+ elif ((not isinstance(arg, DataFrame)) and
+ (check_numerical_arg() and unit is None and format is None)):
+ raise ValueError("a unit is required in case of numerical arg")
# handle origin
if origin == 'julian':
@@ -479,8 +487,7 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
# arg must be a numeric
original = arg
- if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
- is_numeric_dtype(np.asarray(arg))):
+ if not check_numerical_arg():
raise ValueError(
"'{arg}' is not compatible with origin='{origin}'; "
"it must be numeric with a unit specified ".format(
@@ -605,7 +612,7 @@ def f(value):
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
- "[{excess}]".format(','.join(excess=excess)))
+ "[{}]".format(','.join(excess)))
def coerce(values):
# we allow coercion to if errors allows
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 50669ee357bbd..c27c06de18f82 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -25,6 +25,35 @@
compat)
+@pytest.fixture(params=['D', 's', 'ms', 'us', 'ns'])
+def units(request):
+ return request.param
+
+
+@pytest.fixture
+def epoch_1960():
+ # for origin as 1960-01-01
+ return Timestamp('1960-01-01')
+
+
+@pytest.fixture
+def units_from_epochs():
+ return list(range(5))
+
+
+@pytest.fixture(params=[epoch_1960(),
+ epoch_1960().to_pydatetime(),
+ epoch_1960().to_datetime64(),
+ str(epoch_1960())])
+def epochs(request):
+ return request.param
+
+
+@pytest.fixture
+def julian_dates():
+ return pd.date_range('2014-1-1', periods=10).to_julian_date().values
+
+
class TimeConversionFormats(object):
def test_to_datetime_format(self):
@@ -306,25 +335,6 @@ def test_to_datetime_tz_psycopg2(self):
dtype='datetime64[ns, UTC]')
tm.assert_index_equal(result, expected)
- def test_datetime_bool(self):
- # GH13176
- with pytest.raises(TypeError):
- to_datetime(False)
- assert to_datetime(False, errors="coerce") is NaT
- assert to_datetime(False, errors="ignore") is False
- with pytest.raises(TypeError):
- to_datetime(True)
- assert to_datetime(True, errors="coerce") is NaT
- assert to_datetime(True, errors="ignore") is True
- with pytest.raises(TypeError):
- to_datetime([False, datetime.today()])
- with pytest.raises(TypeError):
- to_datetime(['20130101', True])
- tm.assert_index_equal(to_datetime([0, False, NaT, 0.0],
- errors="coerce"),
- DatetimeIndex([to_datetime(0), NaT,
- NaT, to_datetime(0)]))
-
def test_datetime_invalid_datatype(self):
# GH13176
@@ -334,7 +344,27 @@ def test_datetime_invalid_datatype(self):
pd.to_datetime(pd.to_datetime)
-class ToDatetimeUnit(object):
+class TestToDatetimeUnit(object):
+
+ def test_datetime_bool(self, units):
+ # GH13176
+ with pytest.raises(TypeError):
+ to_datetime(False, unit=units)
+ assert to_datetime(False, unit=units, errors="coerce") is NaT
+ assert (not to_datetime(False, unit=units, errors="ignore"))
+ with pytest.raises(TypeError):
+ to_datetime(True, unit=units)
+ assert to_datetime(True, unit=units, errors="coerce") is NaT
+ assert to_datetime(True, unit=units, errors="ignore")
+ with pytest.raises(TypeError):
+ to_datetime([False, datetime.today()], unit=units)
+ with pytest.raises(TypeError):
+ to_datetime([True, '20130101'], unit=units)
+
+ tm.assert_index_equal(to_datetime([0, False, NaT, 0.0],
+ errors="coerce"),
+ DatetimeIndex([to_datetime(0, unit=units), NaT,
+ NaT, to_datetime(0, unit=units)]))
def test_unit(self):
# GH 11758
@@ -409,10 +439,10 @@ def test_unit_with_numeric(self):
arr1 = [1.434692e+18, 1.432766e+18]
arr2 = np.array(arr1).astype('int64')
for errors in ['ignore', 'raise', 'coerce']:
- result = pd.to_datetime(arr1, errors=errors)
+ result = pd.to_datetime(arr1, unit='ns', errors=errors)
tm.assert_index_equal(result, expected)
- result = pd.to_datetime(arr2, errors=errors)
+ result = pd.to_datetime(arr2, unit='ns', errors=errors)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
@@ -421,7 +451,7 @@ def test_unit_with_numeric(self):
'2015-06-19 05:33:20',
'2015-05-27 22:33:20'])
arr = ['foo', 1.434692e+18, 1.432766e+18]
- result = pd.to_datetime(arr, errors='coerce')
+ result = pd.to_datetime(arr, unit='ns', errors='coerce')
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(['2015-06-19 05:33:20',
@@ -429,7 +459,7 @@ def test_unit_with_numeric(self):
'NaT',
'NaT'])
arr = [1.434692e+18, 1.432766e+18, 'foo', 'NaT']
- result = pd.to_datetime(arr, errors='coerce')
+ result = pd.to_datetime(arr, unit='ns', errors='coerce')
tm.assert_index_equal(result, expected)
def test_unit_mixed(self):
@@ -437,21 +467,21 @@ def test_unit_mixed(self):
# mixed integers/datetimes
expected = DatetimeIndex(['2013-01-01', 'NaT', 'NaT'])
arr = [pd.Timestamp('20130101'), 1.434692e+18, 1.432766e+18]
- result = pd.to_datetime(arr, errors='coerce')
+ result = pd.to_datetime(arr, unit='ns', errors='coerce')
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
- pd.to_datetime(arr, errors='raise')
+ pd.to_datetime(arr, unit='ns', errors='raise')
expected = DatetimeIndex(['NaT',
'NaT',
'2013-01-01'])
arr = [1.434692e+18, 1.432766e+18, pd.Timestamp('20130101')]
- result = pd.to_datetime(arr, errors='coerce')
+ result = pd.to_datetime(arr, unit='ns', errors='coerce')
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
- pd.to_datetime(arr, errors='raise')
+ pd.to_datetime(arr, unit='ns', errors='raise')
def test_dataframe(self):
@@ -1488,35 +1518,6 @@ def test_normalize_date():
assert (result == datetime(2012, 9, 7))
-@pytest.fixture(params=['D', 's', 'ms', 'us', 'ns'])
-def units(request):
- return request.param
-
-
-@pytest.fixture
-def epoch_1960():
- # for origin as 1960-01-01
- return Timestamp('1960-01-01')
-
-
-@pytest.fixture
-def units_from_epochs():
- return list(range(5))
-
-
-@pytest.fixture(params=[epoch_1960(),
- epoch_1960().to_pydatetime(),
- epoch_1960().to_datetime64(),
- str(epoch_1960())])
-def epochs(request):
- return request.param
-
-
-@pytest.fixture
-def julian_dates():
- return pd.date_range('2014-1-1', periods=10).to_julian_date().values
-
-
class TestOrigin(object):
def test_to_basic(self, julian_dates):
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 0900d21b250ed..195519b0e66fa 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -98,7 +98,7 @@ def create_block(typestr, placement, item_shape=None, num_offset=0):
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
- values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
+ values = DatetimeIndex(np.arange(N) * 1e9, unit='ns', tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category', ):
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index e447a74b2b462..3fb04d94d94ec 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -349,15 +349,16 @@ def test_make_field_float(self):
def test_make_field_datetime(self):
data = [1., 2., 3.]
- kinds = [pd.Series(pd.to_datetime(data), name='values'),
- pd.to_datetime(data)]
+ kinds = [pd.Series(pd.to_datetime(data, unit='ns'), name='values'),
+ pd.to_datetime(data, unit='ns')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime'}
assert result == expected
- kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'),
- pd.to_datetime(data, utc=True)]
+ kinds = [pd.Series(pd.to_datetime(data, unit='ns', utc=True),
+ name='values'),
+ pd.to_datetime(data, unit='ns', utc=True)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime', "tz": "UTC"}
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 671d4248818e4..6a937eac3dba6 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -181,7 +181,7 @@ def _check_orient(df, orient, dtype=None, numpy=False,
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(
- unser.index.values.astype('i8') * 1e6)
+ unser.index.values.astype('i8') * 1e6, unit='ns')
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(df.values, unser.values,
@@ -832,7 +832,7 @@ def test_timedelta(self):
result = pd.read_json(frame.to_json(date_unit='ns'))
result['a'] = pd.to_timedelta(result.a, unit='ns')
- result['c'] = pd.to_datetime(result.c)
+ result['c'] = pd.to_datetime(result.c, unit='ns')
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index d42e37048d87f..43473108de28d 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -1730,7 +1730,7 @@ def test_nanosecond_resample_error(self):
start = 1443707890427
exp_start = 1443707890400
indx = pd.date_range(
- start=pd.to_datetime(start),
+ start=pd.to_datetime(start, unit='ns'),
periods=10,
freq='100n'
)
@@ -1739,7 +1739,7 @@ def test_nanosecond_resample_error(self):
result = r.agg('mean')
exp_indx = pd.date_range(
- start=pd.to_datetime(exp_start),
+ start=pd.to_datetime(exp_start, unit='ns'),
periods=10,
freq='100n'
)
| * add test_to_datetime_numerical_input
* check arg for numerical type
- [x] closes #15836
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15896 | 2017-04-04T22:07:47Z | 2017-11-12T19:49:38Z | null | 2017-11-12T19:49:38Z |
ENH: Add file buffer validation to I/O ops | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 2e1cc396287ce..cbb4d32cc5edb 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1033,6 +1033,7 @@ I/O
- Bug in ``pd.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
- Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`)
- Bug in ``pd.read_csv()`` when an index was specified and no values were specified as null values (:issue:`15835`)
+- Bug in ``pd.read_csv()`` in which certain invalid file objects caused the Python interpreter to crash (:issue:`15337`)
- Added checks in ``pd.read_csv()`` ensuring that values for ``nrows`` and ``chunksize`` are valid (:issue:`15767`)
- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 8bc7217db87f9..8ee6ded67f790 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -10,7 +10,7 @@
from pandas import compat
from pandas.formats.printing import pprint_thing
from pandas.core.common import AbstractMethodError
-from pandas.types.common import is_number
+from pandas.types.common import is_number, is_file_like
# compat
from pandas.errors import (ParserError, DtypeWarning, # noqa
@@ -197,9 +197,19 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
encoding=encoding,
compression=compression)
- # It is a pathlib.Path/py.path.local or string
+ # Convert pathlib.Path/py.path.local or string
filepath_or_buffer = _stringify_path(filepath_or_buffer)
- return _expand_user(filepath_or_buffer), None, compression
+
+ if isinstance(filepath_or_buffer, (compat.string_types,
+ compat.binary_type,
+ mmap.mmap)):
+ return _expand_user(filepath_or_buffer), None, compression
+
+ if not is_file_like(filepath_or_buffer):
+ msg = "Invalid file path or buffer object type: {_type}"
+ raise ValueError(msg.format(_type=type(filepath_or_buffer)))
+
+ return filepath_or_buffer, None, compression
def file_path_to_url(path):
@@ -416,6 +426,9 @@ def __init__(self, f):
def __getattr__(self, name):
return getattr(self.mmap, name)
+ def __iter__(self):
+ return self
+
def __next__(self):
newline = self.mmap.readline()
@@ -433,6 +446,10 @@ def __next__(self):
return newline
+if not compat.PY3:
+ MMapWrapper.next = lambda self: self.__next__()
+
+
class UTF8Recoder(BaseIterator):
"""
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 6d136869fc73f..737141f11d7d1 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -243,9 +243,8 @@ def __init__(self, io, **kwds):
# to get_filepath_or_buffer()
if _is_url(io):
io = _urlopen(io)
- # Deal with S3 urls, path objects, etc. Will convert them to
- # buffer or path string
- io, _, _ = get_filepath_or_buffer(io)
+ elif not isinstance(io, (ExcelFile, xlrd.Book)):
+ io, _, _ = get_filepath_or_buffer(io)
if engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py
index 686de4a196034..f3fd6332417a1 100644
--- a/pandas/tests/api/test_types.py
+++ b/pandas/tests/api/test_types.py
@@ -24,7 +24,7 @@ class TestTypes(Base, tm.TestCase):
'is_timedelta64_dtype', 'is_timedelta64_ns_dtype',
'is_unsigned_integer_dtype', 'is_period',
'is_period_dtype', 'is_re', 'is_re_compilable',
- 'is_dict_like', 'is_iterator',
+ 'is_dict_like', 'is_iterator', 'is_file_like',
'is_list_like', 'is_hashable',
'is_named_tuple', 'is_sequence',
'pandas_dtype']
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 7faf485b65d10..36d5f2dd5274b 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -1678,3 +1678,20 @@ def test_file_handles(self):
if PY3:
self.assertFalse(m.closed)
m.close()
+
+ def test_invalid_file_buffer(self):
+ # see gh-15337
+
+ class InvalidBuffer(object):
+ pass
+
+ msg = "Invalid file path or buffer object type"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(InvalidBuffer())
+
+ if PY3:
+ from unittest import mock
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(mock.Mock())
diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py
index b41df0da45234..de3a2ca35a7f5 100644
--- a/pandas/tests/types/test_inference.py
+++ b/pandas/tests/types/test_inference.py
@@ -17,7 +17,7 @@
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical)
-from pandas.compat import u, PY2, lrange
+from pandas.compat import u, PY2, PY3, StringIO, lrange
from pandas.types import inference
from pandas.types.common import (is_timedelta64_dtype,
is_timedelta64_ns_dtype,
@@ -78,6 +78,20 @@ def test_is_dict_like():
assert not inference.is_dict_like(f)
+def test_is_file_like():
+ is_file = inference.is_file_like
+
+ data = StringIO("data")
+ assert is_file(data)
+
+ data = [1, 2, 3]
+ assert not is_file(data)
+
+ if PY3:
+ from unittest import mock
+ assert not is_file(mock.Mock())
+
+
def test_is_named_tuple():
passes = (collections.namedtuple('Test', list('abc'))(1, 2, 3), )
fails = ((1, 2, 3), 'a', Series({'pi': 3.14}))
diff --git a/pandas/types/api.py b/pandas/types/api.py
index c809cb3614a8c..e78514ce77822 100644
--- a/pandas/types/api.py
+++ b/pandas/types/api.py
@@ -52,6 +52,7 @@
is_re_compilable,
is_dict_like,
is_iterator,
+ is_file_like,
is_list_like,
is_hashable,
is_named_tuple,
diff --git a/pandas/types/inference.py b/pandas/types/inference.py
index d8e3b3ee7329b..91418677c6b19 100644
--- a/pandas/types/inference.py
+++ b/pandas/types/inference.py
@@ -4,7 +4,7 @@
import re
import numpy as np
from numbers import Number
-from pandas.compat import (string_types, text_type,
+from pandas.compat import (PY2, string_types, text_type,
string_and_binary_types)
from pandas._libs import lib
@@ -22,28 +22,211 @@
def is_number(obj):
+ """
+ Check if the object is a number.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_number : bool
+ Whether `obj` is a number or not.
+
+ Examples
+ --------
+ >>> is_number(1)
+ True
+ >>> is_number("foo")
+ False
+ """
+
return isinstance(obj, (Number, np.number))
def is_string_like(obj):
+ """
+ Check if the object is a string.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Examples
+ --------
+ >>> is_string_like("foo")
+ True
+ >>> is_string_like(1)
+ False
+
+ Returns
+ -------
+ is_str_like : bool
+ Whether `obj` is a string or not.
+ """
+
return isinstance(obj, (text_type, string_types))
-def _iterable_not_string(x):
- return (isinstance(x, collections.Iterable) and
- not isinstance(x, string_types))
+def _iterable_not_string(obj):
+ """
+ Check if the object is an iterable but not a string.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_iter_not_string : bool
+ Whether `obj` is a non-string iterable.
+
+ Examples
+ --------
+ >>> _iterable_not_string([1, 2, 3])
+ True
+ >>> _iterable_not_string("foo")
+ False
+ >>> _iterable_not_string(1)
+ False
+ """
+
+ return (isinstance(obj, collections.Iterable) and
+ not isinstance(obj, string_types))
def is_iterator(obj):
- # python 3 generators have __next__ instead of next
- return hasattr(obj, 'next') or hasattr(obj, '__next__')
+ """
+ Check if the object is an iterator.
+
+ For example, lists are considered iterators
+ but not strings or datetime objects.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_iter : bool
+ Whether `obj` is an iterator.
+
+ Examples
+ --------
+ >>> is_iterator([1, 2, 3])
+ True
+ >>> is_iterator(datetime(2017, 1, 1))
+ False
+ >>> is_iterator("foo")
+ False
+ >>> is_iterator(1)
+ False
+ """
+
+ if not hasattr(obj, '__iter__'):
+ return False
+
+ if PY2:
+ return hasattr(obj, 'next')
+ else:
+ # Python 3 generators have
+ # __next__ instead of next
+ return hasattr(obj, '__next__')
+
+
+def is_file_like(obj):
+ """
+ Check if the object is a file-like object.
+
+ For objects to be considered file-like, they must
+ be an iterator AND have the following four methods:
+
+ 1) read
+ 2) write
+ 3) seek
+ 4) tell
+
+ Note: file-like objects must be iterable, but
+ iterable objects need not be file-like.
+
+ .. versionadded:: 0.20.0
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_file_like : bool
+ Whether `obj` has file-like properties.
+
+ Examples
+ --------
+ >>> buffer(StringIO("data"))
+ >>> is_file_like(buffer)
+ True
+ >>> is_file_like([1, 2, 3])
+ False
+ """
+
+ file_attrs = ('read', 'write', 'seek', 'tell')
+
+ for attr in file_attrs:
+ if not hasattr(obj, attr):
+ return False
+
+ if not is_iterator(obj):
+ return False
+
+ return True
def is_re(obj):
+ """
+ Check if the object is a regex pattern instance.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_regex : bool
+ Whether `obj` is a regex pattern.
+
+ Examples
+ --------
+ >>> is_re(re.compile(".*"))
+ True
+ >>> is_re("foo")
+ False
+ """
+
return isinstance(obj, re._pattern_type)
def is_re_compilable(obj):
+ """
+ Check if the object can be compiled into a regex pattern instance.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_regex_compilable : bool
+ Whether `obj` can be compiled as a regex pattern.
+
+ Examples
+ --------
+ >>> is_re_compilable(".*")
+ True
+ >>> is_re_compilable(1)
+ False
+ """
+
try:
re.compile(obj)
except TypeError:
@@ -52,21 +235,95 @@ def is_re_compilable(obj):
return True
-def is_list_like(arg):
- return (hasattr(arg, '__iter__') and
- not isinstance(arg, string_and_binary_types))
+def is_list_like(obj):
+ """
+ Check if the object is list-like.
+
+ Objects that are considered list-like are for example Python
+ lists, tuples, sets, NumPy arrays, and Pandas Series.
+
+ Strings and datetime objects, however, are not considered list-like.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_list_like : bool
+ Whether `obj` has list-like properties.
+
+ Examples
+ --------
+ >>> is_list_like([1, 2, 3])
+ True
+ >>> is_list_like({1, 2, 3})
+ True
+ >>> is_list_like(datetime(2017, 1, 1))
+ False
+ >>> is_list_like("foo")
+ False
+ >>> is_list_like(1)
+ False
+ """
+
+ return (hasattr(obj, '__iter__') and
+ not isinstance(obj, string_and_binary_types))
+
+def is_dict_like(obj):
+ """
+ Check if the object is dict-like.
-def is_dict_like(arg):
- return hasattr(arg, '__getitem__') and hasattr(arg, 'keys')
+ Parameters
+ ----------
+ obj : The object to check.
+ Returns
+ -------
+ is_dict_like : bool
+ Whether `obj` has dict-like properties.
-def is_named_tuple(arg):
- return isinstance(arg, tuple) and hasattr(arg, '_fields')
+ Examples
+ --------
+ >>> is_dict_like({1: 2})
+ True
+ >>> is_dict_like([1, 2, 3])
+ False
+ """
+
+ return hasattr(obj, '__getitem__') and hasattr(obj, 'keys')
+
+
+def is_named_tuple(obj):
+ """
+ Check if the object is a named tuple.
+ Parameters
+ ----------
+ obj : The object to check.
-def is_hashable(arg):
- """Return True if hash(arg) will succeed, False otherwise.
+ Returns
+ -------
+ is_named_tuple : bool
+ Whether `obj` is a named tuple.
+
+ Examples
+ --------
+ >>> Point = namedtuple("Point", ["x", "y"])
+ >>> p = Point(1, 2)
+ >>>
+ >>> is_named_tuple(p)
+ True
+ >>> is_named_tuple((1, 2))
+ False
+ """
+
+ return isinstance(obj, tuple) and hasattr(obj, '_fields')
+
+
+def is_hashable(obj):
+ """Return True if hash(obj) will succeed, False otherwise.
Some types will pass a test against collections.Hashable but fail when they
are actually hashed with hash().
@@ -82,25 +339,48 @@ def is_hashable(arg):
>>> is_hashable(a)
False
"""
- # unfortunately, we can't use isinstance(arg, collections.Hashable), which
- # can be faster than calling hash, because numpy scalars on Python 3 fail
- # this test
+ # Unfortunately, we can't use isinstance(obj, collections.Hashable), which
+ # can be faster than calling hash. That is because numpy scalars on Python
+ # 3 fail this test.
- # reconsider this decision once this numpy bug is fixed:
+ # Reconsider this decision once this numpy bug is fixed:
# https://github.com/numpy/numpy/issues/5562
try:
- hash(arg)
+ hash(obj)
except TypeError:
return False
else:
return True
-def is_sequence(x):
+def is_sequence(obj):
+ """
+ Check if the object is a sequence of objects.
+ String types are not included as sequences here.
+
+ Parameters
+ ----------
+ obj : The object to check.
+
+ Returns
+ -------
+ is_sequence : bool
+ Whether `obj` is a sequence of objects.
+
+ Examples
+ --------
+ >>> l = [1, 2, 3]
+ >>>
+ >>> is_sequence(l)
+ True
+ >>> is_sequence(iter(l))
+ False
+ """
+
try:
- iter(x)
- len(x) # it has a length
- return not isinstance(x, string_and_binary_types)
+ iter(obj) # Can iterate over it.
+ len(obj) # Has a length associated with it.
+ return not isinstance(obj, string_and_binary_types)
except (TypeError, AttributeError):
return False
| 1) Allows for more uniform handling of invalid file buffers to our `read_*` functions.
2) Adds a ton of new documentation to `inference.py`
Closes #15337.
Partially addresses #15895. | https://api.github.com/repos/pandas-dev/pandas/pulls/15894 | 2017-04-04T21:33:40Z | 2017-04-05T19:18:44Z | null | 2017-04-05T19:57:50Z |
BUG: Bug in DataFrame construction with nulls and datetimes in a list like | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 355dceba1b953..2e1cc396287ce 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -997,6 +997,7 @@ Conversion
- Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`)
- Bug in ``DataFrame.fillna()`` where the argument ``downcast`` was ignored when fillna value was of type ``dict`` (:issue:`15277`)
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
+- Bug in ``DataFrame`` construction with nulls and datetimes in a list-like (:issue:`15869`)
Indexing
^^^^^^^^
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index b0fb7048f154c..905f5278bcfd8 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -439,31 +439,86 @@ def infer_dtype(object value):
return 'mixed'
-cpdef bint is_possible_datetimelike_array(object arr):
- # determine if we have a possible datetimelike (or null-like) array
+cpdef object infer_datetimelike_array(object arr):
+ """
+ infer if we have a datetime or timedelta array
+ - date: we have *only* date and maybe strings, nulls
+ - datetime: we have *only* datetimes and maybe strings, nulls
+ - timedelta: we have *only* timedeltas and maybe strings, nulls
+ - nat: we do not have *any* date, datetimes or timedeltas, but do have
+ at least a NaT
+ - mixed: other objects (strings or actual objects)
+
+ Parameters
+ ----------
+ arr : object array
+
+ Returns
+ -------
+ string: {datetime, timedelta, date, nat, mixed}
+
+ """
+
cdef:
Py_ssize_t i, n = len(arr)
- bint seen_timedelta = 0, seen_datetime = 0
+ bint seen_timedelta = 0, seen_date = 0, seen_datetime = 0
+ bint seen_nat = 0
+ list objs = []
object v
for i in range(n):
v = arr[i]
if util.is_string_object(v):
- continue
+ objs.append(v)
+
+ if len(objs) == 3:
+ break
+
elif util._checknull(v):
- continue
- elif is_datetime(v):
- seen_datetime=1
- elif is_timedelta(v):
- seen_timedelta=1
+ # nan or None
+ pass
+ elif v is NaT:
+ seen_nat = 1
+ elif is_datetime(v) or util.is_datetime64_object(v):
+ # datetime, or np.datetime64
+ seen_datetime = 1
+ elif is_date(v):
+ seen_date = 1
+ elif is_timedelta(v) or util.is_timedelta64_object(v):
+ # timedelta, or timedelta64
+ seen_timedelta = 1
else:
- return False
- return seen_datetime or seen_timedelta
+ return 'mixed'
+
+ if seen_date and not (seen_datetime or seen_timedelta):
+ return 'date'
+ elif seen_datetime and not seen_timedelta:
+ return 'datetime'
+ elif seen_timedelta and not seen_datetime:
+ return 'timedelta'
+ elif seen_nat:
+ return 'nat'
+
+ # short-circuit by trying to
+ # actually convert these strings
+ # this is for performance as we don't need to try
+ # convert *every* string array
+ if len(objs) == 3:
+ try:
+ tslib.array_to_datetime(objs, errors='raise')
+ return 'datetime'
+ except:
+ pass
+
+ # we are *not* going to infer from strings
+ # for timedelta as too much ambiguity
+
+ return 'mixed'
cdef inline bint is_null_datetimelike(v):
# determine if we have a null for a timedelta/datetime (or integer
- # versions)x
+ # versions)
if util._checknull(v):
return True
elif v is NaT:
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 1ab292649a973..6d28d3b4dfcd5 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1366,6 +1366,15 @@ def test_constructor_with_datetimes(self):
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
+ def test_constructor_datetimes_with_nulls(self):
+ # gh-15869
+ for arr in [np.array([None, None, None, None,
+ datetime.now(), None]),
+ np.array([None, None, datetime.now(), None])]:
+ result = DataFrame(arr).get_dtype_counts()
+ expected = Series({'datetime64[ns]': 1})
+ tm.assert_series_equal(result, expected)
+
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
diff --git a/pandas/tests/frame/test_misc_api.py b/pandas/tests/frame/test_misc_api.py
index 42427df90401d..50fa0dca6bf04 100644
--- a/pandas/tests/frame/test_misc_api.py
+++ b/pandas/tests/frame/test_misc_api.py
@@ -12,7 +12,7 @@
from numpy.random import randn
import numpy as np
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, date_range, timedelta_range
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
@@ -328,6 +328,16 @@ def test_empty_nonzero(self):
self.assertTrue(df.empty)
self.assertTrue(df.T.empty)
+ def test_with_datetimelikes(self):
+
+ df = DataFrame({'A': date_range('20130101', periods=10),
+ 'B': timedelta_range('1 day', periods=10)})
+ t = df.T
+
+ result = t.get_dtype_counts()
+ expected = Series({'object': 10})
+ tm.assert_series_equal(result, expected)
+
def test_inplace_return_self(self):
# re #1893
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 24e4355fa9f9a..dbe2db67359f3 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -327,6 +327,14 @@ def test_constructor_datelike_coercion(self):
result = df.loc['216']
self.assertTrue(result.dtype == object)
+ def test_constructor_datetimes_with_nulls(self):
+ # gh-15869
+ for arr in [np.array([None, None, None, None,
+ datetime.now(), None]),
+ np.array([None, None, datetime.now(), None])]:
+ result = Series(arr)
+ assert result.dtype == 'M8[ns]'
+
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype='M8[ns]', index=lrange(5))
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 985e5b9f95831..4180ad1919315 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -774,6 +774,10 @@ def maybe_infer_to_datetimelike(value, convert_dates=False):
if not v.ndim == 1:
v = v.ravel()
+ # we only care about object dtypes
+ if not is_object_dtype(v):
+ return value
+
if len(v):
def _try_datetime(v):
@@ -806,25 +810,25 @@ def _try_timedelta(v):
except:
return v
- # do a quick inference for perf
- sample = v[:min(3, len(v))]
- inferred_type = lib.infer_dtype(sample)
+ inferred_type = lib.infer_datetimelike_array(_ensure_object(v))
- if (inferred_type in ['datetime', 'datetime64'] or
- (convert_dates and inferred_type in ['date'])):
+ if inferred_type == 'date' and convert_dates:
+ value = _try_datetime(v)
+ elif inferred_type == 'datetime':
value = _try_datetime(v)
- elif inferred_type in ['timedelta', 'timedelta64']:
+ elif inferred_type == 'timedelta':
value = _try_timedelta(v)
+ elif inferred_type == 'nat':
- # It's possible to have nulls intermixed within the datetime or
- # timedelta. These will in general have an inferred_type of 'mixed',
- # so have to try both datetime and timedelta.
-
- # try timedelta first to avoid spurious datetime conversions
- # e.g. '00:00:01' is a timedelta but technically is also a datetime
- elif inferred_type in ['mixed']:
+ # if all NaT, return as datetime
+ if isnull(v).all():
+ value = _try_datetime(v)
+ else:
- if lib.is_possible_datetimelike_array(_ensure_object(v)):
+ # We have at least a NaT and a string
+ # try timedelta first to avoid spurious datetime conversions
+ # e.g. '00:00:01' is a timedelta but
+ # technically is also a datetime
value = _try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
value = _try_datetime(v)
| closes #15869 | https://api.github.com/repos/pandas-dev/pandas/pulls/15892 | 2017-04-04T20:02:23Z | 2017-04-04T22:15:18Z | null | 2017-04-04T22:16:40Z |
DOC fixes in contributing.rst | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 467d6456d60cd..8af7de688a2ae 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -536,10 +536,10 @@ signatures and add deprecation warnings where needed.
.. _contributing.ci:
-Testing Thru Continuous Integration
+Testing With Continuous Integration
-----------------------------------
-The *pandas* testing suite will run automatically on `Travis-CI <https://travis-ci.org/>`__,
+The *pandas* test suite will run automatically on `Travis-CI <https://travis-ci.org/>`__,
`Appveyor <https://www.appveyor.com/>`__, and `Circle CI <https://circleci.com/>`__ continuous integration
services, once your pull request is submitted.
However, if you wish to run the test suite on a branch prior to submitting the pull request,
@@ -548,14 +548,14 @@ for `Travis-CI <http://about.travis-ci.org/docs/user/getting-started/>`__,
`Appveyor <https://www.appveyor.com/docs/>`__ , and `CircleCI <https://circleci.com/>`__.
A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing,
-then you will get a red 'X', where you can click thru to see the individual failed tests.
+then you will get a red 'X', where you can click through to see the individual failed tests.
This is an example of a green build.
.. image:: _static/ci.png
.. note::
- Each time you push to *your* fork, a *new* run of the tests will trigger on the CI. Appveyor will auto-cancel
+ Each time you push to *your* fork, a *new* run of the tests will be triggered on the CI. Appveyor will auto-cancel
any non-currently-running tests for that same pull-request. You can enable the auto-cancel feature for
`Travis-CI here <https://docs.travis-ci.com/user/customizing-the-build/#Building-only-the-latest-commit>`__ and
for `CircleCI here <https://circleci.com/changelog-legacy/#option-to-auto-cancel-redundant-builds>`__.
@@ -623,12 +623,12 @@ testing of many cases in a concise way that enables an easy-to-read syntax.
.. note::
- .. code-block:: python
+ *pandas* existing test structure is *mostly* classed based, meaning that you will typically find tests wrapped in a class, inheriting from ``tm.TestCase``.
- *pandas* existing test structure is *mostly* classed based, meaning that you will typically find tests wrapped in a class, inheriting from ``tm.TestCase``.
+ .. code-block:: python
class TestReallyCoolFeature(tm.TestCase):
- ....
+ ....
Going forward we are moving to a more *functional* style, please see below.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index bcd58ea791083..1aaa106d2c68f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -80,7 +80,7 @@
If True, performs operation inplace and returns None.""",
unique='np.ndarray', duplicated='Series',
optional_by='',
- versionadded_to_excel='\n.. versionadded:: 0.20.0\n')
+ versionadded_to_excel='\n .. versionadded:: 0.20.0\n')
def _coerce_method(converter):
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index 518e0bc2064e2..401d8d9ead2b8 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -114,10 +114,10 @@ def json_normalize(data, record_path=None, meta=None,
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
- * ignore : will ignore KeyError if keys listed in meta are not
- always present
- * raise : will raise KeyError if keys listed in meta are not
- always present
+ * 'ignore' : will ignore KeyError if keys listed in meta are not
+ always present
+ * 'raise' : will raise KeyError if keys listed in meta are not
+ always present
.. versionadded:: 0.20.0
| xref #15883 | https://api.github.com/repos/pandas-dev/pandas/pulls/15887 | 2017-04-04T09:36:48Z | 2017-04-04T12:29:38Z | 2017-04-04T12:29:38Z | 2017-04-04T12:29:41Z |
BUG: groupby's first/last functions maintain Series rather than convert to numpy array (#15884) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 74fe7916523c5..493f50617aceb 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1072,6 +1072,7 @@ Groupby/Resample/Rolling
- Bug in ``.rolling()`` where ``pd.Timedelta`` or ``datetime.timedelta`` was not accepted as a ``window`` argument (:issue:`15440`)
- Bug in ``Rolling.quantile`` function that caused a segmentation fault when called with a quantile value outside of the range [0, 1] (:issue:`15463`)
- Bug in ``DataFrame.resample().median()`` if duplicate column names are present (:issue:`14233`)
+- Bug in ``.groupby()`` when calling ``first()`` or ``last()`` on TZ-aware timestamps (:issue:`15884`)
Sparse
^^^^^^
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index fe764a099bb63..8091a976c07ee 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1162,11 +1162,10 @@ def first_compat(x, axis=0):
def first(x):
- x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
- return x[0]
+ return x.iloc[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
@@ -1177,11 +1176,10 @@ def last_compat(x, axis=0):
def last(x):
- x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
- return x[-1]
+ return x.iloc[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index c17c98c5448be..0582cb0b79903 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -81,6 +81,15 @@ def test_select_bad_cols(self):
# will have to rethink regex if you change message!
g[['A', 'C']]
+ def test_first_last_timestamp(self):
+ # GH15884
+ df = pd.DataFrame({'time': [pd.Timestamp('2012-01-01 13:00:00+00:00')],
+ 'A': [3]})
+ result = df.groupby('A', as_index=False).first()
+ assert_frame_equal(df, result)
+ result = df.groupby('A', as_index=False).last()
+ assert_frame_equal(df, result)
+
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
| - [x] closes #15884
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15885 | 2017-04-03T23:20:44Z | 2017-04-04T00:58:08Z | null | 2017-04-04T02:09:37Z |
DOC: add section on how to use parametrize to contributing.rst | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 82f9b18c1e2eb..467d6456d60cd 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -51,14 +51,9 @@ Bug reports must:
...
```
-#. Include the full version string of *pandas* and its dependencies. In versions
- of *pandas* after 0.12 you can use a built in function::
-
- >>> from pandas.util.print_versions import show_versions
- >>> show_versions()
-
- and in *pandas* 0.13.1 onwards::
+#. Include the full version string of *pandas* and its dependencies. You can use the built in function::
+ >>> import pandas as pd
>>> pd.show_versions()
#. Explain why the current behavior is wrong/not desired and what you expect instead.
@@ -209,7 +204,7 @@ At this point you can easily do an *in-place* install, as detailed in the next s
Creating a Windows development environment
------------------------------------------
-To build on Windows, you need to have compilers installed to build the extensions. You will need to install the appropriate Visual Studio compilers, VS 2008 for Python 2.7, VS 2010 for 3.4, and VS 2015 for Python 3.5.
+To build on Windows, you need to have compilers installed to build the extensions. You will need to install the appropriate Visual Studio compilers, VS 2008 for Python 2.7, VS 2010 for 3.4, and VS 2015 for Python 3.5 and 3.6.
For Python 2.7, you can install the ``mingw`` compiler which will work equivalently to VS 2008::
@@ -219,7 +214,7 @@ or use the `Microsoft Visual Studio VC++ compiler for Python <https://www.micros
For Python 3.4, you can download and install the `Windows 7.1 SDK <https://www.microsoft.com/en-us/download/details.aspx?id=8279>`__. Read the references below as there may be various gotchas during the installation.
-For Python 3.5, you can download and install the `Visual Studio 2015 Community Edition <https://www.visualstudio.com/en-us/downloads/visual-studio-2015-downloads-vs.aspx>`__.
+For Python 3.5 and 3.6, you can download and install the `Visual Studio 2015 Community Edition <https://www.visualstudio.com/en-us/downloads/visual-studio-2015-downloads-vs.aspx>`__.
Here are some references and blogs:
@@ -544,26 +539,26 @@ signatures and add deprecation warnings where needed.
Testing Thru Continuous Integration
-----------------------------------
-The pandas testing suite will run automatically on Travis-CI, Appveyor, and Circle CI
-continuous integration services, once your pull request is submitted.
+The *pandas* testing suite will run automatically on `Travis-CI <https://travis-ci.org/>`__,
+`Appveyor <https://www.appveyor.com/>`__, and `Circle CI <https://circleci.com/>`__ continuous integration
+services, once your pull request is submitted.
However, if you wish to run the test suite on a branch prior to submitting the pull request,
-then Travis-CI, Appveyor and/or CircleCI need to be hooked up to your GitHub repository.
-Instructions for doing so are `here <http://about.travis-ci.org/docs/user/getting-started/>`__ for
-Travis-CI, `here <https://www.appveyor.com/docs/>`__ for Appveyor, and
-`here <https://circleci.com/>`__ for CircleCI.
+then the continuous integration services need to be hooked to your GitHub repository. Instructions are here
+for `Travis-CI <http://about.travis-ci.org/docs/user/getting-started/>`__,
+`Appveyor <https://www.appveyor.com/docs/>`__ , and `CircleCI <https://circleci.com/>`__.
-A pull-request will be considered for merging when you have an all 'green' build. See
-this example.
+A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing,
+then you will get a red 'X', where you can click thru to see the individual failed tests.
+This is an example of a green build.
.. image:: _static/ci.png
-
.. note::
- Pushing to *your* branch will cancel any non-currently-running tests for that
- same pull-request for Appveyor. For Travis CI, you can enable the auto-cancel feature
- `here <https://docs.travis-ci.com/user/customizing-the-build/#Building-only-the-latest-commit>`__ and
- for CircleCI `here <https://circleci.com/changelog-legacy/#option-to-auto-cancel-redundant-builds>`__.
+ Each time you push to *your* fork, a *new* run of the tests will trigger on the CI. Appveyor will auto-cancel
+ any non-currently-running tests for that same pull-request. You can enable the auto-cancel feature for
+ `Travis-CI here <https://docs.travis-ci.com/user/customizing-the-build/#Building-only-the-latest-commit>`__ and
+ for `CircleCI here <https://circleci.com/changelog-legacy/#option-to-auto-cancel-redundant-builds>`__.
.. _contributing.tdd:
@@ -620,8 +615,96 @@ the expected correct result::
assert_frame_equal(pivoted, expected)
+How to use ``parametrize``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`pytest <http://doc.pytest.org/en/latest/>`__ has a nice feature `parametrize <https://docs.pytest.org/en/latest/parametrize.html>`__ to allow
+testing of many cases in a concise way that enables an easy-to-read syntax.
+
+.. note::
+
+ .. code-block:: python
+
+ *pandas* existing test structure is *mostly* classed based, meaning that you will typically find tests wrapped in a class, inheriting from ``tm.TestCase``.
+
+ class TestReallyCoolFeature(tm.TestCase):
+ ....
+
+ Going forward we are moving to a more *functional* style, please see below.
+
+
+Here is an example of a self-contained set of tests that illustrate multiple features that we like to use.
+
+- functional style: tests are like ``test_*`` and *only* take arguments that are either fixtures or parameters
+- using ``parametrize``: allow testing of multiple cases
+- ``fixture``, code for object construction, on a per-test basis
+- using bare ``assert`` for scalars and truth-testing
+- ``tm.assert_series_equal`` (and its counter part ``tm.assert_frame_equal``), for pandas object comparisons.
+- the typical pattern of constructing an ``expected`` and comparing versus the ``result``
+
+We would name this file ``test_cool_feature.py`` and put in an appropriate place in the ``pandas/tests/`` sturcture.
+
+.. code-block:: python
+
+ import pytest
+ import numpy as np
+ import pandas as pd
+ from pandas.util import testing as tm
+
+ @pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64'])
+ def test_dtypes(dtype):
+ assert str(np.dtype(dtype)) == dtype
+
+ @pytest.fixture
+ def series():
+ return pd.Series([1, 2, 3])
+
+ @pytest.fixture(params=['int8', 'int16', 'int32', 'int64'])
+ def dtype(request):
+ return request.param
+
+ def test_series(series, dtype):
+ result = series.astype(dtype)
+ assert result.dtype == dtype
+
+ expected = pd.Series([1, 2, 3], dtype=dtype)
+ tm.assert_series_equal(result, expected)
+
+
+A test run of this yields
+
+.. code-block:: shell
+
+ ((pandas) bash-3.2$ pytest test_cool_feature.py -v
+ =========================== test session starts ===========================
+ platform darwin -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0
+ collected 8 items
+
+ tester.py::test_dtypes[int8] PASSED
+ tester.py::test_dtypes[int16] PASSED
+ tester.py::test_dtypes[int32] PASSED
+ tester.py::test_dtypes[int64] PASSED
+ tester.py::test_series[int8] PASSED
+ tester.py::test_series[int16] PASSED
+ tester.py::test_series[int32] PASSED
+ tester.py::test_series[int64] PASSED
+
+Tests that we have ``parametrized`` are now accessible via the test name, for example we could run these with ``-k int8`` to sub-select *only* those tests which match ``int8``.
+
+
+.. code-block:: shell
+
+ ((pandas) bash-3.2$ pytest test_cool_feature.py -v -k int8
+ =========================== test session starts ===========================
+ platform darwin -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0
+ collected 8 items
+
+ test_cool_feature.py::test_dtypes[int8] PASSED
+ test_cool_feature.py::test_series[int8] PASSED
+
+
Running the test suite
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
The tests can then be run directly inside your Git clone (without having to
install *pandas*) by typing::
@@ -675,7 +758,8 @@ Furthermore one can run
with an imported pandas to run tests similarly.
Running the performance test suite
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------
+
Performance matters and it is worth considering whether your code has introduced
performance regressions. *pandas* is in the process of migrating to
`asv benchmarks <https://github.com/spacetelescope/asv>`__
| closes #15608 | https://api.github.com/repos/pandas-dev/pandas/pulls/15883 | 2017-04-03T19:48:51Z | 2017-04-03T19:49:00Z | 2017-04-03T19:49:00Z | 2017-04-04T09:07:31Z |
BUG: Patch handling no NA values in TextFileReader | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 63aea96ef3369..fd7744158829f 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -995,6 +995,7 @@ I/O
- Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`)
- Bug in ``pd.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
- Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`)
+- Bug in ``pd.read_csv()`` when an index was specified and no values were specified as null values (:issue:`15835`)
- Added checks in ``pd.read_csv()`` ensuring that values for ``nrows`` and ``chunksize`` are valid (:issue:`15767`)
- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 30b88de91ef76..0080ded1ac03d 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2890,7 +2890,7 @@ def _clean_na_values(na_values, keep_default_na=True):
if keep_default_na:
na_values = _NA_VALUES
else:
- na_values = []
+ na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
na_values = na_values.copy() # Prevent aliasing.
diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py
index 2cbd7cdedf2ab..cf29dbdfef49d 100644
--- a/pandas/tests/io/parser/na_values.py
+++ b/pandas/tests/io/parser/na_values.py
@@ -11,7 +11,7 @@
import pandas.io.parsers as parsers
import pandas.util.testing as tm
-from pandas import DataFrame, MultiIndex
+from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
@@ -303,3 +303,12 @@ def test_na_values_uint64(self):
expected = DataFrame([[str(2**63), 1], ['', 2]])
out = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(out, expected)
+
+ def test_empty_na_values_no_default_with_index(self):
+ # see gh-15835
+ data = "a,1\nb,2"
+
+ expected = DataFrame({'1': [2]}, index=Index(["b"], name="a"))
+ out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0)
+
+ tm.assert_frame_equal(out, expected)
| When cleaning `na_values` during initialization of `TextFileReader`, we return a `list` whenever we specify that `na_values` should be empty. However, the rest of the code expects a `set`.
Closes #15835.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15881 | 2017-04-03T17:56:15Z | 2017-04-03T20:47:30Z | null | 2017-04-03T20:49:07Z |
DOC: update contributing.rst for ci | diff --git a/doc/source/_static/ci.png b/doc/source/_static/ci.png
new file mode 100644
index 0000000000000..82985ff8c204a
Binary files /dev/null and b/doc/source/_static/ci.png differ
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 83cc1777b35f6..82f9b18c1e2eb 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -113,13 +113,6 @@ want to clone your fork to your machine::
This creates the directory `pandas-yourname` and connects your repository to
the upstream (main project) *pandas* repository.
-The testing suite will run automatically on Travis-CI and Appveyor once your
-pull request is submitted. However, if you wish to run the test suite on a
-branch prior to submitting the pull request, then Travis-CI and/or AppVeyor
-need to be hooked up to your GitHub repository. Instructions for doing so
-are `here <http://about.travis-ci.org/docs/user/getting-started/>`__ for
-Travis-CI and `here <https://www.appveyor.com/docs/>`__ for AppVeyor.
-
Creating a branch
-----------------
@@ -432,7 +425,8 @@ Building master branch documentation
When pull requests are merged into the *pandas* ``master`` branch, the main parts of
the documentation are also built by Travis-CI. These docs are then hosted `here
-<http://pandas-docs.github.io/pandas-docs-travis>`__.
+<http://pandas-docs.github.io/pandas-docs-travis>`__, see also
+the :ref:`Continuous Integration <contributing.ci>` section.
Contributing to the code base
=============================
@@ -444,8 +438,9 @@ Code standards
--------------
Writing good code is not just about what you write. It is also about *how* you
-write it. During testing on Travis-CI, several tools will be run to check your
-code for stylistic errors. Generating any warnings will cause the test to fail.
+write it. During :ref:`Continuous Integration <contributing.ci>` testing, several
+tools will be run to check your code for stylistic errors.
+Generating any warnings will cause the test to fail.
Thus, good style is a requirement for submitting code to *pandas*.
In addition, because a lot of people use our library, it is important that we
@@ -467,7 +462,8 @@ Here are *some* of the more common ``cpplint`` issues:
- we restrict line-length to 80 characters to promote readability
- every header file must include a header guard to avoid name collisions if re-included
-Travis-CI will run the `cpplint <https://pypi.python.org/pypi/cpplint>`_ tool
+:ref:`Continuous Integration <contributing.ci>`. will run the
+`cpplint <https://pypi.python.org/pypi/cpplint>`_ tool
and report any stylistic errors in your code. Therefore, it is helpful before
submitting code to run the check yourself::
@@ -514,7 +510,8 @@ the more common ``PEP8`` issues:
- we restrict line-length to 79 characters to promote readability
- passing arguments should have spaces after commas, e.g. ``foo(arg1, arg2, kw1='bar')``
-Travis-CI will run the `flake8 <http://pypi.python.org/pypi/flake8>`_ tool
+:ref:`Continuous Integration <contributing.ci>` will run
+the `flake8 <http://pypi.python.org/pypi/flake8>`_ tool
and report any stylistic errors in your code. Therefore, it is helpful before
submitting code to run the check yourself on the diff::
@@ -542,6 +539,35 @@ existing code, so don't break it if at all possible. If you think breakage is r
clearly state why as part of the pull request. Also, be careful when changing method
signatures and add deprecation warnings where needed.
+.. _contributing.ci:
+
+Testing Thru Continuous Integration
+-----------------------------------
+
+The pandas testing suite will run automatically on Travis-CI, Appveyor, and Circle CI
+continuous integration services, once your pull request is submitted.
+However, if you wish to run the test suite on a branch prior to submitting the pull request,
+then Travis-CI, Appveyor and/or CircleCI need to be hooked up to your GitHub repository.
+Instructions for doing so are `here <http://about.travis-ci.org/docs/user/getting-started/>`__ for
+Travis-CI, `here <https://www.appveyor.com/docs/>`__ for Appveyor, and
+`here <https://circleci.com/>`__ for CircleCI.
+
+A pull-request will be considered for merging when you have an all 'green' build. See
+this example.
+
+.. image:: _static/ci.png
+
+
+.. note::
+
+ Pushing to *your* branch will cancel any non-currently-running tests for that
+ same pull-request for Appveyor. For Travis CI, you can enable the auto-cancel feature
+ `here <https://docs.travis-ci.com/user/customizing-the-build/#Building-only-the-latest-commit>`__ and
+ for CircleCI `here <https://circleci.com/changelog-legacy/#option-to-auto-cancel-redundant-builds>`__.
+
+.. _contributing.tdd:
+
+
Test-driven development/code writing
------------------------------------
@@ -875,12 +901,8 @@ updated. Pushing them to GitHub again is done by::
git push -f origin shiny-new-feature
This will automatically update your pull request with the latest code and restart the
-Travis-CI tests.
+:ref:`Continuous Integration <contributing.ci>` tests.
-If your pull request is related to the ``pandas.io.gbq`` module, please see
-the section on :ref:`Running Google BigQuery Integration Tests
-<contributing.gbq_integration_tests>` to configure a Google BigQuery service
-account for your pull request on Travis-CI.
Delete your merged branch (optional)
------------------------------------
| xref #15878 | https://api.github.com/repos/pandas-dev/pandas/pulls/15880 | 2017-04-03T17:41:10Z | 2017-04-03T18:26:36Z | 2017-04-03T18:26:36Z | 2017-04-03T18:52:43Z |
DOC: remove gbq_integration instructions from contributing.rst | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 5e551a7fd5349..83cc1777b35f6 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -616,23 +616,23 @@ Or with one of the following constructs::
pytest pandas/tests/[test-module].py::[TestClass]
pytest pandas/tests/[test-module].py::[TestClass]::[test_method]
-Using `pytest-xdist <https://pypi.python.org/pypi/pytest-xdist>`_, one can
+Using `pytest-xdist <https://pypi.python.org/pypi/pytest-xdist>`_, one can
speed up local testing on multicore machines. To use this feature, you will
need to install `pytest-xdist` via::
pip install pytest-xdist
-
-Two scripts are provided to assist with this. These scripts distribute
+
+Two scripts are provided to assist with this. These scripts distribute
testing across 4 threads.
On Unix variants, one can type::
test_fast.sh
-
+
On Windows, one can type::
test_fast.bat
-
+
This can significantly reduce the time it takes to locally run tests before
submitting a pull request.
@@ -657,12 +657,6 @@ to enable easy monitoring of the performance of critical *pandas* operations.
These benchmarks are all found in the ``pandas/asv_bench`` directory. asv
supports both python2 and python3.
-.. note::
-
- The asv benchmark suite was translated from the previous framework, vbench,
- so many stylistic issues are likely a result of automated transformation of the
- code.
-
To use all features of asv, you will need either ``conda`` or
``virtualenv``. For more details please check the `asv installation
webpage <https://asv.readthedocs.io/en/latest/installing.html>`_.
@@ -722,73 +716,6 @@ This will display stderr from the benchmarks, and use your local
Information on how to write a benchmark and how to use asv can be found in the
`asv documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
-.. _contributing.gbq_integration_tests:
-
-Running Google BigQuery Integration Tests
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You will need to create a Google BigQuery private key in JSON format in
-order to run Google BigQuery integration tests on your local machine and
-on Travis-CI. The first step is to create a `service account
-<https://console.developers.google.com/iam-admin/serviceaccounts/>`__.
-
-Integration tests for ``pandas.io.gbq`` are skipped in pull requests because
-the credentials that are required for running Google BigQuery integration
-tests are `encrypted <https://docs.travis-ci.com/user/encrypting-files/>`__
-on Travis-CI and are only accessible from the pandas-dev/pandas repository. The
-credentials won't be available on forks of pandas. Here are the steps to run
-gbq integration tests on a forked repository:
-
-#. Go to `Travis CI <https://travis-ci.org/>`__ and sign in with your GitHub
- account.
-#. Click on the ``+`` icon next to the ``My Repositories`` list and enable
- Travis builds for your fork.
-#. Click on the gear icon to edit your travis build, and add two environment
- variables:
-
- - ``GBQ_PROJECT_ID`` with the value being the ID of your BigQuery project.
-
- - ``SERVICE_ACCOUNT_KEY`` with the value being the contents of the JSON key
- that you downloaded for your service account. Use single quotes around
- your JSON key to ensure that it is treated as a string.
-
- For both environment variables, keep the "Display value in build log" option
- DISABLED. These variables contain sensitive data and you do not want their
- contents being exposed in build logs.
-#. Your branch should be tested automatically once it is pushed. You can check
- the status by visiting your Travis branches page which exists at the
- following location: https://travis-ci.org/your-user-name/pandas/branches .
- Click on a build job for your branch. Expand the following line in the
- build log: ``ci/print_skipped.py /tmp/pytest.xml`` . Search for the
- term ``test_gbq`` and confirm that gbq integration tests are not skipped.
-
-Running the vbench performance test suite (phasing out)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Historically, *pandas* used `vbench library <https://github.com/pydata/vbench>`_
-to enable easy monitoring of the performance of critical *pandas* operations.
-These benchmarks are all found in the ``pandas/vb_suite`` directory. vbench
-currently only works on python2.
-
-To install vbench::
-
- pip install git+https://github.com/pydata/vbench
-
-Vbench also requires ``sqlalchemy``, ``gitpython``, and ``psutil``, which can all be installed
-using pip. If you need to run a benchmark, change your directory to the *pandas* root and run::
-
- ./test_perf.sh -b master -t HEAD
-
-This will check out the master revision and run the suite on both master and
-your commit. Running the full test suite can take up to one hour and use up
-to 3GB of RAM. Usually it is sufficient to paste a subset of the results into the Pull Request to show that the committed changes do not cause unexpected
-performance regressions.
-
-You can run specific benchmarks using the ``-r`` flag, which takes a regular expression.
-
-See the `performance testing wiki <https://github.com/pandas-dev/pandas/wiki/Performance-Testing>`_ for information
-on how to write a benchmark.
-
Documenting your code
---------------------
| DOC: remove vbench instructions from contributing.rst
| https://api.github.com/repos/pandas-dev/pandas/pulls/15879 | 2017-04-03T17:25:49Z | 2017-04-03T17:25:55Z | 2017-04-03T17:25:55Z | 2019-12-26T03:32:34Z |
DOC: Fix a typo in dsintro.rst | diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index cc69367017aed..4fcb63c18757a 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -153,7 +153,7 @@ Vectorized operations and label alignment with Series
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When doing data analysis, as with raw NumPy arrays looping through Series
-value-by-value is usually not necessary. Series can be also be passed into most
+value-by-value is usually not necessary. Series can also be passed into most
NumPy methods expecting an ndarray.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15877 | 2017-04-03T15:50:16Z | 2017-04-03T16:06:46Z | 2017-04-03T16:06:46Z | 2017-04-03T16:21:50Z | |
Revert "CI: add jdcal to 3.6 build as openpyxl >= 2.4.5 is broken" | diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run
index 8f81c4620558e..41c9680ce1b7e 100644
--- a/ci/requirements-3.6.run
+++ b/ci/requirements-3.6.run
@@ -2,10 +2,7 @@ python-dateutil
pytz
numpy
scipy
-# openpyxl >= 2.4.5 should be dependent on jdcal
-# but is not for some reason
openpyxl
-jdcal
xlsxwriter
xlrd
xlwt
| This reverts commit d1e1ba08ef259724ba71e0953c52e8e4ad81bd17.
closes #15861
| https://api.github.com/repos/pandas-dev/pandas/pulls/15875 | 2017-04-03T13:01:53Z | 2017-04-03T13:02:00Z | 2017-04-03T13:01:59Z | 2017-04-03T13:02:00Z |
VIS: Allow 'C0'-like plotting for plotting colors #15516 | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index ad190671cbbdc..a872bcee3ed12 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -342,6 +342,7 @@ Other Enhancements
- The ``skiprows`` argument in ``pd.read_csv()`` now accepts a callable function as a value (:issue:`10882`)
- The ``nrows`` and ``chunksize`` arguments in ``pd.read_csv()`` are supported if both are passed (:issue:`6774`, :issue:`15755`)
- ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`)
+- ``pd.DataFrame.plot`` can pass `matplotlib 2.0 default color cycle as a single string as color parameter <http://matplotlib.org/2.0.0/users/colors.html#cn-color-selection>`__. (:issue:`15516`)
- ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`)
- ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs <timedeltas.isoformat>` (:issue:`15136`)
- ``.select_dtypes()`` now allows the string 'datetimetz' to generically select datetimes with tz (:issue:`14910`)
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 48af366f24ea4..474230369d1e1 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -141,6 +141,22 @@ def test_plot(self):
result = ax.get_axes() # deprecated
self.assertIs(result, axes[0])
+ # GH 15516
+ def test_mpl2_color_cycle_str(self):
+ # test CN mpl 2.0 color cycle
+ if self.mpl_ge_2_0_0:
+ colors = ['C' + str(x) for x in range(10)]
+ df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
+ for c in colors:
+ _check_plot_works(df.plot, color=c)
+ else:
+ pytest.skip("not supported in matplotlib < 2.0.0")
+
+ def test_color_empty_string(self):
+ df = DataFrame(randn(10, 2))
+ with tm.assertRaises(ValueError):
+ df.plot(color='')
+
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index f70a2b0b22140..99e56ca80cf97 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -225,10 +225,18 @@ def _maybe_valid_colors(colors):
# check whether each character can be convertable to colors
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
- msg = ("'{0}' can be parsed as both single color and "
- "color cycle. Specify each color using a list "
- "like ['{0}'] or {1}")
- raise ValueError(msg.format(colors, list(colors)))
+ # Special case for single str 'CN' match and convert to hex
+ # for supporting matplotlib < 2.0.0
+ if re.match(r'\AC[0-9]\Z', colors) and _mpl_ge_2_0_0():
+ hex_color = [c['color']
+ for c in list(plt.rcParams['axes.prop_cycle'])]
+ colors = [hex_color[int(colors[1])]]
+ else:
+ # this may no longer be required
+ msg = ("'{0}' can be parsed as both single color and "
+ "color cycle. Specify each color using a list "
+ "like ['{0}'] or {1}")
+ raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
@@ -237,7 +245,10 @@ def _maybe_valid_colors(colors):
pass
if len(colors) != num_colors:
- multiple = num_colors // len(colors) - 1
+ try:
+ multiple = num_colors // len(colors) - 1
+ except ZeroDivisionError:
+ raise ValueError("Invalid color argument: ''")
mod = num_colors % len(colors)
colors += multiple * colors
| - [x] closes #15516
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
Defined special case for 'CN' cases where N is 0 to 9. There may be custom color cycles that can also be parsed as a single color so the ValueError message is left intact (unless this is false).
```
running: pytest --skip-slow --skip-network pandas
============================= test session starts ==============================
platform linux2 -- Python 2.7.13, pytest-3.0.7, py-1.4.33, pluggy-0.4.0
rootdir: /home/sfoo/pandas, inifile: setup.cfg
collected 11894 items / 3 skipped
10234 passed, 1586 skipped, 77 xfailed, 2084 pytest-warnings in 486.80 seconds
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/15873 | 2017-04-03T09:02:06Z | 2017-04-12T20:49:50Z | 2017-04-12T20:49:49Z | 2017-04-13T20:11:35Z |
CLN: Remove "flake8: noqa" from even more files | diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 279a82fea1cc2..e268dc1d89ea0 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -1,13 +1,13 @@
-""" support pre 0.12 series pickle compatibility """
-
-# flake8: noqa
+"""
+Support pre-0.12 series pickle compatibility.
+"""
import sys
-import pandas
+import pandas # noqa
import copy
import pickle as pkl
from pandas import compat, Index
-from pandas.compat import u, string_types
+from pandas.compat import u, string_types # noqa
def load_reduce(self):
@@ -16,17 +16,19 @@ def load_reduce(self):
func = stack[-1]
if type(args[0]) is type:
- n = args[0].__name__
+ n = args[0].__name__ # noqa
try:
stack[-1] = func(*args)
return
except Exception as e:
- # if we have a deprecated function
- # try to replace and try again
+ # If we have a deprecated function,
+ # try to replace and try again.
+
+ msg = '_reconstruct: First argument must be a sub-type of ndarray'
- if '_reconstruct: First argument must be a sub-type of ndarray' in str(e):
+ if msg in str(e):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
@@ -34,7 +36,7 @@ def load_reduce(self):
except:
pass
- # try to reencode the arguments
+ # try to re-encode the arguments
if getattr(self, 'encoding', None) is not None:
args = tuple([arg.encode(self.encoding)
if isinstance(arg, string_types)
@@ -50,26 +52,31 @@ def load_reduce(self):
print(func, args)
raise
- stack[-1] = value
-
-# if classes are moved, provide compat here
+# If classes are moved, provide compat here.
_class_locations_map = {
# 15477
- ('pandas.core.base', 'FrozenNDArray'): ('pandas.indexes.frozen', 'FrozenNDArray'),
- ('pandas.core.base', 'FrozenList'): ('pandas.indexes.frozen', 'FrozenList'),
+ ('pandas.core.base', 'FrozenNDArray'):
+ ('pandas.indexes.frozen', 'FrozenNDArray'),
+ ('pandas.core.base', 'FrozenList'):
+ ('pandas.indexes.frozen', 'FrozenList'),
# 10890
- ('pandas.core.series', 'TimeSeries'): ('pandas.core.series', 'Series'),
- ('pandas.sparse.series', 'SparseTimeSeries'): ('pandas.sparse.series', 'SparseSeries'),
+ ('pandas.core.series', 'TimeSeries'):
+ ('pandas.core.series', 'Series'),
+ ('pandas.sparse.series', 'SparseTimeSeries'):
+ ('pandas.sparse.series', 'SparseSeries'),
# 12588, extensions moving
- ('pandas._sparse', 'BlockIndex'): ('pandas.sparse.libsparse', 'BlockIndex'),
- ('pandas.tslib', 'Timestamp'): ('pandas._libs.tslib', 'Timestamp'),
- ('pandas.tslib', '__nat_unpickle'): ('pandas._libs.tslib', '__nat_unpickle'),
+ ('pandas._sparse', 'BlockIndex'):
+ ('pandas.sparse.libsparse', 'BlockIndex'),
+ ('pandas.tslib', 'Timestamp'):
+ ('pandas._libs.tslib', 'Timestamp'),
+ ('pandas.tslib', '__nat_unpickle'):
+ ('pandas._libs.tslib', '__nat_unpickle'),
('pandas._period', 'Period'): ('pandas._libs.period', 'Period')
- }
+}
# our Unpickler sub-class to override methods and some dispatcher
@@ -112,6 +119,8 @@ def load_newobj(self):
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
+
+
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
@@ -126,6 +135,8 @@ def load_newobj_ex(self):
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
+
+
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except:
diff --git a/pandas/computation/engines.py b/pandas/computation/engines.py
index a3de78c2f2089..aebc5bb02d59d 100644
--- a/pandas/computation/engines.py
+++ b/pandas/computation/engines.py
@@ -1,13 +1,11 @@
-"""Engine classes for :func:`~pandas.eval`
"""
-
-# flake8: noqa
+Engine classes for :func:`~pandas.eval`
+"""
import abc
from pandas import compat
-from pandas.compat import DeepChainMap, map
-import pandas.core.common as com
+from pandas.compat import map
import pandas.formats.printing as printing
from pandas.computation.align import _align, _reconstruct_object
from pandas.computation.ops import (UndefinedVariableError,
diff --git a/pandas/util/clipboard/__init__.py b/pandas/util/clipboard/__init__.py
index 9e2b2faf858db..4066a3be5e850 100644
--- a/pandas/util/clipboard/__init__.py
+++ b/pandas/util/clipboard/__init__.py
@@ -25,8 +25,6 @@
"""
__version__ = '1.5.27'
-# flake8: noqa
-
import platform
import os
import subprocess
@@ -62,14 +60,16 @@ def determine_clipboard():
if HAS_DISPLAY:
# Determine which command/module is installed, if any.
try:
- import gtk # check if gtk is installed
+ # Check if gtk is installed
+ import gtk # noqa
except ImportError:
pass
else:
return init_gtk_clipboard()
try:
- import PyQt4 # check if PyQt4 is installed
+ # Check if PyQt4 is installed
+ import PyQt4 # noqa
except ImportError:
pass
else:
diff --git a/pandas/util/clipboard/clipboards.py b/pandas/util/clipboard/clipboards.py
index bd5528334168f..e32380a383374 100644
--- a/pandas/util/clipboard/clipboards.py
+++ b/pandas/util/clipboard/clipboards.py
@@ -1,5 +1,3 @@
-# flake8: noqa
-
import sys
import subprocess
from .exceptions import PyperclipException
@@ -8,7 +6,7 @@
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.org """
PY2 = sys.version_info[0] == 2
-text_type = unicode if PY2 else str
+text_type = unicode if PY2 else str # noqa
def init_osx_clipboard():
diff --git a/pandas/util/clipboard/windows.py b/pandas/util/clipboard/windows.py
index 5c9be9ddaf508..5fc23f7102f41 100644
--- a/pandas/util/clipboard/windows.py
+++ b/pandas/util/clipboard/windows.py
@@ -1,4 +1,3 @@
-# flake8: noqa
"""
This module implements clipboard handling on Windows using ctypes.
"""
| Another round of house-cleaning that builds off #15867. Likely to be the last one for now. | https://api.github.com/repos/pandas-dev/pandas/pulls/15872 | 2017-04-03T08:03:05Z | 2017-04-03T16:35:46Z | null | 2017-04-03T16:36:48Z |
Support more sas7bdat date/datetime formats | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 85685ed7b430d..4f55c6388c728 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -127,6 +127,7 @@ Other Enhancements
- Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`.
- :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`)
- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
+- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
.. _whatsnew_0210.api_breaking:
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 20b0cf85e95b7..2b3a91e2062b1 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -44,8 +44,8 @@ class SAS7BDATReader(BaseIterator):
index : column identifier, defaults to None
Column to use as index.
convert_dates : boolean, defaults to True
- Attempt to convert dates to Pandas datetime values. Note all
- SAS date formats are supported.
+ Attempt to convert dates to Pandas datetime values. Note that
+ some rarely used SAS date formats may be unsupported.
blank_missing : boolean, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
@@ -655,9 +655,15 @@ def _chunk_to_dataframe(self):
rslt[name] = self._byte_chunk[jb, :].view(
dtype=self.byte_order + 'd')
rslt[name] = np.asarray(rslt[name], dtype=np.float64)
- if self.convert_dates and (self.column_formats[j] == "MMDDYY"):
- epoch = pd.datetime(1960, 1, 1)
- rslt[name] = epoch + pd.to_timedelta(rslt[name], unit='d')
+ if self.convert_dates:
+ unit = None
+ if self.column_formats[j] in const.sas_date_formats:
+ unit = 'd'
+ elif self.column_formats[j] in const.sas_datetime_formats:
+ unit = 's'
+ if unit:
+ rslt[name] = pd.to_datetime(rslt[name], unit=unit,
+ origin="1960-01-01")
jb += 1
elif self.column_types[j] == b's':
rslt[name] = self._string_chunk[js, :]
diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py
index 65ae1e9102cb2..c4b3588164305 100644
--- a/pandas/io/sas/sas_constants.py
+++ b/pandas/io/sas/sas_constants.py
@@ -145,3 +145,27 @@ class index:
b"\xFF\xFF\xFF\xFE": index.columnListIndex,
b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": index.columnListIndex,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": index.columnListIndex}
+
+
+# List of frequently used SAS date and datetime formats
+# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm
+# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java
+sas_date_formats = ("DATE", "DAY", "DDMMYY", "DOWNAME", "JULDAY", "JULIAN",
+ "MMDDYY", "MMYY", "MMYYC", "MMYYD", "MMYYP", "MMYYS",
+ "MMYYN", "MONNAME", "MONTH", "MONYY", "QTR", "QTRR",
+ "NENGO", "WEEKDATE", "WEEKDATX", "WEEKDAY", "WEEKV",
+ "WORDDATE", "WORDDATX", "YEAR", "YYMM", "YYMMC", "YYMMD",
+ "YYMMP", "YYMMS", "YYMMN", "YYMON", "YYMMDD", "YYQ",
+ "YYQC", "YYQD", "YYQP", "YYQS", "YYQN", "YYQR", "YYQRC",
+ "YYQRD", "YYQRP", "YYQRS", "YYQRN",
+ "YYMMDDP", "YYMMDDC", "E8601DA", "YYMMDDN", "MMDDYYC",
+ "MMDDYYS", "MMDDYYD", "YYMMDDS", "B8601DA", "DDMMYYN",
+ "YYMMDDD", "DDMMYYB", "DDMMYYP", "MMDDYYP", "YYMMDDB",
+ "MMDDYYN", "DDMMYYC", "DDMMYYD", "DDMMYYS",
+ "MINGUO")
+
+sas_datetime_formats = ("DATETIME", "DTWKDATX",
+ "B8601DN", "B8601DT", "B8601DX", "B8601DZ", "B8601LX",
+ "E8601DN", "E8601DT", "E8601DX", "E8601DZ", "E8601LX",
+ "DATEAMPM", "DTDATE", "DTMONYY", "DTMONYY", "DTWKDATX",
+ "DTYEAR", "TOD", "MDYAMPM")
diff --git a/pandas/tests/io/sas/data/datetime.csv b/pandas/tests/io/sas/data/datetime.csv
new file mode 100644
index 0000000000000..6126f6d04eaf0
--- /dev/null
+++ b/pandas/tests/io/sas/data/datetime.csv
@@ -0,0 +1,5 @@
+Date1,Date2,DateTime,DateTimeHi,Taiw
+1677-09-22,1677-09-22,1677-09-21 00:12:44,1677-09-21 00:12:43.145226,1912-01-01
+1960-01-01,1960-01-01,1960-01-01 00:00:00,1960-01-01 00:00:00.000000,1960-01-01
+2016-02-29,2016-02-29,2016-02-29 23:59:59,2016-02-29 23:59:59.123456,2016-02-29
+2262-04-11,2262-04-11,2262-04-11 23:47:16,2262-04-11 23:47:16.854774,2262-04-11
diff --git a/pandas/tests/io/sas/data/datetime.sas7bdat b/pandas/tests/io/sas/data/datetime.sas7bdat
new file mode 100644
index 0000000000000..6469dbf29f8ee
Binary files /dev/null and b/pandas/tests/io/sas/data/datetime.sas7bdat differ
diff --git a/pandas/tests/io/sas/data/productsales.csv b/pandas/tests/io/sas/data/productsales.csv
index fea9b68912297..1f6a4424e1a97 100644
--- a/pandas/tests/io/sas/data/productsales.csv
+++ b/pandas/tests/io/sas/data/productsales.csv
@@ -1,1441 +1,1441 @@
ACTUAL,PREDICT,COUNTRY,REGION,DIVISION,PRODTYPE,PRODUCT,QUARTER,YEAR,MONTH
-925,850,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12054
-999,297,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12085
-608,846,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12113
-642,533,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12144
-656,646,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12174
-948,486,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12205
-612,717,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12235
-114,564,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12266
-685,230,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12297
-657,494,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12327
-608,903,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12358
-353,266,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12388
-107,190,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12419
-354,139,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12450
-101,217,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12478
-553,560,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12509
-877,148,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12539
-431,762,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12570
-511,457,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12600
-157,532,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12631
-520,629,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12662
-114,491,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12692
-277,0,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12723
-561,979,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12753
-220,585,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1993,12054
-444,267,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1993,12085
-178,487,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1993,12113
-756,764,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1993,12144
-329,312,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1993,12174
-910,531,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1993,12205
-530,536,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1993,12235
-101,773,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1993,12266
-515,143,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1993,12297
-730,126,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1993,12327
-993,862,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1993,12358
-954,754,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1993,12388
-267,410,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1994,12419
-347,701,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1994,12450
-991,204,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1994,12478
-923,509,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1994,12509
-437,378,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1994,12539
-737,507,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1994,12570
-104,49,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1994,12600
-840,876,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1994,12631
-704,66,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1994,12662
-889,819,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1994,12692
-107,351,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1994,12723
-571,201,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1994,12753
-688,209,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1993,12054
-544,51,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1993,12085
-954,135,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1993,12113
-445,47,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1993,12144
-829,379,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1993,12174
-464,758,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1993,12205
-968,475,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1993,12235
-842,343,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1993,12266
-721,507,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1993,12297
-966,269,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1993,12327
-332,699,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1993,12358
-328,824,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1993,12388
-355,497,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1994,12419
-506,44,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1994,12450
-585,522,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1994,12478
-634,378,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1994,12509
-662,689,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1994,12539
-783,90,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1994,12570
-786,720,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1994,12600
-710,343,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1994,12631
-950,457,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1994,12662
-274,947,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1994,12692
-406,834,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1994,12723
-515,71,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1994,12753
-35,282,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12054
-995,538,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12085
-670,679,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12113
-406,601,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12144
-825,577,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12174
-467,908,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12205
-709,819,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12235
-522,687,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12266
-688,157,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12297
-956,111,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12327
-129,31,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12358
-687,790,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12388
-877,795,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12419
-845,379,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12450
-425,114,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12478
-899,475,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12509
-987,747,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12539
-641,372,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12570
-448,415,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12600
-341,955,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12631
-137,356,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12662
-235,316,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12692
-482,351,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12723
-678,164,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12753
-240,386,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1993,12054
-605,113,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1993,12085
-274,68,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1993,12113
-422,885,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1993,12144
-763,575,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1993,12174
-561,743,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1993,12205
-339,816,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1993,12235
-877,203,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1993,12266
-192,581,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1993,12297
-604,815,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1993,12327
-55,333,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1993,12358
-87,40,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1993,12388
-942,672,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1994,12419
-912,23,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1994,12450
-768,948,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1994,12478
-951,291,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1994,12509
-768,839,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1994,12539
-978,864,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1994,12570
-20,337,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1994,12600
-298,95,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1994,12631
-193,535,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1994,12662
-336,191,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1994,12692
-617,412,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1994,12723
-709,711,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1994,12753
-5,425,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12054
-164,215,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12085
-422,948,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12113
-424,544,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12144
-854,764,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12174
-168,446,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12205
-8,957,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12235
-748,967,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12266
-682,11,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12297
-300,110,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12327
-672,263,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12358
-894,215,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12388
-944,965,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12419
-403,423,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12450
-596,753,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12478
-481,770,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12509
-503,263,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12539
-126,79,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12570
-721,441,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12600
-271,858,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12631
-721,667,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12662
-157,193,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12692
-991,394,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12723
-499,680,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12753
-284,414,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1993,12054
-705,770,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1993,12085
-737,679,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1993,12113
-745,7,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1993,12144
-633,713,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1993,12174
-983,851,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1993,12205
-591,944,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1993,12235
-42,130,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1993,12266
-771,485,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1993,12297
-465,23,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1993,12327
-296,193,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1993,12358
-890,7,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1993,12388
-312,919,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1994,12419
-777,768,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1994,12450
-364,854,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1994,12478
-601,411,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1994,12509
-823,736,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1994,12539
-847,10,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1994,12570
-490,311,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1994,12600
-387,348,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1994,12631
-688,458,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1994,12662
-650,195,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1994,12692
-447,658,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1994,12723
-91,704,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1994,12753
-197,807,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1993,12054
-51,861,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1993,12085
-570,873,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1993,12113
-423,933,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1993,12144
-524,355,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1993,12174
-416,794,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1993,12205
-789,645,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1993,12235
-551,700,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1993,12266
-400,831,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1993,12297
-361,800,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1993,12327
-189,830,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1993,12358
-554,828,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1993,12388
-585,12,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1994,12419
-281,501,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1994,12450
-629,914,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1994,12478
-43,685,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1994,12509
-533,755,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1994,12539
-882,708,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1994,12570
-790,595,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1994,12600
-600,32,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1994,12631
-148,49,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1994,12662
-237,727,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1994,12692
-488,239,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1994,12723
-457,273,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1994,12753
-401,986,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12054
-181,544,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12085
-995,182,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12113
-120,197,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12144
-119,435,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12174
-319,974,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12205
-333,524,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12235
-923,688,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12266
-634,750,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12297
-493,155,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12327
-461,860,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12358
-304,102,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12388
-641,425,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12419
-992,224,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12450
-202,408,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12478
-770,524,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12509
-202,816,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12539
-14,515,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12570
-134,793,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12600
-977,460,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12631
-174,732,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12662
-429,435,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12692
-514,38,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12723
-784,616,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12753
-973,225,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1993,12054
-511,402,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1993,12085
-30,697,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1993,12113
-895,567,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1993,12144
-557,231,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1993,12174
-282,372,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1993,12205
-909,15,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1993,12235
-276,866,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1993,12266
-234,452,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1993,12297
-479,663,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1993,12327
-782,982,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1993,12358
-755,813,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1993,12388
-689,523,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1994,12419
-496,871,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1994,12450
-24,511,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1994,12478
-379,819,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1994,12509
-441,525,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1994,12539
-49,13,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1994,12570
-243,694,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1994,12600
-295,782,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1994,12631
-395,839,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1994,12662
-929,461,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1994,12692
-997,303,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1994,12723
-889,421,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1994,12753
-72,421,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12054
-926,433,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12085
-850,394,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12113
-826,338,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12144
-651,764,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12174
-854,216,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12205
-899,96,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12235
-309,550,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12266
-943,636,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12297
-138,427,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12327
-99,652,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12358
-270,478,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12388
-862,18,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12419
-574,40,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12450
-359,453,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12478
-958,987,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12509
-791,26,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12539
-284,101,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12570
-190,969,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12600
-527,492,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12631
-112,263,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12662
-271,593,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12692
-643,923,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12723
-554,146,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12753
-211,305,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1993,12054
-368,318,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1993,12085
-778,417,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1993,12113
-808,623,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1993,12144
-46,761,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1993,12174
-466,272,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1993,12205
-18,988,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1993,12235
-87,821,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1993,12266
-765,962,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1993,12297
-62,615,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1993,12327
-13,523,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1993,12358
-775,806,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1993,12388
-636,586,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1994,12419
-458,520,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1994,12450
-206,908,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1994,12478
-310,30,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1994,12509
-813,247,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1994,12539
-22,647,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1994,12570
-742,55,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1994,12600
-394,154,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1994,12631
-957,344,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1994,12662
-205,95,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1994,12692
-198,665,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1994,12723
-638,145,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1994,12753
-155,925,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1993,12054
-688,395,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1993,12085
-730,749,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1993,12113
-208,279,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1993,12144
-525,288,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1993,12174
-483,509,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1993,12205
-748,255,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1993,12235
-6,214,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1993,12266
-168,473,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1993,12297
-301,702,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1993,12327
-9,814,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1993,12358
-778,231,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1993,12388
-799,422,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1994,12419
-309,572,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1994,12450
-433,363,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1994,12478
-969,919,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1994,12509
-181,355,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1994,12539
-787,992,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1994,12570
-971,147,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1994,12600
-440,183,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1994,12631
-209,375,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1994,12662
-537,77,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1994,12692
-364,308,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1994,12723
-377,660,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1994,12753
-251,555,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12054
-607,455,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12085
-127,888,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12113
-513,652,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12144
-146,799,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12174
-917,249,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12205
-776,539,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12235
-330,198,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12266
-981,340,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12297
-862,152,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12327
-612,347,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12358
-607,565,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12388
-786,855,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12419
-160,87,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12450
-199,69,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12478
-972,807,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12509
-870,565,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12539
-494,798,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12570
-975,714,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12600
-760,17,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12631
-180,797,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12662
-256,422,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12692
-422,621,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12723
-859,661,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12753
-586,363,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1993,12054
-441,910,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1993,12085
-597,998,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1993,12113
-717,95,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1993,12144
-713,731,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1993,12174
-591,718,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1993,12205
-492,467,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1993,12235
-170,126,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1993,12266
-684,127,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1993,12297
-981,746,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1993,12327
-966,878,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1993,12358
-439,27,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1993,12388
-151,569,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1994,12419
-602,812,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1994,12450
-187,603,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1994,12478
-415,506,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1994,12509
-61,185,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1994,12539
-839,692,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1994,12570
-596,565,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1994,12600
-751,512,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1994,12631
-460,86,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1994,12662
-922,399,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1994,12692
-153,672,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1994,12723
-928,801,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1994,12753
-951,730,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12054
-394,408,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12085
-615,982,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12113
-653,499,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12144
-180,307,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12174
-649,741,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12205
-921,640,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12235
-11,300,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12266
-696,929,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12297
-795,309,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12327
-550,340,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12358
-320,228,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12388
-845,1000,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12419
-245,21,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12450
-142,583,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12478
-717,506,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12509
-3,405,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12539
-790,556,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12570
-646,72,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12600
-230,103,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12631
-938,262,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12662
-629,102,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12692
-317,841,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12723
-812,159,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12753
-141,570,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1993,12054
-64,375,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1993,12085
-207,298,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1993,12113
-435,32,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1993,12144
-96,760,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1993,12174
-252,338,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1993,12205
-956,149,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1993,12235
-633,343,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1993,12266
-190,151,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1993,12297
-227,44,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1993,12327
-24,583,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1993,12358
-420,230,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1993,12388
-910,907,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1994,12419
-709,783,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1994,12450
-810,117,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1994,12478
-723,416,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1994,12509
-911,318,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1994,12539
-230,888,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1994,12570
-448,60,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1994,12600
-945,596,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1994,12631
-508,576,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1994,12662
-262,576,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1994,12692
-441,280,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1994,12723
-15,219,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1994,12753
-795,133,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1993,12054
-301,273,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1993,12085
-304,86,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1993,12113
-49,400,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1993,12144
-576,364,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1993,12174
-669,63,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1993,12205
-325,929,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1993,12235
-272,344,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1993,12266
-80,768,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1993,12297
-46,668,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1993,12327
-223,407,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1993,12358
-774,536,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1993,12388
-784,657,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1994,12419
-92,215,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1994,12450
-67,966,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1994,12478
-747,674,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1994,12509
-686,574,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1994,12539
-93,266,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1994,12570
-192,680,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1994,12600
-51,362,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1994,12631
-498,412,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1994,12662
-546,431,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1994,12692
-485,94,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1994,12723
-925,345,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1994,12753
-292,445,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12054
-540,632,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12085
-21,855,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12113
-100,36,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12144
-49,250,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12174
-353,427,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12205
-911,367,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12235
-823,245,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12266
-278,893,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12297
-576,490,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12327
-655,88,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12358
-763,964,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12388
-88,62,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12419
-746,506,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12450
-927,680,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12478
-297,153,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12509
-291,403,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12539
-838,98,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12570
-112,376,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12600
-509,477,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12631
-472,50,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12662
-495,592,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12692
-1000,813,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12723
-241,740,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12753
-693,873,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1993,12054
-903,459,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1993,12085
-791,224,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1993,12113
-108,562,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1993,12144
-845,199,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1993,12174
-452,275,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1993,12205
-479,355,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1993,12235
-410,947,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1993,12266
-379,454,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1993,12297
-740,450,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1993,12327
-471,575,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1993,12358
-325,6,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1993,12388
-455,847,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1994,12419
-563,338,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1994,12450
-879,517,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1994,12478
-312,630,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1994,12509
-587,381,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1994,12539
-628,864,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1994,12570
-486,416,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1994,12600
-811,852,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1994,12631
-990,815,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1994,12662
-35,23,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1994,12692
-764,527,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1994,12723
-619,693,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1994,12753
-996,977,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12054
-554,549,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12085
-540,951,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12113
-140,390,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12144
-554,204,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12174
-724,78,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12205
-693,613,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12235
-866,745,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12266
-833,56,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12297
-164,887,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12327
-753,651,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12358
-60,691,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12388
-688,767,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12419
-883,709,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12450
-109,417,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12478
-950,326,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12509
-438,599,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12539
-286,818,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12570
-342,13,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12600
-383,185,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12631
-80,140,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12662
-322,717,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12692
-749,852,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12723
-606,125,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12753
-641,325,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1993,12054
-494,648,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1993,12085
-428,365,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1993,12113
-936,120,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1993,12144
-597,347,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1993,12174
-728,638,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1993,12205
-933,732,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1993,12235
-663,465,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1993,12266
-394,262,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1993,12297
-334,947,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1993,12327
-114,694,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1993,12358
-89,482,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1993,12388
-874,600,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1994,12419
-674,94,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1994,12450
-347,323,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1994,12478
-105,49,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1994,12509
-286,70,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1994,12539
-669,844,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1994,12570
-786,773,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1994,12600
-104,68,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1994,12631
-770,110,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1994,12662
-263,42,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1994,12692
-900,171,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1994,12723
-630,644,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1994,12753
-597,408,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1993,12054
-185,45,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1993,12085
-175,522,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1993,12113
-576,166,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1993,12144
-957,885,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1993,12174
-993,713,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1993,12205
-500,838,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1993,12235
-410,267,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1993,12266
-592,967,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1993,12297
-64,529,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1993,12327
-208,656,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1993,12358
-273,665,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1993,12388
-906,419,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1994,12419
-429,776,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1994,12450
-961,971,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1994,12478
-338,248,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1994,12509
-472,486,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1994,12539
-903,674,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1994,12570
-299,603,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1994,12600
-948,492,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1994,12631
-931,512,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1994,12662
-570,391,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1994,12692
-97,313,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1994,12723
-674,758,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1994,12753
-468,304,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12054
-430,846,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12085
-893,912,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12113
-519,810,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12144
-267,122,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12174
-908,102,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12205
-176,161,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12235
-673,450,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12266
-798,215,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12297
-291,765,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12327
-583,557,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12358
-442,739,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12388
-951,811,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12419
-430,780,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12450
-559,645,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12478
-726,365,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12509
-944,597,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12539
-497,126,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12570
-388,655,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12600
-81,604,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12631
-111,280,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12662
-288,115,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12692
-845,205,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12723
-745,672,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12753
-352,339,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1993,12054
-234,70,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1993,12085
-167,528,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1993,12113
-606,220,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1993,12144
-670,691,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1993,12174
-764,197,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1993,12205
-659,239,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1993,12235
-996,50,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1993,12266
-424,135,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1993,12297
-899,972,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1993,12327
-392,475,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1993,12358
-555,868,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1993,12388
-860,451,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1994,12419
-114,565,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1994,12450
-943,116,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1994,12478
-365,385,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1994,12509
-249,375,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1994,12539
-192,357,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1994,12570
-328,230,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1994,12600
-311,829,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1994,12631
-576,971,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1994,12662
-915,280,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1994,12692
-522,853,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1994,12723
-625,953,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1994,12753
-873,874,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12054
-498,578,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12085
-808,768,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12113
-742,178,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12144
-744,916,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12174
-30,917,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12205
-747,633,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12235
-672,107,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12266
-564,523,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12297
-785,924,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12327
-825,481,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12358
-243,240,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12388
-959,819,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12419
-123,602,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12450
-714,538,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12478
-252,632,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12509
-715,952,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12539
-670,480,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12570
-81,700,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12600
-653,726,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12631
-795,526,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12662
-182,410,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12692
-725,307,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12723
-101,73,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12753
-143,232,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1993,12054
-15,993,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1993,12085
-742,652,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1993,12113
-339,761,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1993,12144
-39,428,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1993,12174
-465,4,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1993,12205
-889,101,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1993,12235
-856,869,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1993,12266
-358,271,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1993,12297
-452,633,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1993,12327
-387,481,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1993,12358
-824,302,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1993,12388
-185,245,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1994,12419
-151,941,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1994,12450
-419,721,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1994,12478
-643,893,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1994,12509
-63,898,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1994,12539
-202,94,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1994,12570
-332,962,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1994,12600
-723,71,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1994,12631
-148,108,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1994,12662
-840,71,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1994,12692
-601,767,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1994,12723
-962,323,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1994,12753
-166,982,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1993,12054
-531,614,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1993,12085
-963,839,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1993,12113
-994,388,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1993,12144
-978,296,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1993,12174
-72,429,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1993,12205
-33,901,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1993,12235
-428,350,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1993,12266
-413,581,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1993,12297
-737,583,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1993,12327
-85,92,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1993,12358
-916,647,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1993,12388
-785,771,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1994,12419
-302,26,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1994,12450
-1000,598,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1994,12478
-458,715,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1994,12509
-896,74,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1994,12539
-615,580,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1994,12570
-174,848,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1994,12600
-651,118,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1994,12631
-784,54,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1994,12662
-121,929,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1994,12692
-341,393,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1994,12723
-615,820,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1994,12753
-697,336,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12054
-215,299,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12085
-197,747,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12113
-205,154,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12144
-256,486,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12174
-377,251,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12205
-577,225,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12235
-686,77,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12266
-332,74,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12297
-534,596,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12327
-485,493,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12358
-594,782,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12388
-413,487,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12419
-13,127,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12450
-483,538,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12478
-820,94,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12509
-745,252,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12539
-79,722,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12570
-36,536,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12600
-950,958,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12631
-74,466,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12662
-458,309,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12692
-609,680,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12723
-429,539,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12753
-956,511,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1993,12054
-205,505,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1993,12085
-629,720,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1993,12113
-277,823,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1993,12144
-266,21,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1993,12174
-872,142,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1993,12205
-435,95,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1993,12235
-988,398,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1993,12266
-953,328,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1993,12297
-556,151,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1993,12327
-211,978,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1993,12358
-389,918,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1993,12388
-351,542,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1994,12419
-14,96,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1994,12450
-181,496,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1994,12478
-452,77,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1994,12509
-511,236,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1994,12539
-193,913,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1994,12570
-797,49,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1994,12600
-988,967,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1994,12631
-487,502,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1994,12662
-941,790,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1994,12692
-577,121,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1994,12723
-456,55,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1994,12753
-982,739,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12054
-593,683,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12085
-702,610,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12113
-528,248,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12144
-873,530,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12174
-301,889,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12205
-769,245,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12235
-724,473,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12266
-466,938,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12297
-774,150,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12327
-111,772,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12358
-954,201,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12388
-780,945,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12419
-210,177,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12450
-93,378,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12478
-332,83,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12509
-186,803,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12539
-782,398,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12570
-41,215,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12600
-222,194,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12631
-992,287,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12662
-477,410,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12692
-948,50,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12723
-817,204,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12753
-597,239,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1993,12054
-649,637,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1993,12085
-3,938,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1993,12113
-731,788,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1993,12144
-181,399,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1993,12174
-468,576,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1993,12205
-891,187,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1993,12235
-226,703,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1993,12266
-28,455,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1993,12297
-609,244,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1993,12327
-224,868,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1993,12358
-230,353,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1993,12388
-216,101,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1994,12419
-282,924,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1994,12450
-501,144,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1994,12478
-320,0,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1994,12509
-720,910,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1994,12539
-464,259,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1994,12570
-363,107,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1994,12600
-49,63,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1994,12631
-223,270,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1994,12662
-452,554,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1994,12692
-210,154,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1994,12723
-444,205,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1994,12753
-222,441,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1993,12054
-678,183,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1993,12085
-25,459,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1993,12113
-57,810,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1993,12144
-981,268,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1993,12174
-740,916,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1993,12205
-408,742,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1993,12235
-966,522,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1993,12266
-107,299,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1993,12297
-488,677,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1993,12327
-759,709,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1993,12358
-504,310,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1993,12388
-99,160,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1994,12419
-503,698,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1994,12450
-724,540,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1994,12478
-309,901,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1994,12509
-625,34,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1994,12539
-294,536,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1994,12570
-890,780,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1994,12600
-501,716,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1994,12631
-34,532,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1994,12662
-203,871,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1994,12692
-140,199,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1994,12723
-845,845,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1994,12753
-774,591,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12054
-645,378,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12085
-986,942,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12113
-296,686,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12144
-936,720,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12174
-341,546,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12205
-32,845,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12235
-277,667,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12266
-548,627,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12297
-727,142,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12327
-812,655,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12358
-168,556,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12388
-150,459,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12419
-136,89,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12450
-695,726,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12478
-363,38,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12509
-853,60,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12539
-621,369,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12570
-764,381,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12600
-669,465,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12631
-772,981,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12662
-228,758,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12692
-261,31,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12723
-821,237,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12753
-100,285,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1993,12054
-465,94,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1993,12085
-350,561,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1993,12113
-991,143,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1993,12144
-910,95,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1993,12174
-206,341,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1993,12205
-263,388,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1993,12235
-374,272,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1993,12266
-875,890,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1993,12297
-810,734,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1993,12327
-398,364,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1993,12358
-565,619,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1993,12388
-417,517,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1994,12419
-291,781,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1994,12450
-251,327,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1994,12478
-449,48,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1994,12509
-774,809,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1994,12539
-386,73,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1994,12570
-22,936,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1994,12600
-940,400,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1994,12631
-132,736,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1994,12662
-103,211,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1994,12692
-152,271,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1994,12723
-952,855,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1994,12753
-872,923,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12054
-748,854,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12085
-749,769,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12113
-876,271,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12144
-860,383,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12174
-900,29,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12205
-705,185,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12235
-913,351,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12266
-315,560,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12297
-466,840,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12327
-233,517,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12358
-906,949,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12388
-148,633,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12419
-661,636,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12450
-847,138,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12478
-768,481,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12509
-866,408,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12539
-475,130,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12570
-112,813,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12600
-136,661,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12631
-763,311,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12662
-388,872,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12692
-996,643,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12723
-486,174,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12753
-494,528,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1993,12054
-771,124,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1993,12085
-49,126,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1993,12113
-322,440,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1993,12144
-878,881,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1993,12174
-827,292,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1993,12205
-852,873,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1993,12235
-716,357,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1993,12266
-81,247,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1993,12297
-916,18,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1993,12327
-673,395,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1993,12358
-242,620,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1993,12388
-914,946,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1994,12419
-902,72,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1994,12450
-707,691,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1994,12478
-223,95,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1994,12509
-619,878,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1994,12539
-254,757,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1994,12570
-688,898,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1994,12600
-477,172,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1994,12631
-280,419,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1994,12662
-546,849,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1994,12692
-630,807,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1994,12723
-455,599,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1994,12753
-505,59,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1993,12054
-823,790,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1993,12085
-891,574,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1993,12113
-840,96,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1993,12144
-436,376,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1993,12174
-168,352,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1993,12205
-177,741,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1993,12235
-727,12,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1993,12266
-278,157,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1993,12297
-443,10,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1993,12327
-905,544,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1993,12358
-881,817,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1993,12388
-507,754,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1994,12419
-363,425,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1994,12450
-603,492,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1994,12478
-473,485,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1994,12509
-128,369,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1994,12539
-105,560,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1994,12570
-325,651,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1994,12600
-711,326,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1994,12631
-983,180,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1994,12662
-241,935,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1994,12692
-71,403,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1994,12723
-395,345,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1994,12753
-168,278,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12054
-512,376,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12085
-291,104,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12113
-776,543,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12144
-271,798,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12174
-946,333,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12205
-195,833,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12235
-165,132,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12266
-238,629,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12297
-409,337,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12327
-720,300,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12358
-309,470,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12388
-812,875,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12419
-441,237,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12450
-500,272,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12478
-517,860,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12509
-924,415,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12539
-572,140,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12570
-768,367,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12600
-692,195,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12631
-28,245,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12662
-202,285,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12692
-76,98,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12723
-421,932,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12753
-636,898,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1993,12054
-52,330,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1993,12085
-184,603,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1993,12113
-739,280,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1993,12144
-841,507,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1993,12174
-65,202,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1993,12205
-623,513,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1993,12235
-517,132,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1993,12266
-636,21,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1993,12297
-845,657,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1993,12327
-232,195,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1993,12358
-26,323,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1993,12388
-680,299,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1994,12419
-364,811,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1994,12450
-572,739,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1994,12478
-145,889,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1994,12509
-644,189,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1994,12539
-87,698,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1994,12570
-620,646,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1994,12600
-535,562,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1994,12631
-661,753,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1994,12662
-884,425,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1994,12692
-689,693,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1994,12723
-646,941,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1994,12753
-4,975,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12054
-813,455,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12085
-773,260,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1993,12113
-205,69,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12144
-657,147,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12174
-154,533,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1993,12205
-747,881,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12235
-787,457,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12266
-867,441,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1993,12297
-307,859,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12327
-571,177,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12358
-92,633,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1993,12388
-269,382,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12419
-764,707,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12450
-662,566,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1994,12478
-818,349,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12509
-617,128,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12539
-649,231,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1994,12570
-895,258,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12600
-750,812,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12631
-738,362,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1994,12662
-107,133,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12692
-278,60,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12723
-32,88,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1994,12753
-129,378,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1993,12054
-187,569,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1993,12085
-670,186,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1993,12113
-678,875,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1993,12144
-423,636,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1993,12174
-389,360,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1993,12205
-257,677,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1993,12235
-780,708,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1993,12266
-159,158,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1993,12297
-97,384,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1993,12327
-479,927,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1993,12358
-9,134,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1993,12388
-614,273,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1994,12419
-261,27,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1994,12450
-115,209,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1994,12478
-358,470,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1994,12509
-133,219,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1994,12539
-891,907,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1994,12570
-702,778,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1994,12600
-58,998,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1994,12631
-606,194,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1994,12662
-668,933,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1994,12692
-813,708,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1994,12723
-450,949,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1994,12753
-956,579,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1993,12054
-276,131,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1993,12085
-889,689,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1993,12113
-708,908,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1993,12144
-14,524,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1993,12174
-904,336,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1993,12205
-272,916,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1993,12235
-257,236,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1993,12266
-343,965,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1993,12297
-80,350,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1993,12327
-530,599,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1993,12358
-340,901,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1993,12388
-595,935,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1994,12419
-47,667,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1994,12450
-279,104,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1994,12478
-293,803,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1994,12509
-162,64,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1994,12539
-935,825,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1994,12570
-689,839,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1994,12600
-484,184,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1994,12631
-230,348,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1994,12662
-164,904,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1994,12692
-401,219,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1994,12723
-607,381,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1994,12753
-229,524,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12054
-786,902,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12085
-92,212,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1993,12113
-455,762,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12144
-409,182,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12174
-166,442,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1993,12205
-277,919,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12235
-92,67,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12266
-631,741,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1993,12297
-390,617,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12327
-403,214,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12358
-964,202,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1993,12388
-223,788,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12419
-684,639,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12450
-645,336,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1994,12478
-470,937,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12509
-424,399,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12539
-862,21,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1994,12570
-736,125,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12600
-554,635,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12631
-790,229,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1994,12662
-115,770,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12692
-853,622,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12723
-643,109,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1994,12753
-794,975,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1993,12054
-892,820,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1993,12085
-728,123,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1993,12113
-744,135,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1993,12144
-678,535,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1993,12174
-768,971,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1993,12205
-234,166,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1993,12235
-333,814,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1993,12266
-968,557,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1993,12297
-119,820,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1993,12327
-469,486,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1993,12358
-261,429,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1993,12388
-984,65,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1994,12419
-845,977,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1994,12450
-374,410,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1994,12478
-687,150,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1994,12509
-157,630,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1994,12539
-49,488,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1994,12570
-817,112,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1994,12600
-223,598,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1994,12631
-433,705,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1994,12662
-41,226,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1994,12692
-396,979,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1994,12723
-131,19,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1994,12753
-521,204,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12054
-751,805,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12085
-45,549,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1993,12113
-144,912,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12144
-119,427,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12174
-728,1,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1993,12205
-120,540,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12235
-657,940,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12266
-409,644,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1993,12297
-881,821,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12327
-113,560,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12358
-831,309,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1993,12388
-129,1000,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12419
-76,945,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12450
-260,931,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1994,12478
-882,504,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12509
-157,950,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12539
-443,278,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1994,12570
-111,225,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12600
-497,6,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12631
-321,124,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1994,12662
-194,206,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12692
-684,320,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12723
-634,270,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1994,12753
-622,278,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1993,12054
-689,447,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1993,12085
-120,170,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1993,12113
-374,87,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1993,12144
-926,384,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1993,12174
-687,574,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1993,12205
-600,585,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1993,12235
-779,947,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1993,12266
-223,984,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1993,12297
-628,189,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1993,12327
-326,364,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1993,12358
-836,49,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1993,12388
-361,851,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1994,12419
-444,643,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1994,12450
-501,143,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1994,12478
-743,763,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1994,12509
-861,987,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1994,12539
-203,264,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1994,12570
-762,439,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1994,12600
-705,750,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1994,12631
-153,37,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1994,12662
-436,95,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1994,12692
-428,79,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1994,12723
-804,832,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1994,12753
-805,649,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1993,12054
-860,838,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1993,12085
-104,439,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1993,12113
-434,207,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1993,12144
-912,804,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1993,12174
-571,875,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1993,12205
-267,473,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1993,12235
-415,845,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1993,12266
-261,91,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1993,12297
-746,630,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1993,12327
-30,185,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1993,12358
-662,317,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1993,12388
-916,88,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1994,12419
-415,607,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1994,12450
-514,35,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1994,12478
-756,680,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1994,12509
-461,78,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1994,12539
-460,117,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1994,12570
-305,440,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1994,12600
-198,652,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1994,12631
-234,249,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1994,12662
-638,658,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1994,12692
-88,563,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1994,12723
-751,737,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1994,12753
-816,789,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12054
-437,988,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12085
-715,220,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1993,12113
-780,946,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12144
-245,986,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12174
-201,129,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1993,12205
-815,433,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12235
-865,492,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12266
-634,306,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1993,12297
-901,154,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12327
-789,206,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12358
-882,81,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1993,12388
-953,882,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12419
-862,848,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12450
-628,664,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1994,12478
-765,389,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12509
-741,182,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12539
-61,505,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1994,12570
-470,861,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12600
-869,263,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12631
-650,400,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1994,12662
-750,556,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12692
-602,497,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12723
-54,181,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1994,12753
-384,619,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1993,12054
-161,332,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1993,12085
-977,669,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1993,12113
-615,487,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1993,12144
-783,994,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1993,12174
-977,331,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1993,12205
-375,739,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1993,12235
-298,665,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1993,12266
-104,921,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1993,12297
-713,862,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1993,12327
-556,662,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1993,12358
-323,517,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1993,12388
-391,352,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1994,12419
-593,166,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1994,12450
-906,859,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1994,12478
-130,571,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1994,12509
-613,976,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1994,12539
-58,466,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1994,12570
-314,79,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1994,12600
-67,864,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1994,12631
-654,623,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1994,12662
-312,170,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1994,12692
-349,662,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1994,12723
-415,763,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1994,12753
-404,896,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12054
-22,973,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12085
-744,161,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1993,12113
-804,934,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12144
-101,697,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12174
-293,116,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1993,12205
-266,84,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12235
-372,604,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12266
-38,371,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1993,12297
-385,783,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12327
-262,335,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12358
-961,321,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1993,12388
-831,177,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12419
-579,371,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12450
-301,583,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1994,12478
-693,364,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12509
-895,343,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12539
-320,854,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1994,12570
-284,691,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12600
-362,387,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12631
-132,298,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1994,12662
-42,635,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12692
-118,81,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12723
-42,375,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1994,12753
-18,846,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1993,12054
-512,933,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1993,12085
-337,237,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1993,12113
-167,964,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1993,12144
-749,382,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1993,12174
-890,610,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1993,12205
-910,148,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1993,12235
-403,837,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1993,12266
-403,85,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1993,12297
-661,425,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1993,12327
-485,633,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1993,12358
-789,515,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1993,12388
-415,512,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1994,12419
-418,156,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1994,12450
-163,464,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1994,12478
-298,813,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1994,12509
-584,455,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1994,12539
-797,366,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1994,12570
-767,734,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1994,12600
-984,451,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1994,12631
-388,134,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1994,12662
-924,547,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1994,12692
-566,802,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1994,12723
-390,61,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1994,12753
-608,556,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1993,12054
-840,202,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1993,12085
-112,964,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1993,12113
-288,112,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1993,12144
-408,445,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1993,12174
-876,884,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1993,12205
-224,348,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1993,12235
-133,564,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1993,12266
-662,568,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1993,12297
-68,882,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1993,12327
-626,542,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1993,12358
-678,119,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1993,12388
-361,248,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1994,12419
-464,868,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1994,12450
-681,841,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1994,12478
-377,484,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1994,12509
-222,986,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1994,12539
-972,39,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1994,12570
-56,930,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1994,12600
-695,252,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1994,12631
-908,794,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1994,12662
-328,658,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1994,12692
-891,139,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1994,12723
-265,331,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1994,12753
-251,261,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12054
-783,122,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12085
-425,296,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1993,12113
-859,391,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12144
-314,75,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12174
-153,731,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1993,12205
-955,883,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12235
-654,707,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12266
-693,97,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1993,12297
-757,390,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12327
-221,237,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12358
-942,496,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1993,12388
-31,814,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12419
-540,765,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12450
-352,308,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1994,12478
-904,327,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12509
-436,266,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12539
-281,699,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1994,12570
-801,599,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12600
-273,950,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12631
-716,117,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1994,12662
-902,632,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12692
-341,35,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12723
-155,562,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1994,12753
-796,144,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1993,12054
-257,142,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1993,12085
-611,273,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1993,12113
-6,915,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1993,12144
-125,920,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1993,12174
-745,294,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1993,12205
-437,681,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1993,12235
-906,86,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1993,12266
-844,764,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1993,12297
-413,269,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1993,12327
-869,138,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1993,12358
-403,834,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1993,12388
-137,112,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1994,12419
-922,921,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1994,12450
-202,859,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1994,12478
-955,442,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1994,12509
-781,593,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1994,12539
-12,346,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1994,12570
-931,312,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1994,12600
-95,690,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1994,12631
-795,344,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1994,12662
-542,784,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1994,12692
-935,639,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1994,12723
-269,726,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1994,12753
-197,596,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12054
-828,263,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12085
-461,194,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1993,12113
-35,895,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12144
-88,502,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12174
-832,342,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1993,12205
-900,421,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12235
-368,901,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12266
-201,474,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1993,12297
-758,571,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12327
-504,511,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12358
-864,379,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1993,12388
-574,68,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12419
-61,210,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12450
-565,478,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1994,12478
-475,296,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12509
-44,664,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12539
-145,880,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1994,12570
-813,607,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12600
-703,97,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12631
-757,908,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1994,12662
-96,152,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12692
-860,622,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12723
-750,309,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1994,12753
-585,912,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1993,12054
-127,429,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1993,12085
-669,580,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1993,12113
-708,179,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1993,12144
-830,119,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1993,12174
-550,369,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1993,12205
-762,882,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1993,12235
-468,727,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1993,12266
-151,823,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1993,12297
-103,783,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1993,12327
-876,884,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1993,12358
-881,891,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1993,12388
-116,909,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1994,12419
-677,765,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1994,12450
-477,180,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1994,12478
-154,712,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1994,12509
-331,175,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1994,12539
-784,869,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1994,12570
-563,820,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1994,12600
-229,554,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1994,12631
-451,126,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1994,12662
-974,760,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1994,12692
-484,446,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1994,12723
-69,254,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1994,12753
-755,516,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1993,12054
-331,779,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1993,12085
-482,987,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1993,12113
-632,318,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1993,12144
-750,427,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1993,12174
-618,86,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1993,12205
-935,553,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1993,12235
-716,315,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1993,12266
-205,328,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1993,12297
-215,521,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1993,12327
-871,156,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1993,12358
-552,841,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1993,12388
-619,623,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1994,12419
-701,849,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1994,12450
-104,438,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1994,12478
-114,719,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1994,12509
-854,906,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1994,12539
-563,267,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1994,12570
-73,542,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1994,12600
-427,552,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1994,12631
-348,428,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1994,12662
-148,158,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1994,12692
-895,379,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1994,12723
-394,142,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1994,12753
-792,588,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12054
-175,506,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12085
-208,382,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1993,12113
-354,132,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12144
-163,652,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12174
-336,723,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1993,12205
-804,682,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12235
-863,382,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12266
-326,125,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1993,12297
-568,321,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12327
-691,922,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12358
-152,884,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1993,12388
-565,38,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12419
-38,194,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12450
-185,996,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1994,12478
-318,532,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12509
-960,391,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12539
-122,104,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1994,12570
-400,22,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12600
-301,650,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12631
-909,143,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1994,12662
-433,999,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12692
-508,415,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12723
-648,350,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1994,12753
-793,342,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1993,12054
-129,215,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1993,12085
-481,52,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1993,12113
-406,292,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1993,12144
-512,862,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1993,12174
-668,309,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1993,12205
-551,886,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1993,12235
-124,172,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1993,12266
-655,912,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1993,12297
-523,666,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1993,12327
-739,656,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1993,12358
-87,145,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1993,12388
-890,664,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1994,12419
-665,639,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1994,12450
-329,707,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1994,12478
-417,891,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1994,12509
-828,466,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1994,12539
-298,451,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1994,12570
-356,451,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1994,12600
-909,874,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1994,12631
-251,805,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1994,12662
-526,426,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1994,12692
-652,932,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1994,12723
-573,581,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1994,12753
+925,850,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-01-01
+999,297,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-02-01
+608,846,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-03-01
+642,533,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-04-01
+656,646,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-05-01
+948,486,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-06-01
+612,717,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-07-01
+114,564,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-08-01
+685,230,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-09-01
+657,494,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-10-01
+608,903,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-11-01
+353,266,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-12-01
+107,190,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-01-01
+354,139,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-02-01
+101,217,CANADA,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-03-01
+553,560,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-04-01
+877,148,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-05-01
+431,762,CANADA,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-06-01
+511,457,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-07-01
+157,532,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-08-01
+520,629,CANADA,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-09-01
+114,491,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-10-01
+277,0,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-11-01
+561,979,CANADA,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-12-01
+220,585,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-01-01
+444,267,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-02-01
+178,487,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-03-01
+756,764,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-04-01
+329,312,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-05-01
+910,531,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-06-01
+530,536,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-07-01
+101,773,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-08-01
+515,143,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-09-01
+730,126,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-10-01
+993,862,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-11-01
+954,754,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-12-01
+267,410,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-01-01
+347,701,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-02-01
+991,204,CANADA,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-03-01
+923,509,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-04-01
+437,378,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-05-01
+737,507,CANADA,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-06-01
+104,49,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-07-01
+840,876,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-08-01
+704,66,CANADA,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-09-01
+889,819,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-10-01
+107,351,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-11-01
+571,201,CANADA,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-12-01
+688,209,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-01-01
+544,51,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-02-01
+954,135,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-03-01
+445,47,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-04-01
+829,379,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-05-01
+464,758,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-06-01
+968,475,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-07-01
+842,343,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-08-01
+721,507,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-09-01
+966,269,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-10-01
+332,699,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-11-01
+328,824,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-12-01
+355,497,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-01-01
+506,44,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-02-01
+585,522,CANADA,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-03-01
+634,378,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-04-01
+662,689,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-05-01
+783,90,CANADA,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-06-01
+786,720,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-07-01
+710,343,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-08-01
+950,457,CANADA,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-09-01
+274,947,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-10-01
+406,834,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-11-01
+515,71,CANADA,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-12-01
+35,282,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-01-01
+995,538,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-02-01
+670,679,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-03-01
+406,601,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-04-01
+825,577,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-05-01
+467,908,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-06-01
+709,819,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-07-01
+522,687,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-08-01
+688,157,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-09-01
+956,111,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-10-01
+129,31,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-11-01
+687,790,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-12-01
+877,795,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-01-01
+845,379,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-02-01
+425,114,CANADA,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-03-01
+899,475,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-04-01
+987,747,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-05-01
+641,372,CANADA,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-06-01
+448,415,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-07-01
+341,955,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-08-01
+137,356,CANADA,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-09-01
+235,316,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-10-01
+482,351,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-11-01
+678,164,CANADA,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-12-01
+240,386,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-01-01
+605,113,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-02-01
+274,68,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-03-01
+422,885,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-04-01
+763,575,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-05-01
+561,743,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-06-01
+339,816,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-07-01
+877,203,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-08-01
+192,581,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-09-01
+604,815,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-10-01
+55,333,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-11-01
+87,40,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-12-01
+942,672,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-01-01
+912,23,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-02-01
+768,948,CANADA,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-03-01
+951,291,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-04-01
+768,839,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-05-01
+978,864,CANADA,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-06-01
+20,337,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-07-01
+298,95,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-08-01
+193,535,CANADA,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-09-01
+336,191,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-10-01
+617,412,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-11-01
+709,711,CANADA,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-12-01
+5,425,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-01-01
+164,215,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-02-01
+422,948,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-03-01
+424,544,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-04-01
+854,764,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-05-01
+168,446,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-06-01
+8,957,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-07-01
+748,967,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-08-01
+682,11,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-09-01
+300,110,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-10-01
+672,263,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-11-01
+894,215,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-12-01
+944,965,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-01-01
+403,423,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-02-01
+596,753,CANADA,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-03-01
+481,770,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-04-01
+503,263,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-05-01
+126,79,CANADA,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-06-01
+721,441,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-07-01
+271,858,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-08-01
+721,667,CANADA,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-09-01
+157,193,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-10-01
+991,394,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-11-01
+499,680,CANADA,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-12-01
+284,414,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-01-01
+705,770,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-02-01
+737,679,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-03-01
+745,7,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-04-01
+633,713,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-05-01
+983,851,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-06-01
+591,944,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-07-01
+42,130,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-08-01
+771,485,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-09-01
+465,23,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-10-01
+296,193,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-11-01
+890,7,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-12-01
+312,919,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-01-01
+777,768,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-02-01
+364,854,CANADA,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-03-01
+601,411,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-04-01
+823,736,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-05-01
+847,10,CANADA,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-06-01
+490,311,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-07-01
+387,348,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-08-01
+688,458,CANADA,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-09-01
+650,195,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-10-01
+447,658,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-11-01
+91,704,CANADA,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-12-01
+197,807,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-01-01
+51,861,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-02-01
+570,873,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-03-01
+423,933,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-04-01
+524,355,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-05-01
+416,794,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-06-01
+789,645,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-07-01
+551,700,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-08-01
+400,831,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-09-01
+361,800,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-10-01
+189,830,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-11-01
+554,828,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-12-01
+585,12,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-01-01
+281,501,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-02-01
+629,914,CANADA,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-03-01
+43,685,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-04-01
+533,755,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-05-01
+882,708,CANADA,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-06-01
+790,595,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-07-01
+600,32,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-08-01
+148,49,CANADA,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-09-01
+237,727,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-10-01
+488,239,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-11-01
+457,273,CANADA,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-12-01
+401,986,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-01-01
+181,544,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-02-01
+995,182,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-03-01
+120,197,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-04-01
+119,435,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-05-01
+319,974,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-06-01
+333,524,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-07-01
+923,688,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-08-01
+634,750,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-09-01
+493,155,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-10-01
+461,860,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-11-01
+304,102,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-12-01
+641,425,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-01-01
+992,224,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-02-01
+202,408,CANADA,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-03-01
+770,524,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-04-01
+202,816,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-05-01
+14,515,CANADA,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-06-01
+134,793,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-07-01
+977,460,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-08-01
+174,732,CANADA,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-09-01
+429,435,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-10-01
+514,38,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-11-01
+784,616,CANADA,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-12-01
+973,225,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-01-01
+511,402,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-02-01
+30,697,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-03-01
+895,567,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-04-01
+557,231,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-05-01
+282,372,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-06-01
+909,15,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-07-01
+276,866,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-08-01
+234,452,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-09-01
+479,663,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-10-01
+782,982,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-11-01
+755,813,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-12-01
+689,523,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-01-01
+496,871,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-02-01
+24,511,CANADA,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-03-01
+379,819,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-04-01
+441,525,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-05-01
+49,13,CANADA,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-06-01
+243,694,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-07-01
+295,782,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-08-01
+395,839,CANADA,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-09-01
+929,461,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-10-01
+997,303,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-11-01
+889,421,CANADA,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-12-01
+72,421,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-01-01
+926,433,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-02-01
+850,394,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-03-01
+826,338,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-04-01
+651,764,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-05-01
+854,216,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-06-01
+899,96,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-07-01
+309,550,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-08-01
+943,636,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-09-01
+138,427,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-10-01
+99,652,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-11-01
+270,478,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-12-01
+862,18,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-01-01
+574,40,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-02-01
+359,453,CANADA,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-03-01
+958,987,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-04-01
+791,26,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-05-01
+284,101,CANADA,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-06-01
+190,969,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-07-01
+527,492,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-08-01
+112,263,CANADA,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-09-01
+271,593,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-10-01
+643,923,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-11-01
+554,146,CANADA,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-12-01
+211,305,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-01-01
+368,318,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-02-01
+778,417,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-03-01
+808,623,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-04-01
+46,761,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-05-01
+466,272,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-06-01
+18,988,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-07-01
+87,821,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-08-01
+765,962,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-09-01
+62,615,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-10-01
+13,523,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-11-01
+775,806,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-12-01
+636,586,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-01-01
+458,520,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-02-01
+206,908,CANADA,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-03-01
+310,30,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-04-01
+813,247,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-05-01
+22,647,CANADA,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-06-01
+742,55,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-07-01
+394,154,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-08-01
+957,344,CANADA,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-09-01
+205,95,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-10-01
+198,665,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-11-01
+638,145,CANADA,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-12-01
+155,925,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-01-01
+688,395,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-02-01
+730,749,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-03-01
+208,279,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-04-01
+525,288,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-05-01
+483,509,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-06-01
+748,255,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-07-01
+6,214,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-08-01
+168,473,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-09-01
+301,702,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-10-01
+9,814,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-11-01
+778,231,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-12-01
+799,422,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-01-01
+309,572,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-02-01
+433,363,CANADA,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-03-01
+969,919,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-04-01
+181,355,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-05-01
+787,992,CANADA,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-06-01
+971,147,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-07-01
+440,183,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-08-01
+209,375,CANADA,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-09-01
+537,77,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-10-01
+364,308,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-11-01
+377,660,CANADA,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-12-01
+251,555,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-01-01
+607,455,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-02-01
+127,888,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-03-01
+513,652,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-04-01
+146,799,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-05-01
+917,249,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-06-01
+776,539,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-07-01
+330,198,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-08-01
+981,340,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-09-01
+862,152,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-10-01
+612,347,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-11-01
+607,565,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-12-01
+786,855,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-01-01
+160,87,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-02-01
+199,69,CANADA,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-03-01
+972,807,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-04-01
+870,565,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-05-01
+494,798,CANADA,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-06-01
+975,714,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-07-01
+760,17,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-08-01
+180,797,CANADA,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-09-01
+256,422,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-10-01
+422,621,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-11-01
+859,661,CANADA,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-12-01
+586,363,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-01-01
+441,910,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-02-01
+597,998,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-03-01
+717,95,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-04-01
+713,731,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-05-01
+591,718,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-06-01
+492,467,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-07-01
+170,126,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-08-01
+684,127,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-09-01
+981,746,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-10-01
+966,878,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-11-01
+439,27,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-12-01
+151,569,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-01-01
+602,812,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-02-01
+187,603,CANADA,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-03-01
+415,506,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-04-01
+61,185,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-05-01
+839,692,CANADA,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-06-01
+596,565,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-07-01
+751,512,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-08-01
+460,86,CANADA,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-09-01
+922,399,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-10-01
+153,672,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-11-01
+928,801,CANADA,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-12-01
+951,730,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-01-01
+394,408,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-02-01
+615,982,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-03-01
+653,499,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-04-01
+180,307,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-05-01
+649,741,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-06-01
+921,640,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-07-01
+11,300,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-08-01
+696,929,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-09-01
+795,309,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-10-01
+550,340,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-11-01
+320,228,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-12-01
+845,1000,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-01-01
+245,21,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-02-01
+142,583,CANADA,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-03-01
+717,506,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-04-01
+3,405,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-05-01
+790,556,CANADA,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-06-01
+646,72,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-07-01
+230,103,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-08-01
+938,262,CANADA,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-09-01
+629,102,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-10-01
+317,841,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-11-01
+812,159,CANADA,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-12-01
+141,570,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-01-01
+64,375,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-02-01
+207,298,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-03-01
+435,32,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-04-01
+96,760,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-05-01
+252,338,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-06-01
+956,149,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-07-01
+633,343,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-08-01
+190,151,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-09-01
+227,44,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-10-01
+24,583,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-11-01
+420,230,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-12-01
+910,907,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-01-01
+709,783,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-02-01
+810,117,CANADA,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-03-01
+723,416,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-04-01
+911,318,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-05-01
+230,888,CANADA,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-06-01
+448,60,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-07-01
+945,596,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-08-01
+508,576,CANADA,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-09-01
+262,576,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-10-01
+441,280,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-11-01
+15,219,CANADA,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-12-01
+795,133,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-01-01
+301,273,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-02-01
+304,86,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-03-01
+49,400,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-04-01
+576,364,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-05-01
+669,63,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-06-01
+325,929,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-07-01
+272,344,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-08-01
+80,768,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-09-01
+46,668,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-10-01
+223,407,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-11-01
+774,536,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-12-01
+784,657,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-01-01
+92,215,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-02-01
+67,966,CANADA,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-03-01
+747,674,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-04-01
+686,574,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-05-01
+93,266,CANADA,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-06-01
+192,680,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-07-01
+51,362,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-08-01
+498,412,CANADA,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-09-01
+546,431,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-10-01
+485,94,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-11-01
+925,345,CANADA,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-12-01
+292,445,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-01-01
+540,632,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-02-01
+21,855,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-03-01
+100,36,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-04-01
+49,250,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-05-01
+353,427,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-06-01
+911,367,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-07-01
+823,245,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-08-01
+278,893,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-09-01
+576,490,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-10-01
+655,88,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-11-01
+763,964,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-12-01
+88,62,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-01-01
+746,506,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-02-01
+927,680,CANADA,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-03-01
+297,153,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-04-01
+291,403,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-05-01
+838,98,CANADA,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-06-01
+112,376,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-07-01
+509,477,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-08-01
+472,50,CANADA,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-09-01
+495,592,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-10-01
+1000,813,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-11-01
+241,740,CANADA,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-12-01
+693,873,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-01-01
+903,459,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-02-01
+791,224,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-03-01
+108,562,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-04-01
+845,199,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-05-01
+452,275,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-06-01
+479,355,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-07-01
+410,947,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-08-01
+379,454,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-09-01
+740,450,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-10-01
+471,575,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-11-01
+325,6,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-12-01
+455,847,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-01-01
+563,338,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-02-01
+879,517,CANADA,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-03-01
+312,630,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-04-01
+587,381,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-05-01
+628,864,CANADA,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-06-01
+486,416,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-07-01
+811,852,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-08-01
+990,815,CANADA,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-09-01
+35,23,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-10-01
+764,527,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-11-01
+619,693,CANADA,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-12-01
+996,977,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-01-01
+554,549,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-02-01
+540,951,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-03-01
+140,390,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-04-01
+554,204,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-05-01
+724,78,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-06-01
+693,613,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-07-01
+866,745,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-08-01
+833,56,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-09-01
+164,887,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-10-01
+753,651,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-11-01
+60,691,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-12-01
+688,767,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-01-01
+883,709,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-02-01
+109,417,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-03-01
+950,326,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-04-01
+438,599,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-05-01
+286,818,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-06-01
+342,13,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-07-01
+383,185,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-08-01
+80,140,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-09-01
+322,717,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-10-01
+749,852,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-11-01
+606,125,GERMANY,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-12-01
+641,325,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-01-01
+494,648,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-02-01
+428,365,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-03-01
+936,120,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-04-01
+597,347,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-05-01
+728,638,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-06-01
+933,732,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-07-01
+663,465,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-08-01
+394,262,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-09-01
+334,947,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-10-01
+114,694,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-11-01
+89,482,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-12-01
+874,600,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-01-01
+674,94,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-02-01
+347,323,GERMANY,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-03-01
+105,49,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-04-01
+286,70,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-05-01
+669,844,GERMANY,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-06-01
+786,773,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-07-01
+104,68,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-08-01
+770,110,GERMANY,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-09-01
+263,42,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-10-01
+900,171,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-11-01
+630,644,GERMANY,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-12-01
+597,408,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-01-01
+185,45,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-02-01
+175,522,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-03-01
+576,166,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-04-01
+957,885,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-05-01
+993,713,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-06-01
+500,838,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-07-01
+410,267,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-08-01
+592,967,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-09-01
+64,529,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-10-01
+208,656,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-11-01
+273,665,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-12-01
+906,419,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-01-01
+429,776,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-02-01
+961,971,GERMANY,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-03-01
+338,248,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-04-01
+472,486,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-05-01
+903,674,GERMANY,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-06-01
+299,603,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-07-01
+948,492,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-08-01
+931,512,GERMANY,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-09-01
+570,391,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-10-01
+97,313,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-11-01
+674,758,GERMANY,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-12-01
+468,304,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-01-01
+430,846,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-02-01
+893,912,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-03-01
+519,810,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-04-01
+267,122,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-05-01
+908,102,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-06-01
+176,161,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-07-01
+673,450,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-08-01
+798,215,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-09-01
+291,765,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-10-01
+583,557,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-11-01
+442,739,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-12-01
+951,811,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-01-01
+430,780,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-02-01
+559,645,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-03-01
+726,365,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-04-01
+944,597,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-05-01
+497,126,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-06-01
+388,655,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-07-01
+81,604,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-08-01
+111,280,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-09-01
+288,115,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-10-01
+845,205,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-11-01
+745,672,GERMANY,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-12-01
+352,339,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-01-01
+234,70,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-02-01
+167,528,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-03-01
+606,220,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-04-01
+670,691,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-05-01
+764,197,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-06-01
+659,239,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-07-01
+996,50,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-08-01
+424,135,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-09-01
+899,972,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-10-01
+392,475,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-11-01
+555,868,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-12-01
+860,451,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-01-01
+114,565,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-02-01
+943,116,GERMANY,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-03-01
+365,385,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-04-01
+249,375,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-05-01
+192,357,GERMANY,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-06-01
+328,230,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-07-01
+311,829,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-08-01
+576,971,GERMANY,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-09-01
+915,280,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-10-01
+522,853,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-11-01
+625,953,GERMANY,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-12-01
+873,874,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-01-01
+498,578,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-02-01
+808,768,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-03-01
+742,178,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-04-01
+744,916,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-05-01
+30,917,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-06-01
+747,633,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-07-01
+672,107,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-08-01
+564,523,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-09-01
+785,924,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-10-01
+825,481,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-11-01
+243,240,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-12-01
+959,819,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-01-01
+123,602,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-02-01
+714,538,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-03-01
+252,632,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-04-01
+715,952,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-05-01
+670,480,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-06-01
+81,700,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-07-01
+653,726,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-08-01
+795,526,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-09-01
+182,410,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-10-01
+725,307,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-11-01
+101,73,GERMANY,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-12-01
+143,232,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-01-01
+15,993,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-02-01
+742,652,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-03-01
+339,761,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-04-01
+39,428,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-05-01
+465,4,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-06-01
+889,101,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-07-01
+856,869,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-08-01
+358,271,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-09-01
+452,633,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-10-01
+387,481,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-11-01
+824,302,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-12-01
+185,245,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-01-01
+151,941,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-02-01
+419,721,GERMANY,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-03-01
+643,893,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-04-01
+63,898,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-05-01
+202,94,GERMANY,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-06-01
+332,962,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-07-01
+723,71,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-08-01
+148,108,GERMANY,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-09-01
+840,71,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-10-01
+601,767,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-11-01
+962,323,GERMANY,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-12-01
+166,982,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-01-01
+531,614,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-02-01
+963,839,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-03-01
+994,388,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-04-01
+978,296,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-05-01
+72,429,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-06-01
+33,901,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-07-01
+428,350,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-08-01
+413,581,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-09-01
+737,583,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-10-01
+85,92,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-11-01
+916,647,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-12-01
+785,771,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-01-01
+302,26,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-02-01
+1000,598,GERMANY,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-03-01
+458,715,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-04-01
+896,74,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-05-01
+615,580,GERMANY,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-06-01
+174,848,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-07-01
+651,118,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-08-01
+784,54,GERMANY,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-09-01
+121,929,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-10-01
+341,393,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-11-01
+615,820,GERMANY,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-12-01
+697,336,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-01-01
+215,299,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-02-01
+197,747,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-03-01
+205,154,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-04-01
+256,486,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-05-01
+377,251,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-06-01
+577,225,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-07-01
+686,77,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-08-01
+332,74,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-09-01
+534,596,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-10-01
+485,493,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-11-01
+594,782,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-12-01
+413,487,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-01-01
+13,127,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-02-01
+483,538,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-03-01
+820,94,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-04-01
+745,252,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-05-01
+79,722,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-06-01
+36,536,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-07-01
+950,958,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-08-01
+74,466,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-09-01
+458,309,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-10-01
+609,680,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-11-01
+429,539,GERMANY,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-12-01
+956,511,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-01-01
+205,505,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-02-01
+629,720,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-03-01
+277,823,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-04-01
+266,21,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-05-01
+872,142,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-06-01
+435,95,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-07-01
+988,398,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-08-01
+953,328,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-09-01
+556,151,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-10-01
+211,978,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-11-01
+389,918,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-12-01
+351,542,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-01-01
+14,96,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-02-01
+181,496,GERMANY,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-03-01
+452,77,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-04-01
+511,236,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-05-01
+193,913,GERMANY,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-06-01
+797,49,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-07-01
+988,967,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-08-01
+487,502,GERMANY,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-09-01
+941,790,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-10-01
+577,121,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-11-01
+456,55,GERMANY,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-12-01
+982,739,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-01-01
+593,683,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-02-01
+702,610,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-03-01
+528,248,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-04-01
+873,530,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-05-01
+301,889,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-06-01
+769,245,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-07-01
+724,473,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-08-01
+466,938,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-09-01
+774,150,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-10-01
+111,772,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-11-01
+954,201,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-12-01
+780,945,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-01-01
+210,177,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-02-01
+93,378,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-03-01
+332,83,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-04-01
+186,803,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-05-01
+782,398,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-06-01
+41,215,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-07-01
+222,194,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-08-01
+992,287,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-09-01
+477,410,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-10-01
+948,50,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-11-01
+817,204,GERMANY,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-12-01
+597,239,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-01-01
+649,637,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-02-01
+3,938,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-03-01
+731,788,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-04-01
+181,399,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-05-01
+468,576,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-06-01
+891,187,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-07-01
+226,703,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-08-01
+28,455,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-09-01
+609,244,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-10-01
+224,868,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-11-01
+230,353,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-12-01
+216,101,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-01-01
+282,924,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-02-01
+501,144,GERMANY,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-03-01
+320,0,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-04-01
+720,910,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-05-01
+464,259,GERMANY,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-06-01
+363,107,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-07-01
+49,63,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-08-01
+223,270,GERMANY,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-09-01
+452,554,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-10-01
+210,154,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-11-01
+444,205,GERMANY,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-12-01
+222,441,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-01-01
+678,183,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-02-01
+25,459,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-03-01
+57,810,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-04-01
+981,268,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-05-01
+740,916,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-06-01
+408,742,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-07-01
+966,522,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-08-01
+107,299,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-09-01
+488,677,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-10-01
+759,709,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-11-01
+504,310,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-12-01
+99,160,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-01-01
+503,698,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-02-01
+724,540,GERMANY,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-03-01
+309,901,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-04-01
+625,34,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-05-01
+294,536,GERMANY,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-06-01
+890,780,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-07-01
+501,716,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-08-01
+34,532,GERMANY,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-09-01
+203,871,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-10-01
+140,199,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-11-01
+845,845,GERMANY,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-12-01
+774,591,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-01-01
+645,378,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-02-01
+986,942,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-03-01
+296,686,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-04-01
+936,720,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-05-01
+341,546,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-06-01
+32,845,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-07-01
+277,667,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-08-01
+548,627,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-09-01
+727,142,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-10-01
+812,655,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-11-01
+168,556,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-12-01
+150,459,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-01-01
+136,89,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-02-01
+695,726,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-03-01
+363,38,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-04-01
+853,60,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-05-01
+621,369,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-06-01
+764,381,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-07-01
+669,465,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-08-01
+772,981,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-09-01
+228,758,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-10-01
+261,31,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-11-01
+821,237,GERMANY,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-12-01
+100,285,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-01-01
+465,94,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-02-01
+350,561,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-03-01
+991,143,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-04-01
+910,95,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-05-01
+206,341,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-06-01
+263,388,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-07-01
+374,272,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-08-01
+875,890,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-09-01
+810,734,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-10-01
+398,364,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-11-01
+565,619,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-12-01
+417,517,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-01-01
+291,781,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-02-01
+251,327,GERMANY,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-03-01
+449,48,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-04-01
+774,809,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-05-01
+386,73,GERMANY,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-06-01
+22,936,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-07-01
+940,400,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-08-01
+132,736,GERMANY,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-09-01
+103,211,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-10-01
+152,271,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-11-01
+952,855,GERMANY,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-12-01
+872,923,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-01-01
+748,854,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-02-01
+749,769,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-03-01
+876,271,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-04-01
+860,383,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-05-01
+900,29,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-06-01
+705,185,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-07-01
+913,351,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-08-01
+315,560,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-09-01
+466,840,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-10-01
+233,517,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-11-01
+906,949,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-12-01
+148,633,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-01-01
+661,636,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-02-01
+847,138,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-03-01
+768,481,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-04-01
+866,408,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-05-01
+475,130,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-06-01
+112,813,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-07-01
+136,661,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-08-01
+763,311,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-09-01
+388,872,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-10-01
+996,643,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-11-01
+486,174,GERMANY,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-12-01
+494,528,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-01-01
+771,124,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-02-01
+49,126,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-03-01
+322,440,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-04-01
+878,881,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-05-01
+827,292,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-06-01
+852,873,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-07-01
+716,357,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-08-01
+81,247,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-09-01
+916,18,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-10-01
+673,395,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-11-01
+242,620,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-12-01
+914,946,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-01-01
+902,72,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-02-01
+707,691,GERMANY,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-03-01
+223,95,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-04-01
+619,878,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-05-01
+254,757,GERMANY,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-06-01
+688,898,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-07-01
+477,172,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-08-01
+280,419,GERMANY,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-09-01
+546,849,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-10-01
+630,807,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-11-01
+455,599,GERMANY,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-12-01
+505,59,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-01-01
+823,790,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-02-01
+891,574,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-03-01
+840,96,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-04-01
+436,376,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-05-01
+168,352,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-06-01
+177,741,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-07-01
+727,12,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-08-01
+278,157,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-09-01
+443,10,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-10-01
+905,544,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-11-01
+881,817,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-12-01
+507,754,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-01-01
+363,425,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-02-01
+603,492,GERMANY,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-03-01
+473,485,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-04-01
+128,369,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-05-01
+105,560,GERMANY,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-06-01
+325,651,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-07-01
+711,326,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-08-01
+983,180,GERMANY,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-09-01
+241,935,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-10-01
+71,403,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-11-01
+395,345,GERMANY,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-12-01
+168,278,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-01-01
+512,376,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-02-01
+291,104,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-03-01
+776,543,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-04-01
+271,798,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-05-01
+946,333,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-06-01
+195,833,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-07-01
+165,132,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-08-01
+238,629,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-09-01
+409,337,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-10-01
+720,300,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-11-01
+309,470,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-12-01
+812,875,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-01-01
+441,237,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-02-01
+500,272,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-03-01
+517,860,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-04-01
+924,415,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-05-01
+572,140,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-06-01
+768,367,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-07-01
+692,195,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-08-01
+28,245,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-09-01
+202,285,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-10-01
+76,98,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-11-01
+421,932,GERMANY,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-12-01
+636,898,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-01-01
+52,330,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-02-01
+184,603,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-03-01
+739,280,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-04-01
+841,507,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-05-01
+65,202,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-06-01
+623,513,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-07-01
+517,132,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-08-01
+636,21,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-09-01
+845,657,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-10-01
+232,195,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-11-01
+26,323,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-12-01
+680,299,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-01-01
+364,811,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-02-01
+572,739,GERMANY,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-03-01
+145,889,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-04-01
+644,189,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-05-01
+87,698,GERMANY,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-06-01
+620,646,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-07-01
+535,562,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-08-01
+661,753,GERMANY,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-09-01
+884,425,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-10-01
+689,693,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-11-01
+646,941,GERMANY,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-12-01
+4,975,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-01-01
+813,455,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-02-01
+773,260,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1993,1993-03-01
+205,69,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-04-01
+657,147,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-05-01
+154,533,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1993,1993-06-01
+747,881,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-07-01
+787,457,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-08-01
+867,441,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1993,1993-09-01
+307,859,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-10-01
+571,177,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-11-01
+92,633,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1993,1993-12-01
+269,382,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-01-01
+764,707,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-02-01
+662,566,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,1,1994,1994-03-01
+818,349,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-04-01
+617,128,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-05-01
+649,231,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,2,1994,1994-06-01
+895,258,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-07-01
+750,812,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-08-01
+738,362,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,3,1994,1994-09-01
+107,133,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-10-01
+278,60,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-11-01
+32,88,U.S.A.,EAST,EDUCATION,FURNITURE,SOFA,4,1994,1994-12-01
+129,378,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-01-01
+187,569,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-02-01
+670,186,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1993,1993-03-01
+678,875,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-04-01
+423,636,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-05-01
+389,360,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1993,1993-06-01
+257,677,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-07-01
+780,708,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-08-01
+159,158,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1993,1993-09-01
+97,384,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-10-01
+479,927,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-11-01
+9,134,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1993,1993-12-01
+614,273,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-01-01
+261,27,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-02-01
+115,209,U.S.A.,EAST,EDUCATION,FURNITURE,BED,1,1994,1994-03-01
+358,470,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-04-01
+133,219,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-05-01
+891,907,U.S.A.,EAST,EDUCATION,FURNITURE,BED,2,1994,1994-06-01
+702,778,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-07-01
+58,998,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-08-01
+606,194,U.S.A.,EAST,EDUCATION,FURNITURE,BED,3,1994,1994-09-01
+668,933,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-10-01
+813,708,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-11-01
+450,949,U.S.A.,EAST,EDUCATION,FURNITURE,BED,4,1994,1994-12-01
+956,579,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-01-01
+276,131,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-02-01
+889,689,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1993,1993-03-01
+708,908,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-04-01
+14,524,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-05-01
+904,336,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1993,1993-06-01
+272,916,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-07-01
+257,236,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-08-01
+343,965,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1993,1993-09-01
+80,350,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-10-01
+530,599,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-11-01
+340,901,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1993,1993-12-01
+595,935,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-01-01
+47,667,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-02-01
+279,104,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,1,1994,1994-03-01
+293,803,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-04-01
+162,64,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-05-01
+935,825,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,2,1994,1994-06-01
+689,839,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-07-01
+484,184,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-08-01
+230,348,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,3,1994,1994-09-01
+164,904,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-10-01
+401,219,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-11-01
+607,381,U.S.A.,EAST,EDUCATION,OFFICE,TABLE,4,1994,1994-12-01
+229,524,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-01-01
+786,902,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-02-01
+92,212,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1993,1993-03-01
+455,762,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-04-01
+409,182,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-05-01
+166,442,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1993,1993-06-01
+277,919,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-07-01
+92,67,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-08-01
+631,741,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1993,1993-09-01
+390,617,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-10-01
+403,214,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-11-01
+964,202,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1993,1993-12-01
+223,788,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-01-01
+684,639,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-02-01
+645,336,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,1,1994,1994-03-01
+470,937,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-04-01
+424,399,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-05-01
+862,21,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,2,1994,1994-06-01
+736,125,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-07-01
+554,635,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-08-01
+790,229,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,3,1994,1994-09-01
+115,770,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-10-01
+853,622,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-11-01
+643,109,U.S.A.,EAST,EDUCATION,OFFICE,CHAIR,4,1994,1994-12-01
+794,975,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-01-01
+892,820,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-02-01
+728,123,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1993,1993-03-01
+744,135,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-04-01
+678,535,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-05-01
+768,971,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1993,1993-06-01
+234,166,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-07-01
+333,814,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-08-01
+968,557,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1993,1993-09-01
+119,820,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-10-01
+469,486,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-11-01
+261,429,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1993,1993-12-01
+984,65,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-01-01
+845,977,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-02-01
+374,410,U.S.A.,EAST,EDUCATION,OFFICE,DESK,1,1994,1994-03-01
+687,150,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-04-01
+157,630,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-05-01
+49,488,U.S.A.,EAST,EDUCATION,OFFICE,DESK,2,1994,1994-06-01
+817,112,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-07-01
+223,598,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-08-01
+433,705,U.S.A.,EAST,EDUCATION,OFFICE,DESK,3,1994,1994-09-01
+41,226,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-10-01
+396,979,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-11-01
+131,19,U.S.A.,EAST,EDUCATION,OFFICE,DESK,4,1994,1994-12-01
+521,204,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-01-01
+751,805,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-02-01
+45,549,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1993,1993-03-01
+144,912,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-04-01
+119,427,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-05-01
+728,1,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1993,1993-06-01
+120,540,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-07-01
+657,940,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-08-01
+409,644,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1993,1993-09-01
+881,821,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-10-01
+113,560,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-11-01
+831,309,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1993,1993-12-01
+129,1000,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-01-01
+76,945,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-02-01
+260,931,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,1,1994,1994-03-01
+882,504,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-04-01
+157,950,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-05-01
+443,278,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,2,1994,1994-06-01
+111,225,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-07-01
+497,6,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-08-01
+321,124,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,3,1994,1994-09-01
+194,206,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-10-01
+684,320,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-11-01
+634,270,U.S.A.,EAST,CONSUMER,FURNITURE,SOFA,4,1994,1994-12-01
+622,278,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-01-01
+689,447,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-02-01
+120,170,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1993,1993-03-01
+374,87,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-04-01
+926,384,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-05-01
+687,574,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1993,1993-06-01
+600,585,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-07-01
+779,947,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-08-01
+223,984,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1993,1993-09-01
+628,189,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-10-01
+326,364,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-11-01
+836,49,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1993,1993-12-01
+361,851,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-01-01
+444,643,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-02-01
+501,143,U.S.A.,EAST,CONSUMER,FURNITURE,BED,1,1994,1994-03-01
+743,763,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-04-01
+861,987,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-05-01
+203,264,U.S.A.,EAST,CONSUMER,FURNITURE,BED,2,1994,1994-06-01
+762,439,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-07-01
+705,750,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-08-01
+153,37,U.S.A.,EAST,CONSUMER,FURNITURE,BED,3,1994,1994-09-01
+436,95,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-10-01
+428,79,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-11-01
+804,832,U.S.A.,EAST,CONSUMER,FURNITURE,BED,4,1994,1994-12-01
+805,649,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-01-01
+860,838,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-02-01
+104,439,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1993,1993-03-01
+434,207,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-04-01
+912,804,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-05-01
+571,875,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1993,1993-06-01
+267,473,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-07-01
+415,845,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-08-01
+261,91,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1993,1993-09-01
+746,630,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-10-01
+30,185,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-11-01
+662,317,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1993,1993-12-01
+916,88,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-01-01
+415,607,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-02-01
+514,35,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,1,1994,1994-03-01
+756,680,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-04-01
+461,78,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-05-01
+460,117,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,2,1994,1994-06-01
+305,440,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-07-01
+198,652,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-08-01
+234,249,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,3,1994,1994-09-01
+638,658,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-10-01
+88,563,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-11-01
+751,737,U.S.A.,EAST,CONSUMER,OFFICE,TABLE,4,1994,1994-12-01
+816,789,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-01-01
+437,988,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-02-01
+715,220,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1993,1993-03-01
+780,946,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-04-01
+245,986,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-05-01
+201,129,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1993,1993-06-01
+815,433,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-07-01
+865,492,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-08-01
+634,306,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1993,1993-09-01
+901,154,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-10-01
+789,206,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-11-01
+882,81,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1993,1993-12-01
+953,882,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-01-01
+862,848,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-02-01
+628,664,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,1,1994,1994-03-01
+765,389,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-04-01
+741,182,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-05-01
+61,505,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,2,1994,1994-06-01
+470,861,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-07-01
+869,263,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-08-01
+650,400,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,3,1994,1994-09-01
+750,556,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-10-01
+602,497,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-11-01
+54,181,U.S.A.,EAST,CONSUMER,OFFICE,CHAIR,4,1994,1994-12-01
+384,619,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-01-01
+161,332,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-02-01
+977,669,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1993,1993-03-01
+615,487,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-04-01
+783,994,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-05-01
+977,331,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1993,1993-06-01
+375,739,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-07-01
+298,665,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-08-01
+104,921,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1993,1993-09-01
+713,862,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-10-01
+556,662,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-11-01
+323,517,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1993,1993-12-01
+391,352,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-01-01
+593,166,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-02-01
+906,859,U.S.A.,EAST,CONSUMER,OFFICE,DESK,1,1994,1994-03-01
+130,571,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-04-01
+613,976,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-05-01
+58,466,U.S.A.,EAST,CONSUMER,OFFICE,DESK,2,1994,1994-06-01
+314,79,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-07-01
+67,864,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-08-01
+654,623,U.S.A.,EAST,CONSUMER,OFFICE,DESK,3,1994,1994-09-01
+312,170,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-10-01
+349,662,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-11-01
+415,763,U.S.A.,EAST,CONSUMER,OFFICE,DESK,4,1994,1994-12-01
+404,896,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-01-01
+22,973,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-02-01
+744,161,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1993,1993-03-01
+804,934,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-04-01
+101,697,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-05-01
+293,116,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1993,1993-06-01
+266,84,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-07-01
+372,604,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-08-01
+38,371,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1993,1993-09-01
+385,783,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-10-01
+262,335,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-11-01
+961,321,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1993,1993-12-01
+831,177,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-01-01
+579,371,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-02-01
+301,583,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,1,1994,1994-03-01
+693,364,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-04-01
+895,343,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-05-01
+320,854,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,2,1994,1994-06-01
+284,691,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-07-01
+362,387,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-08-01
+132,298,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,3,1994,1994-09-01
+42,635,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-10-01
+118,81,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-11-01
+42,375,U.S.A.,WEST,EDUCATION,FURNITURE,SOFA,4,1994,1994-12-01
+18,846,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-01-01
+512,933,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-02-01
+337,237,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1993,1993-03-01
+167,964,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-04-01
+749,382,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-05-01
+890,610,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1993,1993-06-01
+910,148,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-07-01
+403,837,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-08-01
+403,85,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1993,1993-09-01
+661,425,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-10-01
+485,633,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-11-01
+789,515,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1993,1993-12-01
+415,512,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-01-01
+418,156,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-02-01
+163,464,U.S.A.,WEST,EDUCATION,FURNITURE,BED,1,1994,1994-03-01
+298,813,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-04-01
+584,455,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-05-01
+797,366,U.S.A.,WEST,EDUCATION,FURNITURE,BED,2,1994,1994-06-01
+767,734,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-07-01
+984,451,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-08-01
+388,134,U.S.A.,WEST,EDUCATION,FURNITURE,BED,3,1994,1994-09-01
+924,547,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-10-01
+566,802,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-11-01
+390,61,U.S.A.,WEST,EDUCATION,FURNITURE,BED,4,1994,1994-12-01
+608,556,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-01-01
+840,202,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-02-01
+112,964,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1993,1993-03-01
+288,112,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-04-01
+408,445,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-05-01
+876,884,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1993,1993-06-01
+224,348,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-07-01
+133,564,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-08-01
+662,568,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1993,1993-09-01
+68,882,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-10-01
+626,542,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-11-01
+678,119,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1993,1993-12-01
+361,248,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-01-01
+464,868,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-02-01
+681,841,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,1,1994,1994-03-01
+377,484,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-04-01
+222,986,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-05-01
+972,39,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,2,1994,1994-06-01
+56,930,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-07-01
+695,252,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-08-01
+908,794,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,3,1994,1994-09-01
+328,658,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-10-01
+891,139,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-11-01
+265,331,U.S.A.,WEST,EDUCATION,OFFICE,TABLE,4,1994,1994-12-01
+251,261,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-01-01
+783,122,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-02-01
+425,296,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1993,1993-03-01
+859,391,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-04-01
+314,75,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-05-01
+153,731,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1993,1993-06-01
+955,883,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-07-01
+654,707,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-08-01
+693,97,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1993,1993-09-01
+757,390,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-10-01
+221,237,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-11-01
+942,496,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1993,1993-12-01
+31,814,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-01-01
+540,765,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-02-01
+352,308,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,1,1994,1994-03-01
+904,327,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-04-01
+436,266,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-05-01
+281,699,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,2,1994,1994-06-01
+801,599,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-07-01
+273,950,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-08-01
+716,117,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,3,1994,1994-09-01
+902,632,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-10-01
+341,35,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-11-01
+155,562,U.S.A.,WEST,EDUCATION,OFFICE,CHAIR,4,1994,1994-12-01
+796,144,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-01-01
+257,142,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-02-01
+611,273,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1993,1993-03-01
+6,915,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-04-01
+125,920,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-05-01
+745,294,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1993,1993-06-01
+437,681,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-07-01
+906,86,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-08-01
+844,764,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1993,1993-09-01
+413,269,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-10-01
+869,138,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-11-01
+403,834,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1993,1993-12-01
+137,112,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-01-01
+922,921,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-02-01
+202,859,U.S.A.,WEST,EDUCATION,OFFICE,DESK,1,1994,1994-03-01
+955,442,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-04-01
+781,593,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-05-01
+12,346,U.S.A.,WEST,EDUCATION,OFFICE,DESK,2,1994,1994-06-01
+931,312,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-07-01
+95,690,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-08-01
+795,344,U.S.A.,WEST,EDUCATION,OFFICE,DESK,3,1994,1994-09-01
+542,784,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-10-01
+935,639,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-11-01
+269,726,U.S.A.,WEST,EDUCATION,OFFICE,DESK,4,1994,1994-12-01
+197,596,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-01-01
+828,263,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-02-01
+461,194,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1993,1993-03-01
+35,895,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-04-01
+88,502,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-05-01
+832,342,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1993,1993-06-01
+900,421,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-07-01
+368,901,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-08-01
+201,474,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1993,1993-09-01
+758,571,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-10-01
+504,511,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-11-01
+864,379,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1993,1993-12-01
+574,68,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-01-01
+61,210,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-02-01
+565,478,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,1,1994,1994-03-01
+475,296,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-04-01
+44,664,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-05-01
+145,880,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,2,1994,1994-06-01
+813,607,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-07-01
+703,97,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-08-01
+757,908,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,3,1994,1994-09-01
+96,152,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-10-01
+860,622,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-11-01
+750,309,U.S.A.,WEST,CONSUMER,FURNITURE,SOFA,4,1994,1994-12-01
+585,912,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-01-01
+127,429,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-02-01
+669,580,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1993,1993-03-01
+708,179,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-04-01
+830,119,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-05-01
+550,369,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1993,1993-06-01
+762,882,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-07-01
+468,727,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-08-01
+151,823,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1993,1993-09-01
+103,783,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-10-01
+876,884,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-11-01
+881,891,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1993,1993-12-01
+116,909,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-01-01
+677,765,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-02-01
+477,180,U.S.A.,WEST,CONSUMER,FURNITURE,BED,1,1994,1994-03-01
+154,712,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-04-01
+331,175,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-05-01
+784,869,U.S.A.,WEST,CONSUMER,FURNITURE,BED,2,1994,1994-06-01
+563,820,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-07-01
+229,554,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-08-01
+451,126,U.S.A.,WEST,CONSUMER,FURNITURE,BED,3,1994,1994-09-01
+974,760,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-10-01
+484,446,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-11-01
+69,254,U.S.A.,WEST,CONSUMER,FURNITURE,BED,4,1994,1994-12-01
+755,516,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-01-01
+331,779,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-02-01
+482,987,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1993,1993-03-01
+632,318,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-04-01
+750,427,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-05-01
+618,86,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1993,1993-06-01
+935,553,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-07-01
+716,315,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-08-01
+205,328,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1993,1993-09-01
+215,521,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-10-01
+871,156,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-11-01
+552,841,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1993,1993-12-01
+619,623,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-01-01
+701,849,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-02-01
+104,438,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,1,1994,1994-03-01
+114,719,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-04-01
+854,906,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-05-01
+563,267,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,2,1994,1994-06-01
+73,542,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-07-01
+427,552,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-08-01
+348,428,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,3,1994,1994-09-01
+148,158,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-10-01
+895,379,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-11-01
+394,142,U.S.A.,WEST,CONSUMER,OFFICE,TABLE,4,1994,1994-12-01
+792,588,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-01-01
+175,506,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-02-01
+208,382,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1993,1993-03-01
+354,132,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-04-01
+163,652,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-05-01
+336,723,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1993,1993-06-01
+804,682,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-07-01
+863,382,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-08-01
+326,125,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1993,1993-09-01
+568,321,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-10-01
+691,922,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-11-01
+152,884,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1993,1993-12-01
+565,38,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-01-01
+38,194,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-02-01
+185,996,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,1,1994,1994-03-01
+318,532,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-04-01
+960,391,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-05-01
+122,104,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,2,1994,1994-06-01
+400,22,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-07-01
+301,650,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-08-01
+909,143,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,3,1994,1994-09-01
+433,999,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-10-01
+508,415,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-11-01
+648,350,U.S.A.,WEST,CONSUMER,OFFICE,CHAIR,4,1994,1994-12-01
+793,342,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-01-01
+129,215,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-02-01
+481,52,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1993,1993-03-01
+406,292,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-04-01
+512,862,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-05-01
+668,309,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1993,1993-06-01
+551,886,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-07-01
+124,172,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-08-01
+655,912,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1993,1993-09-01
+523,666,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-10-01
+739,656,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-11-01
+87,145,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1993,1993-12-01
+890,664,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-01-01
+665,639,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-02-01
+329,707,U.S.A.,WEST,CONSUMER,OFFICE,DESK,1,1994,1994-03-01
+417,891,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-04-01
+828,466,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-05-01
+298,451,U.S.A.,WEST,CONSUMER,OFFICE,DESK,2,1994,1994-06-01
+356,451,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-07-01
+909,874,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-08-01
+251,805,U.S.A.,WEST,CONSUMER,OFFICE,DESK,3,1994,1994-09-01
+526,426,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-10-01
+652,932,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-11-01
+573,581,U.S.A.,WEST,CONSUMER,OFFICE,DESK,4,1994,1994-12-01
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index 730bf94cb2987..c3fb85811ca2a 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -139,8 +139,8 @@ def test_productsales():
fname = os.path.join(dirpath, "productsales.sas7bdat")
df = pd.read_sas(fname, encoding='utf-8')
fname = os.path.join(dirpath, "productsales.csv")
- df0 = pd.read_csv(fname)
- vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR", "MONTH"]
+ df0 = pd.read_csv(fname, parse_dates=['MONTH'])
+ vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"]
df0[vn] = df0[vn].astype(np.float64)
tm.assert_frame_equal(df, df0)
@@ -163,3 +163,14 @@ def test_airline():
df0 = pd.read_csv(fname)
df0 = df0.astype(np.float64)
tm.assert_frame_equal(df, df0, check_exact=False)
+
+
+def test_date_time():
+ # Support of different SAS date/datetime formats (PR #15871)
+ dirpath = tm.get_data_path()
+ fname = os.path.join(dirpath, "datetime.sas7bdat")
+ df = pd.read_sas(fname)
+ fname = os.path.join(dirpath, "datetime.csv")
+ df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime',
+ 'DateTimeHi', 'Taiw'])
+ tm.assert_frame_equal(df, df0)
| updated p.r. #13089 after `origin` parameter was added by #15828
- [ v ] tests added / passed
- [ .. ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15871 | 2017-04-03T06:34:49Z | 2017-08-18T00:52:35Z | 2017-08-18T00:52:35Z | 2017-08-18T00:52:43Z |
COMPAT: NaT support tz_localize / tz_convert (#15830) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 230f39db67197..781a912555e14 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -837,6 +837,8 @@ Other API Changes
ignored (no longer needed to specify the new behaviour) and is deprecated.
- ``NaT`` will now correctly report ``False`` for datetimelike boolean operations such as ``is_month_start`` (:issue:`15781`)
- ``NaT`` will now correctly return ``np.nan`` for ``Timedelta`` and ``Period`` accessors such as ``days`` and ``quarter`` (:issue:`15782`)
+- ``NaT`` will now returns ``NaT`` for ``tz_localize`` and ``tz_convert``
+ methods (:issue:`15830`)
.. _whatsnew_0200.deprecations:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index d441f1ec4759b..5aa8e15d0d087 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -3835,7 +3835,8 @@ for field in fields:
# to the NaTType class; these can return NaT, np.nan
# or raise respectively
_nat_methods = ['date', 'now', 'replace', 'to_pydatetime',
- 'today', 'round', 'floor', 'ceil']
+ 'today', 'round', 'floor', 'ceil', 'tz_convert',
+ 'tz_localize']
_nan_methods = ['weekday', 'isoweekday', 'total_seconds']
_implemented_methods = ['to_datetime', 'to_datetime64', 'isoformat']
_implemented_methods.extend(_nat_methods)
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index ce2ed237f5559..0695fe2243947 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -129,7 +129,8 @@ def test_NaT_methods():
'timetuple', 'timetz', 'toordinal', 'tzname',
'utcfromtimestamp', 'utcnow', 'utcoffset',
'utctimetuple']
- nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today']
+ nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today',
+ 'tz_convert', 'tz_localize']
nan_methods = ['weekday', 'isoweekday']
for method in raise_methods:
| closes #15830
* add tz_convert/tz_localize methods
* add tests
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
| https://api.github.com/repos/pandas-dev/pandas/pulls/15868 | 2017-04-02T09:17:48Z | 2017-04-02T21:57:01Z | 2017-04-02T21:57:01Z | 2017-04-02T21:57:07Z |
CLN: Remove "flake8: noqa" from more files | diff --git a/pandas/_version.py b/pandas/_version.py
index d764923fd7247..4695b512feff5 100644
--- a/pandas/_version.py
+++ b/pandas/_version.py
@@ -1,4 +1,3 @@
-
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
@@ -8,8 +7,6 @@
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
-# flake8: noqa
-
import errno
import os
import re
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index ed6006440441e..81e9b7c77a81b 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -1,10 +1,6 @@
-
-# flake8: noqa
-
import warnings
import operator
from itertools import product
-from distutils.version import LooseVersion
import pytest
@@ -28,12 +24,11 @@
import pandas.computation.expr as expr
import pandas.util.testing as tm
-import pandas._libs.lib as lib
from pandas.util.testing import (assert_frame_equal, randbool,
assertRaisesRegexp, assert_numpy_array_equal,
assert_produces_warning, assert_series_equal,
slow)
-from pandas.compat import PY3, u, reduce
+from pandas.compat import PY3, reduce
_series_frame_incompatible = _bool_ops_syms
_scalar_skip = 'in', 'not in'
@@ -43,9 +38,9 @@
pytest.mark.skipif(engine == 'numexpr' and not _USE_NUMEXPR,
reason='numexpr enabled->{enabled}, '
'installed->{installed}'.format(
- enabled=_USE_NUMEXPR,
- installed=_NUMEXPR_INSTALLED))(engine)
- for engine in _engines
+ enabled=_USE_NUMEXPR,
+ installed=_NUMEXPR_INSTALLED))(engine)
+ for engine in _engines # noqa
))
def engine(request):
return request.param
@@ -66,7 +61,8 @@ def _eval_single_bin(lhs, cmp1, rhs, engine):
try:
return c(lhs, rhs)
except ValueError as e:
- if str(e).startswith('negative number cannot be raised to a fractional power'):
+ if str(e).startswith('negative number cannot be '
+ 'raised to a fractional power'):
return np.nan
raise
return c(lhs, rhs)
@@ -74,14 +70,14 @@ def _eval_single_bin(lhs, cmp1, rhs, engine):
def _series_and_2d_ndarray(lhs, rhs):
return ((isinstance(lhs, Series) and
- isinstance(rhs, np.ndarray) and rhs.ndim > 1)
- or (isinstance(rhs, Series) and
- isinstance(lhs, np.ndarray) and lhs.ndim > 1))
+ isinstance(rhs, np.ndarray) and rhs.ndim > 1) or
+ (isinstance(rhs, Series) and
+ isinstance(lhs, np.ndarray) and lhs.ndim > 1))
def _series_and_frame(lhs, rhs):
- return ((isinstance(lhs, Series) and isinstance(rhs, DataFrame))
- or (isinstance(rhs, Series) and isinstance(lhs, DataFrame)))
+ return ((isinstance(lhs, Series) and isinstance(rhs, DataFrame)) or
+ (isinstance(rhs, Series) and isinstance(lhs, DataFrame)))
def _bool_and_frame(lhs, rhs):
@@ -228,19 +224,22 @@ def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
else:
lhs_new = _eval_single_bin(lhs, cmp1, rhs, self.engine)
rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine)
- if (isinstance(lhs_new, Series) and isinstance(rhs_new, DataFrame)
- and binop in _series_frame_incompatible):
+ if (isinstance(lhs_new, Series) and
+ isinstance(rhs_new, DataFrame) and
+ binop in _series_frame_incompatible):
pass
# TODO: the code below should be added back when left and right
# hand side bool ops are fixed.
-
+ #
# try:
- # self.assertRaises(Exception, pd.eval, ex,
- #local_dict={'lhs': lhs, 'rhs': rhs},
- # engine=self.engine, parser=self.parser)
+ # self.assertRaises(Exception, pd.eval, ex,
+ # local_dict={'lhs': lhs, 'rhs': rhs},
+ # engine=self.engine, parser=self.parser)
# except AssertionError:
- #import ipdb; ipdb.set_trace()
- # raise
+ # import ipdb
+ #
+ # ipdb.set_trace()
+ # raise
else:
expected = _eval_single_bin(
lhs_new, binop, rhs_new, self.engine)
@@ -248,7 +247,6 @@ def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
self.check_equal(result, expected)
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
- skip_these = _scalar_skip
def check_operands(left, right, cmp_op):
return _eval_single_bin(left, cmp_op, right, self.engine)
@@ -334,7 +332,8 @@ def get_expected_pow_result(self, lhs, rhs):
try:
expected = _eval_single_bin(lhs, '**', rhs, self.engine)
except ValueError as e:
- if str(e).startswith('negative number cannot be raised to a fractional power'):
+ if str(e).startswith('negative number cannot be '
+ 'raised to a fractional power'):
if self.engine == 'python':
pytest.skip(str(e))
else:
@@ -650,7 +649,7 @@ def test_disallow_scalar_bool_ops(self):
exprs += '2 * x > 2 or 1 and 2',
exprs += '2 * df > 3 and 1 or a',
- x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2))
+ x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2)) # noqa
for ex in exprs:
with tm.assertRaises(NotImplementedError):
pd.eval(ex, engine=self.engine, parser=self.parser)
@@ -682,7 +681,7 @@ def test_identical(self):
tm.assert_numpy_array_equal(result, np.array([1.5]))
self.assertEqual(result.shape, (1, ))
- x = np.array([False])
+ x = np.array([False]) # noqa
result = pd.eval('x', engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([False]))
self.assertEqual(result.shape, (1, ))
@@ -792,9 +791,8 @@ def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
f = lambda *args, **kwargs: np.random.randn()
-#-------------------------------------
-# typecasting rules consistency with python
-# issue #12388
+# -------------------------------------
+# gh-12388: Typecasting rules consistency with python
class TestTypeCasting(object):
@@ -817,8 +815,8 @@ def test_binop_typecasting(self, engine, parser, op, dt):
assert_frame_equal(res, eval(s))
-#-------------------------------------
-# basic and complex alignment
+# -------------------------------------
+# Basic and complex alignment
def _is_datetime(x):
return issubclass(x.dtype.type, np.datetime64)
@@ -1064,8 +1062,8 @@ def test_performance_warning_for_poor_alignment(self, engine, parser):
tm.assert_equal(msg, expected)
-#------------------------------------
-# slightly more complex ops
+# ------------------------------------
+# Slightly more complex ops
class TestOperationsNumExprPandas(tm.TestCase):
@@ -1156,7 +1154,7 @@ def test_single_variable(self):
def test_truediv(self):
s = np.array([1])
ex = 's / 1'
- d = {'s': s}
+ d = {'s': s} # noqa
if PY3:
res = self.eval(ex, truediv=False)
@@ -1204,7 +1202,7 @@ def test_truediv(self):
self.assertEqual(res, expec)
def test_failing_subscript_with_name_error(self):
- df = DataFrame(np.random.randn(5, 3))
+ df = DataFrame(np.random.randn(5, 3)) # noqa
with tm.assertRaises(NameError):
self.eval('df[x > 2] > 2')
@@ -1501,7 +1499,7 @@ def setUpClass(cls):
cls.arith_ops)
def test_check_many_exprs(self):
- a = 1
+ a = 1 # noqa
expr = ' * '.join('a' * 33)
expected = 1
res = pd.eval(expr, engine=self.engine, parser=self.parser)
@@ -1526,13 +1524,13 @@ def test_fails_not(self):
engine=self.engine)
def test_fails_ampersand(self):
- df = DataFrame(np.random.randn(5, 3))
+ df = DataFrame(np.random.randn(5, 3)) # noqa
ex = '(df + 2)[df > 1] > 0 & (df > 0)'
with tm.assertRaises(NotImplementedError):
pd.eval(ex, parser=self.parser, engine=self.engine)
def test_fails_pipe(self):
- df = DataFrame(np.random.randn(5, 3))
+ df = DataFrame(np.random.randn(5, 3)) # noqa
ex = '(df + 2)[df > 1] > 0 | (df > 0)'
with tm.assertRaises(NotImplementedError):
pd.eval(ex, parser=self.parser, engine=self.engine)
@@ -1728,7 +1726,7 @@ def test_global_scope(self, engine, parser):
parser=parser))
def test_no_new_locals(self, engine, parser):
- x = 1
+ x = 1 # noqa
lcls = locals().copy()
pd.eval('x + 1', local_dict=lcls, engine=engine, parser=parser)
lcls2 = locals().copy()
@@ -1736,7 +1734,7 @@ def test_no_new_locals(self, engine, parser):
tm.assert_equal(lcls, lcls2)
def test_no_new_globals(self, engine, parser):
- x = 1
+ x = 1 # noqa
gbls = globals().copy()
pd.eval('x + 1', engine=engine, parser=parser)
gbls2 = globals().copy()
@@ -1787,15 +1785,16 @@ def test_name_error_exprs(engine, parser):
def test_invalid_local_variable_reference(engine, parser):
- a, b = 1, 2
+ a, b = 1, 2 # noqa
exprs = 'a + @b', '@a + b', '@a + @b'
- for expr in exprs:
+
+ for _expr in exprs:
if parser != 'pandas':
with tm.assertRaisesRegexp(SyntaxError, "The '@' prefix is only"):
- pd.eval(exprs, engine=engine, parser=parser)
+ pd.eval(_expr, engine=engine, parser=parser)
else:
with tm.assertRaisesRegexp(SyntaxError, "The '@' prefix is not"):
- pd.eval(exprs, engine=engine, parser=parser)
+ pd.eval(_expr, engine=engine, parser=parser)
def test_numexpr_builtin_raises(engine, parser):
@@ -1834,9 +1833,9 @@ def test_more_than_one_expression_raises(engine, parser):
def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
gen = {int: lambda: np.random.randint(10), float: np.random.randn}
- mid = gen[lhs]()
- lhs = gen[lhs]()
- rhs = gen[rhs]()
+ mid = gen[lhs]() # noqa
+ lhs = gen[lhs]() # noqa
+ rhs = gen[rhs]() # noqa
ex1 = 'lhs {0} mid {1} rhs'.format(cmp, cmp)
ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp, cmp)
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index ef1be7e60e0e8..0d75ba5f2bd46 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -1,8 +1,5 @@
# -*- coding: utf-8 -*-
-# TODO(wesm): fix long line flake8 issues
-# flake8: noqa
-
import pandas.util.testing as tm
from pandas.indexes.api import Index, CategoricalIndex
from .common import Base
@@ -215,7 +212,8 @@ def test_map(self):
# GH 12766: Return an index not an array
tm.assert_index_equal(ci.map(lambda x: 1),
- Index(np.array([1] * 5, dtype=np.int64), name='XXX'))
+ Index(np.array([1] * 5, dtype=np.int64),
+ name='XXX'))
# change categories dtype
ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
@@ -225,7 +223,8 @@ def f(x):
return {'A': 10, 'B': 20, 'C': 30}.get(x)
result = ci.map(f)
- exp = pd.CategoricalIndex([10, 20, 10, 20, 30], categories=[20, 10, 30],
+ exp = pd.CategoricalIndex([10, 20, 10, 20, 30],
+ categories=[20, 10, 30],
ordered=False)
tm.assert_index_equal(result, exp)
@@ -589,10 +588,10 @@ def test_string_categorical_index_repr(self):
# short
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
if PY3:
- expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""
+ expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
- expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""
+ expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
# multiple lines
@@ -601,7 +600,7 @@ def test_string_categorical_index_repr(self):
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
- categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""
+ categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
@@ -609,7 +608,7 @@ def test_string_categorical_index_repr(self):
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',
u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
- categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""
+ categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
@@ -619,7 +618,7 @@ def test_string_categorical_index_repr(self):
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
- categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)"""
+ categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa
self.assertEqual(repr(idx), expected)
else:
@@ -628,7 +627,7 @@ def test_string_categorical_index_repr(self):
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc'],
- categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)"""
+ categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)""" # noqa
self.assertEqual(unicode(idx), expected)
@@ -637,23 +636,23 @@ def test_string_categorical_index_repr(self):
if PY3:
expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'm', 'o'],
- categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')"""
+ categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',
u'k', u'l', u'm', u'm', u'o'],
- categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')"""
+ categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
- expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
+ expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
- expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
+ expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
# multiple lines
@@ -662,7 +661,7 @@ def test_string_categorical_index_repr(self):
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
- categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
+ categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
@@ -670,7 +669,7 @@ def test_string_categorical_index_repr(self):
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',
u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],
- categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
+ categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
@@ -680,7 +679,7 @@ def test_string_categorical_index_repr(self):
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
- categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""
+ categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
self.assertEqual(repr(idx), expected)
else:
@@ -689,7 +688,7 @@ def test_string_categorical_index_repr(self):
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう'],
- categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""
+ categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
self.assertEqual(unicode(idx), expected)
@@ -698,13 +697,13 @@ def test_string_categorical_index_repr(self):
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
'す', 'せ', 'そ'],
- categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""
+ categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',
u'さ', u'し', u'す', u'せ', u'そ'],
- categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""
+ categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
@@ -714,10 +713,10 @@ def test_string_categorical_index_repr(self):
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
- expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
+ expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
- expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
+ expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
# multiple lines
@@ -727,7 +726,7 @@ def test_string_categorical_index_repr(self):
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
- categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
+ categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
@@ -736,7 +735,7 @@ def test_string_categorical_index_repr(self):
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう'],
- categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
+ categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
@@ -748,7 +747,7 @@ def test_string_categorical_index_repr(self):
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう'],
- categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""
+ categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
self.assertEqual(repr(idx), expected)
else:
@@ -757,7 +756,7 @@ def test_string_categorical_index_repr(self):
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう'],
- categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""
+ categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
self.assertEqual(unicode(idx), expected)
@@ -766,13 +765,13 @@ def test_string_categorical_index_repr(self):
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
'さ', 'し', 'す', 'せ', 'そ'],
- categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""
+ categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',
u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],
- categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""
+ categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(unicode(idx), expected)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index ea2697ec19df3..63c1ae70e35a6 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -10,7 +10,6 @@
from pandas.types.dtypes import CategoricalDtype
from pandas.types.common import (is_categorical_dtype,
- is_object_dtype,
is_float_dtype,
is_integer_dtype)
@@ -25,9 +24,6 @@
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
-# GH 12066
-# flake8: noqa
-
class TestCategorical(tm.TestCase):
@@ -291,7 +287,6 @@ def test_constructor_with_null(self):
pd.Categorical(DatetimeIndex(['nat', '20160101']),
categories=[NaT, Timestamp('20160101')])
-
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values, Categorical(ci))
@@ -710,8 +705,7 @@ def test_unicode_print(self):
self.assertEqual(_rep(c), expected)
- c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
- * 20)
+ c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
@@ -723,8 +717,7 @@ def test_unicode_print(self):
# the repr width
with option_context('display.unicode.east_asian_width', True):
- c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
- * 20)
+ c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
@@ -1279,7 +1272,8 @@ def test_mode(self):
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
- exp = Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1], ordered=True)
+ exp = Categorical([5, 4, 3, 2, 1],
+ categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
@@ -2233,7 +2227,7 @@ def test_categorical_repr_datetime_ordered(self):
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]"""
+ 2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2242,14 +2236,14 @@ def test_categorical_repr_period(self):
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]"""
+ 2011-01-01 13:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]"""
+ 2011-01-01 13:00]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2262,7 +2256,7 @@ def test_categorical_repr_period(self):
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
+Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2271,14 +2265,14 @@ def test_categorical_repr_period_ordered(self):
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]"""
+ 2011-01-01 13:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]"""
+ 2011-01-01 13:00]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2291,7 +2285,7 @@ def test_categorical_repr_period_ordered(self):
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
+Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2305,7 +2299,7 @@ def test_categorical_repr_timedelta(self):
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2315,7 +2309,7 @@ def test_categorical_repr_timedelta(self):
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
- 18 days 01:00:00, 19 days 01:00:00]"""
+ 18 days 01:00:00, 19 days 01:00:00]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2324,7 +2318,7 @@ def test_categorical_repr_timedelta(self):
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
- 18 days 01:00:00, 19 days 01:00:00]"""
+ 18 days 01:00:00, 19 days 01:00:00]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2332,13 +2326,13 @@ def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2348,7 +2342,7 @@ def test_categorical_repr_timedelta_ordered(self):
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
- 18 days 01:00:00 < 19 days 01:00:00]"""
+ 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2357,7 +2351,7 @@ def test_categorical_repr_timedelta_ordered(self):
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
- 18 days 01:00:00 < 19 days 01:00:00]"""
+ 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
self.assertEqual(repr(c), exp)
@@ -2423,7 +2417,7 @@ def test_categorical_series_repr_datetime(self):
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
- 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
+ 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2438,7 +2432,7 @@ def test_categorical_series_repr_datetime(self):
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
- 2011-01-01 13:00:00-05:00]"""
+ 2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2452,7 +2446,7 @@ def test_categorical_series_repr_datetime_ordered(self):
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
- 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2467,7 +2461,7 @@ def test_categorical_series_repr_datetime_ordered(self):
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]"""
+ 2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2481,7 +2475,7 @@ def test_categorical_series_repr_period(self):
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]"""
+ 2011-01-01 13:00]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2507,7 +2501,7 @@ def test_categorical_series_repr_period_ordered(self):
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]"""
+ 2011-01-01 13:00]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2551,7 +2545,7 @@ def test_categorical_series_repr_timedelta(self):
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
- 8 days 01:00:00, 9 days 01:00:00]"""
+ 8 days 01:00:00, 9 days 01:00:00]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2564,7 +2558,7 @@ def test_categorical_series_repr_timedelta_ordered(self):
3 4 days
4 5 days
dtype: category
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
self.assertEqual(repr(s), exp)
@@ -2583,26 +2577,26 @@ def test_categorical_series_repr_timedelta_ordered(self):
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
- 8 days 01:00:00 < 9 days 01:00:00]"""
+ 8 days 01:00:00 < 9 days 01:00:00]""" # noqa
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
- exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
- exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
- exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
- exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
@@ -2611,7 +2605,7 @@ def test_categorical_index_repr_datetime(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
- categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
@@ -2621,7 +2615,7 @@ def test_categorical_index_repr_datetime(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
@@ -2631,7 +2625,7 @@ def test_categorical_index_repr_datetime_ordered(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
- categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
@@ -2641,7 +2635,7 @@ def test_categorical_index_repr_datetime_ordered(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
@@ -2651,7 +2645,7 @@ def test_categorical_index_repr_datetime_ordered(self):
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
@@ -2659,24 +2653,24 @@ def test_categorical_index_repr_period(self):
# test all length
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
+ exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
@@ -2685,13 +2679,13 @@ def test_categorical_index_repr_period(self):
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
- exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
@@ -2699,19 +2693,19 @@ def test_categorical_index_repr_period_ordered(self):
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
- exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
@@ -2720,14 +2714,14 @@ def test_categorical_index_repr_timedelta(self):
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
- categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
@@ -2736,7 +2730,7 @@ def test_categorical_index_repr_timedelta_ordered(self):
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
- categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa
self.assertEqual(repr(i), exp)
@@ -2833,7 +2827,8 @@ def test_mode(self):
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
- exp = Series(Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1], ordered=True))
+ exp = Series(Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1],
+ ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
@@ -4275,10 +4270,10 @@ def test_str_accessor_api_for_categorical(self):
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
- str_func_names = [f
- for f in dir(s.str)
- if not (f.startswith("_") or f in _special_func_names
- or f in _ignore_names)]
+ str_func_names = [f for f in dir(s.str) if not (
+ f.startswith("_") or
+ f in _special_func_names or
+ f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
@@ -4418,10 +4413,3 @@ def test_map(self):
self.assertIsInstance(res, tm.SubclassedCategorical)
exp = Categorical(['A', 'B', 'C'])
tm.assert_categorical_equal(res, exp)
-
- def test_map(self):
- sc = tm.SubclassedCategorical(['a', 'b', 'c'])
- res = sc.map(lambda x: x.upper())
- self.assertIsInstance(res, tm.SubclassedCategorical)
- exp = Categorical(['A', 'B', 'C'])
- tm.assert_categorical_equal(res, exp)
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index 7940efc7e1b59..955edce2591e6 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -4,8 +4,7 @@
import numpy as np
-from pandas.types.common import (_NS_DTYPE, _TD_DTYPE,
- is_period_arraylike,
+from pandas.types.common import (is_period_arraylike,
is_datetime_arraylike, is_integer_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_categorical_dtype,
@@ -13,7 +12,7 @@
from pandas.core.base import PandasDelegate, NoNewAttributesMixin
from pandas.tseries.index import DatetimeIndex
-from pandas._libs.period import IncompatibleFrequency # flake8: noqa
+from pandas._libs.period import IncompatibleFrequency # noqa
from pandas.tseries.period import PeriodIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.core.algorithms import take_1d
@@ -162,6 +161,7 @@ class DatetimeProperties(Properties):
def to_pydatetime(self):
return self.values.to_pydatetime()
+
DatetimeProperties._add_delegate_accessors(
delegate=DatetimeIndex,
accessors=DatetimeIndex._datetimelike_ops,
@@ -201,6 +201,7 @@ def components(self):
"""
return self.values.components.set_index(self.index)
+
TimedeltaProperties._add_delegate_accessors(
delegate=TimedeltaIndex,
accessors=TimedeltaIndex._datetimelike_ops,
@@ -225,6 +226,7 @@ class PeriodProperties(Properties):
Raises TypeError if the Series does not contain datetimelike values.
"""
+
PeriodProperties._add_delegate_accessors(
delegate=PeriodIndex,
accessors=PeriodIndex._datetimelike_ops,
| Another round of house-cleaning that builds off #15842.
xref <a href="https://github.com/pandas-dev/pandas/issues/12066#issuecomment-172285473">#12066 (comment)</a> : the issue remains unresolved, but it does not seem entirely necessary to disable style-checking on the entire file for that IMO. | https://api.github.com/repos/pandas-dev/pandas/pulls/15867 | 2017-04-02T09:04:51Z | 2017-04-02T22:01:42Z | null | 2017-04-03T00:13:21Z |
CLN: [WIP] trial pull-request to make sure everything is in order before proceeding (GH14468) | diff --git a/.travis.yml b/.travis.yml
index f0ece15de65db..9cfd937ac5f1e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -50,7 +50,7 @@ matrix:
- python-gtk2
- os: linux
env:
- - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" COVERAGE=true
+ - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" COVERAGE=true TYPING=true
addons:
apt:
packages:
@@ -116,6 +116,7 @@ script:
- ci/script_single.sh
- ci/script_multi.sh
- ci/lint.sh
+ - ci/typing.sh
- echo "script done"
after_success:
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 8cf6f2ce636da..c5480a2bc39e0 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -114,6 +114,10 @@ if [ "$LINT" ]; then
pip install cpplint
fi
+if [ "$TYPING" ]; then
+ pip install mypy-lang
+fi
+
if [ "$COVERAGE" ]; then
pip install coverage pytest-cov
fi
diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip
index eb796368e7820..0dedc5d7ade67 100644
--- a/ci/requirements-2.7.pip
+++ b/ci/requirements-2.7.pip
@@ -6,3 +6,4 @@ py
PyCrypto
mock
ipython
+typing
diff --git a/ci/typing.sh b/ci/typing.sh
new file mode 100755
index 0000000000000..1cb151a600d48
--- /dev/null
+++ b/ci/typing.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+echo "inside $0"
+
+source activate pandas
+
+RET=0
+
+echo "Typing *.py"
+mypy \
+ pandas/core/base.py
+if [ $? -ne "0" ]; then
+ RET=1
+fi
+echo "Typing *.py DONE"
+
+
+exit $RET
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index aacfe25b91564..f860e94d3513b 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -494,8 +494,8 @@ many errors as possible, but it may not correct *all* of them. Thus, it is
recommended that you run ``cpplint`` to double check and make any other style
fixes manually.
-Python (PEP8)
-~~~~~~~~~~~~~
+Python (PEP8 and mypy)
+~~~~~~~~~~~~~~~~~~~~~~
*pandas* uses the `PEP8 <http://www.python.org/dev/peps/pep-0008/>`_ standard.
There are several tools to ensure you abide by this standard. Here are *some* of
@@ -525,6 +525,16 @@ run this slightly modified command::
git diff master --name-only -- '*.py' | grep 'pandas/' | xargs flake8
+Pandas is gradually introducing static type annotations to the code base with
+`mypy <http://mypy.readthedocs.io/>`_. To run the checker, you'll need to install
+``mypy`` (``pip install mypy``) and run::
+
+ sh ci/typing.sh
+
+or on individual files with::
+
+ mypy path/to/module.py
+
Backwards Compatibility
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pandas/core/base.py b/pandas/core/base.py
index a3ef24c80f883..2c40941b3b185 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -7,7 +7,9 @@
import numpy as np
from pandas.core.dtypes.missing import isnull
-from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
+from pandas.core.dtypes.generic import (
+ ABCDataFrame, ABCSeries, ABCIndexClass, ABCPandasObject,
+ ABCSelectionMixin, ABCIndexOpsMixin, ABCGroupbyMixin)
from pandas.core.dtypes.common import is_object_dtype, is_list_like, is_scalar
from pandas.util._validators import validate_bool_kwarg
@@ -19,9 +21,26 @@
deprecate_kwarg, Substitution)
from pandas.core.common import AbstractMethodError
-_shared_docs = dict()
-_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
- unique='IndexOpsMixin', duplicated='IndexOpsMixin')
+try:
+ from pandas.types.hinting import ( # noqa
+ typing,
+ Any, Callable, Text, Optional, Union,
+ Tuple, Dict,
+ ArrayLike, Scalar, PythonScalar, Buffer,
+ SelectionKey, SelectionFunction
+ )
+except ImportError:
+ pass
+
+MYPY = False
+if MYPY:
+ from pandas import Series, Index # noqa
+
+
+_shared_docs = dict() # type: Dict[str, str]
+_indexops_doc_kwargs = dict(
+ klass='IndexOpsMixin', inplace='',
+ unique='IndexOpsMixin', duplicated='IndexOpsMixin') # type: Dict[str, str]
class StringMixin(object):
@@ -37,21 +56,23 @@ class StringMixin(object):
# Formatting
def __unicode__(self):
+ # type: () -> Text
raise AbstractMethodError(self)
def __str__(self):
+ # type: () -> Text
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
-
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
+ # type: () -> bytes
"""
Return a string representation for a particular object.
@@ -64,6 +85,7 @@ def __bytes__(self):
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
+ # type: () -> str
"""
Return a string representation for a particular object.
@@ -72,16 +94,18 @@ def __repr__(self):
return str(self)
-class PandasObject(StringMixin):
+class PandasObject(StringMixin, ABCPandasObject):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
+ # type: () -> Any
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
+ # type: () -> Text
"""
Return a string representation for a particular object.
@@ -92,14 +116,17 @@ def __unicode__(self):
return object.__repr__(self)
def _dir_additions(self):
+ # type: () -> typing.Set[str]
""" add addtional __dir__ for this object """
return set()
def _dir_deletions(self):
+ # type: () -> typing.Set[str]
""" delete unwanted __dir__ for this object """
return set()
def __dir__(self):
+ # type: () -> typing.List[str]
"""
Provide method name lookup and completion
Only provide 'public' methods
@@ -109,6 +136,7 @@ def __dir__(self):
return sorted(rv)
def _reset_cache(self, key=None):
+ # type: (Optional[str]) -> None
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
@@ -120,11 +148,13 @@ def _reset_cache(self, key=None):
self._cache.pop(key, None)
def __sizeof__(self):
+ # type: () -> int
+ # might have to do these lower down...
"""
Generates the total memory usage for a object that returns
either a value or Series of values
"""
- if hasattr(self, 'memory_usage'):
+ if getattr(self, 'memory_usage', None) is not None:
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
@@ -147,11 +177,13 @@ class NoNewAttributesMixin(object):
"""
def _freeze(self):
+ # type: () -> None
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
+ # type: (Text, Any) -> None
# _cache is used by a decorator
# dict lookup instead of getattr as getattr is false for getter
# which error
@@ -166,18 +198,22 @@ class PandasDelegate(PandasObject):
""" an abstract base class for delegating methods/properties """
def _delegate_property_get(self, name, *args, **kwargs):
+ # type: (str, *Any, **Any) -> None
raise TypeError("You cannot access the "
"property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
+ # type: (str, Any, *Any, **Any) -> None
raise TypeError("The property {name} cannot be set".format(name=name))
def _delegate_method(self, name, *args, **kwargs):
+ # type: (str, *Any, **Any) -> None
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
def _add_delegate_accessors(cls, delegate, accessors, typ,
overwrite=False):
+ # type: (Any, Any, typing.List[str], str, bool) -> None
"""
add accessors to cls from the delegate class
@@ -192,11 +228,15 @@ def _add_delegate_accessors(cls, delegate, accessors, typ,
"""
def _create_delegator_property(name):
+ # type: (str) -> Any
+ # See https://github.com/python/mypy/issues/220? for properties
def _getter(self):
+ # type: () -> Any
return self._delegate_property_get(name)
def _setter(self, new_values):
+ # type: (Any) -> Any
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
@@ -206,8 +246,10 @@ def _setter(self, new_values):
doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
+ # type: (str) -> Any
def f(self, *args, **kwargs):
+ # type: (*Any, **Any) -> Any
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
@@ -232,20 +274,24 @@ class AccessorProperty(object):
"""
def __init__(self, accessor_cls, construct_accessor):
+ # type: (Any, Any) -> None
self.accessor_cls = accessor_cls
self.construct_accessor = construct_accessor
self.__doc__ = accessor_cls.__doc__
def __get__(self, instance, owner=None):
+ # type: (Any, Optional[Any]) -> Any
if instance is None:
# this ensures that Series.str.<method> is well defined
return self.accessor_cls
return self.construct_accessor(instance)
def __set__(self, instance, value):
+ # type: (Any, Any) -> None
raise AttributeError("can't set attribute")
def __delete__(self, instance):
+ # type: (Any, Any) -> None
raise AttributeError("can't delete attribute")
@@ -261,12 +307,11 @@ class SpecificationError(GroupByError):
pass
-class SelectionMixin(object):
+class SelectionMixin(ABCSelectionMixin):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
- _selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
@@ -292,6 +337,8 @@ class SelectionMixin(object):
@property
def _selection_name(self):
+ # type: () -> str
+ # TODO: can this be a list?
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
@@ -304,6 +351,7 @@ def _selection_name(self):
@property
def _selection_list(self):
+ # type: () -> typing.List[str]
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
@@ -311,6 +359,8 @@ def _selection_list(self):
@cache_readonly
def _selected_obj(self):
+ # type: () -> PandasObject
+ # TODO: should this be NDFrame?
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
@@ -319,10 +369,12 @@ def _selected_obj(self):
@cache_readonly
def ndim(self):
+ # type: () -> int
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
+ # type: () -> PandasObject
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
@@ -333,6 +385,8 @@ def _obj_with_exclusions(self):
return self.obj
def __getitem__(self, key):
+ # type: (SelectionKey) -> Any
+ # TODO: This could be a Groupby, _Window, anything else?
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
@@ -355,6 +409,8 @@ def __getitem__(self, key):
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
+ # type: (SelectionKey, int, Any) -> Any
+ # TODO: stricter subset
"""
sub-classes to define
return a sliced object
@@ -371,11 +427,13 @@ def _gotitem(self, key, ndim, subset=None):
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
+ # type: (SelectionFunction, *Any, **Any) -> Any
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
+ # type: (str, *Any, **Any) -> Any
"""
if arg is a string, then try to operate on it:
- try to find a function on ourselves
@@ -396,6 +454,7 @@ def _try_aggregate_string_function(self, arg, *args, **kwargs):
raise ValueError("{} is an unknown string function".format(arg))
def _aggregate(self, arg, *args, **kwargs):
+ # type: (SelectionFunction, *Any, **Any) -> Tuple[Any, Optional[str]]
"""
provide an implementation for the aggregators
@@ -435,6 +494,7 @@ def _aggregate(self, arg, *args, **kwargs):
obj = self._selected_obj
def nested_renaming_depr(level=4):
+ # type: (int) -> None
# deprecation of nested renaming
# GH 15931
warnings.warn(
@@ -489,6 +549,7 @@ def nested_renaming_depr(level=4):
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
+ # type: (str, str, Optional[Any]) -> Any
"""
aggregate a 1-dim with how
"""
@@ -499,6 +560,7 @@ def _agg_1dim(name, how, subset=None):
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
+ # type: (str, str) -> Any
"""
aggregate a 2-dim with how
"""
@@ -507,6 +569,7 @@ def _agg_2dim(name, how):
return colg.aggregate(how, _level=None)
def _agg(arg, func):
+ # type: (Dict[str, str], Callable) -> compat.OrderedDict
"""
run the aggregations over the arg with func
return an OrderedDict
@@ -572,11 +635,13 @@ def _agg(arg, func):
# combine results
def is_any_series():
+ # type: () -> bool
# return a boolean if we have *any* nested series
return any([isinstance(r, ABCSeries)
for r in compat.itervalues(result)])
def is_any_frame():
+ # type: () -> bool
# return a boolean if we have *any* nested series
return any([isinstance(r, ABCDataFrame)
for r in compat.itervalues(result)])
@@ -609,7 +674,7 @@ def is_any_frame():
return result, True
# fall thru
- from pandas import DataFrame, Series
+ from pandas import DataFrame, Series # noqa
try:
result = DataFrame(result)
except ValueError:
@@ -635,6 +700,8 @@ def is_any_frame():
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
+ # type: (Any, Any, Any) -> Any
+ # TODO: typecheck
from pandas.core.reshape.concat import concat
if _axis != 0:
@@ -690,7 +757,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis):
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
- from pandas import Series
+ from pandas import Series # noqa
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
@@ -698,6 +765,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis):
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
+ # type: (PandasObject, Callable, **Any) -> PandasObject
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
@@ -711,10 +779,12 @@ def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
+ # type: (Callable) -> str
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
+ # type: (Callable) -> Callable
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
@@ -722,15 +792,17 @@ def _is_builtin_func(self, arg):
return self._builtin_table.get(arg, arg)
-class GroupByMixin(object):
+class GroupByMixin(ABCGroupbyMixin):
""" provide the groupby facilities to the mixed object """
@staticmethod
def _dispatch(name, *args, **kwargs):
+ # type: (str, *Any, **Any) -> Callable
""" dispatch to apply """
def outer(self, *args, **kwargs):
- def f(x):
+ # type: (*Any, **Any) -> Callable
+ def f(x): # type: (PandasObject) -> Any
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
@@ -738,6 +810,7 @@ def f(x):
return outer
def _gotitem(self, key, ndim, subset=None):
+ # type: (Union[str, list], int, Any) -> GroupByMixin
"""
sub-classes to define
return a sliced object
@@ -770,7 +843,7 @@ def _gotitem(self, key, ndim, subset=None):
return self
-class IndexOpsMixin(object):
+class IndexOpsMixin(ABCIndexOpsMixin):
""" common ops mixin to support a unified inteface / docs for Series /
Index
"""
@@ -779,6 +852,7 @@ class IndexOpsMixin(object):
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
+ # type: (*int, **int) -> IndexOpsMixin
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
@@ -788,17 +862,20 @@ def transpose(self, *args, **kwargs):
@property
def shape(self):
+ # type: () -> Tuple
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
+ # type: () -> int
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
+ # type: () -> PythonScalar
""" return the first element of the underlying data as a python
scalar
"""
@@ -811,36 +888,43 @@ def item(self):
@property
def data(self):
+ # type: () -> Buffer
""" return the data pointer of the underlying data """
return self.values.data
@property
def itemsize(self):
+ # type: () -> int
""" return the size of the dtype of the item of the underlying data """
return self._values.itemsize
@property
def nbytes(self):
+ # type: () -> int
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
+ # type: () -> Tuple[int]
""" return the strides of the underlying data """
return self._values.strides
@property
def size(self):
+ # type: () -> int
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
+ # type: () -> np.core.multiarray.flagsobj
""" return the ndarray.flags for the underlying data """
return self.values.flags
@property
def base(self):
+ # type: () -> Union[object, None]
""" return the base object if the memory of the underlying data is
shared
"""
@@ -848,18 +932,22 @@ def base(self):
@property
def _values(self):
+ # type: () -> np.ndarray
""" the internal implementation """
return self.values
@property
def empty(self):
+ # type: () -> bool
return not self.size
def max(self):
+ # type: () -> Scalar
""" The maximum value of the object """
return nanops.nanmax(self.values)
def argmax(self, axis=None):
+ # type: (int) -> np.ndarray
"""
return a ndarray of the maximum argument indexer
@@ -870,10 +958,12 @@ def argmax(self, axis=None):
return nanops.nanargmax(self.values)
def min(self):
+ # type: () -> Scalar
""" The minimum value of the object """
return nanops.nanmin(self.values)
def argmin(self, axis=None):
+ # type: (int) -> np.ndarray
"""
return a ndarray of the minimum argument indexer
@@ -885,11 +975,19 @@ def argmin(self, axis=None):
@cache_readonly
def hasnans(self):
+ # type: () -> bool
""" return if I have any nans; enables various perf speedups """
return isnull(self).any()
- def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
- filter_type=None, **kwds):
+ def _reduce(self,
+ op, # type: Callable
+ name, # type: str
+ axis=0, # type: int
+ skipna=True, # type: bool
+ numeric_only=None, # type: Optional[bool]
+ filter_type=None, # type: Optional[Any]
+ **kwds
+ ): # type: (...) -> Callable
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
@@ -897,8 +995,13 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
klass=self.__class__.__name__, op=name))
return func(**kwds)
- def value_counts(self, normalize=False, sort=True, ascending=False,
- bins=None, dropna=True):
+ def value_counts(self,
+ normalize=False, # type: bool
+ sort=True, # type: bool
+ ascending=False, # type: bool
+ bins=None, # type: Optional[int]
+ dropna=True # type: bool
+ ): # type: (...) -> 'Series'
"""
Returns object containing counts of unique values.
@@ -955,6 +1058,8 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
@Appender(_shared_docs['unique'] % _indexops_doc_kwargs)
def unique(self):
+ # type: () -> np.ndarray
+
values = self._values
if hasattr(values, 'unique'):
@@ -966,6 +1071,7 @@ def unique(self):
return result
def nunique(self, dropna=True):
+ # type: (bool) -> int
"""
Return number of unique elements in the object.
@@ -988,6 +1094,7 @@ def nunique(self, dropna=True):
@property
def is_unique(self):
+ # type: () -> bool
"""
Return boolean if values in the object are unique
@@ -999,6 +1106,7 @@ def is_unique(self):
@property
def is_monotonic(self):
+ # type: () -> bool
"""
Return boolean if values in the object are
monotonic_increasing
@@ -1009,13 +1117,14 @@ def is_monotonic(self):
-------
is_monotonic : boolean
"""
- from pandas import Index
+ from pandas import Index # noqa
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
+ # type: () -> bool
"""
Return boolean if values in the object are
monotonic_decreasing
@@ -1026,10 +1135,11 @@ def is_monotonic_decreasing(self):
-------
is_monotonic_decreasing : boolean
"""
- from pandas import Index
+ from pandas import Index # noqa
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
+ # type: (bool) -> int
"""
Memory usage of my values
@@ -1062,6 +1172,7 @@ def memory_usage(self, deep=False):
return v
def factorize(self, sort=False, na_sentinel=-1):
+ # type: (bool, int) -> Tuple[np.ndarray, 'Index']
"""
Encode the object as an enumerated type or categorical variable
@@ -1155,6 +1266,8 @@ def factorize(self, sort=False, na_sentinel=-1):
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
+ # type: (ArrayLike, str, Optional[ArrayLike]) -> np.ndarray
+
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
@@ -1177,6 +1290,8 @@ def searchsorted(self, value, side='left', sorter=None):
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
+ # type: (str, bool) -> IndexOpsMixin
+
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
@@ -1208,6 +1323,8 @@ def drop_duplicates(self, keep='first', inplace=False):
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
+ # type: (str) -> Union[np.ndarray, IndexOpsMixin]
+
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
@@ -1221,4 +1338,5 @@ def duplicated(self, keep='first'):
# abstracts
def _update_inplace(self, result, **kwargs):
+ # type: (Any, **Any) -> Any
raise AbstractMethodError(self)
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 90608c18ae503..c065ee3da6513 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -53,6 +53,19 @@ def _check(cls, inst):
("categorical"))
ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", ))
+# For mypy
+ABCPandasObject = create_pandas_abc_type("ABCPandasObject", "_typ", ("_cache"))
+ABCSelectionMixin = create_pandas_abc_type("ABCSelectionMixin", "_type",
+ ("_selection", "_selection_name",
+ "obj", "exclusions", "name",
+ "_constructor", "_attributes"))
+ABCGroupbyMixin = create_pandas_abc_type("ABCGroupbyMixin", "_type",
+ ("obj", "_attributes", "_groupby",
+ "_reset_cache"))
+ABCIndexOpsMixin = create_pandas_abc_type("ABCIndexOpsMixin", "_type",
+ ("values", "_shallow_copy",
+ "_constructor", "index"))
+
class _ABCGeneric(type):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6ec163bbaa73d..e6b3623dffd0c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -76,6 +76,7 @@
from pandas._libs import index as libindex, tslib as libts, lib, iNaT
from pandas.core.config import get_option
+
__all__ = ['Series']
_shared_doc_kwargs = dict(
diff --git a/pandas/types/hinting.py b/pandas/types/hinting.py
new file mode 100644
index 0000000000000..b3a6982de0acd
--- /dev/null
+++ b/pandas/types/hinting.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+
+import typing # noqa
+from typing import ( # noqa
+ TypeVar, AnyStr, Any, Callable, Optional, Tuple, Union,
+ Dict, Text, Iterable
+)
+
+from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
+
+Buffer = Any
+ArrayLike = TypeVar('ArrayLike', Buffer, list, dict, np.array)
+Scalar = TypeVar('Scalar', int, float)
+PythonScalar = TypeVar('PythonScalar', int, float, AnyStr)
+
+SelectionKey = Union[str, list, tuple, ABCSeries, ABCIndexClass, np.ndarray]
+
+# An argument to `.agg/.transform/.apply`
+SelectionFunction = Union[str, Callable]
diff --git a/setup.cfg b/setup.cfg
index 8b32f0f62fe28..58b4745e87f55 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,3 +27,9 @@ split_penalty_logical_operator = 30
testpaths = pandas
markers =
single: mark a test as single cpu only
+
+[mypy]
+disallow_untyped_defs = True
+python_version = 2.7
+ignore_missing_imports = True
+follow_imports = skip
\ No newline at end of file
| The most uncertain aspect is how to annotate "array-like." numpy doesn't even have a clear definition or recommendation. see the commit message for 60783c7 and numpy/numpy/issues/7370 for more details.
For now `pandas.types.hinting` are essentially just place holders for complex types that need to be defined.
Any feed back is appreciated | https://api.github.com/repos/pandas-dev/pandas/pulls/15866 | 2017-04-02T05:49:52Z | 2017-06-21T00:58:29Z | null | 2017-06-21T00:58:30Z |
BUG: Fix rollover handling in json encoding | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index a34b9feb2b2fa..be9b52e1051f1 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -988,6 +988,7 @@ I/O
- Bug in ``pd.read_hdf()`` passing a ``Timestamp`` to the ``where`` parameter with a non date column (:issue:`15492`)
- Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`)
- Bug in ``StataReader`` and ``StataWriter`` which allows invalid encodings (:issue:`15723`)
+- Bug in ``pd.to_json()`` for the C engine where rollover was not correctly handled for case where frac is odd and diff is exactly 0.5 (:issue:`15716`, :issue:`15864`)
Plotting
^^^^^^^^
diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
index 5a15071938c1a..6bf2297749006 100644
--- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c
+++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
@@ -823,17 +823,19 @@ int Buffer_AppendDoubleUnchecked(JSOBJ obj, JSONObjectEncoder *enc,
if (diff > 0.5) {
++frac;
- /* handle rollover, e.g. case 0.99 with prec 1 is 1.0 */
- if (frac >= pow10) {
- frac = 0;
- ++whole;
- }
} else if (diff == 0.5 && ((frac == 0) || (frac & 1))) {
/* if halfway, round up if odd, OR
if last digit is 0. That last part is strange */
++frac;
}
+ // handle rollover, e.g.
+ // case 0.99 with prec 1 is 1.0 and case 0.95 with prec is 1.0 as well
+ if (frac >= pow10) {
+ frac = 0;
+ ++whole;
+ }
+
if (enc->doublePrecision == 0) {
diff = value - whole;
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 7dbcf25c60b45..8fc8ecbdf8abc 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -380,6 +380,31 @@ def test_frame_from_json_nones(self):
unser = read_json(df.to_json(), dtype=False)
self.assertTrue(np.isnan(unser[2][0]))
+ def test_frame_to_json_float_precision(self):
+ df = pd.DataFrame([dict(a_float=0.95)])
+ encoded = df.to_json(double_precision=1)
+ self.assertEqual(encoded, '{"a_float":{"0":1.0}}')
+
+ df = pd.DataFrame([dict(a_float=1.95)])
+ encoded = df.to_json(double_precision=1)
+ self.assertEqual(encoded, '{"a_float":{"0":2.0}}')
+
+ df = pd.DataFrame([dict(a_float=-1.95)])
+ encoded = df.to_json(double_precision=1)
+ self.assertEqual(encoded, '{"a_float":{"0":-2.0}}')
+
+ df = pd.DataFrame([dict(a_float=0.995)])
+ encoded = df.to_json(double_precision=2)
+ self.assertEqual(encoded, '{"a_float":{"0":1.0}}')
+
+ df = pd.DataFrame([dict(a_float=0.9995)])
+ encoded = df.to_json(double_precision=3)
+ self.assertEqual(encoded, '{"a_float":{"0":1.0}}')
+
+ df = pd.DataFrame([dict(a_float=0.99999999999999944)])
+ encoded = df.to_json(double_precision=15)
+ self.assertEqual(encoded, '{"a_float":{"0":1.0}}')
+
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
self.assertRaises(ValueError, df.to_json, orient="garbage")
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index e66721beed288..c2cbbe1ca65ab 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -43,6 +43,48 @@ def test_encodeDecimal(self):
decoded = ujson.decode(encoded)
self.assertEqual(decoded, 1337.1337)
+ sut = decimal.Decimal("0.95")
+ encoded = ujson.encode(sut, double_precision=1)
+ self.assertEqual(encoded, "1.0")
+ decoded = ujson.decode(encoded)
+ self.assertEqual(decoded, 1.0)
+
+ sut = decimal.Decimal("0.94")
+ encoded = ujson.encode(sut, double_precision=1)
+ self.assertEqual(encoded, "0.9")
+ decoded = ujson.decode(encoded)
+ self.assertEqual(decoded, 0.9)
+
+ sut = decimal.Decimal("1.95")
+ encoded = ujson.encode(sut, double_precision=1)
+ self.assertEqual(encoded, "2.0")
+ decoded = ujson.decode(encoded)
+ self.assertEqual(decoded, 2.0)
+
+ sut = decimal.Decimal("-1.95")
+ encoded = ujson.encode(sut, double_precision=1)
+ self.assertEqual(encoded, "-2.0")
+ decoded = ujson.decode(encoded)
+ self.assertEqual(decoded, -2.0)
+
+ sut = decimal.Decimal("0.995")
+ encoded = ujson.encode(sut, double_precision=2)
+ self.assertEqual(encoded, "1.0")
+ decoded = ujson.decode(encoded)
+ self.assertEqual(decoded, 1.0)
+
+ sut = decimal.Decimal("0.9995")
+ encoded = ujson.encode(sut, double_precision=3)
+ self.assertEqual(encoded, "1.0")
+ decoded = ujson.decode(encoded)
+ self.assertEqual(decoded, 1.0)
+
+ sut = decimal.Decimal("0.99999999999999944")
+ encoded = ujson.encode(sut, double_precision=15)
+ self.assertEqual(encoded, "1.0")
+ decoded = ujson.decode(encoded)
+ self.assertEqual(decoded, 1.0)
+
def test_encodeStringConversion(self):
input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
| This is a fix attempt for issue #15716 as well as #15864.
Note that whenever the frac is incremented, there is a chance that its
value may hit the value of pow10. | https://api.github.com/repos/pandas-dev/pandas/pulls/15865 | 2017-04-02T05:04:30Z | 2017-04-03T12:43:26Z | null | 2017-04-06T15:02:49Z |
BUG: Check integrity of sparse int indices | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index a34b9feb2b2fa..e397365dc8d50 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -944,6 +944,7 @@ Indexing
^^^^^^^^
- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
+- Bug in sparse array indexing in which indices were not being validated (:issue:`15863`)
- Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`)
- Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`)
- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a ``Series`` indexer (:issue:`14730`, :issue:`15424`)
diff --git a/pandas/sparse/sparse.pyx b/pandas/sparse/sparse.pyx
index 00d317c42b18d..0c2e056ead7fa 100644
--- a/pandas/sparse/sparse.pyx
+++ b/pandas/sparse/sparse.pyx
@@ -34,8 +34,9 @@ cdef inline int int_min(int a, int b): return a if a <= b else b
cdef class SparseIndex:
"""
- Abstract superclass for sparse index types
+ Abstract superclass for sparse index types.
"""
+
def __init__(self):
raise NotImplementedError
@@ -48,8 +49,9 @@ cdef class IntIndex(SparseIndex):
----------
length : integer
indices : array-like
- Contains integers corresponding to
+ Contains integers corresponding to the indices.
"""
+
cdef readonly:
Py_ssize_t length, npoints
ndarray indices
@@ -59,9 +61,11 @@ cdef class IntIndex(SparseIndex):
self.indices = np.ascontiguousarray(indices, dtype=np.int32)
self.npoints = len(self.indices)
+ self.check_integrity()
+
def __reduce__(self):
args = (self.length, self.indices)
- return (IntIndex, args)
+ return IntIndex, args
def __repr__(self):
output = 'IntIndex\n'
@@ -70,10 +74,40 @@ cdef class IntIndex(SparseIndex):
def check_integrity(self):
"""
- Only need be strictly ascending and nothing less than 0 or greater than
- total length
+ Checks the following:
+
+ - Indices are strictly ascending
+ - Number of indices is at most self.length
+ - Indices are at least 0 and at most the total length less one
+
+ A ValueError is raised if any of these conditions is violated.
"""
- pass
+
+ cdef:
+ int32_t index, prev = -1
+
+ if self.npoints > self.length:
+ msg = ("Too many indices. Expected "
+ "{exp} but found {act}").format(
+ exp=self.length, act=self.npoints)
+ raise ValueError(msg)
+
+ # Indices are vacuously ordered and non-negative
+ # if the sequence of indices is empty.
+ if self.npoints == 0:
+ return
+
+ if min(self.indices) < 0:
+ raise ValueError("No index can be less than zero")
+
+ if max(self.indices) >= self.length:
+ raise ValueError("All indices must be less than the length")
+
+ for index in self.indices:
+ if prev != -1 and index <= prev:
+ raise ValueError("Indices must be strictly increasing")
+
+ prev = index
def equals(self, other):
if not isinstance(other, IntIndex):
@@ -320,7 +354,7 @@ cdef class BlockIndex(SparseIndex):
def __reduce__(self):
args = (self.length, self.blocs, self.blengths)
- return (BlockIndex, args)
+ return BlockIndex, args
def __repr__(self):
output = 'BlockIndex\n'
diff --git a/pandas/tests/sparse/test_libsparse.py b/pandas/tests/sparse/test_libsparse.py
index b6ab99dc66cda..696d2cf47f4c0 100644
--- a/pandas/tests/sparse/test_libsparse.py
+++ b/pandas/tests/sparse/test_libsparse.py
@@ -474,6 +474,44 @@ def test_to_block_index(self):
class TestIntIndex(tm.TestCase):
+ def test_check_integrity(self):
+
+ # Too many indices than specified in self.length
+ msg = "Too many indices"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ IntIndex(length=1, indices=[1, 2, 3])
+
+ # No index can be negative.
+ msg = "No index can be less than zero"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ IntIndex(length=5, indices=[1, -2, 3])
+
+ # No index can be negative.
+ msg = "No index can be less than zero"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ IntIndex(length=5, indices=[1, -2, 3])
+
+ # All indices must be less than the length.
+ msg = "All indices must be less than the length"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ IntIndex(length=5, indices=[1, 2, 5])
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ IntIndex(length=5, indices=[1, 2, 6])
+
+ # Indices must be strictly ascending.
+ msg = "Indices must be strictly increasing"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ IntIndex(length=5, indices=[1, 3, 2])
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ IntIndex(length=5, indices=[1, 3, 3])
+
def test_int_internal(self):
idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='integer')
self.assertIsInstance(idx, IntIndex)
| The `check_integrity` method of `IntIndex` in `pandas.sparse` was un-implemented despite having documentation. This PR implements the method and calls it when initializing `IntIndex`.
xref <a href="https://github.com/pandas-dev/pandas/pull/15844#discussion_r108840154">#15844 (comment)</a> | https://api.github.com/repos/pandas-dev/pandas/pulls/15863 | 2017-04-02T03:21:19Z | 2017-04-02T14:21:39Z | null | 2017-04-02T21:17:37Z |
API/BUG: Handling Dtype Coercions in Series/Index (GH 15832) | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index a6b6d704737bd..7b8312d25641d 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -54,7 +54,7 @@ Backwards incompatible API changes
Other API Changes
^^^^^^^^^^^^^^^^^
-
+- Series and Index constructors now raises when data is incompatible with a passed dtype= kwarg (:issue:`15832`)
- Moved definition of ``MergeError`` to the ``pandas.errors`` module.
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index fd61813a57c98..56235a99d5f02 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -16,6 +16,7 @@
is_timedelta64_dtype, is_dtype_equal,
is_float_dtype, is_complex_dtype,
is_integer_dtype,
+ is_unsigned_integer_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype, is_scalar,
_string_dtypes,
@@ -1026,3 +1027,56 @@ def find_common_type(types):
return np.object
return np.find_common_type(types, [])
+
+
+def maybe_cast_to_integer_array(arr, dtype, copy=False):
+ """
+ Takes any dtype and returns the casted version, raising for when data is
+ incompatible with integer/unsigned integer dtypes.
+
+ .. versionadded:: 0.21.0
+
+ Parameters
+ ----------
+ arr : ndarray
+ dtype : np.dtype
+ copy: boolean, default False
+
+ Returns
+ -------
+ integer or unsigned integer array
+
+ Raises
+ ------
+ OverflowError
+ * If ``dtype`` is incompatible
+ ValueError
+ * If coercion from float to integer loses precision
+
+ Examples
+ --------
+ If you try to coerce negative values to unsigned integers, it raises:
+
+ >>> Series([-1], dtype='uint64')
+ Traceback (most recent call last):
+ ...
+ OverflowError: Trying to coerce negative values to unsigned integers
+
+ Also, if you try to coerce float values to integers, it raises:
+ >>> Series([1, 2, 3.5], dtype='int64')
+ Traceback (most recent call last):
+ ...
+ ValueError: Trying to coerce float values to integers
+
+ """
+ casted = arr.astype(dtype, copy=copy)
+ if np.array(arr == casted).all():
+ return casted
+
+ if is_unsigned_integer_dtype(dtype) and (arr < 0).any():
+ raise OverflowError("Trying to coerce negative values to unsigned "
+ "integers")
+
+ if is_integer_dtype(dtype) and (is_float_dtype(arr) or
+ is_object_dtype(arr)):
+ raise ValueError("Trying to coerce float values to integers")
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2af4f112ca941..0c69508146e1a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -15,6 +15,7 @@
from pandas.core.dtypes.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex
from pandas.core.dtypes.missing import isnull, array_equivalent
+from pandas.core.dtypes.cast import maybe_cast_to_integer_array
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
@@ -212,11 +213,14 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
- data = np.array(data, copy=copy, dtype=dtype)
+ data = maybe_cast_to_integer_array(data, dtype,
+ copy=copy)
elif inferred in ['floating', 'mixed-integer-float']:
if isnull(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
+ if inferred == 'mixed-integer-float':
+ maybe_cast_to_integer_array(data, dtype)
# If we are actually all equal to integers,
# then coerce to integer.
@@ -246,7 +250,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
except (TypeError, ValueError) as e:
msg = str(e)
- if 'cannot convert float' in msg:
+ if ('cannot convert float' in msg or
+ 'Trying to coerce float values to integer') in msg:
raise
# maybe coerce to a sub-class
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 129f291e5f843..9d16619c47f3a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -35,7 +35,8 @@
from pandas.core.dtypes.cast import (
maybe_upcast, infer_dtype_from_scalar,
maybe_convert_platform,
- maybe_cast_to_datetime, maybe_castable)
+ maybe_cast_to_datetime, maybe_castable,
+ maybe_cast_to_integer_array)
from pandas.core.dtypes.missing import isnull, notnull
from pandas.core.common import (is_bool_indexer,
@@ -2941,9 +2942,13 @@ def _try_cast(arr, take_fast_path):
return arr
try:
+ if is_float_dtype(dtype) or is_integer_dtype(dtype):
+ subarr = maybe_cast_to_integer_array(np.asarray(arr), dtype)
+
subarr = maybe_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
+
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 3d06f1672ae32..16db27cd7e2d2 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -304,6 +304,20 @@ def test_astype(self):
i = Float64Index([0, 1.1, np.NAN])
pytest.raises(ValueError, lambda: i.astype(dtype))
+ @pytest.mark.parametrize("int_dtype", ['uint8', 'uint16', 'uint32',
+ 'uint64', 'int32', 'int64', 'int16',
+ 'int8'])
+ @pytest.mark.parametrize("float_dtype", ['float16', 'float32'])
+ def test_type_coercion(self, int_dtype, float_dtype):
+
+ # GH 15832
+ msg = 'Trying to coerce float values to integers'
+ with tm.assert_raises_regex(ValueError, msg):
+ Index([1, 2, 3.5], dtype=int_dtype)
+
+ i = Index([1, 2, 3.5], dtype=float_dtype)
+ tm.assert_index_equal(i, Index([1, 2, 3.5]))
+
def test_equals_numeric(self):
i = Float64Index([1.0, 2.0])
@@ -678,6 +692,13 @@ def test_constructor_corner(self):
with tm.assert_raises_regex(TypeError, 'casting'):
Int64Index(arr_with_floats)
+ @pytest.mark.parametrize("uints", ['uint8', 'uint16', 'uint32', 'uint64'])
+ def test_constructor_overflow_coercion_signed_to_unsigned(self, uints):
+ # GH 15832
+ msg = 'Trying to coerce negative values to unsigned integers'
+ with tm.assert_raises_regex(OverflowError, msg):
+ Index([-1], dtype=uints)
+
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 17f524cc279c0..5d5aefa3fad37 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -2080,7 +2080,7 @@ def test_table_values_dtypes_roundtrip(self):
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
- df1 = DataFrame(dict([(c, Series(np.random.randn(5), dtype=c))
+ df1 = DataFrame(dict([(c, Series(np.random.randn(5).astype(c)))
for c in ['float32', 'float64', 'int32',
'int64', 'int16', 'int8']]))
df1['string'] = 'foo'
@@ -2094,7 +2094,8 @@ def test_table_values_dtypes_roundtrip(self):
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
- 'int64': 1, 'object': 1, 'datetime64[ns]': 2})
+ 'int64': 1, 'object': 1,
+ 'datetime64[ns]': 2})
result = result.sort_index()
result = expected.sort_index()
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index d591aa4f567a9..691cb60a102f4 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1,30 +1,26 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
from datetime import datetime, timedelta
-from numpy import nan
import numpy as np
import numpy.ma as ma
import pandas as pd
-
-from pandas.core.dtypes.common import (
- is_categorical_dtype,
- is_datetime64tz_dtype)
+import pytest
+from numpy import nan
from pandas import (Index, Series, isnull, date_range,
NaT, period_range, MultiIndex, IntervalIndex)
-from pandas.core.indexes.datetimes import Timestamp, DatetimeIndex
+from pandas import compat
+from pandas.compat import lrange, range, zip, OrderedDict, long
+import pandas.util.testing as tm
from pandas._libs import lib
from pandas._libs.tslib import iNaT
-
-from pandas.compat import lrange, range, zip, OrderedDict, long
-from pandas import compat
+from pandas.core.dtypes.common import (
+ is_categorical_dtype,
+ is_datetime64tz_dtype)
+from pandas.core.indexes.datetimes import Timestamp, DatetimeIndex
from pandas.util.testing import assert_series_equal
-import pandas.util.testing as tm
-
from .common import TestData
@@ -301,12 +297,35 @@ def test_constructor_pass_nan_nat(self):
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
- pytest.raises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
+ msg = "could not convert string to float"
+ with tm.assert_raises_regex(ValueError, msg):
+ Series(['a', 'b', 'c'], dtype=float)
+
+ @pytest.mark.parametrize("unsigned_integers", ['uint8', 'uint16', 'uint32',
+ 'uint64'])
+ def test_constructor_unsigned_dtype_overflow(self, unsigned_integers):
+ # GH 15832
+ msg = 'Trying to coerce negative values to unsigned integers'
+ with tm.assert_raises_regex(OverflowError, msg):
+ Series([-1], dtype=unsigned_integers)
+
+ @pytest.mark.parametrize("integers", ['uint8', 'uint16', 'uint32',
+ 'uint64', 'int32', 'int64', 'int16',
+ 'int8'])
+ @pytest.mark.parametrize("floats", ['float16', 'float32'])
+ def test_constructor_coerce_float_fail(self, integers, floats):
+ # GH 15832
+ msg = 'Trying to coerce float values to integers'
+ with tm.assert_raises_regex(ValueError, msg):
+ Series([1, 2, 3.5], dtype=integers)
+
+ s = Series([1, 2, 3.5], dtype=floats)
+ expected = Series([1, 2, 3.5]).astype(floats)
+ assert_series_equal(s, expected)
def test_constructor_dtype_nocast(self):
# 1572
s = Series([1, 2, 3])
-
s2 = Series(s, dtype=np.int64)
s2[1] = 5
| - [x] closes #15832
- [x] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
Hey @jreback , need a quick help here. I was able to raise an overflow exception (case II), but I wasn't able to raise it on case III. Can you help? Thanks | https://api.github.com/repos/pandas-dev/pandas/pulls/15859 | 2017-04-01T15:18:32Z | 2017-08-17T10:34:25Z | null | 2017-08-17T10:34:25Z |
TST: test 3.4 on windows | diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 26a68b8a9ae3a..f2c0b18d35131 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -401,7 +401,8 @@ static void *PyStringToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
size_t *_outLen) {
- PyObject *obj = (PyObject *)_obj;
+ PyObject *obj, *newObj;
+ obj = (PyObject *)_obj;
#if (PY_VERSION_HEX >= 0x03030000)
if (PyUnicode_IS_COMPACT_ASCII(obj)) {
@@ -412,8 +413,8 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
}
#endif
- PyObject *newObj = PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(obj),
- PyUnicode_GET_SIZE(obj), NULL);
+ newObj = PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(obj),
+ PyUnicode_GET_SIZE(obj), NULL);
GET_TC(tc)->newObj = newObj;
| BLD: bug in building json compiled code | https://api.github.com/repos/pandas-dev/pandas/pulls/15857 | 2017-03-31T22:22:58Z | 2017-04-01T15:37:17Z | 2017-04-01T15:37:17Z | 2017-04-01T15:38:14Z |
ENH: Citing source in README file | diff --git a/doc/cheatsheet/README.txt b/doc/cheatsheet/README.txt
index e2f6ec042e9cc..d32fe5bcd05a6 100644
--- a/doc/cheatsheet/README.txt
+++ b/doc/cheatsheet/README.txt
@@ -2,3 +2,7 @@ The Pandas Cheat Sheet was created using Microsoft Powerpoint 2013.
To create the PDF version, within Powerpoint, simply do a "Save As"
and pick "PDF' as the format.
+This cheat sheet was inspired by the RstudioData Wrangling Cheatsheet[1], written by Irv Lustig, Princeton Consultants[2].
+
+[1]: https://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf
+[2]: http://www.princetonoptimization.com/
| For GH users who strictly or heavily use the web-view instead of a local Git, having a direct link is handy, as it does not require downloading the PDF _if_ the user wanted to go to the source of it directly. It's an alternative that allows those interested in more uploads similar to this PDF from the same author(s).
- [ ] closes: N/A
- [ ] tests added / passed: N/A
- [ ] whatsnew: Added source of PDF to README file for the cheatsheet
| https://api.github.com/repos/pandas-dev/pandas/pulls/15856 | 2017-03-31T20:41:11Z | 2017-04-04T22:32:47Z | 2017-04-04T22:32:47Z | 2017-04-04T22:32:52Z |
Only call validation functions when args/kwargs are passed | diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index 4053994efa005..f448a9aad04c6 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -37,23 +37,24 @@ def __init__(self, defaults, fname=None, method=None,
def __call__(self, args, kwargs, fname=None,
max_fname_arg_count=None, method=None):
- fname = self.fname if fname is None else fname
- max_fname_arg_count = (self.max_fname_arg_count if
- max_fname_arg_count is None
- else max_fname_arg_count)
- method = self.method if method is None else method
-
- if method == 'args':
- validate_args(fname, args, max_fname_arg_count, self.defaults)
- elif method == 'kwargs':
- validate_kwargs(fname, kwargs, self.defaults)
- elif method == 'both':
- validate_args_and_kwargs(fname, args, kwargs,
- max_fname_arg_count,
- self.defaults)
- else:
- raise ValueError("invalid validation method "
- "'{method}'".format(method=method))
+ if args or kwargs:
+ fname = self.fname if fname is None else fname
+ max_fname_arg_count = (self.max_fname_arg_count if
+ max_fname_arg_count is None
+ else max_fname_arg_count)
+ method = self.method if method is None else method
+
+ if method == 'args':
+ validate_args(fname, args, max_fname_arg_count, self.defaults)
+ elif method == 'kwargs':
+ validate_kwargs(fname, kwargs, self.defaults)
+ elif method == 'both':
+ validate_args_and_kwargs(fname, args, kwargs,
+ max_fname_arg_count,
+ self.defaults)
+ else:
+ raise ValueError("invalid validation method "
+ "'{method}'".format(method=method))
ARGMINMAX_DEFAULTS = dict(out=None)
| cc @gfyoung | https://api.github.com/repos/pandas-dev/pandas/pulls/15850 | 2017-03-30T20:50:23Z | 2017-03-31T06:40:59Z | 2017-03-31T06:40:59Z | 2017-10-16T09:46:05Z |
TST: incorrect localization in append testing | diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index fd5421abc89ad..5584c1ac6a239 100755
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -83,9 +83,9 @@ def test_append_index(self):
# GH 7112
import pytz
tz = pytz.timezone('Asia/Tokyo')
- expected_tuples = [(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz)),
- (1.2, datetime.datetime(2011, 1, 2, tzinfo=tz)),
- (1.3, datetime.datetime(2011, 1, 3, tzinfo=tz))]
+ expected_tuples = [(1.1, tz.localize(datetime.datetime(2011, 1, 1))),
+ (1.2, tz.localize(datetime.datetime(2011, 1, 2))),
+ (1.3, tz.localize(datetime.datetime(2011, 1, 3)))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
tm.assert_index_equal(result, expected)
@@ -103,9 +103,9 @@ def test_append_index(self):
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
- np.array([(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz), 'A'),
- (1.2, datetime.datetime(2011, 1, 2, tzinfo=tz), 'B'),
- (1.3, datetime.datetime(2011, 1, 3, tzinfo=tz), 'C')] +
+ np.array([(1.1, tz.localize(datetime.datetime(2011, 1, 1)), 'A'),
+ (1.2, tz.localize(datetime.datetime(2011, 1, 2)), 'B'),
+ (1.3, tz.localize(datetime.datetime(2011, 1, 3)), 'C')] +
expected_tuples), None)
tm.assert_index_equal(result, expected)
| and when ``pytz`` version changes our tests break because of this incorrect (old) method, which works when you *dont'* have a tz change, but fails when the tz's actually change. | https://api.github.com/repos/pandas-dev/pandas/pulls/15849 | 2017-03-30T20:29:24Z | 2017-03-30T20:42:50Z | null | 2017-03-30T20:42:50Z |
COMPAT: add 0.19.2 msgpack/pickle files | diff --git a/pandas/tests/io/data/legacy_msgpack/0.19.2/0.19.2_x86_64_darwin_2.7.12.msgpack b/pandas/tests/io/data/legacy_msgpack/0.19.2/0.19.2_x86_64_darwin_2.7.12.msgpack
new file mode 100644
index 0000000000000..f2dc38766025e
Binary files /dev/null and b/pandas/tests/io/data/legacy_msgpack/0.19.2/0.19.2_x86_64_darwin_2.7.12.msgpack differ
diff --git a/pandas/tests/io/data/legacy_msgpack/0.19.2/0.19.2_x86_64_darwin_3.6.1.msgpack b/pandas/tests/io/data/legacy_msgpack/0.19.2/0.19.2_x86_64_darwin_3.6.1.msgpack
new file mode 100644
index 0000000000000..4137629f53cf2
Binary files /dev/null and b/pandas/tests/io/data/legacy_msgpack/0.19.2/0.19.2_x86_64_darwin_3.6.1.msgpack differ
diff --git a/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_2.7.12.pickle b/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_2.7.12.pickle
new file mode 100644
index 0000000000000..d702ab444df62
Binary files /dev/null and b/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_2.7.12.pickle differ
diff --git a/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_3.6.1.pickle b/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_3.6.1.pickle
new file mode 100644
index 0000000000000..6bb02672a4151
Binary files /dev/null and b/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_3.6.1.pickle differ
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index fad6237d851fb..f46f62e781006 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -207,7 +207,8 @@ def test_pickles(current_pickle_data, version):
if data is None:
continue
n += 1
- assert n > 0, 'Pickle files are not tested'
+ assert n > 0, ('Pickle files are not '
+ 'tested: {version}'.format(version=version))
def test_round_trip_current(current_pickle_data):
| https://api.github.com/repos/pandas-dev/pandas/pulls/15848 | 2017-03-30T14:55:43Z | 2017-03-30T21:02:15Z | 2017-03-30T21:02:15Z | 2017-03-30T21:02:15Z | |
CLN: Fix a typo in comment | diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h
index d0588348baa44..4f51fa8b3eb38 100644
--- a/pandas/_libs/src/ujson/lib/ultrajson.h
+++ b/pandas/_libs/src/ujson/lib/ultrajson.h
@@ -233,7 +233,7 @@ typedef struct __JSONObjectEncoder {
int recursionMax;
/*
- Configuration for max decimals of double floating poiunt numbers to encode (0-9) */
+ Configuration for max decimals of double floating point numbers to encode (0-9) */
int doublePrecision;
/*
| https://api.github.com/repos/pandas-dev/pandas/pulls/15847 | 2017-03-30T13:18:03Z | 2017-03-30T13:20:52Z | 2017-03-30T13:20:52Z | 2017-03-30T13:39:26Z | |
DOC: prettify bug fixes section | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 1ecdd6dd8fbef..399f91fc60810 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -913,147 +913,141 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+Conversion
+^^^^^^^^^^
+
- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously this raised ``ValueError`` (:issue:`15240`)
+- Bug in ``Timestamp.replace`` with compat for passing long integers (:issue:`15030`)
- Bug in ``Timestamp`` returning UTC based time/date attributes when a timezone was provided (:issue:`13303`)
-- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
- Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
+- Bug in catching an overflow in ``Timestamp`` + ``Timedelta/Offset`` operations (:issue:`15126`)
- Bug in ``DatetimeIndex.round()`` and ``Timestamp.round()`` floating point accuracy when rounding by milliseconds or less (:issue:`14440`, :issue:`15578`)
- Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`)
- Bug in ``DataFrame(..).apply(to_numeric)`` when values are of type decimal.Decimal. (:issue:`14827`)
- Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`)
-- Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`)
-- Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`)
-- Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`)
- Cleaned up ``PeriodIndex`` constructor, including raising on floats more consistently (:issue:`13277`)
-- Bug in ``pd.read_csv()`` in which the ``dialect`` parameter was not being verified before processing (:issue:`14898`)
-- Bug in ``pd.read_fwf()`` where the skiprows parameter was not being respected during column width inference (:issue:`11256`)
-- Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`)
-- Bug in ``pd.read_csv()`` in which a file containing a row with many columns followed by rows with fewer columns would cause a crash (:issue:`14125`)
-- Added checks in ``pd.read_csv()`` ensuring that values for ``nrows`` and ``chunksize`` are valid (:issue:`15767`)
-- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
-- Bug in ``.groupby(..).resample()`` when passed the ``on=`` kwarg. (:issue:`15021`)
- Bug in using ``__deepcopy__`` on empty NDFrame objects (:issue:`15370`)
-- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a ``Series`` indexer (:issue:`14730`, :issue:`15424`)
-- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a numpy array (:issue:`15434`)
-- Bug in ``Rolling.quantile`` function that caused a segmentation fault when called with a quantile value outside of the range [0, 1] (:issue:`15463`)
-- Bug in ``pd.cut()`` with a single bin on an all 0s array (:issue:`15428`)
-- Bug in ``pd.qcut()`` with a single quantile and an array with identical values (:issue:`15431`)
-- Compat with SciPy 0.19.0 for testing on ``.interpolate()`` (:issue:`15662`)
-- Bug in ``Series.asof`` which raised if the series contained all ``np.nan`` (:issue:`15713`)
-
-- Compat for 32-bit platforms for ``.qcut/cut``; bins will now be ``int64`` dtype (:issue:`14866`)
-
-- Properly set ``__name__`` and ``__qualname__`` for ``Groupby.*`` functions (:issue:`14620`)
-- Bug in ``.at`` when selecting from a tz-aware column (:issue:`15822`)
-- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`, :issue:`15765`)
- Bug in ``Series.replace`` and ``DataFrame.replace`` which failed on empty replacement dicts (:issue:`15289`)
- Bug in ``Series.replace`` which replaced a numeric by string (:issue:`15743`)
-
+- Bug in ``Index`` construction with ``NaN`` elements and integer dtype specified (:issue:`15187`)
+- Bug in ``Series`` construction with a datetimetz (:issue:`14928`)
+- Bug in ``Series.dt.round()`` inconsistent behaviour on ``NaT`` 's with different arguments (:issue:`14940`)
+- Bug in ``Series`` constructor when both ``copy=True`` and ``dtype`` arguments are provided (:issue:`15125`)
+- Incorrect dtyped ``Series`` was returned by comparison methods (e.g., ``lt``, ``gt``, ...) against a constant for an empty ``DataFrame`` (:issue:`15077`)
+- Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`)
+- Bug in ``DataFrame.fillna()`` where the argument ``downcast`` was ignored when fillna value was of type ``dict`` (:issue:`15277`)
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
-- Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`)
-- Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`)
-- Bug in interactions with ``Qt`` when a ``QtApplication`` already exists (:issue:`14372`)
-- Bug in ``DataFrame.isin`` comparing datetimelike to empty frame (:issue:`15473`)
+Indexing
+^^^^^^^^
+- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
+- Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`)
+- Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`)
+- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a ``Series`` indexer (:issue:`14730`, :issue:`15424`)
+- Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a numpy array (:issue:`15434`)
+- Bug in ``Series.asof`` which raised if the series contained all ``np.nan`` (:issue:`15713`)
+- Bug in ``.at`` when selecting from a tz-aware column (:issue:`15822`)
- Bug in ``Series.where()`` and ``DataFrame.where()`` where array-like conditionals were being rejected (:issue:`15414`)
- Bug in ``Series.where()`` where TZ-aware data was converted to float representation (:issue:`15701`)
-- Bug in ``Index`` construction with ``NaN`` elements and integer dtype specified (:issue:`15187`)
-- Bug in ``Series`` construction with a datetimetz (:issue:`14928`)
+- Bug in ``.loc`` that would not return the correct dtype for scalar access for a DataFrame (:issue:`11617`)
- Bug in output formatting of a ``MultiIndex`` when names are integers (:issue:`12223`, :issue:`15262`)
+- Bug in ``Categorical.searchsorted()`` where alphabetical instead of the provided categorical order was used (:issue:`14522`)
+- Bug in ``Series.iloc`` where a ``Categorical`` object for list-like indexes input was returned, where a ``Series`` was expected. (:issue:`14580`)
+- Bug in ``DataFrame.isin`` comparing datetimelike to empty frame (:issue:`15473`)
+- Bug in ``.reset_index()`` when an all ``NaN`` level of a ``MultiIndex`` would fail (:issue:`6322`)
+- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
+- Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`)
+- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
-- Bug in compat for passing long integers to ``Timestamp.replace`` (:issue:`15030`)
-- Bug in ``.loc`` that would not return the correct dtype for scalar access for a DataFrame (:issue:`11617`)
-- Bug in ``GroupBy.get_group()`` failing with a categorical grouper (:issue:`15155`)
-- Bug in ``pandas.tools.utils.cartesian_product()`` with large input can cause overflow on windows (:issue:`15265`)
+I/O
+^^^
+- Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`)
+- Bug in ``pd.read_fwf()`` where the skiprows parameter was not being respected during column width inference (:issue:`11256`)
+- Bug in ``pd.read_csv()`` in which the ``dialect`` parameter was not being verified before processing (:issue:`14898`)
+- Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`)
+- Bug in ``pd.read_csv()`` in which a file containing a row with many columns followed by rows with fewer columns would cause a crash (:issue:`14125`)
+- Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`)
+- Bug in ``pd.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
+- Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`)
+- Added checks in ``pd.read_csv()`` ensuring that values for ``nrows`` and ``chunksize`` are valid (:issue:`15767`)
+- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
+- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
+- Bug in ``.to_json()`` causing single byte ascii characters to be expanded to four byte unicode (:issue:`15344`)
+- Bug in ``.read_json()`` for Python 2 where ``lines=True`` and contents contain non-ascii unicode characters (:issue:`15132`)
+- Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`)
+- Bug in ``pd.read_msgpack()`` which did not allow loading of a dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+- Bug in ``pd.read_msgpack()`` when deserializing a ``CategoricalIndex`` (:issue:`15487`)
- Bug in ``DataFrame.to_records()`` with converting a ``DatetimeIndex`` with a timezone (:issue:`13937`)
+- Bug in ``DataFrame.to_records()`` which failed with unicode characters in column names (:issue:`11879`)
+- Bug in ``.to_sql()`` when writing a DataFrame with numeric index names (:issue:`15404`).
+- Bug in ``DataFrame.to_html()`` with ``index=False`` and ``max_rows`` raising in ``IndexError`` (:issue:`14998`)
+- Bug in ``pd.read_hdf()`` passing a ``Timestamp`` to the ``where`` parameter with a non date column (:issue:`15492`)
+- Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`)
+- Bug in ``StataReader`` and ``StataWriter`` which allows invalid encodings (:issue:`15723`)
+Plotting
+^^^^^^^^
-- Bug in ``.groupby(...).rolling(...)`` when ``on`` is specified and using a ``DatetimeIndex`` (:issue:`15130`)
-
-
-- Bug in ``to_sql`` when writing a DataFrame with numeric index names (:issue:`15404`).
-- Bug in ``Series.iloc`` where a ``Categorical`` object for list-like indexes input was returned, where a ``Series`` was expected. (:issue:`14580`)
-- Bug in repr-formatting a ``SparseDataFrame`` after a value was set on (a copy of) one of its series (:issue:`15488`)
-- Bug in ``SparseSeries.reindex`` on single level with list of length 1 (:issue:`15447`)
+- Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (use ``matplotlib >= 2.0.1``) (:issue:`9351`)
+- Bug in ``DataFrame.boxplot`` where ``fontsize`` was not applied to the tick labels on both axes (:issue:`15108`)
+Groupby/Resample/Rolling
+^^^^^^^^^^^^^^^^^^^^^^^^
+- Bug in ``.groupby(..).resample()`` when passed the ``on=`` kwarg. (:issue:`15021`)
+- Properly set ``__name__`` and ``__qualname__`` for ``Groupby.*`` functions (:issue:`14620`)
+- Bug in ``GroupBy.get_group()`` failing with a categorical grouper (:issue:`15155`)
+- Bug in ``.groupby(...).rolling(...)`` when ``on`` is specified and using a ``DatetimeIndex`` (:issue:`15130`)
- Bug in groupby operations with timedelta64 when passing ``numeric_only=False`` (:issue:`5724`)
- Bug in ``groupby.apply()`` coercing ``object`` dtypes to numeric types, when not all values were numeric (:issue:`14423`, :issue:`15421`, :issue:`15670`)
-
-
-- Bug in ``DataFrame.to_html`` with ``index=False`` and ``max_rows`` raising in ``IndexError`` (:issue:`14998`)
-
-- Bug in ``Categorical.searchsorted()`` where alphabetical instead of the provided categorical order was used (:issue:`14522`)
-
-
-
-- Bug in ``resample``, where a non-string ```loffset`` argument would not be applied when resampling a timeseries (:issue:`13218`)
-
-
-
-- Bug in ``.rank()`` which incorrectly ranks ordered categories (:issue:`15420`)
-- Bug in ``.corr()`` and ``.cov()`` where the column and index were the same object (:issue:`14617`)
-
-
-- Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`)
-- Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`)
-
-- Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`)
-- Bug in ``.to_json()`` causing single byte ascii characters to be expanded to four byte unicode (:issue:`15344`)
-- Bug in ``.read_json()`` for Python 2 where ``lines=True`` and contents contain non-ascii unicode characters (:issue:`15132`)
+- Bug in ``resample``, where a non-string ``loffset`` argument would not be applied when resampling a timeseries (:issue:`13218`)
+- Bug in ``DataFrame.groupby().describe()`` when grouping on ``Index`` containing tuples (:issue:`14848`)
+- Bug in ``groupby().nunique()`` with a datetimelike-grouper where bins counts were incorrect (:issue:`13453`)
+- Bug in ``groupby.transform()`` that would coerce the resultant dtypes back to the original (:issue:`10972`, :issue:`11444`)
+- Bug in ``groupby.agg()`` incorrectly localizing timezone on ``datetime`` (:issue:`15426`, :issue:`10668`, :issue:`13046`)
- Bug in ``.rolling/expanding()`` functions where ``count()`` was not counting ``np.Inf``, nor handling ``object`` dtypes (:issue:`12541`)
- Bug in ``.rolling()`` where ``pd.Timedelta`` or ``datetime.timedelta`` was not accepted as a ``window`` argument (:issue:`15440`)
+- Bug in ``Rolling.quantile`` function that caused a segmentation fault when called with a quantile value outside of the range [0, 1] (:issue:`15463`)
- Bug in ``DataFrame.resample().median()`` if duplicate column names are present (:issue:`14233`)
-- Bug in ``DataFrame.groupby().describe()`` when grouping on ``Index`` containing tuples (:issue:`14848`)
-- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
-- Bug in ``groupby().nunique()`` with a datetimelike-grouper where bins counts were incorrect (:issue:`13453`)
+Sparse
+^^^^^^
-- Bug in catching an overflow in ``Timestamp`` + ``Timedelta/Offset`` operations (:issue:`15126`)
-- Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`)
+- Bug in ``SparseSeries.reindex`` on single level with list of length 1 (:issue:`15447`)
+- Bug in repr-formatting a ``SparseDataFrame`` after a value was set on (a copy of) one of its series (:issue:`15488`)
+- Bug in ``SparseDataFrame`` construction with lists not coercing to dtype (:issue:`15682`)
+Reshaping
+^^^^^^^^^
- Bug in ``pd.merge_asof()`` where ``left_index`` or ``right_index`` caused a failure when multiple ``by`` was specified (:issue:`15676`)
- Bug in ``pd.merge_asof()`` where ``left_index``/``right_index`` together caused a failure when ``tolerance`` was specified (:issue:`15135`)
- Bug in ``DataFrame.pivot_table()`` where ``dropna=True`` would not drop all-NaN columns when the columns was a ``category`` dtype (:issue:`15193`)
-
-
-- Bug in ``pd.read_hdf()`` passing a ``Timestamp`` to the ``where`` parameter with a non date column (:issue:`15492`)
-
-
-- Bug in ``Series`` constructor when both ``copy=True`` and ``dtype`` arguments are provided (:issue:`15125`)
-- Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`)
-- Incorrect dtyped ``Series`` was returned by comparison methods (e.g., ``lt``, ``gt``, ...) against a constant for an empty ``DataFrame`` (:issue:`15077`)
-- Bug in ``Series.dt.round`` inconsistent behaviour on NAT's with different arguments (:issue:`14940`)
-- Bug in ``DataFrame.fillna()`` where the argument ``downcast`` was ignored when fillna value was of type ``dict`` (:issue:`15277`)
-- Bug in ``.reset_index()`` when an all ``NaN`` level of a ``MultiIndex`` would fail (:issue:`6322`)
-
-- Bug in ``pd.read_msgpack()`` when deserializing a ``CategoricalIndex`` (:issue:`15487`)
-- Bug in ``pd.DataFrame.to_records()`` which failed with unicode characters in column names (:issue:`11879`)
-
-
-- Bug in ``pd.read_csv()`` with ``float_precision='round_trip'`` which caused a segfault when a text entry is parsed (:issue:`15140`)
-- Avoid use of ``np.finfo()`` during ``import pandas`` removed to mitigate deadlock on Python GIL misuse (:issue:`14641`)
-
-- Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`)
-- Bug in ``StataReader`` and ``StataWriter`` which allows invalid encodings (:issue:`15723`)
-- Bug with ``sort=True`` in ``DataFrame.join`` and ``pd.merge`` when joining on indexes (:issue:`15582`)
-
+- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
+- Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`)
- Bug in ``pd.concat()`` in which concatting with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`)
-- Bug in ``groupby.agg()`` incorrectly localizing timezone on ``datetime`` (:issue:`15426`, :issue:`10668`, :issue:`13046`)
+- Bug with ``sort=True`` in ``DataFrame.join`` and ``pd.merge`` when joining on indexes (:issue:`15582`)
-- Bug in ``SparseDataFrame`` construction with lists not coercing to dtype (:issue:`15682`)
+Numeric
+^^^^^^^
-- Bug in ``.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
-- Bug in ``groupby.transform()`` that would coerce the resultant dtypes back to the original (:issue:`10972`, :issue:`11444`)
+- Bug in ``.rank()`` which incorrectly ranks ordered categories (:issue:`15420`)
+- Bug in ``.corr()`` and ``.cov()`` where the column and index were the same object (:issue:`14617`)
- Bug in ``.mode()`` where ``mode`` was not returned if was only a single value (:issue:`15714`)
-
-- Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (use ``matplotlib >= 2.0.1``) (:issue:`9351`)
-- Bug in ``DataFrame.boxplot`` where ``fontsize`` was not applied to the tick labels on both axes (:issue:`15108`)
-- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
+- Bug in ``pd.cut()`` with a single bin on an all 0s array (:issue:`15428`)
+- Bug in ``pd.qcut()`` with a single quantile and an array with identical values (:issue:`15431`)
+- Bug in ``pandas.tools.utils.cartesian_product()`` with large input can cause overflow on windows (:issue:`15265`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
-- Bug in ``pd.read_msgpack()`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+
+Other
+^^^^^
+
+- Compat with SciPy 0.19.0 for testing on ``.interpolate()`` (:issue:`15662`)
+- Compat for 32-bit platforms for ``.qcut/cut``; bins will now be ``int64`` dtype (:issue:`14866`)
+- Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`)
+- Bug in interactions with ``Qt`` when a ``QtApplication`` already exists (:issue:`14372`)
+- Avoid use of ``np.finfo()`` during ``import pandas`` removed to mitigate deadlock on Python GIL misuse (:issue:`14641`)
| makes bug fixes *much* easier as its not organized. | https://api.github.com/repos/pandas-dev/pandas/pulls/15846 | 2017-03-30T12:46:21Z | 2017-03-30T12:46:30Z | 2017-03-30T12:46:30Z | 2017-03-30T12:46:30Z |
na_position doesn't work for sort_index() with MultiIndex | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 6b6f532ed2323..63693b4583ff4 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1564,6 +1564,7 @@ Indexing
- Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`)
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
- Bug in ``pd.concat()`` where the names of ``MultiIndex`` of resulting ``DataFrame`` are not handled correctly when ``None`` is presented in the names of ``MultiIndex`` of input ``DataFrame`` (:issue:`15787`)
+- Bug in ``DataFrame.sort_index()`` and ``Series.sort_index()`` ``na_position`` doesn't work with ``MultiIndex``
I/O
^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 153042d4a09c9..7fbfa7962c2c6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3352,7 +3352,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
- indexer = lexsort_indexer(labels.labels, orders=ascending,
+ indexer = lexsort_indexer(labels._get_labels_for_sorting(),
+ orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 6d9a9aa691f66..92baf9d289cd2 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1635,6 +1635,22 @@ def reorder_levels(self, order):
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
+ def _get_labels_for_sorting(self):
+ """
+ we categorizing our labels by using the
+ available catgories (all, not just observed)
+ excluding any missing ones (-1); this is in preparation
+ for sorting, where we need to disambiguate that -1 is not
+ a valid valid
+ """
+ from pandas.core.categorical import Categorical
+
+ return [Categorical.from_codes(label,
+ np.arange(np.array(label).max() + 1,
+ dtype=label.dtype),
+ ordered=True)
+ for label in self.labels]
+
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8a2351527856d..e0364ad629c5d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1753,7 +1753,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
labels = index._sort_levels_monotonic()
- indexer = lexsort_indexer(labels.labels, orders=ascending)
+ indexer = lexsort_indexer(labels._get_labels_for_sorting(),
+ orders=ascending,
+ na_position=na_position)
else:
from pandas.core.sorting import nargsort
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 24bbf895508d7..f7e7ab6b190b5 100755
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2634,3 +2634,60 @@ def test_sort_non_lexsorted(self):
with pytest.raises(UnsortedIndexError):
result.loc[pd.IndexSlice['B':'C', 'a':'c'], :]
+
+ def test_sort_index_nan(self):
+ tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
+ mi = MultiIndex.from_tuples(tuples)
+
+ df = DataFrame(np.arange(16).reshape(4, 4),
+ index=mi, columns=list('ABCD'))
+ s = Series(np.arange(4), index=mi)
+
+ df2 = DataFrame({
+ 'date': pd.to_datetime([
+ '20121002', '20121007', '20130130', '20130202', '20130305',
+ '20121002', '20121207', '20130130', '20130202', '20130305',
+ '20130202', '20130305'
+ ]),
+ 'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
+ 'whole_cost': [1790, np.nan, 280, 259, np.nan, 623, 90, 312,
+ np.nan, 301, 359, 801],
+ 'cost': [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12]
+ }).set_index(['date', 'user_id'])
+
+ # sorting frame, default nan position is last
+ result = df.sort_index()
+ expected = df.iloc[[3, 0, 2, 1], :]
+ tm.assert_frame_equal(result, expected)
+
+ # sorting frame, nan position last
+ result = df.sort_index(na_position='last')
+ expected = df.iloc[[3, 0, 2, 1], :]
+ tm.assert_frame_equal(result, expected)
+
+ # sorting frame, nan position first
+ result = df.sort_index(na_position='first')
+ expected = df.iloc[[1, 2, 3, 0], :]
+ tm.assert_frame_equal(result, expected)
+
+ # sorting frame with removed rows
+ result = df2.dropna().sort_index()
+ expected = df2.sort_index().dropna()
+ tm.assert_frame_equal(result, expected)
+
+ # sorting series, default nan position is last
+ result = s.sort_index()
+ expected = s.iloc[[3, 0, 2, 1]]
+ tm.assert_series_equal(result, expected)
+
+ # sorting series, nan position last
+ result = s.sort_index(na_position='last')
+ expected = s.iloc[[3, 0, 2, 1]]
+ tm.assert_series_equal(result, expected)
+
+ # sorting series, nan position first
+ result = s.sort_index(na_position='first')
+ expected = s.iloc[[1, 2, 3, 0]]
+ tm.assert_series_equal(result, expected)
+
+
| - [X] closes #14784
- [X] tests added / passed
- [X] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15845 | 2017-03-30T06:57:03Z | 2017-04-19T22:29:29Z | null | 2017-04-20T08:41:36Z |
Fix a typo | diff --git a/pandas/sparse/sparse.pyx b/pandas/sparse/sparse.pyx
index 7ab29414499fc..00d317c42b18d 100644
--- a/pandas/sparse/sparse.pyx
+++ b/pandas/sparse/sparse.pyx
@@ -71,7 +71,7 @@ cdef class IntIndex(SparseIndex):
def check_integrity(self):
"""
Only need be strictly ascending and nothing less than 0 or greater than
- totall ength
+ total length
"""
pass
| Fixes a small typo. | https://api.github.com/repos/pandas-dev/pandas/pulls/15844 | 2017-03-30T04:33:14Z | 2017-03-30T11:57:01Z | 2017-03-30T11:57:01Z | 2017-03-30T11:57:57Z |
DOC: Fix up _DeprecatedModule parameters doc | diff --git a/pandas/util/depr_module.py b/pandas/util/depr_module.py
index b181c4627b1e1..af7faf9dd96c8 100644
--- a/pandas/util/depr_module.py
+++ b/pandas/util/depr_module.py
@@ -13,8 +13,9 @@ class _DeprecatedModule(object):
Parameters
----------
deprmod : name of module to be deprecated.
- deprmodto : name of module as a replacement, optional
- if not givent will __module__
+ deprmodto : name of module as a replacement, optional.
+ If not given, the __module__ attribute will
+ be used when needed.
removals : objects or methods in module that will no longer be
accessible once module is removed.
"""
| Patches a minor doc error from #15537.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15843 | 2017-03-30T00:29:22Z | 2017-03-30T11:55:17Z | 2017-03-30T11:55:17Z | 2017-03-31T08:32:10Z |
CLN: Remove "flake8: noqa" from files | diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index 59bdde83aedd8..56a7cab730f1f 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -1,8 +1,4 @@
-
-# flake8: noqa
-
import pytest
-from itertools import product
from distutils.version import LooseVersion
import pandas as pd
@@ -32,7 +28,7 @@ def test_compat():
@pytest.mark.parametrize('parser', expr._parsers)
def test_invalid_numexpr_version(engine, parser):
def testit():
- a, b = 1, 2
+ a, b = 1, 2 # noqa
res = pd.eval('a + b', engine=engine, parser=parser)
tm.assert_equal(res, 3)
diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py
index 44a7f2b45e759..83458c82a3d7c 100644
--- a/pandas/tests/formats/test_format.py
+++ b/pandas/tests/formats/test_format.py
@@ -1,13 +1,9 @@
# -*- coding: utf-8 -*-
"""
-test output formatting for Series/DataFrame
-including to_string & reprs
+Test output formatting for Series/DataFrame, including to_string & reprs
"""
-# TODO(wesm): lots of issues making flake8 hard
-# flake8: noqa
-
from __future__ import print_function
import re
@@ -57,8 +53,14 @@ def has_info_repr(df):
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
- nv = len(r.split(
- '\n')) == 6 # 1. <class>, 2. Index, 3. Columns, 4. dtype, 5. memory usage, 6. trailing newline
+
+ # 1. <class>
+ # 2. Index
+ # 3. Columns
+ # 4. dtype
+ # 5. memory usage
+ # 6. trailing newline
+ nv = len(r.split('\n')) == 6
return has_info and nv
@@ -477,7 +479,7 @@ def test_east_asian_unicode_frame(self):
if PY3:
_rep = repr
else:
- _rep = unicode
+ _rep = unicode # noqa
# not alighned properly because of east asian width
@@ -529,27 +531,39 @@ def test_east_asian_unicode_frame(self):
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
- index=pd.Index([u'あ', u'い', u'うう', u'え'], name=u'おおおお'))
- expected = (u" a b\nおおおお \nあ あああああ あ\n"
- u"い い いいい\nうう う う\nえ えええ ええええええ"
- )
+ index=pd.Index([u'あ', u'い', u'うう', u'え'],
+ name=u'おおおお'))
+ expected = (u" a b\n"
+ u"おおおお \n"
+ u"あ あああああ あ\n"
+ u"い い いいい\n"
+ u"うう う う\n"
+ u"え えええ ええええええ")
self.assertEqual(_rep(df), expected)
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
- index=pd.Index([u'あ', u'いいい', u'うう', u'え'], name=u'お'))
- expected = (u" あああ いいいいい\nお \nあ あああ あ\n"
- u"いいい い いいい\nうう う う\nえ えええええ ええ")
+ index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
+ name=u'お'))
+ expected = (u" あああ いいいいい\n"
+ u"お \n"
+ u"あ あああ あ\n"
+ u"いいい い いいい\n"
+ u"うう う う\n"
+ u"え えええええ ええ")
self.assertEqual(_rep(df), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
- 'b': [u'あ', u'いいい', u'う', u'ええええええ']}, index=idx)
- expected = (u" a b\nあ いい あああああ あ\n"
- u"う え い いいい\nおおお かかかか う う\n"
+ 'b': [u'あ', u'いいい', u'う', u'ええええええ']},
+ index=idx)
+ expected = (u" a b\n"
+ u"あ いい あああああ あ\n"
+ u"う え い いいい\n"
+ u"おおお かかかか う う\n"
u"き くく えええ ええええええ")
self.assertEqual(_rep(df), expected)
@@ -597,18 +611,21 @@ def test_east_asian_unicode_frame(self):
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
- expected = (u" a b\na あああああ あ\n"
- u"bb い いいい\nc う う\n"
- u"ddd えええ ええええええ"
- "")
+ expected = (u" a b\n"
+ u"a あああああ あ\n"
+ u"bb い いいい\n"
+ u"c う う\n"
+ u"ddd えええ ええええええ")
self.assertEqual(_rep(df), expected)
# column name
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
- expected = (u" b あああああ\na あ 1\n"
- u"bb いいい 222\nc う 33333\n"
+ expected = (u" b あああああ\n"
+ u"a あ 1\n"
+ u"bb いいい 222\n"
+ u"c う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
@@ -616,37 +633,49 @@ def test_east_asian_unicode_frame(self):
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
- expected = (u" a b\nあああ あああああ あ\n"
- u"いいいいいい い いいい\nうう う う\n"
+ expected = (u" a b\n"
+ u"あああ あああああ あ\n"
+ u"いいいいいい い いいい\n"
+ u"うう う う\n"
u"え えええ ええええええ")
self.assertEqual(_rep(df), expected)
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
- index=pd.Index([u'あ', u'い', u'うう', u'え'], name=u'おおおお'))
- expected = (u" a b\nおおおお \n"
- u"あ あああああ あ\nい い いいい\n"
- u"うう う う\nえ えええ ええええええ"
- )
+ index=pd.Index([u'あ', u'い', u'うう', u'え'],
+ name=u'おおおお'))
+ expected = (u" a b\n"
+ u"おおおお \n"
+ u"あ あああああ あ\n"
+ u"い い いいい\n"
+ u"うう う う\n"
+ u"え えええ ええええええ")
self.assertEqual(_rep(df), expected)
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
- index=pd.Index([u'あ', u'いいい', u'うう', u'え'], name=u'お'))
- expected = (u" あああ いいいいい\nお \n"
- u"あ あああ あ\nいいい い いいい\n"
- u"うう う う\nえ えええええ ええ")
+ index=pd.Index([u'あ', u'いいい', u'うう', u'え'],
+ name=u'お'))
+ expected = (u" あああ いいいいい\n"
+ u"お \n"
+ u"あ あああ あ\n"
+ u"いいい い いいい\n"
+ u"うう う う\n"
+ u"え えええええ ええ")
self.assertEqual(_rep(df), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
- 'b': [u'あ', u'いいい', u'う', u'ええええええ']}, index=idx)
- expected = (u" a b\nあ いい あああああ あ\n"
- u"う え い いいい\nおおお かかかか う う\n"
+ 'b': [u'あ', u'いいい', u'う', u'ええええええ']},
+ index=idx)
+ expected = (u" a b\n"
+ u"あ いい あああああ あ\n"
+ u"う え い いいい\n"
+ u"おおお かかかか う う\n"
u"き くく えええ ええええええ")
self.assertEqual(_rep(df), expected)
@@ -660,14 +689,18 @@ def test_east_asian_unicode_frame(self):
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
- expected = (u" a ... ああああ\n0 あああああ ... さ\n"
- u".. ... ... ...\n3 えええ ... せ\n"
+ expected = (u" a ... ああああ\n"
+ u"0 あああああ ... さ\n"
+ u".. ... ... ...\n"
+ u"3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
- expected = (u" a ... ああああ\nあああ あああああ ... さ\n"
- u"... ... ... ...\naaa えええ ... せ\n"
+ expected = (u" a ... ああああ\n"
+ u"あああ あああああ ... さ\n"
+ u"... ... ... ...\n"
+ u"aaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
@@ -675,8 +708,10 @@ def test_east_asian_unicode_frame(self):
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'¡¡', u'ええええええ']},
index=['a', 'bb', 'c', '¡¡¡'])
- expected = (u" b あああああ\na あ 1\n"
- u"bb いいい 222\nc ¡¡ 33333\n"
+ expected = (u" b あああああ\n"
+ u"a あ 1\n"
+ u"bb いいい 222\n"
+ u"c ¡¡ 33333\n"
u"¡¡¡ ええええええ 4")
self.assertEqual(_rep(df), expected)
@@ -753,7 +788,8 @@ def test_truncate_with_different_dtypes(self):
# 11594
import datetime
s = Series([datetime.datetime(2012, 1, 1)] * 10 +
- [datetime.datetime(1012, 1, 2)] + [datetime.datetime(2012, 1, 3)] * 10)
+ [datetime.datetime(1012, 1, 2)] + [
+ datetime.datetime(2012, 1, 3)] * 10)
with pd.option_context('display.max_rows', 8):
result = str(s)
@@ -762,7 +798,8 @@ def test_truncate_with_different_dtypes(self):
# 12045
df = DataFrame({'text': ['some words'] + [None] * 9})
- with pd.option_context('display.max_rows', 8, 'display.max_columns', 3):
+ with pd.option_context('display.max_rows', 8,
+ 'display.max_columns', 3):
result = str(df)
self.assertTrue('None' in result)
self.assertFalse('NaN' in result)
@@ -771,7 +808,8 @@ def test_datetimelike_frame(self):
# GH 12211
df = DataFrame(
- {'date': [pd.Timestamp('20130101').tz_localize('UTC')] + [pd.NaT] * 5})
+ {'date': [pd.Timestamp('20130101').tz_localize('UTC')] +
+ [pd.NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
@@ -1019,21 +1057,24 @@ def test_index_with_nan(self):
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(
- ' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
+ ' value\nid1 id2 id3 \n'
+ '1a3 NaN 78d 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
# index
y = df.set_index('id2')
result = y.to_string()
expected = u(
- ' id1 id3 value\nid2 \nNaN 1a3 78d 123\nd67 9h4 79d 64')
+ ' id1 id3 value\nid2 \n'
+ 'NaN 1a3 78d 123\nd67 9h4 79d 64')
self.assertEqual(result, expected)
# with append (this failed in 0.12)
y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
result = y.to_string()
expected = u(
- ' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
+ ' value\nid1 id2 id3 \n'
+ '1a3 NaN 78d 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
# all-nan in mi
@@ -1042,7 +1083,8 @@ def test_index_with_nan(self):
y = df2.set_index('id2')
result = y.to_string()
expected = u(
- ' id1 id3 value\nid2 \nNaN 1a3 78d 123\nNaN 9h4 79d 64')
+ ' id1 id3 value\nid2 \n'
+ 'NaN 1a3 78d 123\nNaN 9h4 79d 64')
self.assertEqual(result, expected)
# partial nan in mi
@@ -1051,7 +1093,8 @@ def test_index_with_nan(self):
y = df2.set_index(['id2', 'id3'])
result = y.to_string()
expected = u(
- ' id1 value\nid2 id3 \nNaN 78d 1a3 123\n 79d 9h4 64')
+ ' id1 value\nid2 id3 \n'
+ 'NaN 78d 1a3 123\n 79d 9h4 64')
self.assertEqual(result, expected)
df = DataFrame({'id1': {0: np.nan,
@@ -1066,7 +1109,8 @@ def test_index_with_nan(self):
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(
- ' value\nid1 id2 id3 \nNaN NaN NaN 123\n9h4 d67 79d 64')
+ ' value\nid1 id2 id3 \n'
+ 'NaN NaN NaN 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
def test_to_string(self):
@@ -1660,8 +1704,8 @@ def test_east_asian_unicode_series(self):
if PY3:
_rep = repr
else:
- _rep = unicode
- # not alighned properly because of east asian width
+ _rep = unicode # noqa
+ # not aligned properly because of east asian width
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
@@ -1686,7 +1730,8 @@ def test_east_asian_unicode_series(self):
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
- index=[u'ああ', u'いいいい', u'う', u'えええ'], name=u'おおおおおおお')
+ index=[u'ああ', u'いいいい', u'う', u'えええ'],
+ name=u'おおおおおおお')
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
@@ -1695,7 +1740,9 @@ def test_east_asian_unicode_series(self):
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
- expected = (u"あ いい 1\nう え 22\nおおお かかかか 3333\n"
+ expected = (u"あ いい 1\n"
+ u"う え 22\n"
+ u"おおお かかかか 3333\n"
u"き くく 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
@@ -1708,14 +1755,16 @@ def test_east_asian_unicode_series(self):
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
- expected = (u"1 1\nAB 22\n"
- u"2011-01-01 00:00:00 3333\nあああ 44444\ndtype: int64"
- )
+ expected = (u"1 1\n"
+ u"AB 22\n"
+ u"2011-01-01 00:00:00 3333\n"
+ u"あああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# truncate
with option_context('display.max_rows', 3):
- s = Series([u'あ', u'いい', u'ううう', u'ええええ'], name=u'おおおおおおお')
+ s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
+ name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\nName: おおおおおおお, dtype: object")
@@ -1746,23 +1795,32 @@ def test_east_asian_unicode_series(self):
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
- expected = (u"ああ あ\nいいいい いい\nう ううう\n"
+ expected = (u"ああ あ\n"
+ u"いいいい いい\n"
+ u"う ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
- index=[u'ああ', u'いいいい', u'う', u'えええ'], name=u'おおおおおおお')
- expected = (u"ああ あ\nいいいい いい\nう ううう\n"
- u"えええ ええええ\nName: おおおおおおお, dtype: object")
+ index=[u'ああ', u'いいいい', u'う', u'えええ'],
+ name=u'おおおおおおお')
+ expected = (u"ああ あ\n"
+ u"いいいい いい\n"
+ u"う ううう\n"
+ u"えええ ええええ\n"
+ u"Name: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), (
u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
- expected = (u"あ いい 1\nう え 22\nおおお かかかか 3333\n"
- u"き くく 44444\ndtype: int64")
+ expected = (u"あ いい 1\n"
+ u"う え 22\n"
+ u"おおお かかかか 3333\n"
+ u"き くく 44444\n"
+ u"dtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, shorter than unicode repr
@@ -1774,27 +1832,33 @@ def test_east_asian_unicode_series(self):
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
- expected = (u"1 1\nAB 22\n"
- u"2011-01-01 00:00:00 3333\nあああ 44444\ndtype: int64"
- )
+ expected = (u"1 1\n"
+ u"AB 22\n"
+ u"2011-01-01 00:00:00 3333\n"
+ u"あああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# truncate
with option_context('display.max_rows', 3):
- s = Series([u'あ', u'いい', u'ううう', u'ええええ'], name=u'おおおおおおお')
+ s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
+ name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
- expected = (u"ああ あ\n ... \n"
- u"えええ ええええ\nName: おおおおおおお, dtype: object")
+ expected = (u"ああ あ\n"
+ u" ... \n"
+ u"えええ ええええ\n"
+ u"Name: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# ambiguous unicode
s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'],
index=[u'ああ', u'¡¡¡¡いい', u'¡¡', u'えええ'])
- expected = (u"ああ ¡¡\n¡¡¡¡いい い¡¡\n¡¡ ううう\n"
+ expected = (u"ああ ¡¡\n"
+ u"¡¡¡¡いい い¡¡\n"
+ u"¡¡ ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
@@ -2099,15 +2163,48 @@ def test_output_significant_digits(self):
expected_output = {
(0, 6):
- ' col1\n0 9.999000e-08\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07',
+ ' col1\n'
+ '0 9.999000e-08\n'
+ '1 1.000000e-07\n'
+ '2 1.000100e-07\n'
+ '3 2.000000e-07\n'
+ '4 4.999000e-07\n'
+ '5 5.000000e-07',
(1, 6):
- ' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07',
+ ' col1\n'
+ '1 1.000000e-07\n'
+ '2 1.000100e-07\n'
+ '3 2.000000e-07\n'
+ '4 4.999000e-07\n'
+ '5 5.000000e-07',
(1, 8):
- ' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07\n6 5.000100e-07\n7 6.000000e-07',
+ ' col1\n'
+ '1 1.000000e-07\n'
+ '2 1.000100e-07\n'
+ '3 2.000000e-07\n'
+ '4 4.999000e-07\n'
+ '5 5.000000e-07\n'
+ '6 5.000100e-07\n'
+ '7 6.000000e-07',
(8, 16):
- ' col1\n8 9.999000e-07\n9 1.000000e-06\n10 1.000100e-06\n11 2.000000e-06\n12 4.999000e-06\n13 5.000000e-06\n14 5.000100e-06\n15 6.000000e-06',
+ ' col1\n'
+ '8 9.999000e-07\n'
+ '9 1.000000e-06\n'
+ '10 1.000100e-06\n'
+ '11 2.000000e-06\n'
+ '12 4.999000e-06\n'
+ '13 5.000000e-06\n'
+ '14 5.000100e-06\n'
+ '15 6.000000e-06',
(9, 16):
- ' col1\n9 0.000001\n10 0.000001\n11 0.000002\n12 0.000005\n13 0.000005\n14 0.000005\n15 0.000006'
+ ' col1\n'
+ '9 0.000001\n'
+ '10 0.000001\n'
+ '11 0.000002\n'
+ '12 0.000005\n'
+ '13 0.000005\n'
+ '14 0.000005\n'
+ '15 0.000006'
}
for (start, stop), v in expected_output.items():
diff --git a/pandas/util/clipboard/exceptions.py b/pandas/util/clipboard/exceptions.py
index f42d263a02993..413518e53660a 100644
--- a/pandas/util/clipboard/exceptions.py
+++ b/pandas/util/clipboard/exceptions.py
@@ -1,4 +1,3 @@
-# flake8: noqa
import ctypes
| Just some minor house-cleaning to cut down on the number of search results found <a href="https://github.com/pandas-dev/pandas/search?utf8=%E2%9C%93&q=flake8%3A+noqa&type=">here</a>. | https://api.github.com/repos/pandas-dev/pandas/pulls/15842 | 2017-03-30T00:26:02Z | 2017-03-30T11:53:47Z | null | 2017-03-31T08:31:58Z |
DOC: update io.rst | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e72224c6fa1fe..90167e7c6183f 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -29,36 +29,26 @@ IO Tools (Text, CSV, HDF5, ...)
===============================
The pandas I/O API is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas``
-object.
-
- * :ref:`read_csv<io.read_csv_table>`
- * :ref:`read_excel<io.excel_reader>`
- * :ref:`read_hdf<io.hdf5>`
- * :ref:`read_feather<io.feather>`
- * :ref:`read_sql<io.sql>`
- * :ref:`read_json<io.json_reader>`
- * :ref:`read_msgpack<io.msgpack>`
- * :ref:`read_html<io.read_html>`
- * :ref:`read_gbq<io.bigquery>`
- * :ref:`read_stata<io.stata_reader>`
- * :ref:`read_sas<io.sas_reader>`
- * :ref:`read_clipboard<io.clipboard>`
- * :ref:`read_pickle<io.pickle>`
-
-The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()``
-
- * :ref:`to_csv<io.store_in_csv>`
- * :ref:`to_excel<io.excel_writer>`
- * :ref:`to_hdf<io.hdf5>`
- * :ref:`to_feather<io.feather>`
- * :ref:`to_sql<io.sql>`
- * :ref:`to_json<io.json_writer>`
- * :ref:`to_msgpack<io.msgpack>`
- * :ref:`to_html<io.html>`
- * :ref:`to_gbq<io.bigquery>`
- * :ref:`to_stata<io.stata_writer>`
- * :ref:`to_clipboard<io.clipboard>`
- * :ref:`to_pickle<io.pickle>`
+object. The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()``
+
+.. csv-table::
+ :header: "Format Type", "Data Description", "Reader", "Writer"
+ :widths: 30, 100, 60, 60
+ :delim: ;
+
+ text;`CSV <https://en.wikipedia.org/wiki/Comma-separated_values>`__;:ref:`read_csv<io.read_csv_table>`;:ref:`to_csv<io.store_in_csv>`
+ text;`JSON <http://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>`
+ text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>`
+ text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>`
+ binary;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>`
+ binary;`HDF5 Format <https://support.hdfgroup.org/HDF5/whatishdf5.html>`__;:ref:`read_hdf<io.hdf5>`;:ref:`to_hdf<io.hdf5>`
+ binary;`Feather Format <https://github.com/wesm/feather>`__;:ref:`read_feather<io.feather>`;:ref:`to_feather<io.feather>`
+ binary;`Msgpack <http://msgpack.org/index.html>`__;:ref:`read_msgpack<io.msgpack>`;:ref:`to_msgpack<io.msgpack>`
+ binary;`Stata <https://en.wikipedia.org/wiki/Stata>`__;:ref:`read_stata<io.stata_reader>`;:ref:`to_stata<io.stata_writer>`
+ binary;`SAS <https://en.wikipedia.org/wiki/SAS_(software)>`__;:ref:`read_sas<io.sas_reader>`;
+ binary;`Python Pickle Format <https://docs.python.org/3/library/pickle.html>`__;:ref:`read_pickle<io.pickle>`;:ref:`to_pickle<io.pickle>`
+ SQL;`SQL <https://en.wikipedia.org/wiki/SQL>`__;:ref:`read_sql<io.sql>`;:ref:`to_sql<io.sql>`
+ SQL;`Google Big Query <https://en.wikipedia.org/wiki/BigQuery>`__;:ref:`read_gbq<io.bigquery>`;:ref:`to_gbq<io.bigquery>`
:ref:`Here <io.perf>` is an informal performance comparison for some of these IO methods.
| 
redo of the top of the io.rst page. was getting a bit long / disorganized. I added links for the various formats as well. *could* add another column about use (e.g. general, portable, flexible, etc), but maybe getting much
| https://api.github.com/repos/pandas-dev/pandas/pulls/15840 | 2017-03-29T19:11:02Z | 2017-03-29T22:15:56Z | null | 2017-03-29T22:44:48Z |
DOC: expanded signatures for reindex, rename and reindex_axis | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6b5e8e0799421..508893bdb70ab 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2819,9 +2819,11 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
- def reindex(self, index=None, columns=None, **kwargs):
+ def reindex(self, index=None, columns=None, method=None, level=None, copy=True,
+ limit=None, tolerance=None, fill_value=np.nan):
return super(DataFrame, self).reindex(index=index, columns=columns,
- **kwargs)
+ method=method, level=level, copy=copy,
+ limit=limit, tolerance=tolerance, fill_value=fill_value)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
@@ -2832,9 +2834,9 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=limit, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
- def rename(self, index=None, columns=None, **kwargs):
+ def rename(self, index=None, columns=None, copy=True, inplace=False):
return super(DataFrame, self).rename(index=index, columns=columns,
- **kwargs)
+ copy=copy, inplace=inplace)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 50ddc24ac9656..31158685671f6 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1160,22 +1160,25 @@ def _wrap_result(self, result, axis):
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
- def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
+ def reindex(self, items=None, major_axis=None, minor_axis=None, method=None,
+ level=None, copy=True, limit=None, tolerance=None, fill_value=np.nan):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
- minor_axis=minor_axis, **kwargs)
+ minor_axis=minor_axis, method=None,
+ level=level, copy=copy, limit=limit,
+ tolerance=tolerance, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
- def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
+ def rename(self, items=None, major_axis=None, minor_axis=None, copy=True, inplace=False):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
- minor_axis=minor_axis, **kwargs)
+ minor_axis=minor_axis, copy=copy, inplace=inplace)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0913592e055cd..042ae5bb12b30 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2301,7 +2301,7 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
broadcast_axis=broadcast_axis)
@Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)
- def rename(self, index=None, **kwargs):
+ def rename(self, index=None, copy=True, inplace=False):
kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
'inplace')
@@ -2309,11 +2309,14 @@ def rename(self, index=None, **kwargs):
not is_dict_like(index))
if non_mapping:
return self._set_name(index, inplace=kwargs.get('inplace'))
- return super(Series, self).rename(index=index, **kwargs)
+ return super(Series, self).rename(index=index, copy=copy, inplace=inplace)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
- def reindex(self, index=None, **kwargs):
- return super(Series, self).reindex(index=index, **kwargs)
+ def reindex(self, index=None, method=None, level=None, copy=True,
+ limit=None, tolerance=None, fill_value=np.nan):
+ return super(Series, self).reindex(index=index, method=method, level=level,
+ copy=copy, limit=limit,
+ tolerance=tolerance, fill_value=fill_value)
@Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
@@ -2327,11 +2330,13 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
def shift(self, periods=1, freq=None, axis=0):
return super(Series, self).shift(periods=periods, freq=freq, axis=axis)
- def reindex_axis(self, labels, axis=0, **kwargs):
+ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan):
""" for compatibility with higher dims """
if axis != 0:
raise ValueError("cannot reindex series on non-zero axis!")
- return self.reindex(index=labels, **kwargs)
+ return self.reindex(index=labels, axis=axis,
+ method=method, level=level, copy=copy,
+ limit=limit, fill_value=fill_value)
def memory_usage(self, index=True, deep=False):
"""Memory usage of the Series
| xref #15609
I have expanded the signatures for _reindex_, _rename_, _reindex_axis_ in `series.py`, `frame.py` and `panel.py`
| https://api.github.com/repos/pandas-dev/pandas/pulls/15839 | 2017-03-29T17:47:14Z | 2017-05-13T21:37:55Z | null | 2017-05-13T21:37:55Z |
ENH: add to/from_parquet with pyarrow & fastparquet | diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index dcc1656ce3dd7..df6969c7cc659 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -153,6 +153,7 @@ fi
echo
echo "[removing installed pandas]"
conda remove pandas -y --force
+pip uninstall -y pandas
if [ "$BUILD_TEST" ]; then
diff --git a/ci/requirements-2.7.sh b/ci/requirements-2.7.sh
index 5b20617f55759..e3bd5e46026c5 100644
--- a/ci/requirements-2.7.sh
+++ b/ci/requirements-2.7.sh
@@ -4,4 +4,4 @@ source activate pandas
echo "install 27"
-conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1
+conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1 fastparquet
diff --git a/ci/requirements-3.5.sh b/ci/requirements-3.5.sh
index 3b8fe793a413d..33db9c28c78a9 100644
--- a/ci/requirements-3.5.sh
+++ b/ci/requirements-3.5.sh
@@ -4,8 +4,8 @@ source activate pandas
echo "install 35"
-conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1
-
# pip install python-dateutil to get latest
conda remove -n pandas python-dateutil --force
pip install python-dateutil
+
+conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1
diff --git a/ci/requirements-3.5_OSX.sh b/ci/requirements-3.5_OSX.sh
index 39ea1a0cf67bf..c2978b175968c 100644
--- a/ci/requirements-3.5_OSX.sh
+++ b/ci/requirements-3.5_OSX.sh
@@ -4,4 +4,4 @@ source activate pandas
echo "install 35_OSX"
-conda install -n pandas -c conda-forge feather-format==0.3.1
+conda install -n pandas -c conda-forge feather-format==0.3.1 fastparquet
diff --git a/ci/requirements-3.6.pip b/ci/requirements-3.6.pip
index e69de29bb2d1d..753a60d6c119a 100644
--- a/ci/requirements-3.6.pip
+++ b/ci/requirements-3.6.pip
@@ -0,0 +1 @@
+brotlipy
diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run
index 00db27d3f2704..822144a80bc9a 100644
--- a/ci/requirements-3.6.run
+++ b/ci/requirements-3.6.run
@@ -17,6 +17,8 @@ pymysql
feather-format
pyarrow
psycopg2
+python-snappy
+fastparquet
beautifulsoup4
s3fs
xarray
diff --git a/ci/requirements-3.6_DOC.sh b/ci/requirements-3.6_DOC.sh
index 8c10a794a13b9..aec0f62148622 100644
--- a/ci/requirements-3.6_DOC.sh
+++ b/ci/requirements-3.6_DOC.sh
@@ -6,6 +6,6 @@ echo "[install DOC_BUILD deps]"
pip install pandas-gbq
-conda install -n pandas -c conda-forge feather-format pyarrow nbsphinx pandoc
+conda install -n pandas -c conda-forge feather-format pyarrow nbsphinx pandoc fastparquet
conda install -n pandas -c r r rpy2 --yes
diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run
index 22aae8944d731..226caa458f6ee 100644
--- a/ci/requirements-3.6_WIN.run
+++ b/ci/requirements-3.6_WIN.run
@@ -13,3 +13,5 @@ numexpr
pytables
matplotlib
blosc
+fastparquet
+pyarrow
diff --git a/doc/source/install.rst b/doc/source/install.rst
index c185a7cf4b875..01a01b1b58b4c 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -237,6 +237,7 @@ Optional Dependencies
* `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended.
* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended.
* `Feather Format <https://github.com/wesm/feather>`__: necessary for feather-based storage, version 0.3.1 or higher.
+* ``Apache Parquet Format``, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.4.1) or `fastparquet <https://fastparquet.readthedocs.io/en/latest/necessary>`__ (>= 0.0.6) for parquet-based storage. The `snappy <https://pypi.python.org/pypi/python-snappy>`__ and `brotli <https://pypi.python.org/pypi/brotlipy>`__ are available for compression support.
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. Some common drivers are:
* `psycopg2 <http://initd.org/psycopg/>`__: for PostgreSQL
diff --git a/doc/source/io.rst b/doc/source/io.rst
index bf68a0cae1d27..0b97264abfcd7 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -43,6 +43,7 @@ object. The corresponding ``writer`` functions are object methods that are acces
binary;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>`
binary;`HDF5 Format <https://support.hdfgroup.org/HDF5/whatishdf5.html>`__;:ref:`read_hdf<io.hdf5>`;:ref:`to_hdf<io.hdf5>`
binary;`Feather Format <https://github.com/wesm/feather>`__;:ref:`read_feather<io.feather>`;:ref:`to_feather<io.feather>`
+ binary;`Parquet Format <https://parquet.apache.org/>`__;:ref:`read_parquet<io.parquet>`;:ref:`to_parquet<io.parquet>`
binary;`Msgpack <http://msgpack.org/index.html>`__;:ref:`read_msgpack<io.msgpack>`;:ref:`to_msgpack<io.msgpack>`
binary;`Stata <https://en.wikipedia.org/wiki/Stata>`__;:ref:`read_stata<io.stata_reader>`;:ref:`to_stata<io.stata_writer>`
binary;`SAS <https://en.wikipedia.org/wiki/SAS_(software)>`__;:ref:`read_sas<io.sas_reader>`;
@@ -209,7 +210,7 @@ buffer_lines : int, default None
.. deprecated:: 0.19.0
Argument removed because its value is not respected by the parser
-
+
compact_ints : boolean, default False
.. deprecated:: 0.19.0
@@ -4087,7 +4088,7 @@ control compression: ``complevel`` and ``complib``.
``complevel`` specifies if and how hard data is to be compressed.
``complevel=0`` and ``complevel=None`` disables
compression and ``0<complevel<10`` enables compression.
-
+
``complib`` specifies which compression library to use. If nothing is
specified the default library ``zlib`` is used. A
compression library usually optimizes for either good
@@ -4102,9 +4103,9 @@ control compression: ``complevel`` and ``complib``.
- `blosc <http://www.blosc.org/>`_: Fast compression and decompression.
.. versionadded:: 0.20.2
-
+
Support for alternative blosc compressors:
-
+
- `blosc:blosclz <http://www.blosc.org/>`_ This is the
default compressor for ``blosc``
- `blosc:lz4
@@ -4545,6 +4546,79 @@ Read from a feather file.
import os
os.remove('example.feather')
+
+.. _io.parquet:
+
+Parquet
+-------
+
+.. versionadded:: 0.21.0
+
+`Parquet <https://parquet.apache.org/`__ provides a partitioned binary columnar serialization for data frames. It is designed to
+make reading and writing data frames efficient, and to make sharing data across data analysis
+languages easy. Parquet can use a variety of compression techniques to shrink the file size as much as possible
+while still maintaining good read performance.
+
+Parquet is designed to faithfully serialize and de-serialize ``DataFrame`` s, supporting all of the pandas
+dtypes, including extension dtypes such as datetime with tz.
+
+Several caveats.
+
+- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an
+ error if a non-default one is provided. You can simply ``.reset_index(drop=True)`` in order to store the index.
+- Duplicate column names and non-string columns names are not supported
+- Categorical dtypes are currently not-supported (for ``pyarrow``).
+- Non supported types include ``Period`` and actual python object types. These will raise a helpful error message
+ on an attempt at serialization.
+
+You can specifiy an ``engine`` to direct the serialization. This can be one of ``pyarrow``, or ``fastparquet``, or ``auto``.
+If the engine is NOT specified, then the ``pd.options.io.parquet.engine`` option is checked; if this is also ``auto``, then
+then ``pyarrow`` is tried, and falling back to ``fastparquet``.
+
+See the documentation for `pyarrow <http://arrow.apache.org/docs/python/`__ and `fastparquet <https://fastparquet.readthedocs.io/en/latest/>`__
+
+.. note::
+
+ These engines are very similar and should read/write nearly identical parquet format files.
+ These libraries differ by having different underlying dependencies (``fastparquet`` by using ``numba``, while ``pyarrow`` uses a c-library).
+
+.. ipython:: python
+
+ df = pd.DataFrame({'a': list('abc'),
+ 'b': list(range(1, 4)),
+ 'c': np.arange(3, 6).astype('u1'),
+ 'd': np.arange(4.0, 7.0, dtype='float64'),
+ 'e': [True, False, True],
+ 'f': pd.date_range('20130101', periods=3),
+ 'g': pd.date_range('20130101', periods=3, tz='US/Eastern'),
+ 'h': pd.date_range('20130101', periods=3, freq='ns')})
+
+ df
+ df.dtypes
+
+Write to a parquet file.
+
+.. ipython:: python
+
+ df.to_parquet('example_pa.parquet', engine='pyarrow')
+ df.to_parquet('example_fp.parquet', engine='fastparquet')
+
+Read from a parquet file.
+
+.. ipython:: python
+
+ result = pd.read_parquet('example_pa.parquet', engine='pyarrow')
+ result = pd.read_parquet('example_fp.parquet', engine='fastparquet')
+
+ result.dtypes
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('example_pa.parquet')
+ os.remove('example_fp.parquet')
+
.. _io.sql:
SQL Queries
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 83b08acac5720..51d02bc89692a 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -414,6 +414,9 @@ io.hdf.default_format None default format writing format,
'table'
io.hdf.dropna_table True drop ALL nan rows when appending
to a table
+io.parquet.engine None The engine to use as a default for
+ parquet reading and writing. If None
+ then try 'pyarrow' and 'fastparquet'
mode.chained_assignment warn Raise an exception, warn, or no
action if trying to use chained
assignment, The default is warn
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 589e88dc4aaf6..eb14b09cdb67c 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -78,6 +78,7 @@ Other Enhancements
- :func:`DataFrame.select_dtypes` now accepts scalar values for include/exclude as well as list-like. (:issue:`16855`)
- :func:`date_range` now accepts 'YS' in addition to 'AS' as an alias for start of year (:issue:`9313`)
- :func:`date_range` now accepts 'Y' in addition to 'A' as an alias for end of year (:issue:`9313`)
+- Integration with Apache Parquet, including a new top-level ``pd.read_parquet()`` and ``DataFrame.to_parquet()`` method, see :ref:`here <io.parquet>`.
.. _whatsnew_0210.api_breaking:
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 875ab8249f953..ea5c213dbe057 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -465,3 +465,15 @@ def _register_xlsx(engine, other):
except ImportError:
# fallback
_register_xlsx('openpyxl', 'xlsxwriter')
+
+# Set up the io.parquet specific configuration.
+parquet_engine_doc = """
+: string
+ The default parquet reader/writer engine. Available options:
+ 'auto', 'pyarrow', 'fastparquet', the default is 'auto'
+"""
+
+with cf.config_prefix('io.parquet'):
+ cf.register_option(
+ 'engine', 'auto', parquet_engine_doc,
+ validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet']))
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e546e96f253c7..9d63bd2e120aa 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1598,6 +1598,30 @@ def to_feather(self, fname):
from pandas.io.feather_format import to_feather
to_feather(self, fname)
+ def to_parquet(self, fname, engine='auto', compression='snappy',
+ **kwargs):
+ """
+ Write a DataFrame to the binary parquet format.
+
+ .. versionadded:: 0.21.0
+
+ Parameters
+ ----------
+ fname : str
+ string file path
+ engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
+ Parquet reader library to use. If 'auto', then the option
+ 'io.parquet.engine' is used. If 'auto', then the first
+ library to be installed is used.
+ compression : str, optional, default 'snappy'
+ compression method, includes {'gzip', 'snappy', 'brotli'}
+ kwargs
+ Additional keyword arguments passed to the engine
+ """
+ from pandas.io.parquet import to_parquet
+ to_parquet(self, fname, engine,
+ compression=compression, **kwargs)
+
@Substitution(header='Write out column names. If a list of string is given, \
it is assumed to be aliases for the column names')
@Appender(fmt.docstring_to_string, indents=1)
diff --git a/pandas/io/api.py b/pandas/io/api.py
index a4a25b78942db..f542a8176dce7 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -13,6 +13,7 @@
from pandas.io.sql import read_sql, read_sql_table, read_sql_query
from pandas.io.sas import read_sas
from pandas.io.feather_format import read_feather
+from pandas.io.parquet import read_parquet
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 86d58caa5e816..87a4931421d7d 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -19,7 +19,7 @@ def _try_import():
"you can install via conda\n"
"conda install feather-format -c conda-forge\n"
"or via pip\n"
- "pip install feather-format\n")
+ "pip install -U feather-format\n")
try:
feather.__version__ >= LooseVersion('0.3.1')
@@ -29,7 +29,7 @@ def _try_import():
"you can install via conda\n"
"conda install feather-format -c conda-forge"
"or via pip\n"
- "pip install feather-format\n")
+ "pip install -U feather-format\n")
return feather
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
new file mode 100644
index 0000000000000..0a4426b55b323
--- /dev/null
+++ b/pandas/io/parquet.py
@@ -0,0 +1,194 @@
+""" parquet compat """
+
+from warnings import catch_warnings
+from distutils.version import LooseVersion
+from pandas import DataFrame, RangeIndex, Int64Index, get_option
+from pandas.compat import range
+from pandas.io.common import get_filepath_or_buffer
+
+
+def get_engine(engine):
+ """ return our implementation """
+
+ if engine is 'auto':
+ engine = get_option('io.parquet.engine')
+
+ if engine is 'auto':
+ # try engines in this order
+ try:
+ return PyArrowImpl()
+ except ImportError:
+ pass
+
+ try:
+ return FastParquetImpl()
+ except ImportError:
+ pass
+
+ if engine not in ['pyarrow', 'fastparquet']:
+ raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
+
+ if engine == 'pyarrow':
+ return PyArrowImpl()
+ elif engine == 'fastparquet':
+ return FastParquetImpl()
+
+
+class PyArrowImpl(object):
+
+ def __init__(self):
+ # since pandas is a dependency of pyarrow
+ # we need to import on first use
+
+ try:
+ import pyarrow
+ import pyarrow.parquet
+ except ImportError:
+ raise ImportError("pyarrow is required for parquet support\n\n"
+ "you can install via conda\n"
+ "conda install pyarrow -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U pyarrow\n")
+
+ if LooseVersion(pyarrow.__version__) < '0.4.1':
+ raise ImportError("pyarrow >= 0.4.1 is required for parquet"
+ "support\n\n"
+ "you can install via conda\n"
+ "conda install pyarrow -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U pyarrow\n")
+
+ self.api = pyarrow
+
+ def write(self, df, path, compression='snappy', **kwargs):
+ path, _, _ = get_filepath_or_buffer(path)
+ table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
+ self.api.parquet.write_table(
+ table, path, compression=compression, **kwargs)
+
+ def read(self, path):
+ path, _, _ = get_filepath_or_buffer(path)
+ return self.api.parquet.read_table(path).to_pandas()
+
+
+class FastParquetImpl(object):
+
+ def __init__(self):
+ # since pandas is a dependency of fastparquet
+ # we need to import on first use
+
+ try:
+ import fastparquet
+ except ImportError:
+ raise ImportError("fastparquet is required for parquet support\n\n"
+ "you can install via conda\n"
+ "conda install fastparquet -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U fastparquet")
+
+ if LooseVersion(fastparquet.__version__) < '0.1.0':
+ raise ImportError("fastparquet >= 0.1.0 is required for parquet "
+ "support\n\n"
+ "you can install via conda\n"
+ "conda install fastparquet -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U fastparquet")
+
+ self.api = fastparquet
+
+ def write(self, df, path, compression='snappy', **kwargs):
+ # thriftpy/protocol/compact.py:339:
+ # DeprecationWarning: tostring() is deprecated.
+ # Use tobytes() instead.
+ path, _, _ = get_filepath_or_buffer(path)
+ with catch_warnings(record=True):
+ self.api.write(path, df,
+ compression=compression, **kwargs)
+
+ def read(self, path):
+ path, _, _ = get_filepath_or_buffer(path)
+ return self.api.ParquetFile(path).to_pandas()
+
+
+def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
+ """
+ Write a DataFrame to the parquet format.
+
+ Parameters
+ ----------
+ df : DataFrame
+ path : string
+ File path
+ engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
+ Parquet reader library to use. If 'auto', then the option
+ 'io.parquet.engine' is used. If 'auto', then the first
+ library to be installed is used.
+ compression : str, optional, default 'snappy'
+ compression method, includes {'gzip', 'snappy', 'brotli'}
+ kwargs
+ Additional keyword arguments passed to the engine
+ """
+
+ impl = get_engine(engine)
+
+ if not isinstance(df, DataFrame):
+ raise ValueError("to_parquet only support IO with DataFrames")
+
+ valid_types = {'string', 'unicode'}
+
+ # validate index
+ # --------------
+
+ # validate that we have only a default index
+ # raise on anything else as we don't serialize the index
+
+ if not isinstance(df.index, Int64Index):
+ raise ValueError("parquet does not support serializing {} "
+ "for the index; you can .reset_index()"
+ "to make the index into column(s)".format(
+ type(df.index)))
+
+ if not df.index.equals(RangeIndex.from_range(range(len(df)))):
+ raise ValueError("parquet does not support serializing a "
+ "non-default index for the index; you "
+ "can .reset_index() to make the index "
+ "into column(s)")
+
+ if df.index.name is not None:
+ raise ValueError("parquet does not serialize index meta-data on a "
+ "default index")
+
+ # validate columns
+ # ----------------
+
+ # must have value column names (strings only)
+ if df.columns.inferred_type not in valid_types:
+ raise ValueError("parquet must have string column names")
+
+ return impl.write(df, path, compression=compression)
+
+
+def read_parquet(path, engine='auto', **kwargs):
+ """
+ Load a parquet object from the file path, returning a DataFrame.
+
+ .. versionadded 0.21.0
+
+ Parameters
+ ----------
+ path : string
+ File path
+ engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
+ Parquet reader library to use. If 'auto', then the option
+ 'io.parquet.engine' is used. If 'auto', then the first
+ library to be installed is used.
+ kwargs are passed to the engine
+
+ Returns
+ -------
+ DataFrame
+
+ """
+
+ impl = get_engine(engine)
+ return impl.read(path)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 433ed7e517b1c..09cccd54b74f8 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -82,7 +82,7 @@ class TestPDApi(Base):
'read_gbq', 'read_hdf', 'read_html', 'read_json',
'read_msgpack', 'read_pickle', 'read_sas', 'read_sql',
'read_sql_query', 'read_sql_table', 'read_stata',
- 'read_table', 'read_feather']
+ 'read_table', 'read_feather', 'read_parquet']
# top-level to_* funcs
funcs_to = ['to_datetime', 'to_msgpack',
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
new file mode 100644
index 0000000000000..ff0935c7dcc6f
--- /dev/null
+++ b/pandas/tests/io/test_parquet.py
@@ -0,0 +1,374 @@
+""" test parquet compat """
+
+import pytest
+import datetime
+from warnings import catch_warnings
+
+import numpy as np
+import pandas as pd
+from pandas.compat import PY3, is_platform_windows
+from pandas.io.parquet import (to_parquet, read_parquet, get_engine,
+ PyArrowImpl, FastParquetImpl)
+from pandas.util import testing as tm
+
+try:
+ import pyarrow # noqa
+ _HAVE_PYARROW = True
+except ImportError:
+ _HAVE_PYARROW = False
+
+try:
+ import fastparquet # noqa
+ _HAVE_FASTPARQUET = True
+except ImportError:
+ _HAVE_FASTPARQUET = False
+
+
+# setup engines & skips
+@pytest.fixture(params=[
+ pytest.mark.skipif(not _HAVE_FASTPARQUET,
+ reason='fastparquet is not installed')('fastparquet'),
+ pytest.mark.skipif(not _HAVE_PYARROW,
+ reason='pyarrow is not installed')('pyarrow')])
+def engine(request):
+ return request.param
+
+
+@pytest.fixture
+def pa():
+ if not _HAVE_PYARROW:
+ pytest.skip("pyarrow is not installed")
+ if is_platform_windows():
+ pytest.skip("pyarrow-parquet not building on windows")
+ return 'pyarrow'
+
+
+@pytest.fixture
+def fp():
+ if not _HAVE_FASTPARQUET:
+ pytest.skip("fastparquet is not installed")
+ return 'fastparquet'
+
+
+@pytest.fixture
+def df_compat():
+ return pd.DataFrame({'A': [1, 2, 3], 'B': 'foo'})
+
+
+@pytest.fixture
+def df_cross_compat():
+ df = pd.DataFrame({'a': list('abc'),
+ 'b': list(range(1, 4)),
+ 'c': np.arange(3, 6).astype('u1'),
+ 'd': np.arange(4.0, 7.0, dtype='float64'),
+ 'e': [True, False, True],
+ 'f': pd.date_range('20130101', periods=3),
+ 'g': pd.date_range('20130101', periods=3,
+ tz='US/Eastern'),
+ 'h': pd.date_range('20130101', periods=3, freq='ns')})
+ return df
+
+
+def test_invalid_engine(df_compat):
+
+ with pytest.raises(ValueError):
+ df_compat.to_parquet('foo', 'bar')
+
+
+def test_options_py(df_compat, pa):
+ # use the set option
+
+ df = df_compat
+ with tm.ensure_clean() as path:
+
+ with pd.option_context('io.parquet.engine', 'pyarrow'):
+ df.to_parquet(path)
+
+ result = read_parquet(path, compression=None)
+ tm.assert_frame_equal(result, df)
+
+
+def test_options_fp(df_compat, fp):
+ # use the set option
+
+ df = df_compat
+ with tm.ensure_clean() as path:
+
+ with pd.option_context('io.parquet.engine', 'fastparquet'):
+ df.to_parquet(path, compression=None)
+
+ result = read_parquet(path, compression=None)
+ tm.assert_frame_equal(result, df)
+
+
+def test_options_auto(df_compat, fp, pa):
+
+ df = df_compat
+ with tm.ensure_clean() as path:
+
+ with pd.option_context('io.parquet.engine', 'auto'):
+ df.to_parquet(path)
+
+ result = read_parquet(path, compression=None)
+ tm.assert_frame_equal(result, df)
+
+
+def test_options_get_engine(fp, pa):
+ assert isinstance(get_engine('pyarrow'), PyArrowImpl)
+ assert isinstance(get_engine('fastparquet'), FastParquetImpl)
+
+ with pd.option_context('io.parquet.engine', 'pyarrow'):
+ assert isinstance(get_engine('auto'), PyArrowImpl)
+ assert isinstance(get_engine('pyarrow'), PyArrowImpl)
+ assert isinstance(get_engine('fastparquet'), FastParquetImpl)
+
+ with pd.option_context('io.parquet.engine', 'fastparquet'):
+ assert isinstance(get_engine('auto'), FastParquetImpl)
+ assert isinstance(get_engine('pyarrow'), PyArrowImpl)
+ assert isinstance(get_engine('fastparquet'), FastParquetImpl)
+
+ with pd.option_context('io.parquet.engine', 'auto'):
+ assert isinstance(get_engine('auto'), PyArrowImpl)
+ assert isinstance(get_engine('pyarrow'), PyArrowImpl)
+ assert isinstance(get_engine('fastparquet'), FastParquetImpl)
+
+
+@pytest.mark.xfail(reason="fp does not ignore pa index __index_level_0__")
+def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
+ # cross-compat with differing reading/writing engines
+
+ df = df_cross_compat
+ with tm.ensure_clean() as path:
+ df.to_parquet(path, engine=pa, compression=None)
+
+ result = read_parquet(path, engine=fp, compression=None)
+ tm.assert_frame_equal(result, df)
+
+
+@pytest.mark.xfail(reason="pyarrow reading fp in some cases")
+def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
+ # cross-compat with differing reading/writing engines
+
+ df = df_cross_compat
+ with tm.ensure_clean() as path:
+ df.to_parquet(path, engine=fp, compression=None)
+
+ result = read_parquet(path, engine=pa, compression=None)
+ tm.assert_frame_equal(result, df)
+
+
+class Base(object):
+
+ def check_error_on_write(self, df, engine, exc):
+ # check that we are raising the exception
+ # on writing
+
+ with pytest.raises(exc):
+ with tm.ensure_clean() as path:
+ to_parquet(df, path, engine, compression=None)
+
+ def check_round_trip(self, df, engine, expected=None, **kwargs):
+
+ with tm.ensure_clean() as path:
+ df.to_parquet(path, engine, **kwargs)
+ result = read_parquet(path, engine)
+
+ if expected is None:
+ expected = df
+ tm.assert_frame_equal(result, expected)
+
+ # repeat
+ to_parquet(df, path, engine, **kwargs)
+ result = pd.read_parquet(path, engine)
+
+ if expected is None:
+ expected = df
+ tm.assert_frame_equal(result, expected)
+
+
+class TestBasic(Base):
+
+ def test_error(self, engine):
+
+ for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
+ np.array([1, 2, 3])]:
+ self.check_error_on_write(obj, engine, ValueError)
+
+ def test_columns_dtypes(self, engine):
+
+ df = pd.DataFrame({'string': list('abc'),
+ 'int': list(range(1, 4))})
+
+ # unicode
+ df.columns = [u'foo', u'bar']
+ self.check_round_trip(df, engine, compression=None)
+
+ def test_columns_dtypes_invalid(self, engine):
+
+ df = pd.DataFrame({'string': list('abc'),
+ 'int': list(range(1, 4))})
+
+ # numeric
+ df.columns = [0, 1]
+ self.check_error_on_write(df, engine, ValueError)
+
+ if PY3:
+ # bytes on PY3, on PY2 these are str
+ df.columns = [b'foo', b'bar']
+ self.check_error_on_write(df, engine, ValueError)
+
+ # python object
+ df.columns = [datetime.datetime(2011, 1, 1, 0, 0),
+ datetime.datetime(2011, 1, 1, 1, 1)]
+ self.check_error_on_write(df, engine, ValueError)
+
+ def test_write_with_index(self, engine):
+
+ df = pd.DataFrame({'A': [1, 2, 3]})
+ self.check_round_trip(df, engine, compression=None)
+
+ # non-default index
+ for index in [[2, 3, 4],
+ pd.date_range('20130101', periods=3),
+ list('abc'),
+ [1, 3, 4],
+ pd.MultiIndex.from_tuples([('a', 1), ('a', 2),
+ ('b', 1)]),
+ ]:
+
+ df.index = index
+ self.check_error_on_write(df, engine, ValueError)
+
+ # index with meta-data
+ df.index = [0, 1, 2]
+ df.index.name = 'foo'
+ self.check_error_on_write(df, engine, ValueError)
+
+ # column multi-index
+ df.index = [0, 1, 2]
+ df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]),
+ self.check_error_on_write(df, engine, ValueError)
+
+ @pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli'])
+ def test_compression(self, engine, compression):
+
+ if compression == 'snappy':
+ pytest.importorskip('snappy')
+
+ elif compression == 'brotli':
+ pytest.importorskip('brotli')
+
+ df = pd.DataFrame({'A': [1, 2, 3]})
+ self.check_round_trip(df, engine, compression=compression)
+
+
+class TestParquetPyArrow(Base):
+
+ def test_basic(self, pa):
+
+ df = pd.DataFrame({'string': list('abc'),
+ 'string_with_nan': ['a', np.nan, 'c'],
+ 'string_with_none': ['a', None, 'c'],
+ 'bytes': [b'foo', b'bar', b'baz'],
+ 'unicode': [u'foo', u'bar', u'baz'],
+ 'int': list(range(1, 4)),
+ 'uint': np.arange(3, 6).astype('u1'),
+ 'float': np.arange(4.0, 7.0, dtype='float64'),
+ 'float_with_nan': [2., np.nan, 3.],
+ 'bool': [True, False, True],
+ 'bool_with_none': [True, None, True],
+ 'datetime_ns': pd.date_range('20130101', periods=3),
+ 'datetime_with_nat': [pd.Timestamp('20130101'),
+ pd.NaT,
+ pd.Timestamp('20130103')]
+ })
+
+ self.check_round_trip(df, pa)
+
+ def test_duplicate_columns(self, pa):
+
+ # not currently able to handle duplicate columns
+ df = pd.DataFrame(np.arange(12).reshape(4, 3),
+ columns=list('aaa')).copy()
+ self.check_error_on_write(df, pa, ValueError)
+
+ def test_unsupported(self, pa):
+
+ # period
+ df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
+ self.check_error_on_write(df, pa, ValueError)
+
+ # categorical
+ df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
+ self.check_error_on_write(df, pa, NotImplementedError)
+
+ # timedelta
+ df = pd.DataFrame({'a': pd.timedelta_range('1 day',
+ periods=3)})
+ self.check_error_on_write(df, pa, NotImplementedError)
+
+ # mixed python objects
+ df = pd.DataFrame({'a': ['a', 1, 2.0]})
+ self.check_error_on_write(df, pa, ValueError)
+
+
+class TestParquetFastParquet(Base):
+
+ def test_basic(self, fp):
+
+ df = pd.DataFrame(
+ {'string': list('abc'),
+ 'string_with_nan': ['a', np.nan, 'c'],
+ 'string_with_none': ['a', None, 'c'],
+ 'bytes': [b'foo', b'bar', b'baz'],
+ 'unicode': [u'foo', u'bar', u'baz'],
+ 'int': list(range(1, 4)),
+ 'uint': np.arange(3, 6).astype('u1'),
+ 'float': np.arange(4.0, 7.0, dtype='float64'),
+ 'float_with_nan': [2., np.nan, 3.],
+ 'bool': [True, False, True],
+ 'datetime': pd.date_range('20130101', periods=3),
+ 'datetime_with_nat': [pd.Timestamp('20130101'),
+ pd.NaT,
+ pd.Timestamp('20130103')],
+ 'timedelta': pd.timedelta_range('1 day', periods=3),
+ })
+
+ self.check_round_trip(df, fp, compression=None)
+
+ @pytest.mark.skip(reason="not supported")
+ def test_duplicate_columns(self, fp):
+
+ # not currently able to handle duplicate columns
+ df = pd.DataFrame(np.arange(12).reshape(4, 3),
+ columns=list('aaa')).copy()
+ self.check_error_on_write(df, fp, ValueError)
+
+ def test_bool_with_none(self, fp):
+ df = pd.DataFrame({'a': [True, None, False]})
+ expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16')
+ self.check_round_trip(df, fp, expected=expected, compression=None)
+
+ def test_unsupported(self, fp):
+
+ # period
+ df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
+ self.check_error_on_write(df, fp, ValueError)
+
+ # mixed
+ df = pd.DataFrame({'a': ['a', 1, 2.0]})
+ self.check_error_on_write(df, fp, ValueError)
+
+ def test_categorical(self, fp):
+ df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
+ self.check_round_trip(df, fp, compression=None)
+
+ def test_datetime_tz(self, fp):
+ # doesn't preserve tz
+ df = pd.DataFrame({'a': pd.date_range('20130101', periods=3,
+ tz='US/Eastern')})
+
+ # warns on the coercion
+ with catch_warnings(record=True):
+ self.check_round_trip(df, fp, df.astype('datetime64[ns]'),
+ compression=None)
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 48b19b02e297e..9ecd4b10365c8 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -94,6 +94,7 @@ def show_versions(as_json=False):
("psycopg2", lambda mod: mod.__version__),
("jinja2", lambda mod: mod.__version__),
("s3fs", lambda mod: mod.__version__),
+ ("fastparquet", lambda mod: mod.__version__),
("pandas_gbq", lambda mod: mod.__version__),
("pandas_datareader", lambda mod: mod.__version__),
]
| xref https://github.com/dask/dask/issues/2127
TODO: these are fixed, waiting for release to update tests.
- [x] ``fastparquet``: [duplicate columns errors msg](https://github.com/dask/fastparquet/issues/118)
- [x] ``pyarrow 0.3``: [passing dataframe with non-string object columns](https://github.com/apache/arrow/pull/465)
This is a wrapper around ``pyarrow`` and ``fastparquet`` to provide seemless IO interop within pandas.
cc @wesm
cc @martindurant
cc @mrocklin
| https://api.github.com/repos/pandas-dev/pandas/pulls/15838 | 2017-03-29T17:39:58Z | 2017-08-02T09:47:58Z | 2017-08-02T09:47:58Z | 2017-09-06T15:05:57Z |
BUG: SparseDataFrame construction with lists not coercing to dtype (GH 15682) | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 2e822729873ad..359a038d236b0 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1045,7 +1045,7 @@ Bug Fixes
- Bug in ``pd.concat()`` in which concatting with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`)
- Bug in ``groupby.agg()`` incorrectly localizing timezone on ``datetime`` (:issue:`15426`, :issue:`10668`, :issue:`13046`)
-
+- Bug in ``SparseDataFrame`` construction with lists not coercing to dtype (:issue:`15682`)
- Bug in ``.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
- Bug in ``groupby.transform()`` that would coerce the resultant dtypes back to the original (:issue:`10972`, :issue:`11444`)
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 41f301f263374..455d120cca640 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -142,7 +142,7 @@ def _init_dict(self, data, index, columns, dtype=None):
sp_maker = lambda x: SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
- copy=True)
+ copy=True, dtype=dtype)
sdict = DataFrame()
for k, v in compat.iteritems(data):
if isinstance(v, Series):
diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py
index c0c678c184ee8..ae1a1e35f1859 100644
--- a/pandas/tests/sparse/test_frame.py
+++ b/pandas/tests/sparse/test_frame.py
@@ -28,7 +28,6 @@
class TestSparseDataFrame(tm.TestCase, SharedWithSparse):
-
klass = SparseDataFrame
def setUp(self):
@@ -237,6 +236,18 @@ def test_constructor_nan_dataframe(self):
dtype=float)
tm.assert_sp_frame_equal(result, expected)
+ def test_type_coercion_at_construction(self):
+ # GH 15682
+ result = pd.SparseDataFrame(
+ {'a': [1, 0, 0], 'b': [0, 1, 0], 'c': [0, 0, 1]}, dtype='uint8',
+ default_fill_value=0)
+ expected = pd.SparseDataFrame(
+ {'a': pd.SparseSeries([1, 0, 0], dtype='uint8'),
+ 'b': pd.SparseSeries([0, 1, 0], dtype='uint8'),
+ 'c': pd.SparseSeries([0, 0, 1], dtype='uint8')},
+ default_fill_value=0)
+ tm.assert_sp_frame_equal(result, expected)
+
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.loc[:9998] = np.nan
@@ -756,9 +767,18 @@ def test_sparse_frame_fillna_limit(self):
tm.assert_frame_equal(result, expected)
def test_rename(self):
- # just check this works
- renamed = self.frame.rename(index=str) # noqa
- renamed = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x))) # noqa
+ result = self.frame.rename(index=str)
+ expected = SparseDataFrame(self.data, index=self.dates.strftime(
+ "%Y-%m-%d %H:%M:%S"))
+ tm.assert_sp_frame_equal(result, expected)
+
+ result = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x)))
+ data = {'A1': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
+ 'B1': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
+ 'C1': np.arange(10, dtype=np.float64),
+ 'D1': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
+ expected = SparseDataFrame(data, index=self.dates)
+ tm.assert_sp_frame_equal(result, expected)
def test_corr(self):
res = self.frame.corr()
@@ -967,7 +987,6 @@ def _check(frame, orig):
def test_shift(self):
def _check(frame, orig):
-
shifted = frame.shift(0)
exp = orig.shift(0)
tm.assert_frame_equal(shifted.to_dense(), exp)
@@ -1060,7 +1079,7 @@ def test_sparse_pow_issue(self):
df = SparseDataFrame({'A': [nan, 0, 1]})
# note that 2 ** df works fine, also df ** 1
- result = 1**df
+ result = 1 ** df
r1 = result.take([0], 1)['A']
r2 = result['A']
@@ -1126,7 +1145,7 @@ def test_isnotnull(self):
tm.assert_frame_equal(res.to_dense(), exp)
-@pytest.mark.parametrize('index', [None, list('ab')]) # noqa: F811
+@pytest.mark.parametrize('index', [None, list('ab')]) # noqa: F811
@pytest.mark.parametrize('columns', [None, list('cd')])
@pytest.mark.parametrize('fill_value', [None, 0, np.nan])
@pytest.mark.parametrize('dtype', [bool, int, float, np.uint16])
@@ -1180,7 +1199,7 @@ def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
tm.assert_equal(sdf.to_coo().dtype, np.object_)
-@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811
+@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811
def test_from_to_scipy_object(spmatrix, fill_value):
# GH 4343
dtype = object
@@ -1255,7 +1274,6 @@ def test_comparison_op_scalar(self):
class TestSparseDataFrameAnalytics(tm.TestCase):
-
def setUp(self):
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
| - [x] closes #15682
- [x] tests added / passed
- [ ] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15834 | 2017-03-29T08:38:32Z | 2017-03-30T12:19:35Z | null | 2017-03-30T12:19:35Z |
ENH: add origin to to_datetime | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 7136b15a7633a..44c200e13b877 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -252,7 +252,8 @@ Epoch Timestamps
It's also possible to convert integer or float epoch times. The default unit
for these is nanoseconds (since these are how ``Timestamp`` s are stored). However,
-often epochs are stored in another ``unit`` which can be specified:
+often epochs are stored in another ``unit`` which can be specified. These are computed
+from the starting point specified by the :ref:`Origin Parameter <timeseries.origin>`.
Typical epoch stored units
@@ -276,6 +277,29 @@ These *work*, but the results may be unexpected.
Epoch times will be rounded to the nearest nanosecond.
+.. _timeseries.origin:
+
+Using the Origin Parameter
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 0.20.0
+
+Using the ``origin`` parameter, one can specify an alternative starting point for creation
+of a ``DatetimeIndex``.
+
+Start with 1960-01-01 as the starting date
+
+.. ipython:: python
+
+ pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
+
+The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``.
+Commonly called 'unix epoch' or POSIX time.
+
+.. ipython:: python
+
+ pd.to_datetime([1, 2, 3], unit='D')
+
.. _timeseries.daterange:
Generating Ranges of Timestamps
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 781a912555e14..84e6bd99e5ebd 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -27,7 +27,6 @@ Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations
New features
~~~~~~~~~~~~
-
.. _whatsnew_0200.enhancements.dataio_dtype:
``dtype`` keyword for data IO
@@ -55,6 +54,27 @@ fixed-width text files, and :func:`read_excel` for parsing Excel files.
pd.read_fwf(StringIO(data)).dtypes
pd.read_fwf(StringIO(data), dtype={'a':'float64', 'b':'object'}).dtypes
+.. _whatsnew_0120.enhancements.datetime_origin:
+
+to_datetime has gained an origin parameter
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``pd.to_datetime`` has gained a new parameter, ``origin``, to define a reference date
+from where to compute the resulting ``DatetimeIndex``. (:issue:`11276`, :issue:`11745`)
+
+Start with 1960-01-01 as the starting date
+
+.. ipython:: python
+
+ pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
+
+The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``.
+Commonly called 'unix epoch' or POSIX time.
+
+.. ipython:: python
+
+ pd.to_datetime([1, 2, 3], unit='D')
+
.. _whatsnew_0200.enhancements.groupby_access:
Groupby Enhancements
@@ -317,7 +337,7 @@ Other Enhancements
- ``pd.DataFrame.to_latex`` and ``pd.DataFrame.to_string`` now allow optional header aliases. (:issue:`15536`)
- Re-enable the ``parse_dates`` keyword of ``read_excel`` to parse string columns as dates (:issue:`14326`)
- Added ``.empty`` property to subclasses of ``Index``. (:issue:`15270`)
-
+- Enabled floor division for ``Timedelta`` and ``TimedeltaIndex`` (:issue:`15828`)
- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`)
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
- ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 5aa8e15d0d087..cc1439711c1d4 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -3073,6 +3073,7 @@ class Timedelta(_Timedelta):
return np.timedelta64(self.value, 'ns')
def _validate_ops_compat(self, other):
+
# return True if we are compat with operating
if _checknull_with_nat(other):
return True
@@ -3179,11 +3180,41 @@ class Timedelta(_Timedelta):
__div__ = __truediv__
__rdiv__ = __rtruediv__
- def _not_implemented(self, *args, **kwargs):
- return NotImplemented
+ def __floordiv__(self, other):
+
+ if hasattr(other, 'dtype'):
+
+ # work with i8
+ other = other.astype('m8[ns]').astype('i8')
+
+ return self.value // other
- __floordiv__ = _not_implemented
- __rfloordiv__ = _not_implemented
+ # integers only
+ if is_integer_object(other):
+ return Timedelta(self.value // other, unit='ns')
+
+ if not self._validate_ops_compat(other):
+ return NotImplemented
+
+ other = Timedelta(other)
+ if other is NaT:
+ return np.nan
+ return self.value // other.value
+
+ def __rfloordiv__(self, other):
+ if hasattr(other, 'dtype'):
+
+ # work with i8
+ other = other.astype('m8[ns]').astype('i8')
+ return other // self.value
+
+ if not self._validate_ops_compat(other):
+ return NotImplemented
+
+ other = Timedelta(other)
+ if other is NaT:
+ return NaT
+ return other.value // self.value
def _op_unary_method(func, name):
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 512a3e1c38629..02630c76abb93 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1515,3 +1515,120 @@ def test_normalize_date():
result = normalize_date(value)
assert (result == datetime(2012, 9, 7))
+
+
+@pytest.fixture(params=['D', 's', 'ms', 'us', 'ns'])
+def units(request):
+ return request.param
+
+
+@pytest.fixture
+def epoch_1960():
+ # for origin as 1960-01-01
+ return Timestamp('1960-01-01')
+
+
+@pytest.fixture
+def units_from_epochs():
+ return list(range(5))
+
+
+@pytest.fixture(params=[epoch_1960(), epoch_1960().to_datetime(),
+ epoch_1960().to_datetime64(),
+ str(epoch_1960())])
+def epochs(request):
+ return request.param
+
+
+@pytest.fixture
+def julian_dates():
+ return pd.date_range('2014-1-1', periods=10).to_julian_date().values
+
+
+class TestOrigin(object):
+
+ def test_to_basic(self, julian_dates):
+ # gh-11276, gh-11745
+ # for origin as julian
+
+ result = Series(pd.to_datetime(
+ julian_dates, unit='D', origin='julian'))
+ expected = Series(pd.to_datetime(
+ julian_dates - pd.Timestamp(0).to_julian_date(), unit='D'))
+ assert_series_equal(result, expected)
+
+ result = Series(pd.to_datetime(
+ [0, 1, 2], unit='D', origin='unix'))
+ expected = Series([Timestamp('1970-01-01'),
+ Timestamp('1970-01-02'),
+ Timestamp('1970-01-03')])
+ assert_series_equal(result, expected)
+
+ # default
+ result = Series(pd.to_datetime(
+ [0, 1, 2], unit='D'))
+ expected = Series([Timestamp('1970-01-01'),
+ Timestamp('1970-01-02'),
+ Timestamp('1970-01-03')])
+ assert_series_equal(result, expected)
+
+ def test_julian_round_trip(self):
+ result = pd.to_datetime(2456658, origin='julian', unit='D')
+ assert result.to_julian_date() == 2456658
+
+ # out-of-bounds
+ with pytest.raises(ValueError):
+ pd.to_datetime(1, origin="julian", unit='D')
+
+ def test_invalid_unit(self, units, julian_dates):
+
+ # checking for invalid combination of origin='julian' and unit != D
+ if units != 'D':
+ with pytest.raises(ValueError):
+ pd.to_datetime(julian_dates, unit=units, origin='julian')
+
+ def test_invalid_origin(self):
+
+ # need to have a numeric specified
+ with pytest.raises(ValueError):
+ pd.to_datetime("2005-01-01", origin="1960-01-01")
+
+ with pytest.raises(ValueError):
+ pd.to_datetime("2005-01-01", origin="1960-01-01", unit='D')
+
+ def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
+
+ expected = Series(
+ [pd.Timedelta(x, unit=units) +
+ epoch_1960 for x in units_from_epochs])
+
+ result = Series(pd.to_datetime(
+ units_from_epochs, unit=units, origin=epochs))
+ assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("origin, exc",
+ [('random_string', ValueError),
+ ('epoch', ValueError),
+ ('13-24-1990', ValueError),
+ (datetime(1, 1, 1), tslib.OutOfBoundsDatetime)])
+ def test_invalid_origins(self, origin, exc, units, units_from_epochs):
+
+ with pytest.raises(exc):
+ pd.to_datetime(units_from_epochs, unit=units,
+ origin=origin)
+
+ def test_processing_order(self):
+ # make sure we handle out-of-bounds *before*
+ # constructing the dates
+
+ result = pd.to_datetime(200 * 365, unit='D')
+ expected = Timestamp('2169-11-13 00:00:00')
+ assert result == expected
+
+ result = pd.to_datetime(200 * 365, unit='D', origin='1870-01-01')
+ expected = Timestamp('2069-11-13 00:00:00')
+ assert result == expected
+
+ result = pd.to_datetime(300 * 365, unit='D', origin='1870-01-01')
+ expected = Timestamp('2169-10-20 00:00:00')
+ assert result == expected
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 2e9f11297dc83..36aac8cafecc1 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -284,6 +284,12 @@ def test_ops_compat(self):
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
+ # floor divide
+ expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
+ for offset in offsets:
+ result = rng // offset
+ tm.assert_index_equal(result, expected, exact=False)
+
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
@@ -867,10 +873,12 @@ def test_ops(self):
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
+ self.assertEqual(td // 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
+ self.assertTrue((td // pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
@@ -878,9 +886,6 @@ def test_ops(self):
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
- # invalid
- self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
-
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
@@ -991,7 +996,7 @@ class Other:
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
- self.assertTrue(td.__floordiv__(td) is NotImplemented)
+ self.assertTrue(td.__floordiv__(other) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py
index c2b895925b685..c22d1d2329fba 100644
--- a/pandas/tests/scalar/test_timedelta.py
+++ b/pandas/tests/scalar/test_timedelta.py
@@ -216,6 +216,7 @@ def test_conversion(self):
def test_freq_conversion(self):
+ # truediv
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
@@ -224,6 +225,15 @@ def test_freq_conversion(self):
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
+ # floordiv
+ td = Timedelta('1 days 2 hours 3 ns')
+ result = td // np.timedelta64(1, 'D')
+ self.assertEqual(result, 1)
+ result = td // np.timedelta64(1, 's')
+ self.assertEqual(result, 93600)
+ result = td // np.timedelta64(1, 'ns')
+ self.assertEqual(result, td.value)
+
def test_fields(self):
def check(value):
# that we are int/long like
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index 5d062dd38f9fc..d0f373fcc5a45 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -326,7 +326,7 @@ def _add_delta(self, delta):
def _evaluate_with_timedelta_like(self, other, op, opstr):
# allow division by a timedelta
- if opstr in ['__div__', '__truediv__']:
+ if opstr in ['__div__', '__truediv__', '__floordiv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isnull(other):
@@ -334,7 +334,10 @@ def _evaluate_with_timedelta_like(self, other, op, opstr):
"division by pd.NaT not implemented")
i8 = self.asi8
- result = i8 / float(other.value)
+ if opstr in ['__floordiv__']:
+ result = i8 // other.value
+ else:
+ result = op(i8, float(other.value))
result = self._maybe_mask_results(result, convert='float64')
return Index(result, name=self.name, copy=False)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 5dc9746c6d6f9..d0f1671f9e309 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -9,7 +9,11 @@
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
- is_list_like)
+ is_integer,
+ is_float,
+ is_list_like,
+ is_scalar,
+ is_numeric_dtype)
from pandas.types.generic import (ABCIndexClass, ABCSeries,
ABCDataFrame)
from pandas.types.missing import notnull
@@ -177,7 +181,7 @@ def _guess_datetime_format_for_array(arr, **kwargs):
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True,
- unit=None, infer_datetime_format=False):
+ unit=None, infer_datetime_format=False, origin='unix'):
"""
Convert argument to datetime.
@@ -229,13 +233,27 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
- If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
- unit of the arg (D,s,ms,us,ns) denote the unit in epoch
- (e.g. a unix timestamp), which is an integer/float number.
+ unit of the arg (D,s,ms,us,ns) denote the unit, which is an
+ integer or float number. This will be based off the origin.
+ Example, with unit='ms' and origin='unix' (the default), this
+ would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
+ origin : scalar, default is 'unix'
+ Define the reference date. The numeric values would be parsed as number
+ of units (defined by `unit`) since this reference date.
+
+ - If 'unix' (or POSIX) time; origin is set to 1970-01-01.
+ - If 'julian', unit must be 'D', and origin is set to beginning of
+ Julian Calendar. Julian day number 0 is assigned to the day starting
+ at noon on January 1, 4713 BC.
+ - If Timestamp convertible, origin is set to Timestamp identified by
+ origin.
+
+ .. versionadded: 0.20.0
Returns
-------
@@ -297,8 +315,15 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
- """
+ Using a non-unix epoch origin
+
+ >>> pd.to_datetime([1, 2, 3], unit='D',
+ origin=pd.Timestamp('1960-01-01'))
+ 0 1960-01-02
+ 1 1960-01-03
+ 2 1960-01-04
+ """
from pandas.tseries.index import DatetimeIndex
tz = 'utc' if utc else None
@@ -410,21 +435,77 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
raise e
if arg is None:
- return arg
- elif isinstance(arg, tslib.Timestamp):
- return arg
+ return None
+
+ # handle origin
+ if origin == 'julian':
+
+ original = arg
+ j0 = tslib.Timestamp(0).to_julian_date()
+ if unit != 'D':
+ raise ValueError("unit must be 'D' for origin='julian'")
+ try:
+ arg = arg - j0
+ except:
+ raise ValueError("incompatible 'arg' type for given "
+ "'origin'='julian'")
+
+ # premptively check this for a nice range
+ j_max = tslib.Timestamp.max.to_julian_date() - j0
+ j_min = tslib.Timestamp.min.to_julian_date() - j0
+ if np.any(arg > j_max) or np.any(arg < j_min):
+ raise tslib.OutOfBoundsDatetime(
+ "{original} is Out of Bounds for "
+ "origin='julian'".format(original=original))
+
+ elif origin not in ['unix', 'julian']:
+
+ # arg must be a numeric
+ original = arg
+ if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
+ is_numeric_dtype(np.asarray(arg))):
+ raise ValueError(
+ "'{arg}' is not compatible with origin='{origin}'; "
+ "it must be numeric with a unit specified ".format(
+ arg=arg,
+ origin=origin))
+
+ # we are going to offset back to unix / epoch time
+ try:
+ offset = tslib.Timestamp(origin) - tslib.Timestamp(0)
+ except tslib.OutOfBoundsDatetime:
+ raise tslib.OutOfBoundsDatetime(
+ "origin {} is Out of Bounds".format(origin))
+ except ValueError:
+ raise ValueError("origin {} cannot be converted "
+ "to a Timestamp".format(origin))
+
+ # convert the offset to the unit of the arg
+ # this should be lossless in terms of precision
+ offset = offset // tslib.Timedelta(1, unit=unit)
+
+ # scalars & ndarray-like can handle the addition
+ if is_list_like(arg) and not isinstance(
+ arg, (ABCSeries, ABCIndexClass, np.ndarray)):
+ arg = np.asarray(arg)
+ arg = arg + offset
+
+ if isinstance(arg, tslib.Timestamp):
+ result = arg
elif isinstance(arg, ABCSeries):
from pandas import Series
values = _convert_listlike(arg._values, False, format)
- return Series(values, index=arg.index, name=arg.name)
+ result = Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, MutableMapping)):
- return _assemble_from_unit_mappings(arg, errors=errors)
+ result = _assemble_from_unit_mappings(arg, errors=errors)
elif isinstance(arg, ABCIndexClass):
- return _convert_listlike(arg, box, format, name=arg.name)
+ result = _convert_listlike(arg, box, format, name=arg.name)
elif is_list_like(arg):
- return _convert_listlike(arg, box, format)
+ result = _convert_listlike(arg, box, format)
+ else:
+ result = _convert_listlike(np.array([arg]), box, format)[0]
- return _convert_listlike(np.array([arg]), box, format)[0]
+ return result
# mappings for assembling units
| closes #11276
closes #11745
superseded #11470 | https://api.github.com/repos/pandas-dev/pandas/pulls/15828 | 2017-03-28T20:45:10Z | 2017-04-02T22:55:24Z | null | 2017-04-03T10:11:01Z |
BUG: bug in .at/.loc indexing with a tz-aware columns | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index fdf34e0d11572..51c3d5578ae6c 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -881,6 +881,7 @@ Bug Fixes
- Compat for 32-bit platforms for ``.qcut/cut``; bins will now be ``int64`` dtype (:issue:`14866`)
+- Bug in ``.at`` when selecting from a tz-aware column (:issue:`15822`)
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`, :issue:`15765`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 90c49a9c85133..90baa1aff4857 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1918,7 +1918,16 @@ def get_value(self, index, col, takeable=False):
series = self._get_item_cache(col)
engine = self.index._engine
- return engine.get_value(series.get_values(), index)
+
+ try:
+ return engine.get_value(series._values, index)
+ except TypeError:
+
+ # we cannot handle direct indexing
+ # use positional
+ col = self.columns.get_loc(col)
+ index = self.index.get_loc(index)
+ return self.get_value(index, col, takeable=True)
def set_value(self, index, col, value, takeable=False):
"""
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 4e81cd01cd5d2..0eeaec3e00fa6 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -154,3 +154,18 @@ def test_at_to_fail(self):
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
+
+ def test_at_with_tz(self):
+ # gh-15822
+ df = DataFrame({'name': ['John', 'Anderson'],
+ 'date': [Timestamp(2017, 3, 13, 13, 32, 56),
+ Timestamp(2017, 2, 16, 12, 10, 3)]})
+ df['date'] = df['date'].dt.tz_localize('Asia/Shanghai')
+
+ expected = Timestamp('2017-03-13 13:32:56+0800', tz='Asia/Shanghai')
+
+ result = df.loc[0, 'date']
+ assert result == expected
+
+ result = df.at[0, 'date']
+ assert result == expected
| closes #15822 | https://api.github.com/repos/pandas-dev/pandas/pulls/15827 | 2017-03-28T16:16:24Z | 2017-03-28T16:49:16Z | null | 2017-03-28T17:17:55Z |
Remove NotImplementedError for parse_dates keyword in read_excel | diff --git a/doc/source/io.rst b/doc/source/io.rst
index faeea9d448cf2..e72224c6fa1fe 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2767,6 +2767,20 @@ indices to be parsed.
read_excel('path_to_file.xls', 'Sheet1', parse_cols=[0, 2, 3])
+
+Parsing Dates
++++++++++++++
+
+Datetime-like values are normally automatically converted to the appropriate
+dtype when reading the excel file. But if you have a column of strings that
+*look* like dates (but are not actually formatted as dates in excel), you can
+use the `parse_dates` keyword to parse those strings to datetimes:
+
+.. code-block:: python
+
+ read_excel('path_to_file.xls', 'Sheet1', parse_dates=['date_strings'])
+
+
Cell Converters
+++++++++++++++
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 3ab69e1ff409b..fdf34e0d11572 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -270,7 +270,7 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you
.. _whatsnew_0200.enhancements.other:
-Other enhancements
+Other Enhancements
^^^^^^^^^^^^^^^^^^
- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
@@ -314,6 +314,7 @@ Other enhancements
- ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information.
- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`)
- ``pd.DataFrame.to_latex`` and ``pd.DataFrame.to_string`` now allow optional header aliases. (:issue:`15536`)
+- Re-enable the ``parse_dates`` keyword of ``read_excel`` to parse string columns as dates (:issue:`14326`)
.. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 82ea2e8a46592..d324855bc2f4d 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -343,13 +343,10 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, names=None,
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
- if parse_dates:
- raise NotImplementedError("parse_dates keyword of read_excel "
- "is not implemented")
- if date_parser is not None:
- raise NotImplementedError("date_parser keyword of read_excel "
- "is not implemented")
+ if parse_dates is True and index_col is None:
+ warn("The 'parse_dates=True' keyword of read_excel was provided"
+ " without an 'index_col' keyword value.")
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 45c62b224ef4e..30b88de91ef76 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1176,13 +1176,18 @@ def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
- name = self.index_names[i]
+ if self.index_names is not None:
+ name = self.index_names[i]
+ else:
+ name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
- return (j == self.parse_dates) or (name == self.parse_dates)
+ return ((j == self.parse_dates) or
+ (name is not None and name == self.parse_dates))
else:
- return (j in self.parse_dates) or (name in self.parse_dates)
+ return ((j in self.parse_dates) or
+ (name is not None and name in self.parse_dates))
def _extract_multi_indexer_columns(self, header, index_names, col_names,
passed_names=False):
@@ -1352,6 +1357,7 @@ def _get_name(icol):
def _agg_index(self, index, try_parse_dates=True):
arrays = []
+
for i, arr in enumerate(index):
if (try_parse_dates and self._should_parse_dates(i)):
@@ -1512,6 +1518,7 @@ def _cast_types(self, values, cast_type, column):
def _do_date_conversions(self, names, data):
# returns data, columns
+
if self.parse_dates is not None:
data, names = _process_date_conversion(
data, self._date_conv, self.parse_dates, self.index_col,
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index b66cb24bf44d8..256a37e922177 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -924,17 +924,32 @@ def test_read_excel_chunksize(self):
chunksize=100)
def test_read_excel_parse_dates(self):
- # GH 11544
- with tm.assertRaises(NotImplementedError):
- pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
- parse_dates=True)
+ # GH 11544, 12051
- def test_read_excel_date_parser(self):
- # GH 11544
- with tm.assertRaises(NotImplementedError):
- dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
- pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
- date_parser=dateparse)
+ df = DataFrame(
+ {'col': [1, 2, 3],
+ 'date_strings': pd.date_range('2012-01-01', periods=3)})
+ df2 = df.copy()
+ df2['date_strings'] = df2['date_strings'].dt.strftime('%m/%d/%Y')
+
+ with ensure_clean(self.ext) as pth:
+ df2.to_excel(pth)
+
+ res = read_excel(pth)
+ tm.assert_frame_equal(df2, res)
+
+ # no index_col specified when parse_dates is True
+ with tm.assert_produces_warning():
+ res = read_excel(pth, parse_dates=True)
+ tm.assert_frame_equal(df2, res)
+
+ res = read_excel(pth, parse_dates=['date_strings'], index_col=0)
+ tm.assert_frame_equal(df, res)
+
+ dateparser = lambda x: pd.datetime.strptime(x, '%m/%d/%Y')
+ res = read_excel(pth, parse_dates=['date_strings'],
+ date_parser=dateparser, index_col=0)
+ tm.assert_frame_equal(df, res)
def test_read_excel_skiprows_list(self):
# GH 4903
@@ -1382,8 +1397,7 @@ def test_to_excel_multiindex(self):
# round trip
frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
- df = read_excel(reader, 'test1', index_col=[0, 1],
- parse_dates=False)
+ df = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
@@ -1424,8 +1438,7 @@ def test_to_excel_multiindex_cols(self):
frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
df = read_excel(reader, 'test1', header=header,
- index_col=[0, 1],
- parse_dates=False)
+ index_col=[0, 1])
if not self.merge_cells:
fm = frame.columns.format(sparsify=False,
adjoin=False, names=False)
| rebase of #12051 and fixes on top
closes #11544 | https://api.github.com/repos/pandas-dev/pandas/pulls/15820 | 2017-03-27T18:43:06Z | 2017-03-28T12:49:58Z | null | 2017-03-28T12:51:07Z |
DOC: ecosystem.rst: QtPandas | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 5a7d6a11d293d..93efcd2724ef7 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -93,8 +93,8 @@ targets the IPython Notebook environment.
`Plotly’s <https://plot.ly/>`__ `Python API <https://plot.ly/python/>`__ enables interactive figures and web shareability. Maps, 2D, 3D, and live-streaming graphs are rendered with WebGL and `D3.js <http://d3js.org/>`__. The library supports plotting directly from a pandas DataFrame and cloud-based collaboration. Users of `matplotlib, ggplot for Python, and Seaborn <https://plot.ly/python/matplotlib-to-plotly-tutorial/>`__ can convert figures into interactive web-based plots. Plots can be drawn in `IPython Notebooks <https://plot.ly/ipython-notebooks/>`__ , edited with R or MATLAB, modified in a GUI, or embedded in apps and dashboards. Plotly is free for unlimited sharing, and has `cloud <https://plot.ly/product/plans/>`__, `offline <https://plot.ly/python/offline/>`__, or `on-premise <https://plot.ly/product/enterprise/>`__ accounts for private use.
-Visualizing Data in Qt applications
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+`QtPandas <https://github.com/draperjames/qtpandas>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Spun off from the main pandas library, the `qtpandas <https://github.com/draperjames/qtpandas>`__
library enables DataFrame visualization and manipulation in PyQt4 and PySide applications.
|
- [x] closes draperjames/qtpandas#36
- [0] tests added / passed
- [-] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [-] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15813 | 2017-03-27T05:11:38Z | 2017-03-27T13:08:21Z | null | 2017-03-27T17:44:21Z |
BUG: replace of numeric by string / dtype coversion (GH15743) | diff --git a/RELEASE.md b/RELEASE.md
index a181412be2719..efd075dabcba9 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,6 +1,6 @@
Release Notes
=============
-The list of changes to pandas between each release can be found
+The list of changes to Pandas between each release can be found
[here](http://pandas.pydata.org/pandas-docs/stable/whatsnew.html). For full
details, see the commit logs at http://github.com/pandas-dev/pandas.
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index fdf34e0d11572..7b58678c68899 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -883,6 +883,8 @@ Bug Fixes
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
- Bug in ``.replace()`` may result in incorrect dtypes. (:issue:`12747`, :issue:`15765`)
+- Bug in ``Series.replace`` and ``DataFrame.replace`` which failed on empty replacement dicts (:issue:`15289`)
+- Bug in ``Series.replace`` which replaced a numeric by string (:issue:`15743`)
- Bug in ``.asfreq()``, where frequency was not set for empty ``Series`` (:issue:`14320`)
@@ -985,7 +987,8 @@ Bug Fixes
- Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (use ``matplotlib >= 2.0.1``) (:issue:`9351`)
- Bug in ``DataFrame.boxplot`` where ``fontsize`` was not applied to the tick labels on both axes (:issue:`15108`)
-- Bug in ``Series.replace`` and ``DataFrame.replace`` which failed on empty replacement dicts (:issue:`15289`)
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
- Bug in ``pd.read_msgpack`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+
+- Bug in ``Series.replace`` which replaced a numeric by string (:issue:`15743`)
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 3b9bfe1de48e7..91039f3270af2 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -9,10 +9,16 @@
from pandas.compat import range, string_types
from pandas.types.common import (is_numeric_v_string_like,
- is_float_dtype, is_datetime64_dtype,
- is_datetime64tz_dtype, is_integer_dtype,
- _ensure_float64, is_scalar,
- needs_i8_conversion, is_integer)
+ is_float_dtype,
+ is_datetime64_dtype,
+ is_datetime64tz_dtype,
+ is_integer_dtype,
+ is_scalar,
+ is_integer,
+ needs_i8_conversion,
+ _ensure_float64)
+
+from pandas.types.cast import infer_dtype_from_array
from pandas.types.missing import isnull
@@ -21,11 +27,11 @@ def mask_missing(arr, values_to_mask):
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
- if not isinstance(values_to_mask, (list, np.ndarray)):
- values_to_mask = [values_to_mask]
+ dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
- values_to_mask = np.array(values_to_mask, dtype=arr.dtype)
+ values_to_mask = np.array(values_to_mask, dtype=dtype)
+
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
@@ -409,7 +415,7 @@ def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
- values = values.reshape(tuple((1, ) + values.shape))
+ values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
@@ -447,7 +453,6 @@ def wrapper(arr, mask, limit=None):
def pad_1d(values, limit=None, mask=None, dtype=None):
-
if dtype is None:
dtype = values.dtype
_method = None
@@ -472,7 +477,6 @@ def pad_1d(values, limit=None, mask=None, dtype=None):
def backfill_1d(values, limit=None, mask=None, dtype=None):
-
if dtype is None:
dtype = values.dtype
_method = None
@@ -498,7 +502,6 @@ def backfill_1d(values, limit=None, mask=None, dtype=None):
def pad_2d(values, limit=None, mask=None, dtype=None):
-
if dtype is None:
dtype = values.dtype
_method = None
@@ -528,7 +531,6 @@ def pad_2d(values, limit=None, mask=None, dtype=None):
def backfill_2d(values, limit=None, mask=None, dtype=None):
-
if dtype is None:
dtype = values.dtype
_method = None
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index 8b50036cd50f8..fce59e10bf4bd 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -795,7 +795,7 @@ def test_replace_dtypes(self):
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
- def test_replace_input_formats(self):
+ def test_replace_input_formats_listlike(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
@@ -812,15 +812,6 @@ def test_replace_input_formats(self):
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
- # dict to scalar
- filled = df.replace(to_rep, 0)
- expected = {}
- for k, v in compat.iteritems(df):
- expected[k] = v.replace(to_rep[k], 0)
- assert_frame_equal(filled, DataFrame(expected))
-
- self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
-
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
@@ -842,6 +833,20 @@ def test_replace_input_formats(self):
self.assertRaises(ValueError, df.replace, to_rep, values[1:])
+ def test_replace_input_formats_scalar(self):
+ df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
+ 'C': ['', 'asdf', 'fd']})
+
+ # dict to scalar
+ to_rep = {'A': np.nan, 'B': 0, 'C': ''}
+ filled = df.replace(to_rep, 0)
+ expected = {}
+ for k, v in compat.iteritems(df):
+ expected[k] = v.replace(to_rep[k], 0)
+ assert_frame_equal(filled, DataFrame(expected))
+
+ self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
+
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index 0a53581e24ba5..5190eb110f4cf 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -10,7 +10,6 @@
class TestSeriesReplace(TestData, tm.TestCase):
-
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
@@ -227,3 +226,24 @@ def test_replace_with_empty_dictlike(self):
s = pd.Series(list('abcd'))
tm.assert_series_equal(s, s.replace(dict()))
tm.assert_series_equal(s, s.replace(pd.Series([])))
+
+ def test_replace_string_with_number(self):
+ # GH 15743
+ s = pd.Series([1, 2, 3])
+ result = s.replace('2', np.nan)
+ expected = pd.Series([1, 2, 3])
+ tm.assert_series_equal(expected, result)
+
+ def test_replace_unicode_with_number(self):
+ # GH 15743
+ s = pd.Series([1, 2, 3])
+ result = s.replace(u'2', np.nan)
+ expected = pd.Series([1, 2, 3])
+ tm.assert_series_equal(expected, result)
+
+ def test_replace_mixed_types_with_string(self):
+ # Testing mixed
+ s = pd.Series([1, 2, 3, '4', 4, 5])
+ result = s.replace([2, '4'], np.nan)
+ expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
+ tm.assert_series_equal(expected, result)
diff --git a/pandas/tests/types/test_cast.py b/pandas/tests/types/test_cast.py
index dd4ea3bb02be9..de6ef7af9d7f9 100644
--- a/pandas/tests/types/test_cast.py
+++ b/pandas/tests/types/test_cast.py
@@ -5,13 +5,15 @@
"""
-from datetime import datetime
+import pytest
+from datetime import datetime, timedelta, date
import numpy as np
from pandas import Timedelta, Timestamp, DatetimeIndex
from pandas.types.cast import (maybe_downcast_to_dtype,
maybe_convert_objects,
infer_dtype_from_scalar,
+ infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
@@ -82,7 +84,7 @@ def test_datetime_with_timezone(self):
tm.assert_index_equal(res, exp)
-class TestInferDtype(tm.TestCase):
+class TestInferDtype(object):
def test_infer_dtype_from_scalar(self):
# Test that _infer_dtype_from_scalar is returning correct dtype for int
@@ -92,44 +94,62 @@ def test_infer_dtype_from_scalar(self):
np.int32, np.uint64, np.int64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, type(data))
+ assert dtype == type(data)
data = 12
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, np.int64)
+ assert dtype == np.int64
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, dtypec)
+ assert dtype == dtypec
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, np.float64)
+ assert dtype == np.float64
for data in [True, False]:
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, np.bool_)
+ assert dtype == np.bool_
for data in [np.complex64(1), np.complex128(1)]:
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, np.complex_)
+ assert dtype == np.complex_
- import datetime
for data in [np.datetime64(1, 'ns'), Timestamp(1),
- datetime.datetime(2000, 1, 1, 0, 0)]:
+ datetime(2000, 1, 1, 0, 0)]:
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, 'M8[ns]')
+ assert dtype == 'M8[ns]'
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
- datetime.timedelta(1)]:
+ timedelta(1)]:
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, 'm8[ns]')
+ assert dtype == 'm8[ns]'
- for data in [datetime.date(2000, 1, 1),
+ for data in [date(2000, 1, 1),
Timestamp(1, tz='US/Eastern'), 'foo']:
dtype, val = infer_dtype_from_scalar(data)
- self.assertEqual(dtype, np.object_)
+ assert dtype == np.object_
+
+ @pytest.mark.parametrize(
+ "arr, expected",
+ [('foo', np.object_),
+ (b'foo', np.object_),
+ (1, np.int_),
+ (1.5, np.float_),
+ ([1], np.int_),
+ (np.array([1]), np.int_),
+ ([np.nan, 1, ''], np.object_),
+ (np.array([[1.0, 2.0]]), np.float_),
+ (Timestamp('20160101'), np.object_),
+ (np.datetime64('2016-01-01'), np.dtype('<M8[D]')),
+ ])
+ def test_infer_dtype_from_array(self, arr, expected):
+
+ # these infer specifically to numpy dtypes
+ dtype, _ = infer_dtype_from_array(arr)
+ assert dtype == expected
class TestMaybe(tm.TestCase):
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 91c7d287d6d46..985e5b9f95831 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -387,6 +387,50 @@ def infer_dtype_from_scalar(val, pandas_dtype=False):
return dtype, val
+def infer_dtype_from_array(arr):
+ """
+ infer the dtype from a scalar or array
+
+ Parameters
+ ----------
+ arr : scalar or array
+
+ Returns
+ -------
+ tuple (numpy-compat dtype, array)
+
+ Notes
+ -----
+ These infer to numpy dtypes exactly
+ with the exception that mixed / object dtypes
+ are not coerced by stringifying or conversion
+
+ Examples
+ --------
+ >>> np.asarray([1, '1'])
+ array(['1', '1'], dtype='<U21')
+
+ >>> infer_dtype_from_array([1, '1'])
+ (numpy.object_, [1, '1'])
+
+ """
+
+ if isinstance(arr, np.ndarray):
+ return arr.dtype, arr
+
+ if not is_list_like(arr):
+ arr = [arr]
+
+ # don't force numpy coerce with nan's
+ inferred = lib.infer_dtype(arr)
+ if inferred in ['string', 'bytes', 'unicode',
+ 'mixed', 'mixed-integer']:
+ return (np.object_, arr)
+
+ arr = np.asarray(arr)
+ return arr.dtype, arr
+
+
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explict type promotion and coercion
| - [x] closes #15743
- [x] test added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15812 | 2017-03-27T02:34:46Z | 2017-03-28T18:27:00Z | null | 2017-03-28T18:49:12Z |
TST: suppress some numpy warnings | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 3b77bda6f69f0..a62d290277443 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -174,7 +174,7 @@ def isin(comps, values):
" to isin(), you passed a "
"[{0}]".format(type(values).__name__))
- from pandas import DatetimeIndex, PeriodIndex
+ from pandas import DatetimeIndex, TimedeltaIndex, PeriodIndex
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = np.array(list(values), dtype='object')
@@ -183,6 +183,9 @@ def isin(comps, values):
if is_period_dtype(values):
comps = PeriodIndex(comps)
values = PeriodIndex(values)
+ elif is_timedelta64_dtype(comps):
+ comps = TimedeltaIndex(comps)
+ values = TimedeltaIndex(values)
else:
comps = DatetimeIndex(comps)
values = DatetimeIndex(values)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index f8eac7a8911ad..5d69746034346 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -431,6 +431,8 @@ def test_basic(self):
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
+ def test_i8(self):
+
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 6c8aeba704c7b..479f0e4566b8d 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -2913,10 +2913,12 @@ def test_info(self):
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
- df.info()
+ buf = compat.StringIO()
+ df.info(buf=buf)
df2 = df[df['category'] == 'd']
- df2.info()
+ buf = compat.StringIO()
+ df2.info(buf=buf)
def test_groupby_sort(self):
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 75a7555d58ca5..54de8c1e34031 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -389,9 +389,10 @@ def test_nanstd(self):
def test_nansem(self):
tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import sem
- self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
- allow_str=False, allow_date=False,
- allow_tdelta=True, allow_obj='convert')
+ with np.errstate(invalid='ignore'):
+ self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
+ allow_str=False, allow_date=False,
+ allow_tdelta=False, allow_obj='convert')
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
@@ -449,16 +450,20 @@ def test_nanskew(self):
tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
- self.check_funs(nanops.nanskew, func, allow_complex=False,
- allow_str=False, allow_date=False, allow_tdelta=False)
+ with np.errstate(invalid='ignore'):
+ self.check_funs(nanops.nanskew, func, allow_complex=False,
+ allow_str=False, allow_date=False,
+ allow_tdelta=False)
def test_nankurt(self):
tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
- self.check_funs(nanops.nankurt, func, allow_complex=False,
- allow_str=False, allow_date=False, allow_tdelta=False)
+ with np.errstate(invalid='ignore'):
+ self.check_funs(nanops.nankurt, func, allow_complex=False,
+ allow_str=False, allow_date=False,
+ allow_tdelta=False)
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod, allow_str=False,
| https://api.github.com/repos/pandas-dev/pandas/pulls/15811 | 2017-03-26T17:50:45Z | 2017-03-26T18:26:50Z | 2017-03-26T18:26:50Z | 2017-03-26T18:27:37Z | |
DOC: remove older whatsnew | diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
index d6fb1c6a8f9cc..66af318f6924e 100644
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -33,55 +33,3 @@ These are new features and improvements of note in each release.
.. include:: whatsnew/v0.17.1.txt
.. include:: whatsnew/v0.17.0.txt
-
-.. include:: whatsnew/v0.16.2.txt
-
-.. include:: whatsnew/v0.16.1.txt
-
-.. include:: whatsnew/v0.16.0.txt
-
-.. include:: whatsnew/v0.15.2.txt
-
-.. include:: whatsnew/v0.15.1.txt
-
-.. include:: whatsnew/v0.15.0.txt
-
-.. include:: whatsnew/v0.14.1.txt
-
-.. include:: whatsnew/v0.14.0.txt
-
-.. include:: whatsnew/v0.13.1.txt
-
-.. include:: whatsnew/v0.13.0.txt
-
-.. include:: whatsnew/v0.12.0.txt
-
-.. include:: whatsnew/v0.11.0.txt
-
-.. include:: whatsnew/v0.10.1.txt
-
-.. include:: whatsnew/v0.10.0.txt
-
-.. include:: whatsnew/v0.9.1.txt
-
-.. include:: whatsnew/v0.9.0.txt
-
-.. include:: whatsnew/v0.8.1.txt
-
-.. include:: whatsnew/v0.8.0.txt
-
-.. include:: whatsnew/v0.7.3.txt
-
-.. include:: whatsnew/v0.7.2.txt
-
-.. include:: whatsnew/v0.7.1.txt
-
-.. include:: whatsnew/v0.7.0.txt
-
-.. include:: whatsnew/v0.6.1.txt
-
-.. include:: whatsnew/v0.6.0.txt
-
-.. include:: whatsnew/v0.5.0.txt
-
-.. include:: whatsnew/v0.4.x.txt
| https://api.github.com/repos/pandas-dev/pandas/pulls/15809 | 2017-03-26T17:00:49Z | 2017-03-27T21:40:25Z | null | 2017-03-27T22:01:36Z | |
DOC: remove warnings for .sort / .order deprecation removals | diff --git a/doc/source/whatsnew/v0.13.1.txt b/doc/source/whatsnew/v0.13.1.txt
index d5d54ba43b622..5e5653945fefa 100644
--- a/doc/source/whatsnew/v0.13.1.txt
+++ b/doc/source/whatsnew/v0.13.1.txt
@@ -125,7 +125,7 @@ API changes
df = DataFrame({'col':['foo', 0, np.nan]})
df2 = DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0])
df.equals(df2)
- df.equals(df2.sort())
+ df.equals(df2.sort_index())
import pandas.core.common as com
com.array_equivalent(np.array([0, np.nan]), np.array([0, np.nan]))
diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt
index aff8ec9092cdc..6282f15b6faeb 100644
--- a/doc/source/whatsnew/v0.15.0.txt
+++ b/doc/source/whatsnew/v0.15.0.txt
@@ -80,7 +80,7 @@ For full docs, see the :ref:`categorical introduction <categorical>` and the
# Reorder the categories and simultaneously add the missing categories
df["grade"] = df["grade"].cat.set_categories(["very bad", "bad", "medium", "good", "very good"])
df["grade"]
- df.sort("grade")
+ df.sort_values("grade")
df.groupby("grade").size()
- ``pandas.core.group_agg`` and ``pandas.core.factor_agg`` were removed. As an alternative, construct
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index f96fc41c73f15..38109d5442751 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -35,6 +35,11 @@ New features
The ``dtype`` keyword argument in the :func:`read_csv` function for specifying the types of parsed columns is now supported with the ``'python'`` engine (:issue:`14295`). See the :ref:`io docs <io.dtypes>` for more information.
+.. ipython:: python
+ :suppress:
+
+ from pandas.compat import StringIO
+
.. ipython:: python
data = "a,b\n1,2\n3,4"
diff --git a/doc/source/whatsnew/v0.7.3.txt b/doc/source/whatsnew/v0.7.3.txt
index 21aa16e5fcb06..6b5199c55cbf5 100644
--- a/doc/source/whatsnew/v0.7.3.txt
+++ b/doc/source/whatsnew/v0.7.3.txt
@@ -93,4 +93,4 @@ Series, to be more consistent with the ``groupby`` behavior with DataFrame:
df
grouped = df.groupby('A')['C']
grouped.describe()
- grouped.apply(lambda x: x.order()[-2:]) # top 2 values
+ grouped.apply(lambda x: x.sort_values()[-2:]) # top 2 values
diff --git a/doc/source/whatsnew/v0.9.1.txt b/doc/source/whatsnew/v0.9.1.txt
index 9dd29a5fe7bf7..4faf38219ebee 100644
--- a/doc/source/whatsnew/v0.9.1.txt
+++ b/doc/source/whatsnew/v0.9.1.txt
@@ -20,13 +20,20 @@ New features
- `Series.sort`, `DataFrame.sort`, and `DataFrame.sort_index` can now be
specified in a per-column manner to support multiple sort orders (:issue:`928`)
- .. ipython:: python
- :okwarning:
+ .. code-block:: ipython
- df = DataFrame(np.random.randint(0, 2, (6, 3)), columns=['A', 'B', 'C'])
+ In [2]: df = DataFrame(np.random.randint(0, 2, (6, 3)), columns=['A', 'B', 'C'])
- df.sort(['A', 'B'], ascending=[1, 0])
+ In [3]: df.sort(['A', 'B'], ascending=[1, 0])
+ Out[3]:
+ A B C
+ 3 0 1 1
+ 4 0 1 1
+ 2 0 0 1
+ 0 1 0 0
+ 1 1 0 0
+ 5 1 0 0
- `DataFrame.rank` now supports additional argument values for the
`na_option` parameter so missing values can be assigned either the largest
| https://api.github.com/repos/pandas-dev/pandas/pulls/15808 | 2017-03-26T15:00:45Z | 2017-03-26T15:12:19Z | 2017-03-26T15:12:19Z | 2017-03-26T15:12:19Z | |
CI: simplify ci setup a bit | diff --git a/.travis.yml b/.travis.yml
index bb3388734229e..d9dbdf96ff976 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,9 @@
sudo: false
language: python
-# To turn off cached miniconda, cython files and compiler cache comment out the
-# USE_CACHE=true line for the build in the matrix below. To delete caches go to
-# https://travis-ci.org/OWNER/REPOSITORY/caches or run
+# To turn off cached cython files and compiler cache
+# set NOCACHE-true
+# To delete caches go to https://travis-ci.org/OWNER/REPOSITORY/caches or run
# travis cache --delete inside the project directory from the travis command line client
# The cash directories will be deleted if anything in ci/ changes in a commit
cache:
@@ -33,31 +33,31 @@ matrix:
- $HOME/.cache # cython cache
- $HOME/.ccache # compiler cache
env:
- - PYTHON_VERSION=3.5 JOB_NAME="35_osx" TEST_ARGS="--skip-slow --skip-network" JOB_TAG="_OSX" TRAVIS_PYTHON_VERSION=3.5 USE_CACHE=true
+ - JOB="3.5_OSX" TEST_ARGS="--skip-slow --skip-network" TRAVIS_PYTHON_VERSION=3.5
- python: 2.7
env:
- - PYTHON_VERSION=2.7 JOB_NAME="27_slow_nnet_LOCALE" TEST_ARGS="--only-slow --skip-network" LOCALE_OVERRIDE="zh_CN.UTF-8" JOB_TAG="_LOCALE" USE_CACHE=true
+ - JOB="2.7_LOCALE" TEST_ARGS="--only-slow --skip-network" LOCALE_OVERRIDE="zh_CN.UTF-8"
addons:
apt:
packages:
- language-pack-zh-hans
- python: 2.7
env:
- - PYTHON_VERSION=2.7 JOB_NAME="27_nslow" TEST_ARGS="--skip-slow" LINT=true USE_CACHE=true
+ - JOB="2.7" TEST_ARGS="--skip-slow" LINT=true
addons:
apt:
packages:
- python-gtk2
- python: 3.5
env:
- - PYTHON_VERSION=3.5 JOB_NAME="35_nslow" TEST_ARGS="--skip-slow --skip-network" COVERAGE=true USE_CACHE=true
+ - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" COVERAGE=true
addons:
apt:
packages:
- xsel
- python: 3.6
env:
- - PYTHON_VERSION=3.6 JOB_NAME="36" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true USE_CACHE=true
+ - JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true
addons:
apt:
packages:
@@ -66,32 +66,32 @@ matrix:
# In allow_failures
- python: 2.7
env:
- - PYTHON_VERSION=2.7 JOB_NAME="27_slow" JOB_TAG="_SLOW" TEST_ARGS="--only-slow --skip-network" USE_CACHE=true
+ - JOB="2.7_SLOW" TEST_ARGS="--only-slow --skip-network"
# In allow_failures
- python: 2.7
env:
- - PYTHON_VERSION=2.7 JOB_NAME="27_build_test" JOB_TAG="_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true USE_CACHE=true
+ - JOB="2.7_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true
# In allow_failures
- - python: 3.5
+ - python: 3.6
env:
- - PYTHON_VERSION=3.5 JOB_NAME="35_numpy_dev" JOB_TAG="_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" USE_CACHE=true
+ - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
# In allow_failures
- python: 3.5
env:
- - PYTHON_VERSION=3.5 JOB_NAME="doc_build" DOC_BUILD=true JOB_TAG="_DOC_BUILD" USE_CACHE=true
+ - JOB="3.5_DOC_BUILD" DOC_BUILD=true
allow_failures:
- python: 2.7
env:
- - PYTHON_VERSION=2.7 JOB_NAME="27_slow" JOB_TAG="_SLOW" TEST_ARGS="--only-slow --skip-network" USE_CACHE=true
+ - JOB="2.7_SLOW" TEST_ARGS="--only-slow --skip-network"
- python: 2.7
env:
- - PYTHON_VERSION=2.7 JOB_NAME="27_build_test" JOB_TAG="_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true USE_CACHE=true
- - python: 3.5
+ - JOB="2.7_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true
+ - python: 3.6
env:
- - PYTHON_VERSION=3.5 JOB_NAME="35_numpy_dev" JOB_TAG="_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" USE_CACHE=true
+ - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
- python: 3.5
env:
- - PYTHON_VERSION=3.5 JOB_NAME="doc_build" DOC_BUILD=true JOB_TAG="_DOC_BUILD" USE_CACHE=true
+ - JOB="3.5_DOC_BUILD" DOC_BUILD=true
before_install:
- echo "before_install"
@@ -107,7 +107,6 @@ before_install:
install:
- echo "install start"
- - ci/check_cache.sh
- ci/prep_cython_cache.sh
- ci/install_travis.sh
- ci/submit_cython_cache.sh
diff --git a/appveyor.yml b/appveyor.yml
index 5d748ddf1a108..db729b3005be6 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -72,11 +72,11 @@ install:
- cmd: conda info -a
# create our env
- - cmd: conda create -q -n pandas python=%PYTHON_VERSION% cython pytest
+ - cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest
- cmd: activate pandas
- - SET REQ=ci\requirements-%PYTHON_VERSION%-%PYTHON_ARCH%.run
+ - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.run
- cmd: echo "installing requirements from %REQ%"
- - cmd: conda install -n pandas -q --file=%REQ%
+ - cmd: conda install -n pandas --file=%REQ%
- cmd: conda list -n pandas
- cmd: echo "installing requirements from %REQ% - done"
diff --git a/ci/install_circle.sh b/ci/install_circle.sh
index 485586e9d4f49..00e14b10ebbd6 100755
--- a/ci/install_circle.sh
+++ b/ci/install_circle.sh
@@ -46,9 +46,9 @@ echo "[environmental variable file]"
cat $ENVS_FILE
source $ENVS_FILE
-export REQ_BUILD=ci/requirements-${PYTHON_VERSION}${JOB_TAG}.build
-export REQ_RUN=ci/requirements-${PYTHON_VERSION}${JOB_TAG}.run
-export REQ_PIP=ci/requirements-${PYTHON_VERSION}${JOB_TAG}.pip
+export REQ_BUILD=ci/requirements-${JOB}.build
+export REQ_RUN=ci/requirements-${JOB}.run
+export REQ_PIP=ci/requirements-${JOB}.pip
# edit the locale override if needed
if [ -n "$LOCALE_OVERRIDE" ]; then
@@ -61,16 +61,13 @@ if [ -n "$LOCALE_OVERRIDE" ]; then
echo
fi
-# create new env
-echo "[create env]"
-time conda create -q -n pandas python=${PYTHON_VERSION} pytest || exit 1
+# create envbuild deps
+echo "[create env: ${REQ_BUILD}]"
+time conda create -n pandas -q --file=${REQ_BUILD} || exit 1
+time conda install -n pandas pytest || exit 1
source activate pandas
-# build deps
-echo "[build installs: ${REQ_BUILD}]"
-time conda install -q --file=${REQ_BUILD} || exit 1
-
# build but don't install
echo "[build em]"
time python setup.py build_ext --inplace || exit 1
@@ -84,5 +81,5 @@ fi
# we may have additional pip installs
echo "[pip installs: ${REQ_PIP}]"
if [ -e ${REQ_PIP} ]; then
- pip install -q -r $REQ_PIP
+ pip install -r $REQ_PIP
fi
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 66633c0592748..ac7bb2c2f3764 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -68,7 +68,7 @@ conda info -a || exit 1
# set the compiler cache to work
echo
-if [ "$USE_CACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then
+if [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then
echo "[Using ccache]"
export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH
gcc=$(which gcc)
@@ -76,7 +76,7 @@ if [ "$USE_CACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then
ccache=$(which ccache)
echo "[ccache]: $ccache"
export CC='ccache gcc'
-elif [ "$USE_CACHE" ] && [ "${TRAVIS_OS_NAME}" == "osx" ]; then
+elif [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "osx" ]; then
echo "[Using ccache]"
time brew install ccache
export PATH=/usr/local/opt/ccache/libexec:$PATH
@@ -91,35 +91,22 @@ fi
echo
echo "[create env]"
-# may have installation instructions for this build
-INSTALL="ci/install-${PYTHON_VERSION}${JOB_TAG}.sh"
-if [ -e ${INSTALL} ]; then
- time bash $INSTALL || exit 1
-else
- # create new env
- # this may already exists, in which case our caching worked
- time conda create -n pandas python=$PYTHON_VERSION pytest nomkl
-fi
+# create our environment
+REQ="ci/requirements-${JOB}.build"
+time conda create -n pandas --file=${REQ} || exit 1
-# build deps
-echo
-echo "[build installs]"
-REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.build"
-if [ -e ${REQ} ]; then
- time conda install -n pandas --file=${REQ} || exit 1
-fi
+source activate pandas
# may have addtl installation instructions for this build
echo
echo "[build addtl installs]"
-REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.build.sh"
+REQ="ci/requirements-${JOB}.build.sh"
if [ -e ${REQ} ]; then
time bash $REQ || exit 1
fi
-source activate pandas
-
-pip install pytest-xdist
+time conda install -n pandas pytest
+time pip install pytest-xdist
if [ "$LINT" ]; then
conda install flake8
@@ -152,7 +139,7 @@ fi
# we may have run installations
echo
echo "[conda installs]"
-REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.run"
+REQ="ci/requirements-${JOB}.run"
if [ -e ${REQ} ]; then
time conda install -n pandas --file=${REQ} || exit 1
fi
@@ -160,7 +147,7 @@ fi
# we may have additional pip installs
echo
echo "[pip installs]"
-REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.pip"
+REQ="ci/requirements-${JOB}.pip"
if [ -e ${REQ} ]; then
pip install -r $REQ
fi
@@ -168,7 +155,7 @@ fi
# may have addtl installation instructions for this build
echo
echo "[addtl installs]"
-REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.sh"
+REQ="ci/requirements-${JOB}.sh"
if [ -e ${REQ} ]; then
time bash $REQ || exit 1
fi
diff --git a/ci/prep_cython_cache.sh b/ci/prep_cython_cache.sh
index e091bb00ccedc..18d9388327ddc 100755
--- a/ci/prep_cython_cache.sh
+++ b/ci/prep_cython_cache.sh
@@ -22,7 +22,7 @@ fi
home_dir=$(pwd)
-if [ -f "$CACHE_File" ] && [ "$USE_CACHE" ] && [ -d "$PYX_CACHE_DIR" ]; then
+if [ -f "$CACHE_File" ] && [ -z "$NOCACHE" ] && [ -d "$PYX_CACHE_DIR" ]; then
echo "Cache available - checking pyx diff"
@@ -57,16 +57,16 @@ if [ -f "$CACHE_File" ] && [ "$USE_CACHE" ] && [ -d "$PYX_CACHE_DIR" ]; then
fi
-if [ $clear_cache -eq 0 ] && [ "$USE_CACHE" ]
+if [ $clear_cache -eq 0 ] && [ -z "$NOCACHE" ]
then
- # No and use_cache is set
+ # No and nocache is not set
echo "Will reuse cached cython file"
cd /
tar xvmf $CACHE_File
cd $home_dir
else
echo "Rebuilding cythonized files"
- echo "Use cache (Blank if not set) = $USE_CACHE"
+ echo "No cache = $NOCACHE"
echo "Clear cache (1=YES) = $clear_cache"
fi
diff --git a/ci/requirements-2.7.build b/ci/requirements-2.7.build
index 836385671d603..415df13179fcf 100644
--- a/ci/requirements-2.7.build
+++ b/ci/requirements-2.7.build
@@ -1,4 +1,6 @@
+python=2.7*
python-dateutil=2.4.1
pytz=2013b
+nomkl
numpy
cython=0.23
diff --git a/ci/requirements-2.7_BUILD_TEST.build b/ci/requirements-2.7_BUILD_TEST.build
index faf1e3559f7f1..aadec00cb7ebf 100644
--- a/ci/requirements-2.7_BUILD_TEST.build
+++ b/ci/requirements-2.7_BUILD_TEST.build
@@ -1,4 +1,6 @@
+python=2.7*
dateutil
pytz
+nomkl
numpy
cython
diff --git a/ci/requirements-2.7_COMPAT.build b/ci/requirements-2.7_COMPAT.build
index 95e3da03f161b..0e1ccf9eac9bf 100644
--- a/ci/requirements-2.7_COMPAT.build
+++ b/ci/requirements-2.7_COMPAT.build
@@ -1,3 +1,4 @@
+python=2.7*
numpy=1.7.1
cython=0.23
dateutil=1.5
diff --git a/ci/requirements-2.7_LOCALE.build b/ci/requirements-2.7_LOCALE.build
index 28e2b96851eff..4a37ce8fbe161 100644
--- a/ci/requirements-2.7_LOCALE.build
+++ b/ci/requirements-2.7_LOCALE.build
@@ -1,3 +1,4 @@
+python=2.7*
python-dateutil
pytz=2013b
numpy=1.8.2
diff --git a/ci/requirements-2.7_SLOW.build b/ci/requirements-2.7_SLOW.build
index 664e8b418def7..0f4a2c6792e6b 100644
--- a/ci/requirements-2.7_SLOW.build
+++ b/ci/requirements-2.7_SLOW.build
@@ -1,3 +1,4 @@
+python=2.7*
python-dateutil
pytz
numpy=1.8.2
diff --git a/ci/requirements-2.7-64.run b/ci/requirements-2.7_WIN.run
similarity index 100%
rename from ci/requirements-2.7-64.run
rename to ci/requirements-2.7_WIN.run
diff --git a/ci/requirements-3.4-64.run b/ci/requirements-3.4-64.run
deleted file mode 100644
index 106cc5b7168ba..0000000000000
--- a/ci/requirements-3.4-64.run
+++ /dev/null
@@ -1,12 +0,0 @@
-python-dateutil
-pytz
-numpy=1.9*
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-scipy
-numexpr
-pytables
-bottleneck
-jinja2=2.8
diff --git a/ci/requirements-3.4.build b/ci/requirements-3.4.build
index e6e59dcba63fe..e8a957f70d40e 100644
--- a/ci/requirements-3.4.build
+++ b/ci/requirements-3.4.build
@@ -1,3 +1,4 @@
+python=3.4*
numpy=1.8.1
cython=0.24.1
libgfortran=1.0
diff --git a/ci/requirements-3.4_SLOW.build b/ci/requirements-3.4_SLOW.build
index c05a68a14b402..88212053af472 100644
--- a/ci/requirements-3.4_SLOW.build
+++ b/ci/requirements-3.4_SLOW.build
@@ -1,4 +1,6 @@
+python=3.4*
python-dateutil
pytz
+nomkl
numpy=1.10*
cython
diff --git a/ci/requirements-3.5.build b/ci/requirements-3.5.build
index 2fc2053e64fe9..76227e106e1fd 100644
--- a/ci/requirements-3.5.build
+++ b/ci/requirements-3.5.build
@@ -1,4 +1,6 @@
+python=3.5*
python-dateutil
pytz
+nomkl
numpy=1.11.3
cython
diff --git a/ci/requirements-3.5_ASCII.build b/ci/requirements-3.5_ASCII.build
index 9558cf00ddf5c..f7befe3b31865 100644
--- a/ci/requirements-3.5_ASCII.build
+++ b/ci/requirements-3.5_ASCII.build
@@ -1,4 +1,6 @@
+python=3.5*
python-dateutil
pytz
+nomkl
numpy
cython
diff --git a/ci/requirements-3.5_DOC_BUILD.build b/ci/requirements-3.5_DOC_BUILD.build
index 9558cf00ddf5c..f7befe3b31865 100644
--- a/ci/requirements-3.5_DOC_BUILD.build
+++ b/ci/requirements-3.5_DOC_BUILD.build
@@ -1,4 +1,6 @@
+python=3.5*
python-dateutil
pytz
+nomkl
numpy
cython
diff --git a/ci/requirements-3.5_OSX.build b/ci/requirements-3.5_OSX.build
index a201be352b8e4..f5bc01b67a20a 100644
--- a/ci/requirements-3.5_OSX.build
+++ b/ci/requirements-3.5_OSX.build
@@ -1,2 +1,4 @@
+python=3.5*
+nomkl
numpy=1.10.4
cython
diff --git a/ci/requirements-3.6.build b/ci/requirements-3.6.build
index 9558cf00ddf5c..1c4b46aea3865 100644
--- a/ci/requirements-3.6.build
+++ b/ci/requirements-3.6.build
@@ -1,4 +1,6 @@
+python=3.6*
python-dateutil
pytz
+nomkl
numpy
cython
diff --git a/ci/requirements-3.5_NUMPY_DEV.build b/ci/requirements-3.6_NUMPY_DEV.build
similarity index 70%
rename from ci/requirements-3.5_NUMPY_DEV.build
rename to ci/requirements-3.6_NUMPY_DEV.build
index d15edbfa3d2c1..738366867a217 100644
--- a/ci/requirements-3.5_NUMPY_DEV.build
+++ b/ci/requirements-3.6_NUMPY_DEV.build
@@ -1,3 +1,4 @@
+python=3.6*
python-dateutil
pytz
cython
diff --git a/ci/requirements-3.5_NUMPY_DEV.build.sh b/ci/requirements-3.6_NUMPY_DEV.build.sh
similarity index 100%
rename from ci/requirements-3.5_NUMPY_DEV.build.sh
rename to ci/requirements-3.6_NUMPY_DEV.build.sh
diff --git a/ci/requirements-3.5_NUMPY_DEV.run b/ci/requirements-3.6_NUMPY_DEV.run
similarity index 100%
rename from ci/requirements-3.5_NUMPY_DEV.run
rename to ci/requirements-3.6_NUMPY_DEV.run
diff --git a/ci/requirements-3.6-64.run b/ci/requirements-3.6_WIN.run
similarity index 100%
rename from ci/requirements-3.6-64.run
rename to ci/requirements-3.6_WIN.run
diff --git a/circle.yml b/circle.yml
index 046af6e9e1389..fa2da0680f388 100644
--- a/circle.yml
+++ b/circle.yml
@@ -21,13 +21,13 @@ dependencies:
- >
case $CIRCLE_NODE_INDEX in
0)
- sudo apt-get install language-pack-it && ./ci/install_circle.sh PYTHON_VERSION=2.7 JOB_TAG="_COMPAT" LOCALE_OVERRIDE="it_IT.UTF-8" ;;
+ sudo apt-get install language-pack-it && ./ci/install_circle.sh JOB="2.7_COMPAT" LOCALE_OVERRIDE="it_IT.UTF-8" ;;
1)
- sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh PYTHON_VERSION=3.4 JOB_TAG="_SLOW" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
+ sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.4_SLOW" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
2)
- sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh PYTHON_VERSION=3.4 JOB_TAG="" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
+ sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.4" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
3)
- ./ci/install_circle.sh PYTHON_VERSION=3.5 JOB_TAG="_ASCII" LOCALE_OVERRIDE="C" ;;
+ ./ci/install_circle.sh JOB="3.5_ASCII" LOCALE_OVERRIDE="C" ;;
esac
- ./ci/show_circle.sh
| https://api.github.com/repos/pandas-dev/pandas/pulls/15807 | 2017-03-26T13:54:43Z | 2017-03-26T15:12:20Z | null | 2017-03-27T10:48:44Z | |
Drop support for NaN categories in Categorical | diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 2203737ecd7b5..411f973e9a71f 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -230,6 +230,15 @@ Categories must be unique or a `ValueError` is raised:
except ValueError as e:
print("ValueError: " + str(e))
+Categories must also not be ``NaN`` or a `ValueError` is raised:
+
+.. ipython:: python
+
+ try:
+ s.cat.categories = [1,2,np.nan]
+ except ValueError as e:
+ print("ValueError: " + str(e))
+
Appending new categories
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 358d66653fb9c..a0b2b47c4bac3 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -816,6 +816,7 @@ Removal of prior version deprecations/changes
in favor of ``iloc`` and ``iat`` as explained :ref:`here <whatsnew_0170.deprecations>` (:issue:`10711`).
- The deprecated ``DataFrame.iterkv()`` has been removed in favor of ``DataFrame.iteritems()`` (:issue:`10711`)
- The ``Categorical`` constructor has dropped the ``name`` parameter (:issue:`10632`)
+- ``Categorical`` has dropped support for ``NaN`` categories (:issue:`10748`)
- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``sort`` and ``order`` methods (:issue:`10726`)
- Where clauses in ``pytables`` are only accepted as strings and expressions types and not other data-types (:issue:`12027`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 0e58c18631588..632c24c33feb7 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -545,18 +545,11 @@ def _validate_categories(cls, categories, fastpath=False):
if not fastpath:
- # check properties of the categories
- # we don't allow NaNs in the categories themselves
-
+ # Categories cannot contain NaN.
if categories.hasnans:
- # NaNs in cats deprecated in 0.17
- # GH 10748
- msg = ('\nSetting NaNs in `categories` is deprecated and '
- 'will be removed in a future version of pandas.')
- warn(msg, FutureWarning, stacklevel=3)
-
- # categories must be unique
+ raise ValueError('Categorial categories cannot be null')
+ # Categories must be unique.
if not categories.is_unique:
raise ValueError('Categorical categories must be unique')
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 64a0e71bd5ace..ef1be7e60e0e8 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -183,11 +183,6 @@ def test_contains(self):
self.assertFalse(0 in ci)
self.assertFalse(1 in ci)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- ci = CategoricalIndex(
- list('aabbca'), categories=list('cabdef') + [np.nan])
- self.assertFalse(np.nan in ci)
-
ci = CategoricalIndex(
list('aabbca') + [np.nan], categories=list('cabdef'))
self.assertTrue(np.nan in ci)
@@ -541,7 +536,6 @@ def test_ensure_copied_data(self):
self.assertIs(_base(index.values), _base(result.values))
def test_equals_categorical(self):
-
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
@@ -579,14 +573,6 @@ def test_equals_categorical(self):
self.assertFalse(ci.equals(CategoricalIndex(list('aabca'))))
self.assertTrue(ci.equals(ci.copy()))
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- ci = CategoricalIndex(list('aabca'),
- categories=['c', 'a', 'b', np.nan])
- self.assertFalse(ci.equals(list('aabca')))
- self.assertFalse(ci.equals(CategoricalIndex(list('aabca'))))
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- self.assertTrue(ci.equals(ci.copy()))
-
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
self.assertFalse(ci.equals(list('aabca')))
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 479f0e4566b8d..8fd3c6324d48c 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
+import pytest
import sys
from datetime import datetime
from distutils.version import LooseVersion
@@ -17,7 +18,8 @@
import pandas.compat as compat
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
- Timestamp, CategoricalIndex, isnull)
+ Timestamp, CategoricalIndex, DatetimeIndex,
+ isnull, NaT)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
@@ -160,12 +162,6 @@ def f():
self.assertRaises(ValueError, f)
- def f():
- with tm.assert_produces_warning(FutureWarning):
- Categorical([1, 2], [1, 2, np.nan, np.nan])
-
- self.assertRaises(ValueError, f)
-
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
@@ -222,29 +218,12 @@ def f():
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(is_float_dtype(cat.categories))
- # Deprecating NaNs in categoires (GH #10748)
- # preserve int as far as possible by converting to object if NaN is in
- # categories
- with tm.assert_produces_warning(FutureWarning):
- cat = pd.Categorical([np.nan, 1, 2, 3],
- categories=[np.nan, 1, 2, 3])
- self.assertTrue(is_object_dtype(cat.categories))
-
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(is_integer_dtype(vals))
- with tm.assert_produces_warning(FutureWarning):
- cat = pd.Categorical([np.nan, "a", "b", "c"],
- categories=[np.nan, "a", "b", "c"])
- self.assertTrue(is_object_dtype(cat.categories))
- # but don't do it for floats
- with tm.assert_produces_warning(FutureWarning):
- cat = pd.Categorical([np.nan, 1., 2., 3.],
- categories=[np.nan, 1., 2., 3.])
- self.assertTrue(is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
@@ -295,6 +274,22 @@ def f():
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
+ def test_constructor_with_null(self):
+
+ # Cannot have NaN in categories
+ with pytest.raises(ValueError):
+ pd.Categorical([np.nan, "a", "b", "c"],
+ categories=[np.nan, "a", "b", "c"])
+
+ with pytest.raises(ValueError):
+ pd.Categorical([None, "a", "b", "c"],
+ categories=[None, "a", "b", "c"])
+
+ with pytest.raises(ValueError):
+ pd.Categorical(DatetimeIndex(['nat', '20160101']),
+ categories=[NaT, Timestamp('20160101')])
+
+
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values, Categorical(ci))
@@ -418,6 +413,12 @@ def f():
self.assertRaises(ValueError, f)
+ # NaN categories included
+ def f():
+ Categorical.from_codes([0, 1, 2], ["a", "b", np.nan])
+
+ self.assertRaises(ValueError, f)
+
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
@@ -649,30 +650,6 @@ def test_describe(self):
name='categories'))
tm.assert_frame_equal(desc, expected)
- # NA as a category
- with tm.assert_produces_warning(FutureWarning):
- cat = pd.Categorical(["a", "c", "c", np.nan],
- categories=["b", "a", "c", np.nan])
- result = cat.describe()
-
- expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
- columns=['counts', 'freqs'],
- index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
- name='categories'))
- tm.assert_frame_equal(result, expected, check_categorical=False)
-
- # NA as an unused category
- with tm.assert_produces_warning(FutureWarning):
- cat = pd.Categorical(["a", "c", "c"],
- categories=["b", "a", "c", np.nan])
- result = cat.describe()
-
- exp_idx = pd.CategoricalIndex(
- ['b', 'a', 'c', np.nan], name='categories')
- expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
- columns=['counts', 'freqs'], index=exp_idx)
- tm.assert_frame_equal(result, expected, check_categorical=False)
-
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
@@ -1119,90 +1096,18 @@ def test_nan_handling(self):
self.assert_numpy_array_equal(c._codes,
np.array([0, -1, -1, 0], dtype=np.int8))
- # If categories have nan included, the code should point to that
- # instead
- with tm.assert_produces_warning(FutureWarning):
- c = Categorical(["a", "b", np.nan, "a"],
- categories=["a", "b", np.nan])
- self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
- self.assert_numpy_array_equal(c._codes,
- np.array([0, 1, 2, 0], dtype=np.int8))
- c[1] = np.nan
- self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
- self.assert_numpy_array_equal(c._codes,
- np.array([0, 2, 2, 0], dtype=np.int8))
-
- # Changing categories should also make the replaced category np.nan
- c = Categorical(["a", "b", "c", "a"])
- with tm.assert_produces_warning(FutureWarning):
- c.categories = ["a", "b", np.nan] # noqa
-
- self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
- self.assert_numpy_array_equal(c._codes,
- np.array([0, 1, 2, 0], dtype=np.int8))
-
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_index_equal(c.categories, Index(["a", "b"]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 1, -1, 0], dtype=np.int8))
- with tm.assert_produces_warning(FutureWarning):
- c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
-
- self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
- self.assert_numpy_array_equal(c._codes,
- np.array([0, 1, -1, 0], dtype=np.int8))
- c[1] = np.nan
- self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
- self.assert_numpy_array_equal(c._codes,
- np.array([0, 2, -1, 0], dtype=np.int8))
-
- # Remove null categories (GH 10156)
- cases = [([1.0, 2.0, np.nan], [1.0, 2.0]),
- (['a', 'b', None], ['a', 'b']),
- ([pd.Timestamp('2012-05-01'), pd.NaT],
- [pd.Timestamp('2012-05-01')])]
-
- null_values = [np.nan, None, pd.NaT]
-
- for with_null, without in cases:
- with tm.assert_produces_warning(FutureWarning):
- base = Categorical([], with_null)
- expected = Categorical([], without)
-
- for nullval in null_values:
- result = base.remove_categories(nullval)
- self.assert_categorical_equal(result, expected)
-
- # Different null values are indistinguishable
- for i, j in [(0, 1), (0, 2), (1, 2)]:
- nulls = [null_values[i], null_values[j]]
-
- def f():
- with tm.assert_produces_warning(FutureWarning):
- Categorical([], categories=nulls)
-
- self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
- self.assert_numpy_array_equal(res, exp)
- with tm.assert_produces_warning(FutureWarning):
- c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
- res = c.isnull()
- self.assert_numpy_array_equal(res, exp)
-
- # test both nan in categories and as -1
- exp = np.array([True, False, True])
- c = Categorical(["a", "b", np.nan])
- with tm.assert_produces_warning(FutureWarning):
- c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
- c[0] = np.nan
- res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
@@ -1487,45 +1392,10 @@ def test_slicing_directly(self):
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
- exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
- tm.assert_categorical_equal(cat, exp)
- # if nan in categories, the proper code should be set!
- cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
- cat[1] = np.nan
- exp = np.array([0, 3, 2, -1], dtype=np.int8)
- self.assert_numpy_array_equal(cat.codes, exp)
-
- cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
- cat[1:3] = np.nan
- exp = np.array([0, 3, 3, -1], dtype=np.int8)
- self.assert_numpy_array_equal(cat.codes, exp)
-
- cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
- cat[1:3] = [np.nan, 1]
- exp = np.array([0, 3, 0, -1], dtype=np.int8)
- self.assert_numpy_array_equal(cat.codes, exp)
-
- cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
- cat[1:3] = [np.nan, np.nan]
- exp = np.array([0, 3, 3, -1], dtype=np.int8)
- self.assert_numpy_array_equal(cat.codes, exp)
-
- cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
- cat[pd.isnull(cat)] = np.nan
- exp = np.array([0, 1, 3, 2], dtype=np.int8)
- self.assert_numpy_array_equal(cat.codes, exp)
+ exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
+ tm.assert_categorical_equal(cat, exp)
def test_shift(self):
# GH 9416
@@ -2026,33 +1896,12 @@ def test_sideeffects_free(self):
def test_nan_handling(self):
- # Nans are represented as -1 in labels
+ # NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_index_equal(s.cat.categories, Index(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
- # If categories have nan included, the label should point to that
- # instead
- with tm.assert_produces_warning(FutureWarning):
- s2 = Series(Categorical(["a", "b", np.nan, "a"],
- categories=["a", "b", np.nan]))
-
- exp_cat = Index(["a", "b", np.nan])
- self.assert_index_equal(s2.cat.categories, exp_cat)
- self.assert_numpy_array_equal(s2.values.codes,
- np.array([0, 1, 2, 0], dtype=np.int8))
-
- # Changing categories should also make the replaced category np.nan
- s3 = Series(Categorical(["a", "b", "c", "a"]))
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- s3.cat.categories = ["a", "b", np.nan]
-
- exp_cat = Index(["a", "b", np.nan])
- self.assert_index_equal(s3.cat.categories, exp_cat)
- self.assert_numpy_array_equal(s3.values.codes,
- np.array([0, 1, 2, 0], dtype=np.int8))
-
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_index_equal(s.cat.categories, Index(["a", "b"]))
| Deprecated in 0.17.0.
xref #10748
xref #13648 | https://api.github.com/repos/pandas-dev/pandas/pulls/15806 | 2017-03-26T01:48:11Z | 2017-03-27T14:47:02Z | null | 2017-03-27T19:16:59Z |
MAINT: Remove combineAdd and combineMult | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 0612e86134cf2..8482eef552c17 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -84,29 +84,28 @@ will be completed:
@verbatim
In [1]: df2.<TAB>
- df2.A df2.boxplot
- df2.abs df2.C
- df2.add df2.clip
- df2.add_prefix df2.clip_lower
- df2.add_suffix df2.clip_upper
- df2.align df2.columns
- df2.all df2.combine
- df2.any df2.combineAdd
+ df2.A df2.bool
+ df2.abs df2.boxplot
+ df2.add df2.C
+ df2.add_prefix df2.clip
+ df2.add_suffix df2.clip_lower
+ df2.align df2.clip_upper
+ df2.all df2.columns
+ df2.any df2.combine
df2.append df2.combine_first
- df2.apply df2.combineMult
- df2.applymap df2.compound
- df2.as_blocks df2.consolidate
- df2.asfreq df2.convert_objects
- df2.as_matrix df2.copy
- df2.astype df2.corr
- df2.at df2.corrwith
- df2.at_time df2.count
- df2.axes df2.cov
- df2.B df2.cummax
- df2.between_time df2.cummin
- df2.bfill df2.cumprod
- df2.blocks df2.cumsum
- df2.bool df2.D
+ df2.apply df2.compound
+ df2.applymap df2.consolidate
+ df2.as_blocks df2.convert_objects
+ df2.asfreq df2.copy
+ df2.as_matrix df2.corr
+ df2.astype df2.corrwith
+ df2.at df2.count
+ df2.at_time df2.cov
+ df2.axes df2.cummax
+ df2.B df2.cummin
+ df2.between_time df2.cumprod
+ df2.bfill df2.cumsum
+ df2.blocks df2.D
As you can see, the columns ``A``, ``B``, ``C``, and ``D`` are automatically
tab completed. ``E`` is there as well; the rest of the attributes have been
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index f96fc41c73f15..1b671cfeaba0c 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -814,6 +814,7 @@ Removal of prior version deprecations/changes
- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``sort`` and ``order`` methods (:issue:`10726`)
- Where clauses in ``pytables`` are only accepted as strings and expressions types and not other data-types (:issue:`12027`)
+- ``DataFrame`` has dropped the ``combineAdd`` and ``combineMult`` methods in favor of ``add`` and ``mul`` respectively (:issue:`10735`)
.. _whatsnew_0200.performance:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6b5e8e0799421..90c49a9c85133 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5362,62 +5362,6 @@ def isin(self, values):
values).reshape(self.shape), self.index,
self.columns)
- # ----------------------------------------------------------------------
- # Deprecated stuff
-
- def combineAdd(self, other):
- """
- DEPRECATED. Use ``DataFrame.add(other, fill_value=0.)`` instead.
-
- Add two DataFrame objects and do not propagate
- NaN values, so if for a (column, time) one frame is missing a
- value, it will default to the other frame's value (which might
- be NaN as well)
-
- Parameters
- ----------
- other : DataFrame
-
- Returns
- -------
- DataFrame
-
- See also
- --------
- DataFrame.add
-
- """
- warnings.warn("'combineAdd' is deprecated. Use "
- "'DataFrame.add(other, fill_value=0.)' instead",
- FutureWarning, stacklevel=2)
- return self.add(other, fill_value=0.)
-
- def combineMult(self, other):
- """
- DEPRECATED. Use ``DataFrame.mul(other, fill_value=1.)`` instead.
-
- Multiply two DataFrame objects and do not propagate NaN values, so if
- for a (column, time) one frame is missing a value, it will default to
- the other frame's value (which might be NaN as well)
-
- Parameters
- ----------
- other : DataFrame
-
- Returns
- -------
- DataFrame
-
- See also
- --------
- DataFrame.mul
-
- """
- warnings.warn("'combineMult' is deprecated. Use "
- "'DataFrame.mul(other, fill_value=1.)' instead",
- FutureWarning, stacklevel=2)
- return self.mul(other, fill_value=1.)
-
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index d6a3592446fd5..268854fe6b62d 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -1038,74 +1038,6 @@ def test_boolean_comparison(self):
self.assertRaises(ValueError, lambda: df == (2, 2))
self.assertRaises(ValueError, lambda: df == [2, 2])
- def test_combineAdd(self):
-
- with tm.assert_produces_warning(FutureWarning):
- # trivial
- comb = self.frame.combineAdd(self.frame)
- assert_frame_equal(comb, self.frame * 2)
-
- # more rigorous
- a = DataFrame([[1., nan, nan, 2., nan]],
- columns=np.arange(5))
- b = DataFrame([[2., 3., nan, 2., 6., nan]],
- columns=np.arange(6))
- expected = DataFrame([[3., 3., nan, 4., 6., nan]],
- columns=np.arange(6))
-
- with tm.assert_produces_warning(FutureWarning):
- result = a.combineAdd(b)
- assert_frame_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning):
- result2 = a.T.combineAdd(b.T)
- assert_frame_equal(result2, expected.T)
-
- expected2 = a.combine(b, operator.add, fill_value=0.)
- assert_frame_equal(expected, expected2)
-
- # corner cases
- with tm.assert_produces_warning(FutureWarning):
- comb = self.frame.combineAdd(self.empty)
- assert_frame_equal(comb, self.frame)
-
- with tm.assert_produces_warning(FutureWarning):
- comb = self.empty.combineAdd(self.frame)
- assert_frame_equal(comb, self.frame)
-
- # integer corner case
- df1 = DataFrame({'x': [5]})
- df2 = DataFrame({'x': [1]})
- df3 = DataFrame({'x': [6]})
-
- with tm.assert_produces_warning(FutureWarning):
- comb = df1.combineAdd(df2)
- assert_frame_equal(comb, df3)
-
- # mixed type GH2191
- df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
- df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
- with tm.assert_produces_warning(FutureWarning):
- rs = df1.combineAdd(df2)
- xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
- assert_frame_equal(xp, rs)
-
- # TODO: test integer fill corner?
-
- def test_combineMult(self):
- with tm.assert_produces_warning(FutureWarning):
- # trivial
- comb = self.frame.combineMult(self.frame)
-
- assert_frame_equal(comb, self.frame ** 2)
-
- # corner cases
- comb = self.frame.combineMult(self.empty)
- assert_frame_equal(comb, self.frame)
-
- comb = self.empty.combineMult(self.frame)
- assert_frame_equal(comb, self.frame)
-
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
| Deprecated in 0.17.0.
xref #10735 | https://api.github.com/repos/pandas-dev/pandas/pulls/15805 | 2017-03-26T01:36:12Z | 2017-03-26T16:58:59Z | 2017-03-26T16:58:59Z | 2017-03-26T20:02:49Z |
DOC: Explain differences further for sep parameter | diff --git a/doc/source/io.rst b/doc/source/io.rst
index a702efdc6aaf9..faeea9d448cf2 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -91,11 +91,12 @@ filepath_or_buffer : various
locations), or any object with a ``read()`` method (such as an open file or
:class:`~python:io.StringIO`).
sep : str, defaults to ``','`` for :func:`read_csv`, ``\t`` for :func:`read_table`
- Delimiter to use. If sep is ``None``,
- will try to automatically determine this. Separators longer than 1 character
- and different from ``'\s+'`` will be interpreted as regular expressions, will
- force use of the python parsing engine and will ignore quotes in the data.
- Regex example: ``'\\r\\t'``.
+ Delimiter to use. If sep is ``None``, the C engine cannot automatically detect
+ the separator, but the Python parsing engine can, meaning the latter will be
+ used automatically. In addition, separators longer than 1 character and
+ different from ``'\s+'`` will be interpreted as regular expressions and
+ will also force the use of the Python parsing engine. Note that regex
+ delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``.
delimiter : str, default ``None``
Alternative argument name for sep.
delim_whitespace : boolean, default False
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index f7b2d75c19304..45c62b224ef4e 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -305,10 +305,12 @@
currently more feature-complete."""
_sep_doc = r"""sep : str, default {default}
- Delimiter to use. If sep is None, will try to automatically determine
- this. Separators longer than 1 character and different from ``'\s+'`` will
- be interpreted as regular expressions, will force use of the python parsing
- engine and will ignore quotes in the data. Regex example: ``'\r\t'``"""
+ Delimiter to use. If sep is None, the C engine cannot automatically detect
+ the separator, but the Python parsing engine can, meaning the latter will
+ be used automatically. In addition, separators longer than 1 character and
+ different from ``'\s+'`` will be interpreted as regular expressions and
+ will also force the use of the Python parsing engine. Note that regex
+ delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``"""
_read_csv_doc = """
Read CSV (comma-separated) file into DataFrame
| Per comment in <a href="https://github.com/pandas-dev/pandas/issues/12686#issuecomment-288843261">#12686</a>. This is the only documentation issue that I found amongst the differences that still exist between the C and Python engines in `parsers.py`
| https://api.github.com/repos/pandas-dev/pandas/pulls/15804 | 2017-03-26T01:25:12Z | 2017-03-26T17:02:46Z | 2017-03-26T17:02:46Z | 2017-03-26T20:02:53Z |
CI: cleanup / linting | diff --git a/.travis.yml b/.travis.yml
index ab83a37f25905..49e8684ff21ef 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -27,63 +27,37 @@ matrix:
- language: objective-c
os: osx
compiler: clang
- osx_image: xcode6.4
cache:
ccache: true
directories:
- $HOME/.cache # cython cache
- $HOME/.ccache # compiler cache
env:
- - PYTHON_VERSION=3.5
- - JOB_NAME: "35_osx"
- - TEST_ARGS="--skip-slow --skip-network"
- - JOB_TAG=_OSX
- - TRAVIS_PYTHON_VERSION=3.5
- - USE_CACHE=true
+ - PYTHON_VERSION=3.5 JOB_NAME="35_osx" TEST_ARGS="--skip-slow --skip-network" JOB_TAG="_OSX" TRAVIS_PYTHON_VERSION=3.5 USE_CACHE=true
- python: 2.7
env:
- - PYTHON_VERSION=2.7
- - JOB_NAME: "27_slow_nnet_LOCALE"
- - TEST_ARGS="--only-slow --skip-network"
- - LOCALE_OVERRIDE="zh_CN.UTF-8"
- - JOB_TAG=_LOCALE
- - USE_CACHE=true
+ - PYTHON_VERSION=2.7 JOB_NAME="27_slow_nnet_LOCALE" TEST_ARGS="--only-slow --skip-network" LOCALE_OVERRIDE="zh_CN.UTF-8" JOB_TAG="_LOCALE" USE_CACHE=true
addons:
apt:
packages:
- language-pack-zh-hans
- python: 2.7
env:
- - PYTHON_VERSION=2.7
- - JOB_NAME: "27_nslow"
- - TEST_ARGS="--skip-slow"
- - CLIPBOARD_GUI=gtk2
- - LINT=true
- - USE_CACHE=true
+ - PYTHON_VERSION=2.7 JOB_NAME="27_nslow" TEST_ARGS="--skip-slow" LINT=true USE_CACHE=true
addons:
apt:
packages:
- python-gtk2
- python: 3.5
env:
- - PYTHON_VERSION=3.5
- - JOB_NAME: "35_nslow"
- - TEST_ARGS="--skip-slow --skip-network"
- - CLIPBOARD=xsel
- - COVERAGE=true
- - USE_CACHE=true
+ - PYTHON_VERSION=3.5 JOB_NAME="35_nslow" TEST_ARGS="--skip-slow --skip-network" COVERAGE=true USE_CACHE=true
addons:
apt:
packages:
- xsel
- python: 3.6
env:
- - PYTHON_VERSION=3.6
- - JOB_NAME: "36"
- - TEST_ARGS="--skip-slow --skip-network"
- - PANDAS_TESTING_MODE="deprecate"
- - CONDA_FORGE=true
- - USE_CACHE=true
+ - PYTHON_VERSION=3.6 JOB_NAME="36" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true USE_CACHE=true
addons:
apt:
packages:
@@ -92,68 +66,32 @@ matrix:
# In allow_failures
- python: 2.7
env:
- - PYTHON_VERSION=2.7
- - JOB_NAME: "27_slow"
- - JOB_TAG=_SLOW
- - TEST_ARGS="--only-slow --skip-network"
- - USE_CACHE=true
+ - PYTHON_VERSION=2.7 JOB_NAME="27_slow" JOB_TAG="_SLOW" TEST_ARGS="--only-slow --skip-network" USE_CACHE=true
# In allow_failures
- python: 2.7
env:
- - PYTHON_VERSION=2.7
- - JOB_NAME: "27_build_test"
- - JOB_TAG=_BUILD_TEST
- - TEST_ARGS="--skip-slow"
- - BUILD_TEST=true
- - USE_CACHE=true
+ - PYTHON_VERSION=2.7 JOB_NAME="27_build_test" JOB_TAG="_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true USE_CACHE=true
# In allow_failures
- python: 3.5
env:
- - PYTHON_VERSION=3.5
- - JOB_NAME: "35_numpy_dev"
- - JOB_TAG=_NUMPY_DEV
- - TEST_ARGS="--skip-slow --skip-network"
- - PANDAS_TESTING_MODE="deprecate"
- - USE_CACHE=true
+ - PYTHON_VERSION=3.5 JOB_NAME="35_numpy_dev" JOB_TAG="_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" USE_CACHE=true
# In allow_failures
- python: 3.5
env:
- - PYTHON_VERSION=3.5
- - JOB_NAME: "doc_build"
- - DOC_BUILD=true
- - JOB_TAG=_DOC_BUILD
- - USE_CACHE=true
+ - PYTHON_VERSION=3.5 JOB_NAME="doc_build" DOC_BUILD=true JOB_TAG="_DOC_BUILD" USE_CACHE=true
allow_failures:
- python: 2.7
env:
- - PYTHON_VERSION=2.7
- - JOB_NAME: "27_slow"
- - JOB_TAG=_SLOW
- - TEST_ARGS="--only-slow --skip-network"
- - USE_CACHE=true
+ - PYTHON_VERSION=2.7 JOB_NAME="27_slow" JOB_TAG="_SLOW" TEST_ARGS="--only-slow --skip-network" USE_CACHE=true
- python: 2.7
env:
- - PYTHON_VERSION=2.7
- - JOB_NAME: "27_build_test"
- - JOB_TAG=_BUILD_TEST
- - TEST_ARGS="--skip-slow"
- - BUILD_TEST=true
- - USE_CACHE=true
+ - PYTHON_VERSION=2.7 JOB_NAME="27_build_test" JOB_TAG="_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true USE_CACHE=true
- python: 3.5
env:
- - PYTHON_VERSION=3.5
- - JOB_NAME: "35_numpy_dev"
- - JOB_TAG=_NUMPY_DEV
- - TEST_ARGS="--skip-slow --skip-network"
- - PANDAS_TESTING_MODE="deprecate"
- - USE_CACHE=true
+ - PYTHON_VERSION=3.5 JOB_NAME="35_numpy_dev" JOB_TAG="_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" USE_CACHE=true
- python: 3.5
env:
- - PYTHON_VERSION=3.5
- - JOB_NAME: "doc_build"
- - DOC_BUILD=true
- - JOB_TAG=_DOC_BUILD
- - USE_CACHE=true
+ - PYTHON_VERSION=3.5 JOB_NAME="doc_build" DOC_BUILD=true JOB_TAG="_DOC_BUILD" USE_CACHE=true
before_install:
- echo "before_install"
@@ -165,7 +103,7 @@ before_install:
- git --version
- git tag
- ci/before_install_travis.sh
- - export DISPLAY=:99.0
+ - export DISPLAY=":99.0"
install:
- echo "install start"
@@ -196,10 +134,10 @@ after_script:
- echo "after_script start"
- ci/install_test.sh
- source activate pandas && python -c "import pandas; pandas.show_versions();"
- - if [ "$DOC_BUILD"]; then
+ - if [ -z "$DOC_BUILD" ]; then
ci/print_skipped.py /tmp/single.xml;
fi
- - if [ "$DOC_BUILD"]; then
+ - if [ -z "$DOC_BUILD" ]; then
ci/print_skipped.py /tmp/multiple.xml;
fi
- echo "after_script done"
| CI: typo in .travis.yml for print_skipped
CI: linted .travis.yml
CI: removed CLIPBOARD env variables as not used
| https://api.github.com/repos/pandas-dev/pandas/pulls/15803 | 2017-03-25T23:28:34Z | 2017-03-26T02:16:11Z | null | 2017-03-26T02:16:47Z |
Revert "MAINT: Remove Long and WidePanel (#15748)" | diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index a7e530e7f5ef1..56ccc94c414fb 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -25,6 +25,11 @@
except:
pass
+try:
+ Panel = Panel
+except Exception:
+ Panel = WidePanel
+
# didn't add to namespace until later
try:
from pandas.core.index import MultiIndex
diff --git a/bench/bench_join_panel.py b/bench/bench_join_panel.py
index 113b317dd8ff8..f3c3f8ba15f70 100644
--- a/bench/bench_join_panel.py
+++ b/bench/bench_join_panel.py
@@ -45,8 +45,8 @@ def reindex_on_axis(panels, axis, axis_reindex):
return p
-# Does the job but inefficient. It is better to handle
-# this like you read a table in pytables.
+# does the job but inefficient (better to handle like you read a table in
+# pytables...e.g create a LongPanel then convert to Wide)
def create_panels_join(cls, panels):
""" given an array of panels's, create a single panel """
panels = [a for a in panels if a is not None]
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index c5bf943cebca7..ca6541256f1d2 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -814,7 +814,6 @@ Removal of prior version deprecations/changes
- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``sort`` and ``order`` methods (:issue:`10726`)
- Where clauses in ``pytables`` are only accepted as strings and expressions types and not other data-types (:issue:`12027`)
-- The ``LongPanel`` and ``WidePanel`` classes have been removed (:issue:`10892`)
.. _whatsnew_0200.performance:
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 5018de39ca907..65253dedb8b53 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -15,7 +15,7 @@
from pandas.core.series import Series
from pandas.core.frame import DataFrame
-from pandas.core.panel import Panel
+from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
from pandas.core.reshape import (pivot_simple as pivot, get_dummies,
lreshape, wide_to_long)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 50ddc24ac9656..5ab3c44b175fe 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -4,6 +4,8 @@
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
+import warnings
+
import numpy as np
from pandas.types.cast import (infer_dtype_from_scalar,
@@ -1554,3 +1556,24 @@ def f(self, other, axis=0):
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
+
+
+# legacy
+class WidePanel(Panel):
+
+ def __init__(self, *args, **kwargs):
+ # deprecation, #10892
+ warnings.warn("WidePanel is deprecated. Please use Panel",
+ FutureWarning, stacklevel=2)
+
+ super(WidePanel, self).__init__(*args, **kwargs)
+
+
+class LongPanel(DataFrame):
+
+ def __init__(self, *args, **kwargs):
+ # deprecation, #10892
+ warnings.warn("LongPanel is deprecated. Please use DataFrame",
+ FutureWarning, stacklevel=2)
+
+ super(LongPanel, self).__init__(*args, **kwargs)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 2c7dcf2501f32..73222c246fc70 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -54,7 +54,8 @@ class TestPDApi(Base, tm.TestCase):
'TimedeltaIndex', 'Timestamp']
# these are already deprecated; awaiting removal
- deprecated_classes = ['Panel4D', 'SparseList', 'Expr', 'Term']
+ deprecated_classes = ['WidePanel', 'Panel4D',
+ 'SparseList', 'Expr', 'Term']
# these should be deprecated in the future
deprecated_classes_in_future = ['Panel']
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 2d62cb2d6944d..82a98f5d08488 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -2964,6 +2964,9 @@ def _check(left, right):
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
+ def test_longpanel(self):
+ pass
+
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 13e16f3b90730..ab0322abbcf06 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -178,6 +178,10 @@ def wrapper(x):
class SafeForSparse(object):
+ @classmethod
+ def assert_panel_equal(cls, x, y):
+ assert_panel_equal(x, y)
+
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
@@ -342,10 +346,10 @@ def check_op(op, name):
def test_combinePanel(self):
result = self.panel.add(self.panel)
- assert_panel_equal(result, self.panel * 2)
+ self.assert_panel_equal(result, self.panel * 2)
def test_neg(self):
- assert_panel_equal(-self.panel, self.panel * -1)
+ self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
@@ -365,22 +369,22 @@ def test_select(self):
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
- assert_panel_equal(result, expected)
+ self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
- assert_panel_equal(result, expected)
+ self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
- assert_panel_equal(result, expected)
+ self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
- assert_panel_equal(result, p.reindex(items=[]))
+ self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
@@ -395,8 +399,8 @@ def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
- assert_panel_equal(result, expected)
- assert_panel_equal(result2, expected)
+ self.assert_panel_equal(result, expected)
+ self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
@@ -863,6 +867,10 @@ def test_set_value(self):
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
+ @classmethod
+ def assert_panel_equal(cls, x, y):
+ assert_panel_equal(x, y)
+
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
@@ -1959,7 +1967,7 @@ def test_round(self):
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
- assert_panel_equal(expected, result)
+ self.assert_panel_equal(expected, result)
def test_numpy_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
@@ -1975,7 +1983,7 @@ def test_numpy_round(self):
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = np.round(p)
- assert_panel_equal(expected, result)
+ self.assert_panel_equal(expected, result)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.round, p, out=p)
@@ -2262,12 +2270,15 @@ def test_all_any_unhandled(self):
self.assertRaises(NotImplementedError, self.panel.any, bool_only=True)
-class TestPanelFrame(tm.TestCase):
+class TestLongPanel(tm.TestCase):
"""
- Check that conversions to and from Panel to DataFrame work.
+ LongPanel no longer exists, but...
"""
def setUp(self):
+ import warnings
+ warnings.filterwarnings(action='ignore', category=FutureWarning)
+
panel = tm.makePanel()
tm.add_nans(panel)
diff --git a/vb_suite/pandas_vb_common.py b/vb_suite/pandas_vb_common.py
index 41e43d6ab10e5..bd2e8a1c1d504 100644
--- a/vb_suite/pandas_vb_common.py
+++ b/vb_suite/pandas_vb_common.py
@@ -18,6 +18,11 @@
except:
import pandas._libs.lib as lib
+try:
+ Panel = WidePanel
+except Exception:
+ pass
+
# didn't add to namespace until later
try:
from pandas.core.index import MultiIndex
| This reverts commit bff47f2302a0be4dcbf7e5055e525d5652e08fb5.
xref #15748
we will have to push this to a later pandas major version as statsmodels not fully compat: https://github.com/statsmodels/statsmodels/issues/3580 | https://api.github.com/repos/pandas-dev/pandas/pulls/15802 | 2017-03-25T23:11:14Z | 2017-03-26T02:16:32Z | 2017-03-26T02:16:32Z | 2017-03-26T02:17:11Z |
MAINT: Enforce string type for where parameter | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 37a70435ed6ff..dee1a5750eeeb 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -812,6 +812,7 @@ Removal of prior version deprecations/changes
- The ``Categorical`` constructor has dropped the ``name`` parameter (:issue:`10632`)
- The ``take_last`` parameter has been dropped from ``duplicated()``, ``drop_duplicates()``, ``nlargest()``, and ``nsmallest()`` methods (:issue:`10236`, :issue:`10792`, :issue:`10920`)
- ``Series``, ``Index``, and ``DataFrame`` have dropped the ``sort`` and ``order`` methods (:issue:`10726`)
+- Where clauses in ``pytables`` are only accepted as strings and expressions types and not other data-types (:issue:`12027`)
- The ``LongPanel`` and ``WidePanel`` classes have been removed (:issue:`10892`)
.. _whatsnew_0200.performance:
diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index 7c09ca8d38773..2a5056963fe8d 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -1,9 +1,7 @@
""" manage PyTables query interface via Expressions """
import ast
-import warnings
from functools import partial
-from datetime import datetime, timedelta
import numpy as np
import pandas as pd
@@ -452,6 +450,32 @@ def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
+def _validate_where(w):
+ """
+ Validate that the where statement is of the right type.
+
+ The type may either be String, Expr, or list-like of Exprs.
+
+ Parameters
+ ----------
+ w : String term expression, Expr, or list-like of Exprs.
+
+ Returns
+ -------
+ where : The original where clause if the check was successful.
+
+ Raises
+ ------
+ TypeError : An invalid data type was passed in for w (e.g. dict).
+ """
+
+ if not (isinstance(w, (Expr, string_types)) or is_list_like(w)):
+ raise TypeError("where must be passed as a string, Expr, "
+ "or list-like of Exprs")
+
+ return w
+
+
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
@@ -481,11 +505,9 @@ class Expr(expr.Expr):
"major_axis>=20130101"
"""
- def __init__(self, where, op=None, value=None, queryables=None,
- encoding=None, scope_level=0):
+ def __init__(self, where, queryables=None, encoding=None, scope_level=0):
- # try to be back compat
- where = self.parse_back_compat(where, op, value)
+ where = _validate_where(where)
self.encoding = encoding
self.condition = None
@@ -505,7 +527,7 @@ def __init__(self, where, op=None, value=None, queryables=None,
if isinstance(w, Expr):
local_dict = w.env.scope
else:
- w = self.parse_back_compat(w)
+ w = _validate_where(w)
where[idx] = w
where = ' & ' .join(["(%s)" % w for w in where]) # noqa
@@ -519,59 +541,6 @@ def __init__(self, where, op=None, value=None, queryables=None,
encoding=encoding)
self.terms = self.parse()
- def parse_back_compat(self, w, op=None, value=None):
- """ allow backward compatibility for passed arguments """
-
- if isinstance(w, dict):
- w, op, value = w.get('field'), w.get('op'), w.get('value')
- if not isinstance(w, string_types):
- raise TypeError(
- "where must be passed as a string if op/value are passed")
- warnings.warn("passing a dict to Expr is deprecated, "
- "pass the where as a single string",
- FutureWarning, stacklevel=10)
- if isinstance(w, tuple):
- if len(w) == 2:
- w, value = w
- op = '=='
- elif len(w) == 3:
- w, op, value = w
- warnings.warn("passing a tuple into Expr is deprecated, "
- "pass the where as a single string",
- FutureWarning, stacklevel=10)
-
- if op is not None:
- if not isinstance(w, string_types):
- raise TypeError(
- "where must be passed as a string if op/value are passed")
-
- if isinstance(op, Expr):
- raise TypeError("invalid op passed, must be a string")
- w = "{0}{1}".format(w, op)
- if value is not None:
- if isinstance(value, Expr):
- raise TypeError("invalid value passed, must be a string")
-
- # stringify with quotes these values
- def convert(v):
- if isinstance(v, (datetime, np.datetime64,
- timedelta, np.timedelta64)):
- return "'{0}'".format(v)
- return v
-
- if isinstance(value, (list, tuple)):
- value = [convert(v) for v in value]
- else:
- value = convert(value)
-
- w = "{0}{1}".format(w, value)
-
- warnings.warn("passing multiple values to Expr is deprecated, "
- "pass the where as a single string",
- FutureWarning, stacklevel=10)
-
- return w
-
def __unicode__(self):
if self.terms is not None:
return pprint_thing(self.terms)
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 324160d5b1ae6..2d62cb2d6944d 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -2585,59 +2585,6 @@ def test_term_compat(self):
expected = wp.loc[:, :, ['A', 'B']]
assert_panel_equal(result, expected)
- def test_backwards_compat_without_term_object(self):
- with ensure_clean_store(self.path) as store:
-
- wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
- store.append('wp', wp)
- with catch_warnings(record=True):
- result = store.select('wp', [('major_axis>20000102'),
- ('minor_axis', '=', ['A', 'B'])])
- expected = wp.loc[:,
- wp.major_axis > Timestamp('20000102'),
- ['A', 'B']]
- assert_panel_equal(result, expected)
-
- store.remove('wp', ('major_axis>20000103'))
- result = store.select('wp')
- expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
- assert_panel_equal(result, expected)
-
- with ensure_clean_store(self.path) as store:
-
- wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
- store.append('wp', wp)
-
- # stringified datetimes
- with catch_warnings(record=True):
- result = store.select('wp',
- [('major_axis',
- '>',
- datetime.datetime(2000, 1, 2))])
- expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
- assert_panel_equal(result, expected)
- with catch_warnings(record=True):
- result = store.select('wp',
- [('major_axis',
- '>',
- datetime.datetime(2000, 1, 2, 0, 0))])
- expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
- assert_panel_equal(result, expected)
- with catch_warnings(record=True):
- result = store.select('wp',
- [('major_axis',
- '=',
- [datetime.datetime(2000, 1, 2, 0, 0),
- datetime.datetime(2000, 1, 3, 0, 0)])]
- )
- expected = wp.loc[:, [Timestamp('20000102'),
- Timestamp('20000103')]]
- assert_panel_equal(result, expected)
-
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
| Deprecated in 0.11.0.
xref #12027.
| https://api.github.com/repos/pandas-dev/pandas/pulls/15798 | 2017-03-24T09:05:39Z | 2017-03-25T15:59:49Z | null | 2017-03-25T20:49:21Z |
Rolling window endpoints inclusion | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index a37cbc96b2d8c..cd90ba6e9ca1a 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -459,6 +459,48 @@ default of the index) in a DataFrame.
dft
dft.rolling('2s', on='foo').sum()
+.. _stats.rolling_window.endpoints:
+
+Rolling Window Endpoints
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 0.20.0
+
+The inclusion of the interval endpoints in rolling window calculations can be specified with the ``closed``
+parameter:
+
+.. csv-table::
+ :header: "``closed``", "Description", "Default for"
+ :widths: 20, 30, 30
+
+ ``right``, close right endpoint, time-based windows
+ ``left``, close left endpoint,
+ ``both``, close both endpoints, fixed windows
+ ``neither``, open endpoints,
+
+For example, having the right endpoint open is useful in many problems that require that there is no contamination
+from present information back to past information. This allows the rolling window to compute statistics
+"up to that point in time", but not including that point in time.
+
+.. ipython:: python
+
+ df = pd.DataFrame({'x': [1]*5},
+ index = [pd.Timestamp('20130101 09:00:01'),
+ pd.Timestamp('20130101 09:00:02'),
+ pd.Timestamp('20130101 09:00:03'),
+ pd.Timestamp('20130101 09:00:04'),
+ pd.Timestamp('20130101 09:00:06')])
+
+ df["right"] = df.rolling('2s', closed='right').x.sum() # default
+ df["both"] = df.rolling('2s', closed='both').x.sum()
+ df["left"] = df.rolling('2s', closed='left').x.sum()
+ df["neither"] = df.rolling('2s', closed='neither').x.sum()
+
+ df
+
+Currently, this feature is only implemented for time-based windows.
+For fixed windows, the closed parameter cannot be set and the rolling window will always have both endpoints closed.
+
.. _stats.moments.ts-versus-resampling:
Time-aware Rolling vs. Resampling
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index fd1cd3d0022c9..db650dc6569eb 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -319,6 +319,7 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you
Other Enhancements
^^^^^^^^^^^^^^^^^^
+- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window endpoint closedness. See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`)
- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`.
- ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`)
- ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ad56ea44a0dc6..86978a9739ca4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5962,12 +5962,12 @@ def _add_series_or_dataframe_operations(cls):
@Appender(rwindow.rolling.__doc__)
def rolling(self, window, min_periods=None, freq=None, center=False,
- win_type=None, on=None, axis=0):
+ win_type=None, on=None, axis=0, closed=None):
axis = self._get_axis_number(axis)
return rwindow.rolling(self, window=window,
min_periods=min_periods, freq=freq,
center=center, win_type=win_type,
- on=on, axis=axis)
+ on=on, axis=axis, closed=closed)
cls.rolling = rolling
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 89d2f5b24d77e..5b84b075ce81a 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -56,11 +56,12 @@
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
- 'axis', 'on']
+ 'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
- center=False, win_type=None, axis=0, on=None, **kwargs):
+ center=False, win_type=None, axis=0, on=None, closed=None,
+ **kwargs):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
@@ -71,6 +72,7 @@ def __init__(self, obj, window=None, min_periods=None, freq=None,
self.blocks = []
self.obj = obj
self.on = on
+ self.closed = closed
self.window = window
self.min_periods = min_periods
self.freq = freq
@@ -101,6 +103,10 @@ def validate(self):
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
+ if self.closed is not None and self.closed not in \
+ ['right', 'both', 'left', 'neither']:
+ raise ValueError("closed must be 'right', 'left', 'both' or "
+ "'neither'")
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
@@ -374,8 +380,14 @@ class Window(_Window):
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
+ closed : string, default None
+ Make the interval closed on the 'right', 'left', 'both' or
+ 'neither' endpoints.
+ For offset-based windows, it defaults to 'right'.
+ For fixed windows, defaults to 'both'. Remaining cases not implemented
+ for fixed windows.
- .. versionadded:: 0.19.0
+ .. versionadded:: 0.20.0
axis : int or string, default 0
@@ -717,12 +729,12 @@ def _apply(self, func, name=None, window=None, center=None,
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
- def func(arg, window, min_periods=None):
+ def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
- window, minp, indexi, **kwargs)
+ window, minp, indexi, closed, **kwargs)
# calculation function
if center:
@@ -731,11 +743,13 @@ def func(arg, window, min_periods=None):
def calc(x):
return func(np.concatenate((x, additional_nans)),
- window, min_periods=self.min_periods)
+ window, min_periods=self.min_periods,
+ closed=self.closed)
else:
def calc(x):
- return func(x, window, min_periods=self.min_periods)
+ return func(x, window, min_periods=self.min_periods,
+ closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
@@ -768,7 +782,8 @@ def count(self):
for b in blocks:
result = b.notnull().astype(int)
result = self._constructor(result, window=window, min_periods=0,
- center=self.center).sum()
+ center=self.center,
+ closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
@@ -789,11 +804,10 @@ def apply(self, func, args=(), kwargs={}):
offset = _offset(window, self.center)
index, indexi = self._get_index()
- def f(arg, window, min_periods):
+ def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
- return _window.roll_generic(arg, window, minp, indexi,
- offset, func, args,
- kwargs)
+ return _window.roll_generic(arg, window, minp, indexi, closed,
+ offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
@@ -864,7 +878,7 @@ def std(self, ddof=1, *args, **kwargs):
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
- ddof))
+ self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
@@ -911,7 +925,7 @@ def quantile(self, quantile, **kwargs):
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
return _window.roll_quantile(arg, window, minp, indexi,
- quantile)
+ self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
@@ -1044,6 +1058,10 @@ def validate(self):
elif self.window < 0:
raise ValueError("window must be non-negative")
+ if not self.is_datetimelike and self.closed is not None:
+ raise ValueError("closed only implemented for datetimelike "
+ "and offset based windows")
+
def _validate_monotonic(self):
""" validate on is monotonic """
if not self._on.is_monotonic:
diff --git a/pandas/core/window.pyx b/pandas/core/window.pyx
index a06e616002ee2..3bb8abe26c781 100644
--- a/pandas/core/window.pyx
+++ b/pandas/core/window.pyx
@@ -158,9 +158,14 @@ cdef class MockFixedWindowIndexer(WindowIndexer):
index of the input
floor: optional
unit for flooring
+ left_closed: bint
+ left endpoint closedness
+ right_closed: bint
+ right endpoint closedness
"""
def __init__(self, ndarray input, int64_t win, int64_t minp,
+ bint left_closed, bint right_closed,
object index=None, object floor=None):
assert index is None
@@ -191,9 +196,14 @@ cdef class FixedWindowIndexer(WindowIndexer):
index of the input
floor: optional
unit for flooring the unit
+ left_closed: bint
+ left endpoint closedness
+ right_closed: bint
+ right endpoint closedness
"""
def __init__(self, ndarray input, int64_t win, int64_t minp,
+ bint left_closed, bint right_closed,
object index=None, object floor=None):
cdef ndarray start_s, start_e, end_s, end_e
@@ -229,10 +239,16 @@ cdef class VariableWindowIndexer(WindowIndexer):
min number of obs in a window to consider non-NaN
index: ndarray
index of the input
+ left_closed: bint
+ left endpoint closedness
+ True if the left endpoint is closed, False if open
+ right_closed: bint
+ right endpoint closedness
+ True if the right endpoint is closed, False if open
"""
def __init__(self, ndarray input, int64_t win, int64_t minp,
- ndarray index):
+ bint left_closed, bint right_closed, ndarray index):
self.is_variable = 1
self.N = len(index)
@@ -244,12 +260,13 @@ cdef class VariableWindowIndexer(WindowIndexer):
self.end = np.empty(self.N, dtype='int64')
self.end.fill(-1)
- self.build(index, win)
+ self.build(index, win, left_closed, right_closed)
# max window size
self.win = (self.end - self.start).max()
- def build(self, ndarray[int64_t] index, int64_t win):
+ def build(self, ndarray[int64_t] index, int64_t win, bint left_closed,
+ bint right_closed):
cdef:
ndarray[int64_t] start, end
@@ -261,7 +278,13 @@ cdef class VariableWindowIndexer(WindowIndexer):
N = self.N
start[0] = 0
- end[0] = 1
+
+ # right endpoint is closed
+ if right_closed:
+ end[0] = 1
+ # right endpoint is open
+ else:
+ end[0] = 0
with nogil:
@@ -271,6 +294,10 @@ cdef class VariableWindowIndexer(WindowIndexer):
end_bound = index[i]
start_bound = index[i] - win
+ # left endpoint is closed
+ if left_closed:
+ start_bound -= 1
+
# advance the start bound until we are
# within the constraint
start[i] = i
@@ -286,9 +313,13 @@ cdef class VariableWindowIndexer(WindowIndexer):
else:
end[i] = end[i - 1]
+ # right endpoint is open
+ if not right_closed:
+ end[i] -= 1
+
-def get_window_indexer(input, win, minp, index, floor=None,
- use_mock=True):
+def get_window_indexer(input, win, minp, index, closed,
+ floor=None, use_mock=True):
"""
return the correct window indexer for the computation
@@ -299,6 +330,10 @@ def get_window_indexer(input, win, minp, index, floor=None,
minp: integer, minimum periods
index: 1d ndarray, optional
index to the input array
+ closed: string, default None
+ {'right', 'left', 'both', 'neither'}
+ window endpoint closedness. Defaults to 'right' in
+ VariableWindowIndexer and to 'both' in FixedWindowIndexer
floor: optional
unit for flooring the unit
use_mock: boolean, default True
@@ -307,18 +342,38 @@ def get_window_indexer(input, win, minp, index, floor=None,
compat Indexer that allows us to use a standard
code path with all of the indexers.
+
Returns
-------
tuple of 1d int64 ndarrays of the offsets & data about the window
"""
+ cdef:
+ bint left_closed = False
+ bint right_closed = False
+
+ assert closed is None or closed in ['right', 'left', 'both', 'neither']
+
+ # if windows is variable, default is 'right', otherwise default is 'both'
+ if closed is None:
+ closed = 'right' if index is not None else 'both'
+
+ if closed in ['right', 'both']:
+ right_closed = True
+
+ if closed in ['left', 'both']:
+ left_closed = True
+
if index is not None:
- indexer = VariableWindowIndexer(input, win, minp, index)
+ indexer = VariableWindowIndexer(input, win, minp, left_closed,
+ right_closed, index)
elif use_mock:
- indexer = MockFixedWindowIndexer(input, win, minp, index, floor)
+ indexer = MockFixedWindowIndexer(input, win, minp, left_closed,
+ right_closed, index, floor)
else:
- indexer = FixedWindowIndexer(input, win, minp, index, floor)
+ indexer = FixedWindowIndexer(input, win, minp, left_closed,
+ right_closed, index, floor)
return indexer.get_data()
# ----------------------------------------------------------------------
@@ -327,7 +382,7 @@ def get_window_indexer(input, win, minp, index, floor=None,
def roll_count(ndarray[double_t] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
cdef:
double val, count_x = 0.0
int64_t s, e, nobs, N
@@ -336,7 +391,7 @@ def roll_count(ndarray[double_t] input, int64_t win, int64_t minp,
ndarray[double_t] output
start, end, N, win, minp, _ = get_window_indexer(input, win,
- minp, index)
+ minp, index, closed)
output = np.empty(N, dtype=float)
with nogil:
@@ -408,7 +463,7 @@ cdef inline void remove_sum(double val, int64_t *nobs, double *sum_x) nogil:
def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
cdef:
double val, prev_x, sum_x = 0
int64_t s, e
@@ -418,7 +473,8 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp,
ndarray[double_t] output
start, end, N, win, minp, is_variable = get_window_indexer(input, win,
- minp, index)
+ minp, index,
+ closed)
output = np.empty(N, dtype=float)
# for performance we are going to iterate
@@ -523,7 +579,7 @@ cdef inline void remove_mean(double val, Py_ssize_t *nobs, double *sum_x,
def roll_mean(ndarray[double_t] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
cdef:
double val, prev_x, result, sum_x = 0
int64_t s, e
@@ -533,7 +589,8 @@ def roll_mean(ndarray[double_t] input, int64_t win, int64_t minp,
ndarray[double_t] output
start, end, N, win, minp, is_variable = get_window_indexer(input, win,
- minp, index)
+ minp, index,
+ closed)
output = np.empty(N, dtype=float)
# for performance we are going to iterate
@@ -647,7 +704,7 @@ cdef inline void remove_var(double val, double *nobs, double *mean_x,
def roll_var(ndarray[double_t] input, int64_t win, int64_t minp,
- object index, int ddof=1):
+ object index, object closed, int ddof=1):
"""
Numerically stable implementation using Welford's method.
"""
@@ -660,7 +717,8 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp,
ndarray[double_t] output
start, end, N, win, minp, is_variable = get_window_indexer(input, win,
- minp, index)
+ minp, index,
+ closed)
output = np.empty(N, dtype=float)
# Check for windows larger than array, addresses #7297
@@ -789,7 +847,7 @@ cdef inline void remove_skew(double val, int64_t *nobs, double *x, double *xx,
def roll_skew(ndarray[double_t] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
cdef:
double val, prev
double x = 0, xx = 0, xxx = 0
@@ -800,7 +858,8 @@ def roll_skew(ndarray[double_t] input, int64_t win, int64_t minp,
ndarray[double_t] output
start, end, N, win, minp, is_variable = get_window_indexer(input, win,
- minp, index)
+ minp, index,
+ closed)
output = np.empty(N, dtype=float)
if is_variable:
@@ -916,7 +975,7 @@ cdef inline void remove_kurt(double val, int64_t *nobs, double *x, double *xx,
def roll_kurt(ndarray[double_t] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
cdef:
double val, prev
double x = 0, xx = 0, xxx = 0, xxxx = 0
@@ -927,7 +986,8 @@ def roll_kurt(ndarray[double_t] input, int64_t win, int64_t minp,
ndarray[double_t] output
start, end, N, win, minp, is_variable = get_window_indexer(input, win,
- minp, index)
+ minp, index,
+ closed)
output = np.empty(N, dtype=float)
if is_variable:
@@ -985,11 +1045,11 @@ def roll_kurt(ndarray[double_t] input, int64_t win, int64_t minp,
def roll_median_c(ndarray[float64_t] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
cdef:
double val, res, prev
- bint err=0, is_variable
- int ret=0
+ bint err = 0, is_variable
+ int ret = 0
skiplist_t *sl
Py_ssize_t i, j
int64_t nobs = 0, N, s, e
@@ -1001,7 +1061,7 @@ def roll_median_c(ndarray[float64_t] input, int64_t win, int64_t minp,
# actual skiplist ops outweigh any window computation costs
start, end, N, win, minp, is_variable = get_window_indexer(
input, win,
- minp, index,
+ minp, index, closed,
use_mock=False)
output = np.empty(N, dtype=float)
@@ -1111,7 +1171,7 @@ cdef inline numeric calc_mm(int64_t minp, Py_ssize_t nobs,
def roll_max(ndarray[numeric] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
"""
Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
@@ -1123,12 +1183,15 @@ def roll_max(ndarray[numeric] input, int64_t win, int64_t minp,
is below this, output a NaN
index: ndarray, optional
index for window computation
+ closed: 'right', 'left', 'both', 'neither'
+ make the interval closed on the right, left,
+ both or neither endpoints
"""
- return _roll_min_max(input, win, minp, index, is_max=1)
+ return _roll_min_max(input, win, minp, index, closed=closed, is_max=1)
def roll_min(ndarray[numeric] input, int64_t win, int64_t minp,
- object index):
+ object index, object closed):
"""
Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
@@ -1141,11 +1204,11 @@ def roll_min(ndarray[numeric] input, int64_t win, int64_t minp,
index: ndarray, optional
index for window computation
"""
- return _roll_min_max(input, win, minp, index, is_max=0)
+ return _roll_min_max(input, win, minp, index, is_max=0, closed=closed)
cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp,
- object index, bint is_max):
+ object index, object closed, bint is_max):
"""
Moving min/max of 1d array of any numeric type along axis=0
ignoring NaNs.
@@ -1170,7 +1233,7 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp,
starti, endi, N, win, minp, is_variable = get_window_indexer(
input, win,
- minp, index)
+ minp, index, closed)
output = np.empty(N, dtype=input.dtype)
@@ -1272,7 +1335,8 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp,
def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win,
- int64_t minp, object index, double quantile):
+ int64_t minp, object index, object closed,
+ double quantile):
"""
O(N log(window)) implementation using skip list
"""
@@ -1292,7 +1356,7 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win,
# actual skiplist ops outweigh any window computation costs
start, end, N, win, minp, is_variable = get_window_indexer(
input, win,
- minp, index,
+ minp, index, closed,
use_mock=False)
output = np.empty(N, dtype=float)
skiplist = IndexableSkiplist(win)
@@ -1335,7 +1399,7 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win,
def roll_generic(ndarray[float64_t, cast=True] input,
- int64_t win, int64_t minp, object index,
+ int64_t win, int64_t minp, object index, object closed,
int offset, object func,
object args, object kwargs):
cdef:
@@ -1355,12 +1419,13 @@ def roll_generic(ndarray[float64_t, cast=True] input,
start, end, N, win, minp, is_variable = get_window_indexer(input, win,
minp, index,
+ closed,
floor=0)
output = np.empty(N, dtype=float)
counts = roll_sum(np.concatenate([np.isfinite(input).astype(float),
np.array([0.] * offset)]),
- win, minp, index)[offset:]
+ win, minp, index, closed)[offset:]
if is_variable:
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 5fc31e9321f31..3929aba858797 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -431,6 +431,12 @@ def test_numpy_compat(self):
tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
getattr(r, func), dtype=np.float64)
+ def test_closed(self):
+ df = DataFrame({'A': [0, 1, 2, 3, 4]})
+ # closed only allowed for datetimelike
+ with self.assertRaises(ValueError):
+ df.rolling(window=3, closed='neither')
+
class TestExpanding(Base):
@@ -3385,6 +3391,45 @@ def test_min_periods(self):
result = df.rolling('2s', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
+ def test_closed(self):
+
+ # xref GH13965
+
+ df = DataFrame({'A': [1] * 5},
+ index=[pd.Timestamp('20130101 09:00:01'),
+ pd.Timestamp('20130101 09:00:02'),
+ pd.Timestamp('20130101 09:00:03'),
+ pd.Timestamp('20130101 09:00:04'),
+ pd.Timestamp('20130101 09:00:06')])
+
+ # closed must be 'right', 'left', 'both', 'neither'
+ with self.assertRaises(ValueError):
+ self.regular.rolling(window='2s', closed="blabla")
+
+ expected = df.copy()
+ expected["A"] = [1.0, 2, 2, 2, 1]
+ result = df.rolling('2s', closed='right').sum()
+ tm.assert_frame_equal(result, expected)
+
+ # default should be 'right'
+ result = df.rolling('2s').sum()
+ tm.assert_frame_equal(result, expected)
+
+ expected = df.copy()
+ expected["A"] = [1.0, 2, 3, 3, 2]
+ result = df.rolling('2s', closed='both').sum()
+ tm.assert_frame_equal(result, expected)
+
+ expected = df.copy()
+ expected["A"] = [np.nan, 1.0, 2, 2, 1]
+ result = df.rolling('2s', closed='left').sum()
+ tm.assert_frame_equal(result, expected)
+
+ expected = df.copy()
+ expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
+ result = df.rolling('2s', closed='neither').sum()
+ tm.assert_frame_equal(result, expected)
+
def test_ragged_sum(self):
df = self.ragged
| - [x] closes #13965
- [x] time-based window tests added / passed
- [x] fixed window tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15795 | 2017-03-23T22:29:19Z | 2017-04-13T11:39:15Z | null | 2017-04-13T11:39:51Z |
CI: fix coverage file location | diff --git a/.travis.yml b/.travis.yml
index eb2a58b0616ef..d78e4dab31fbe 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -39,7 +39,6 @@ matrix:
- TEST_ARGS="--skip-slow --skip-network"
- JOB_TAG=_OSX
- TRAVIS_PYTHON_VERSION=3.5
- - CACHE_NAME="35_osx"
- USE_CACHE=true
- python: 2.7
env:
@@ -47,9 +46,7 @@ matrix:
- JOB_NAME: "27_slow_nnet_LOCALE"
- TEST_ARGS="--only-slow --skip-network"
- LOCALE_OVERRIDE="zh_CN.UTF-8"
- - FULL_DEPS=true
- JOB_TAG=_LOCALE
- - CACHE_NAME="27_slow_nnet_LOCALE"
- USE_CACHE=true
addons:
apt:
@@ -60,10 +57,8 @@ matrix:
- PYTHON_VERSION=2.7
- JOB_NAME: "27_nslow"
- TEST_ARGS="--skip-slow"
- - FULL_DEPS=true
- CLIPBOARD_GUI=gtk2
- LINT=true
- - CACHE_NAME="27_nslow"
- USE_CACHE=true
addons:
apt:
@@ -74,10 +69,8 @@ matrix:
- PYTHON_VERSION=3.5
- JOB_NAME: "35_nslow"
- TEST_ARGS="--skip-slow --skip-network"
- - FULL_DEPS=true
- CLIPBOARD=xsel
- COVERAGE=true
- - CACHE_NAME="35_nslow"
- USE_CACHE=true
addons:
apt:
@@ -96,28 +89,24 @@ matrix:
packages:
- libatlas-base-dev
- gfortran
-# In allow_failures
+ # In allow_failures
- python: 2.7
env:
- PYTHON_VERSION=2.7
- JOB_NAME: "27_slow"
- JOB_TAG=_SLOW
- TEST_ARGS="--only-slow --skip-network"
- - FULL_DEPS=true
- - CACHE_NAME="27_slow"
- USE_CACHE=true
-# In allow_failures
+ # In allow_failures
- python: 2.7
env:
- PYTHON_VERSION=2.7
- JOB_NAME: "27_build_test"
- JOB_TAG=_BUILD_TEST
- TEST_ARGS="--skip-slow"
- - FULL_DEPS=true
- BUILD_TEST=true
- - CACHE_NAME="27_build_test"
- USE_CACHE=true
-# In allow_failures
+ # In allow_failures
- python: 3.5
env:
- PYTHON_VERSION=3.5
@@ -125,17 +114,14 @@ matrix:
- JOB_TAG=_NUMPY_DEV
- TEST_ARGS="--skip-slow --skip-network"
- PANDAS_TESTING_MODE="deprecate"
- - CACHE_NAME="35_numpy_dev"
- USE_CACHE=true
-# In allow_failures
+ # In allow_failures
- python: 3.5
env:
- PYTHON_VERSION=3.5
- JOB_NAME: "doc_build"
- - FULL_DEPS=true
- DOC_BUILD=true
- JOB_TAG=_DOC_BUILD
- - CACHE_NAME="doc_build"
- USE_CACHE=true
allow_failures:
- python: 2.7
@@ -144,8 +130,6 @@ matrix:
- JOB_NAME: "27_slow"
- JOB_TAG=_SLOW
- TEST_ARGS="--only-slow --skip-network"
- - FULL_DEPS=true
- - CACHE_NAME="27_slow"
- USE_CACHE=true
- python: 2.7
env:
@@ -153,9 +137,7 @@ matrix:
- JOB_NAME: "27_build_test"
- JOB_TAG=_BUILD_TEST
- TEST_ARGS="--skip-slow"
- - FULL_DEPS=true
- BUILD_TEST=true
- - CACHE_NAME="27_build_test"
- USE_CACHE=true
- python: 3.5
env:
@@ -164,16 +146,13 @@ matrix:
- JOB_TAG=_NUMPY_DEV
- TEST_ARGS="--skip-slow --skip-network"
- PANDAS_TESTING_MODE="deprecate"
- - CACHE_NAME="35_numpy_dev"
- USE_CACHE=true
- python: 3.5
env:
- PYTHON_VERSION=3.5
- JOB_NAME: "doc_build"
- - FULL_DEPS=true
- DOC_BUILD=true
- JOB_TAG=_DOC_BUILD
- - CACHE_NAME="doc_build"
- USE_CACHE=true
before_install:
@@ -209,7 +188,9 @@ script:
- echo "script done"
after_success:
- - source activate pandas && codecov
+ - if [ "$COVERAGE" ]; then
+ source activate pandas && codecov --file /tmp/cov-single.xml /tmp/cov-multiple.xml;
+ fi
after_script:
- echo "after_script start"
diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index 2d1211b2f7b96..f0fbb8c54bf2a 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -27,8 +27,8 @@ if [ "$BUILD_TEST" ]; then
cd /tmp
python -c "import pandas; pandas.test(['-n 2'])"
elif [ "$COVERAGE" ]; then
- echo pytest -s -n 2 -m "not single" --cov=pandas --cov-append --cov-report xml:/tmp/cov.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
- pytest -s -n 2 -m "not single" --cov=pandas --cov-append --cov-report xml:/tmp/cov.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+ echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+ pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
else
echo pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas # TODO: doctest
diff --git a/ci/script_single.sh b/ci/script_single.sh
index 2d7962352842b..86e822cb57653 100755
--- a/ci/script_single.sh
+++ b/ci/script_single.sh
@@ -20,8 +20,8 @@ fi
if [ "$BUILD_TEST" ]; then
echo "We are not running pytest as this is simply a build test."
elif [ "$COVERAGE" ]; then
- echo pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
- pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
+ echo pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
+ pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
else
echo pytest -m "single" --junitxml=/tmp/single.xml $TEST_ARGS pandas
pytest -m "single" --junitxml=/tmp/single.xml $TEST_ARGS pandas # TODO: doctest
| CI: clean up some unused env variables
| https://api.github.com/repos/pandas-dev/pandas/pulls/15792 | 2017-03-23T21:18:45Z | 2017-03-25T15:56:49Z | null | 2017-03-25T15:57:32Z |
COMPAT: 3.6.1 compat for change in PySlice_GetIndices_Ex | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index f78040e5a52f2..f902422b0916d 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -3,6 +3,7 @@ cimport numpy as np
cimport cython
import numpy as np
import sys
+
cdef bint PY3 = (sys.version_info[0] >= 3)
from numpy cimport *
@@ -26,7 +27,8 @@ from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem,
PyObject_SetAttrString,
PyObject_RichCompareBool,
PyBytes_GET_SIZE,
- PyUnicode_GET_SIZE)
+ PyUnicode_GET_SIZE,
+ PyObject)
try:
from cpython cimport PyString_GET_SIZE
@@ -36,11 +38,10 @@ except ImportError:
cdef extern from "Python.h":
Py_ssize_t PY_SSIZE_T_MAX
- ctypedef struct PySliceObject:
- pass
+cdef extern from "compat_helper.h":
- cdef int PySlice_GetIndicesEx(
- PySliceObject* s, Py_ssize_t length,
+ cdef int slice_get_indices(
+ PyObject* s, Py_ssize_t length,
Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step,
Py_ssize_t *slicelength) except -1
@@ -1658,8 +1659,8 @@ cpdef slice_get_indices_ex(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX):
if slc is None:
raise TypeError("slc should be a slice")
- PySlice_GetIndicesEx(<PySliceObject *>slc, objlen,
- &start, &stop, &step, &length)
+ slice_get_indices(<PyObject *>slc, objlen,
+ &start, &stop, &step, &length)
return start, stop, step, length
@@ -1683,8 +1684,8 @@ cpdef Py_ssize_t slice_len(
if slc is None:
raise TypeError("slc must be slice")
- PySlice_GetIndicesEx(<PySliceObject *>slc, objlen,
- &start, &stop, &step, &length)
+ slice_get_indices(<PyObject *>slc, objlen,
+ &start, &stop, &step, &length)
return length
diff --git a/pandas/_libs/src/compat_helper.h b/pandas/_libs/src/compat_helper.h
new file mode 100644
index 0000000000000..e3c40d2ca65f4
--- /dev/null
+++ b/pandas/_libs/src/compat_helper.h
@@ -0,0 +1,37 @@
+/*
+Copyright (c) 2016, PyData Development Team
+All rights reserved.
+
+Distributed under the terms of the BSD Simplified License.
+
+The full license is in the LICENSE file, distributed with this software.
+*/
+
+#ifndef PANDAS__LIBS_SRC_COMPAT_HELPER_H_
+#define PANDAS__LIBS_SRC_COMPAT_HELPER_H_
+
+#include "Python.h"
+#include "numpy_helper.h"
+
+/*
+PySlice_GetIndicesEx changes signature in PY3
+but 3.6.1 in particular changes the behavior of this function slightly
+https://bugs.python.org/issue27867
+*/
+
+PANDAS_INLINE int slice_get_indices(PyObject *s,
+ Py_ssize_t length,
+ Py_ssize_t *start,
+ Py_ssize_t *stop,
+ Py_ssize_t *step,
+ Py_ssize_t *slicelength) {
+#if PY_VERSION_HEX >= 0x03000000
+ return PySlice_GetIndicesEx(s, length, start, stop,
+ step, slicelength);
+#else
+ return PySlice_GetIndicesEx((PySliceObject *)s, length, start,
+ stop, step, slicelength);
+#endif
+}
+
+#endif // PANDAS__LIBS_SRC_COMPAT_HELPER_H_
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 29920b165d3f6..af7c584249416 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -2,11 +2,12 @@
# pylint: disable=W0102
from datetime import datetime, date
-
+import sys
import pytest
import numpy as np
import re
+from distutils.version import LooseVersion
import itertools
from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex,
Series, Categorical)
@@ -22,6 +23,9 @@
randn, assert_series_equal)
from pandas.compat import zip, u
+# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
+PY361 = sys.version >= LooseVersion('3.6.1')
+
@pytest.fixture
def mgr():
@@ -1128,8 +1132,10 @@ def assert_as_slice_equals(arr, slc):
assert_as_slice_equals([0, 100], slice(0, 200, 100))
assert_as_slice_equals([2, 1], slice(2, 0, -1))
- assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
- assert_as_slice_equals([100, 0], slice(100, None, -100))
+
+ if not PY361:
+ assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
+ assert_as_slice_equals([100, 0], slice(100, None, -100))
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
@@ -1150,8 +1156,9 @@ def test_slice_iter(self):
assert list(BlockPlacement(slice(0, 0))) == []
assert list(BlockPlacement(slice(3, 0))) == []
- assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
- assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
+ if not PY361:
+ assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
+ assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
@@ -1164,8 +1171,10 @@ def assert_as_array_equals(slc, asarray):
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
- assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
- assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
+
+ if not PY361:
+ assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
+ assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
@@ -1180,23 +1189,26 @@ def assert_add_equals(val, inc, result):
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
- assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
- assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
- assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
with pytest.raises(ValueError):
BlockPlacement(slice(1, 4)).add(-10)
with pytest.raises(ValueError):
BlockPlacement([1, 2, 4]).add(-10)
- with pytest.raises(ValueError):
- BlockPlacement(slice(2, None, -1)).add(-1)
+
+ if not PY361:
+ assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
+ assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
+ assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
+
+ with pytest.raises(ValueError):
+ BlockPlacement(slice(2, None, -1)).add(-1)
diff --git a/setup.py b/setup.py
index 8e690f05b818c..1b471f76ac5e6 100755
--- a/setup.py
+++ b/setup.py
@@ -460,7 +460,8 @@ def pxd(name):
extra_compile_args=['-Wno-unused-function']
lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h',
- 'pandas/_libs/src/parse_helper.h']
+ 'pandas/_libs/src/parse_helper.h',
+ 'pandas/_libs/src/compat_helper.h']
tseries_depends = ['pandas/_libs/src/datetime/np_datetime.h',
| This doesn't actually matter to any tests except for some internal consistency ones.
Bonus is that it eliminates a warning :<
note that we aren't actually testing this (yet) on Travis as our 3.6 build uses conda-forge and 3.6.1 is not there as of yet. Its in defaults though (and shows up on appveyor build). | https://api.github.com/repos/pandas-dev/pandas/pulls/15790 | 2017-03-23T18:06:49Z | 2017-03-23T19:11:33Z | null | 2017-03-23T19:13:08Z |
DOC: .groupby() aligns Series, accepts ndarray | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 87052800b8fb5..f51e69737397c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4129,11 +4129,14 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
Parameters
----------
- by : mapping function / list of functions, dict, Series, or tuple /
- list of column names or index level names.
+ by : mapping function / list of functions, dict, Series, ndarray, \
+ or tuple / list of column names or index level names or \
+ Series or ndarrays
Called on each element of the object index to determine the groups.
If a dict or Series is passed, the Series or dict VALUES will be
- used to determine the groups
+ used to determine the groups (the Series' values are first
+ aligned; see ``.align()`` method). If ndarray is passed, the
+ values as-is determine the groups.
axis : int, default 0
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
| - [x] closes https://github.com/pandas-dev/pandas/issues/15244
- [ ] tests added / passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15789 | 2017-03-23T17:46:49Z | 2017-03-23T19:10:25Z | null | 2017-03-23T19:10:45Z |
Update testing.py | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index cf76f4ead77e3..9a9f3c6c6b945 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1151,7 +1151,7 @@ def assert_series_equal(left, right, check_dtype=True,
Whether to compare number exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
- check_dateteimelike_compat : bool, default False
+ check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
@@ -1264,7 +1264,7 @@ def assert_frame_equal(left, right, check_dtype=True,
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
- check_dateteimelike_compat : bool, default False
+ check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
| Tiny docs typo fix
| https://api.github.com/repos/pandas-dev/pandas/pulls/15784 | 2017-03-22T21:12:48Z | 2017-03-22T21:36:54Z | 2017-03-22T21:36:54Z | 2017-03-22T21:36:56Z |
CI: remove travis dedupe as enabled auto-cancellation | diff --git a/.travis.yml b/.travis.yml
index 270f8c2fc76c3..eb2a58b0616ef 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -177,8 +177,6 @@ matrix:
- USE_CACHE=true
before_install:
- - echo "Checking to see if this build is outdated"
- - ci/travis_fast_finish.py || { echo "Failing outdated build to end it."; exit 1; }
- echo "before_install"
- source ci/travis_process_gbq_encryption.sh
- export PATH="$HOME/miniconda3/bin:$PATH"
diff --git a/ci/travis_fast_finish.py b/ci/travis_fast_finish.py
deleted file mode 100755
index c2e2a9159918b..0000000000000
--- a/ci/travis_fast_finish.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-
-# script to cancel previous travis builds for the same PR
-# originally from
-# https://github.com/conda-forge/staged-recipes/pull/2257
-
-try:
- from future_builtins import (
- map,
- filter,
- )
-except ImportError:
- pass
-
-import codecs
-import contextlib
-import json
-import os
-
-try:
- from urllib.request import (
- Request,
- urlopen,
- )
-except ImportError:
- from urllib2 import (
- Request,
- urlopen,
- )
-
-
-def check_latest_pr_build(repo, pr, build_num):
- # Not a PR so it is latest.
- if pr is None:
- return True
-
- headers = {
- "Accept": "application/vnd.travis-ci.2+json",
- }
- url = "https://api.travis-ci.org/repos/{repo}/builds?event_type=pull_request"
-
- request = Request(url.format(repo=repo), headers=headers)
- with contextlib.closing(urlopen(request)) as response:
- reader = codecs.getreader("utf-8")
- data = json.load(reader(response))
-
- # Parse the response to get a list of build numbers for this PR.
- builds = data["builds"]
- pr_builds = filter(lambda b: b["pull_request_number"] == pr, builds)
- pr_build_nums = sorted(map(lambda b: int(b["number"]), pr_builds))
-
- print("build_num: {}".format(build_num))
- print("pr_build_nums: {}".format(','.join([str(n) for n in pr_build_nums])))
-
- # Check if our build number is the latest (largest)
- # out of all of the builds for this PR.
- if build_num < max(pr_build_nums):
- return False
- else:
- return True
-
-
-def main():
- repo = os.environ["TRAVIS_REPO_SLUG"]
-
- pr = os.environ["TRAVIS_PULL_REQUEST"]
- pr = None if pr == "false" else int(pr)
- build_num = int(os.environ["TRAVIS_BUILD_NUMBER"])
-
- print("checking for fast_finish: {}-{}-{}".format(repo, pr, build_num))
-
- return int(check_latest_pr_build(repo, pr, build_num) is False)
-
-
-if __name__ == "__main__":
- import sys
- sys.exit(main())
| xref https://github.com/pandas-dev/pandas/commit/79581ffe6fb73089dfa8394c2f4e44677acfe1ce
of course Travis just announced auto-cancellation / it looks good when I enabled it. so removing this :< | https://api.github.com/repos/pandas-dev/pandas/pulls/15783 | 2017-03-22T19:43:40Z | 2017-03-23T19:07:23Z | null | 2017-03-23T19:07:23Z |
COMPAT: NaT accessors | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index a0b2b47c4bac3..3ab69e1ff409b 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -771,7 +771,8 @@ Other API Changes
since pandas version 0.13.0 and can be done with the ``Series.str.extract``
method (:issue:`5224`). As a consequence, the ``as_indexer`` keyword is
ignored (no longer needed to specify the new behaviour) and is deprecated.
-
+- ``NaT`` will now correctly report ``False`` for datetimelike boolean operations such as ``is_month_start`` (:issue:`15781`)
+- ``NaT`` will now correctly return ``np.nan`` for ``Timedelta`` and ``Period`` accessors such as ``days`` and ``quarter`` (:issue:`15782`)
.. _whatsnew_0200.deprecations:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 055534bbdb7ee..d441f1ec4759b 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -849,6 +849,30 @@ class NaTType(_NaT):
def is_leap_year(self):
return False
+ @property
+ def is_month_start(self):
+ return False
+
+ @property
+ def is_quarter_start(self):
+ return False
+
+ @property
+ def is_year_start(self):
+ return False
+
+ @property
+ def is_month_end(self):
+ return False
+
+ @property
+ def is_quarter_end(self):
+ return False
+
+ @property
+ def is_year_end(self):
+ return False
+
def __rdiv__(self, other):
return _nat_rdivide_op(self, other)
@@ -3799,8 +3823,9 @@ def array_strptime(ndarray[object] values, object fmt,
# these by definition return np.nan
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'millisecond', 'microsecond', 'nanosecond',
- 'week', 'dayofyear', 'days_in_month', 'daysinmonth', 'dayofweek',
- 'weekday_name']
+ 'week', 'dayofyear', 'weekofyear', 'days_in_month', 'daysinmonth',
+ 'dayofweek', 'weekday_name', 'days', 'seconds', 'microseconds',
+ 'nanoseconds', 'qyear', 'quarter']
for field in fields:
prop = property(fget=lambda self: np.nan)
setattr(NaTType, field, prop)
@@ -4810,7 +4835,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
if field == 'is_month_start':
if is_business:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4823,7 +4848,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
return out.view(bool)
else:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4836,7 +4861,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
elif field == 'is_month_end':
if is_business:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4854,7 +4879,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
return out.view(bool)
else:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4871,7 +4896,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
elif field == 'is_quarter_start':
if is_business:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4885,7 +4910,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
return out.view(bool)
else:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4898,7 +4923,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
elif field == 'is_quarter_end':
if is_business:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4917,7 +4942,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
return out.view(bool)
else:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4934,7 +4959,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
elif field == 'is_year_start':
if is_business:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4948,7 +4973,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
return out.view(bool)
else:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4961,7 +4986,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
elif field == 'is_year_end':
if is_business:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
@@ -4980,7 +5005,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
return out.view(bool)
else:
for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ if dtindex[i] == NPY_NAT: out[i] = 0; continue
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index ef24c493f5090..76a26b09ed131 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -259,19 +259,14 @@ def test_datetimeindex_accessors(self):
dti.name = 'name'
# non boolean accessors -> return Index
- for accessor in ['year', 'month', 'day', 'hour', 'minute',
- 'second', 'microsecond', 'nanosecond',
- 'dayofweek', 'dayofyear', 'weekofyear',
- 'quarter', 'weekday_name']:
+ for accessor in DatetimeIndex._field_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == 'name'
# boolean accessors -> return array
- for accessor in ['is_month_start', 'is_month_end',
- 'is_quarter_start', 'is_quarter_end',
- 'is_year_start', 'is_year_end']:
+ for accessor in DatetimeIndex._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 4abc282252559..4681879d708c4 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -31,15 +31,10 @@ def setUp(self):
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
- self.check_ops_properties(
- ['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
- 'week', 'dayofweek', 'dayofyear', 'quarter'])
- self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
- 'is_month_start', 'is_month_end',
- 'is_quarter_start',
- 'is_quarter_end', 'is_year_start',
- 'is_year_end', 'weekday_name'],
- lambda x: isinstance(x, DatetimeIndex))
+ f = lambda x: isinstance(x, DatetimeIndex)
+ self.check_ops_properties(DatetimeIndex._field_ops, f)
+ self.check_ops_properties(DatetimeIndex._object_ops, f)
+ self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 4533428cf1514..3b94992f2fe9f 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -21,11 +21,10 @@ def setUp(self):
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
- self.check_ops_properties(
- ['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
- 'week', 'dayofweek', 'dayofyear', 'quarter'])
- self.check_ops_properties(['qyear'],
- lambda x: isinstance(x, PeriodIndex))
+ f = lambda x: isinstance(x, PeriodIndex)
+ self.check_ops_properties(PeriodIndex._field_ops, f)
+ self.check_ops_properties(PeriodIndex._object_ops, f)
+ self.check_ops_properties(PeriodIndex._bool_ops, f)
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 6a6c0ab49b15d..6639fcd985ac4 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -394,8 +394,8 @@ def test_fields(self):
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
- 'weekofyear', 'week', 'dayofweek', 'weekday', 'dayofyear',
- 'quarter', 'qyear', 'days_in_month', 'is_leap_year']
+ 'weekofyear', 'week', 'dayofweek', 'dayofyear',
+ 'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
s = pd.Series(periodindex)
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 8c7b88a9cf2ca..2e9f11297dc83 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -21,9 +21,9 @@ def setUp(self):
self.not_valid_objs = []
def test_ops_properties(self):
- self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
- 'milliseconds'])
- self.check_ops_properties(['microseconds', 'nanoseconds'])
+ f = lambda x: isinstance(x, TimedeltaIndex)
+ self.check_ops_properties(TimedeltaIndex._field_ops, f)
+ self.check_ops_properties(TimedeltaIndex._object_ops, f)
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
new file mode 100644
index 0000000000000..ce2ed237f5559
--- /dev/null
+++ b/pandas/tests/scalar/test_nat.py
@@ -0,0 +1,248 @@
+import pytest
+
+from datetime import datetime, timedelta
+import pytz
+
+import numpy as np
+from pandas import (NaT, Index, Timestamp, Timedelta, Period,
+ DatetimeIndex, PeriodIndex,
+ TimedeltaIndex, Series, isnull)
+from pandas.util import testing as tm
+from pandas._libs.tslib import iNaT
+
+
+@pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex),
+ (Timedelta('NaT'), TimedeltaIndex),
+ (Period('NaT', freq='M'), PeriodIndex)])
+def test_nat_fields(nat, idx):
+
+ for field in idx._field_ops:
+
+ # weekday is a property of DTI, but a method
+ # on NaT/Timestamp for compat with datetime
+ if field == 'weekday':
+ continue
+
+ result = getattr(NaT, field)
+ assert np.isnan(result)
+
+ result = getattr(nat, field)
+ assert np.isnan(result)
+
+ for field in idx._bool_ops:
+
+ result = getattr(NaT, field)
+ assert result is False
+
+ result = getattr(nat, field)
+ assert result is False
+
+
+def test_nat_vector_field_access():
+ idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
+
+ for field in DatetimeIndex._field_ops:
+ # weekday is a property of DTI, but a method
+ # on NaT/Timestamp for compat with datetime
+ if field == 'weekday':
+ continue
+
+ result = getattr(idx, field)
+ expected = Index([getattr(x, field) for x in idx])
+ tm.assert_index_equal(result, expected)
+
+ s = Series(idx)
+
+ for field in DatetimeIndex._field_ops:
+
+ # weekday is a property of DTI, but a method
+ # on NaT/Timestamp for compat with datetime
+ if field == 'weekday':
+ continue
+
+ result = getattr(s.dt, field)
+ expected = [getattr(x, field) for x in idx]
+ tm.assert_series_equal(result, Series(expected))
+
+ for field in DatetimeIndex._bool_ops:
+ result = getattr(s.dt, field)
+ expected = [getattr(x, field) for x in idx]
+ tm.assert_series_equal(result, Series(expected))
+
+
+@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
+def test_identity(klass):
+ assert klass(None) is NaT
+
+ result = klass(np.nan)
+ assert result is NaT
+
+ result = klass(None)
+ assert result is NaT
+
+ result = klass(iNaT)
+ assert result is NaT
+
+ result = klass(np.nan)
+ assert result is NaT
+
+ result = klass(float('nan'))
+ assert result is NaT
+
+ result = klass(NaT)
+ assert result is NaT
+
+ result = klass('NaT')
+ assert result is NaT
+
+ assert isnull(klass('nat'))
+
+
+@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
+def test_equality(klass):
+
+ # nat
+ if klass is not Period:
+ klass('').value == iNaT
+ klass('nat').value == iNaT
+ klass('NAT').value == iNaT
+ klass(None).value == iNaT
+ klass(np.nan).value == iNaT
+ assert isnull(klass('nat'))
+
+
+@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
+def test_round_nat(klass):
+ # GH14940
+ ts = klass('nat')
+ for method in ["round", "floor", "ceil"]:
+ round_method = getattr(ts, method)
+ for freq in ["s", "5s", "min", "5min", "h", "5h"]:
+ assert round_method(freq) is ts
+
+
+def test_NaT_methods():
+ # GH 9513
+ raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
+ 'fromordinal', 'fromtimestamp', 'isocalendar',
+ 'strftime', 'strptime', 'time', 'timestamp',
+ 'timetuple', 'timetz', 'toordinal', 'tzname',
+ 'utcfromtimestamp', 'utcnow', 'utcoffset',
+ 'utctimetuple']
+ nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today']
+ nan_methods = ['weekday', 'isoweekday']
+
+ for method in raise_methods:
+ if hasattr(NaT, method):
+ with pytest.raises(ValueError):
+ getattr(NaT, method)()
+
+ for method in nan_methods:
+ if hasattr(NaT, method):
+ assert np.isnan(getattr(NaT, method)())
+
+ for method in nat_methods:
+ if hasattr(NaT, method):
+ # see gh-8254
+ exp_warning = None
+ if method == 'to_datetime':
+ exp_warning = FutureWarning
+ with tm.assert_produces_warning(
+ exp_warning, check_stacklevel=False):
+ assert getattr(NaT, method)() is NaT
+
+ # GH 12300
+ assert NaT.isoformat() == 'NaT'
+
+
+@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
+def test_isoformat(klass):
+
+ result = klass('NaT').isoformat()
+ expected = 'NaT'
+ assert result == expected
+
+
+def test_nat_arithmetic():
+ # GH 6873
+ i = 2
+ f = 1.5
+
+ for (left, right) in [(NaT, i), (NaT, f), (NaT, np.nan)]:
+ assert left / right is NaT
+ assert left * right is NaT
+ assert right * left is NaT
+ with pytest.raises(TypeError):
+ right / left
+
+ # Timestamp / datetime
+ t = Timestamp('2014-01-01')
+ dt = datetime(2014, 1, 1)
+ for (left, right) in [(NaT, NaT), (NaT, t), (NaT, dt)]:
+ # NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
+ assert right + left is NaT
+ assert left + right is NaT
+ assert left - right is NaT
+ assert right - left is NaT
+
+ # timedelta-like
+ # offsets are tested in test_offsets.py
+
+ delta = timedelta(3600)
+ td = Timedelta('5s')
+
+ for (left, right) in [(NaT, delta), (NaT, td)]:
+ # NaT + timedelta-like returns NaT
+ assert right + left is NaT
+ assert left + right is NaT
+ assert right - left is NaT
+ assert left - right is NaT
+
+ # GH 11718
+ t_utc = Timestamp('2014-01-01', tz='UTC')
+ t_tz = Timestamp('2014-01-01', tz='US/Eastern')
+ dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
+
+ for (left, right) in [(NaT, t_utc), (NaT, t_tz),
+ (NaT, dt_tz)]:
+ # NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
+ assert right + left is NaT
+ assert left + right is NaT
+ assert left - right is NaT
+ assert right - left is NaT
+
+ # int addition / subtraction
+ for (left, right) in [(NaT, 2), (NaT, 0), (NaT, -3)]:
+ assert right + left is NaT
+ assert left + right is NaT
+ assert left - right is NaT
+ assert right - left is NaT
+
+
+def test_nat_arithmetic_index():
+ # GH 11718
+
+ dti = DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
+ exp = DatetimeIndex([NaT, NaT], name='x')
+ tm.assert_index_equal(dti + NaT, exp)
+ tm.assert_index_equal(NaT + dti, exp)
+
+ dti_tz = DatetimeIndex(['2011-01-01', '2011-01-02'],
+ tz='US/Eastern', name='x')
+ exp = DatetimeIndex([NaT, NaT], name='x', tz='US/Eastern')
+ tm.assert_index_equal(dti_tz + NaT, exp)
+ tm.assert_index_equal(NaT + dti_tz, exp)
+
+ exp = TimedeltaIndex([NaT, NaT], name='x')
+ for (left, right) in [(NaT, dti), (NaT, dti_tz)]:
+ tm.assert_index_equal(left - right, exp)
+ tm.assert_index_equal(right - left, exp)
+
+ # timedelta
+ tdi = TimedeltaIndex(['1 day', '2 day'], name='x')
+ exp = DatetimeIndex([NaT, NaT], name='x')
+ for (left, right) in [(NaT, tdi)]:
+ tm.assert_index_equal(left + right, exp)
+ tm.assert_index_equal(right + left, exp)
+ tm.assert_index_equal(left - right, exp)
+ tm.assert_index_equal(right - left, exp)
diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py
index 3128e90695324..7a15600d6041e 100644
--- a/pandas/tests/scalar/test_period.py
+++ b/pandas/tests/scalar/test_period.py
@@ -110,20 +110,6 @@ def test_period_cons_nat(self):
p = Period(tslib.iNaT)
self.assertIs(p, pd.NaT)
- def test_cons_null_like(self):
- # check Timestamp compat
- self.assertIs(Timestamp('NaT'), pd.NaT)
- self.assertIs(Period('NaT'), pd.NaT)
-
- self.assertIs(Timestamp(None), pd.NaT)
- self.assertIs(Period(None), pd.NaT)
-
- self.assertIs(Timestamp(float('nan')), pd.NaT)
- self.assertIs(Period(float('nan')), pd.NaT)
-
- self.assertIs(Timestamp(np.nan), pd.NaT)
- self.assertIs(Period(np.nan), pd.NaT)
-
def test_period_cons_mult(self):
p1 = Period('2011-01', freq='3M')
p2 = Period('2011-01', freq='M')
@@ -854,17 +840,6 @@ def test_properties_secondly(self):
self.assertEqual(Period(freq='Min', year=2012, month=2, day=1, hour=0,
minute=0, second=0).days_in_month, 29)
- def test_properties_nat(self):
- p_nat = Period('NaT', freq='M')
- t_nat = pd.Timestamp('NaT')
- self.assertIs(p_nat, t_nat)
-
- # confirm Period('NaT') work identical with Timestamp('NaT')
- for f in ['year', 'month', 'day', 'hour', 'minute', 'second', 'week',
- 'dayofyear', 'quarter', 'days_in_month']:
- self.assertTrue(np.isnan(getattr(p_nat, f)))
- self.assertTrue(np.isnan(getattr(t_nat, f)))
-
def test_pnow(self):
# deprecation, xref #13790
diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py
index 7c5caa9506ca2..c2b895925b685 100644
--- a/pandas/tests/scalar/test_timedelta.py
+++ b/pandas/tests/scalar/test_timedelta.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
- to_timedelta, compat, isnull)
+ to_timedelta, compat)
from pandas._libs.tslib import iNaT, NaTType
@@ -151,14 +151,6 @@ def test_construction(self):
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
- # nat
- self.assertEqual(Timedelta('').value, iNaT)
- self.assertEqual(Timedelta('nat').value, iNaT)
- self.assertEqual(Timedelta('NAT').value, iNaT)
- self.assertEqual(Timedelta(None).value, iNaT)
- self.assertEqual(Timedelta(np.nan).value, iNaT)
- self.assertTrue(isnull(Timedelta('nat')))
-
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
@@ -686,11 +678,6 @@ def test_isoformat(self):
expected = 'P0DT0H0M0.001S'
self.assertEqual(result, expected)
- # NaT
- result = Timedelta('NaT').isoformat()
- expected = 'NaT'
- self.assertEqual(result, expected)
-
# don't strip every 0
result = Timedelta(minutes=1).isoformat()
expected = 'P0DT0H1M0S'
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index bbf33c4db5ad7..e39375141ad5f 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -7,23 +7,19 @@
from datetime import datetime, timedelta
from distutils.version import LooseVersion
-import pandas as pd
import pandas.util.testing as tm
-
from pandas.tseries import offsets, frequencies
from pandas._libs import tslib, period
-from pandas._libs.tslib import get_timezone, iNaT
+from pandas._libs.tslib import get_timezone
from pandas.compat import lrange, long
from pandas.util.testing import assert_series_equal
from pandas.compat.numpy import np_datetime64_compat
from pandas import (Timestamp, date_range, Period, Timedelta, compat,
- Series, NaT, isnull, DataFrame, DatetimeIndex)
+ Series, NaT, DataFrame, DatetimeIndex)
from pandas.tseries.frequencies import (RESO_DAY, RESO_HR, RESO_MIN, RESO_US,
RESO_MS, RESO_SEC)
-randn = np.random.randn
-
class TestTimestamp(tm.TestCase):
@@ -202,8 +198,6 @@ def test_constructor_positional(self):
repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)),
repr(Timestamp('2015-11-12 01:02:03.999999')))
- self.assertIs(Timestamp(None), pd.NaT)
-
def test_constructor_keyword(self):
# GH 10758
with tm.assertRaises(TypeError):
@@ -235,7 +229,7 @@ def test_constructor_fromordinal(self):
self.assertEqual(base.toordinal(), ts.toordinal())
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
- self.assertEqual(pd.Timestamp('2000-01-01', tz='US/Eastern'), ts)
+ self.assertEqual(Timestamp('2000-01-01', tz='US/Eastern'), ts)
self.assertEqual(base.toordinal(), ts.toordinal())
def test_constructor_offset_depr(self):
@@ -260,7 +254,7 @@ def test_constructor_offset_depr_fromordinal(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = Timestamp.fromordinal(base.toordinal(), offset='D')
- self.assertEqual(pd.Timestamp('2000-01-01'), ts)
+ self.assertEqual(Timestamp('2000-01-01'), ts)
self.assertEqual(ts.freq, 'D')
self.assertEqual(base.toordinal(), ts.toordinal())
@@ -422,12 +416,12 @@ def test_tz_localize_nonexistent(self):
self.assertRaises(NonExistentTimeError, ts.tz_localize,
tz, errors='raise')
self.assertIs(ts.tz_localize(tz, errors='coerce'),
- pd.NaT)
+ NaT)
def test_tz_localize_errors_ambiguous(self):
# See issue 13057
from pytz.exceptions import AmbiguousTimeError
- ts = pd.Timestamp('2015-11-1 01:00')
+ ts = Timestamp('2015-11-1 01:00')
self.assertRaises(AmbiguousTimeError,
ts.tz_localize, 'US/Pacific', errors='coerce')
@@ -576,94 +570,6 @@ def check(value, equal):
for end in ends:
self.assertTrue(getattr(ts, end))
- def test_nat_fields(self):
- # GH 10050
- ts = Timestamp('NaT')
- self.assertTrue(np.isnan(ts.year))
- self.assertTrue(np.isnan(ts.month))
- self.assertTrue(np.isnan(ts.day))
- self.assertTrue(np.isnan(ts.hour))
- self.assertTrue(np.isnan(ts.minute))
- self.assertTrue(np.isnan(ts.second))
- self.assertTrue(np.isnan(ts.microsecond))
- self.assertTrue(np.isnan(ts.nanosecond))
- self.assertTrue(np.isnan(ts.dayofweek))
- self.assertTrue(np.isnan(ts.quarter))
- self.assertTrue(np.isnan(ts.dayofyear))
- self.assertTrue(np.isnan(ts.week))
- self.assertTrue(np.isnan(ts.daysinmonth))
- self.assertTrue(np.isnan(ts.days_in_month))
-
- def test_nat_vector_field_access(self):
- idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
-
- # non boolean fields
- fields = ['year', 'quarter', 'month', 'day', 'hour', 'minute',
- 'second', 'microsecond', 'nanosecond', 'week', 'dayofyear',
- 'days_in_month']
-
- for field in fields:
- result = getattr(idx, field)
- expected = [getattr(x, field) for x in idx]
- self.assert_index_equal(result, pd.Index(expected))
-
- # boolean fields
- fields = ['is_leap_year']
- # other boolean fields like 'is_month_start' and 'is_month_end'
- # not yet supported by NaT
-
- for field in fields:
- result = getattr(idx, field)
- expected = [getattr(x, field) for x in idx]
- self.assert_numpy_array_equal(result, np.array(expected))
-
- s = pd.Series(idx)
-
- for field in fields:
- result = getattr(s.dt, field)
- expected = [getattr(x, field) for x in idx]
- self.assert_series_equal(result, pd.Series(expected))
-
- def test_nat_scalar_field_access(self):
- fields = ['year', 'quarter', 'month', 'day', 'hour', 'minute',
- 'second', 'microsecond', 'nanosecond', 'week', 'dayofyear',
- 'days_in_month', 'daysinmonth', 'dayofweek', 'weekday_name']
- for field in fields:
- result = getattr(NaT, field)
- self.assertTrue(np.isnan(result))
-
- def test_NaT_methods(self):
- # GH 9513
- raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
- 'fromordinal', 'fromtimestamp', 'isocalendar',
- 'strftime', 'strptime', 'time', 'timestamp',
- 'timetuple', 'timetz', 'toordinal', 'tzname',
- 'utcfromtimestamp', 'utcnow', 'utcoffset',
- 'utctimetuple']
- nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today']
- nan_methods = ['weekday', 'isoweekday']
-
- for method in raise_methods:
- if hasattr(NaT, method):
- self.assertRaises(ValueError, getattr(NaT, method))
-
- for method in nan_methods:
- if hasattr(NaT, method):
- self.assertTrue(np.isnan(getattr(NaT, method)()))
-
- for method in nat_methods:
- if hasattr(NaT, method):
- # see gh-8254
- exp_warning = None
- if method == 'to_datetime':
- exp_warning = FutureWarning
- with tm.assert_produces_warning(
- exp_warning, check_stacklevel=False):
- self.assertIs(getattr(NaT, method)(), NaT)
-
- # GH 12300
- self.assertEqual(NaT.isoformat(), 'NaT')
-
def test_pprint(self):
# GH12622
import pprint
@@ -772,24 +678,40 @@ def test_round(self):
self.assertRaises(ValueError, lambda: dti.round(freq))
# GH 14440 & 15578
- result = pd.Timestamp('2016-10-17 12:00:00.0015').round('ms')
- expected = pd.Timestamp('2016-10-17 12:00:00.002000')
+ result = Timestamp('2016-10-17 12:00:00.0015').round('ms')
+ expected = Timestamp('2016-10-17 12:00:00.002000')
self.assertEqual(result, expected)
- result = pd.Timestamp('2016-10-17 12:00:00.00149').round('ms')
- expected = pd.Timestamp('2016-10-17 12:00:00.001000')
+ result = Timestamp('2016-10-17 12:00:00.00149').round('ms')
+ expected = Timestamp('2016-10-17 12:00:00.001000')
self.assertEqual(result, expected)
- ts = pd.Timestamp('2016-10-17 12:00:00.0015')
+ ts = Timestamp('2016-10-17 12:00:00.0015')
for freq in ['us', 'ns']:
self.assertEqual(ts, ts.round(freq))
- result = pd.Timestamp('2016-10-17 12:00:00.001501031').round('10ns')
- expected = pd.Timestamp('2016-10-17 12:00:00.001501030')
+ result = Timestamp('2016-10-17 12:00:00.001501031').round('10ns')
+ expected = Timestamp('2016-10-17 12:00:00.001501030')
self.assertEqual(result, expected)
with tm.assert_produces_warning():
- pd.Timestamp('2016-10-17 12:00:00.001501031').round('1010ns')
+ Timestamp('2016-10-17 12:00:00.001501031').round('1010ns')
+
+ def test_round_misc(self):
+ stamp = Timestamp('2000-01-05 05:09:15.13')
+
+ def _check_round(freq, expected):
+ result = stamp.round(freq=freq)
+ self.assertEqual(result, expected)
+
+ for freq, expected in [('D', Timestamp('2000-01-05 00:00:00')),
+ ('H', Timestamp('2000-01-05 05:00:00')),
+ ('S', Timestamp('2000-01-05 05:09:15'))]:
+ _check_round(freq, expected)
+
+ msg = frequencies._INVALID_FREQ_ERROR
+ with self.assertRaisesRegexp(ValueError, msg):
+ stamp.round('foo')
def test_class_ops_pytz(self):
tm._skip_if_no_pytz()
@@ -906,48 +828,30 @@ def check(val, unit=None, h=1, s=1, us=0):
check(val / 1000000000.0 + 0.5, unit='s', us=500000)
check(days + 0.5, unit='D', h=12)
- # nan
- result = Timestamp(np.nan)
- self.assertIs(result, NaT)
-
- result = Timestamp(None)
- self.assertIs(result, NaT)
-
- result = Timestamp(iNaT)
- self.assertIs(result, NaT)
-
- result = Timestamp(NaT)
- self.assertIs(result, NaT)
-
- result = Timestamp('NaT')
- self.assertIs(result, NaT)
-
- self.assertTrue(isnull(Timestamp('nat')))
-
def test_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp('20140101 00:00:00')
- result = Timestamp(base.value + pd.Timedelta('5ms').value)
+ result = Timestamp(base.value + Timedelta('5ms').value)
self.assertEqual(result, Timestamp(str(base) + ".005000"))
self.assertEqual(result.microsecond, 5000)
- result = Timestamp(base.value + pd.Timedelta('5us').value)
+ result = Timestamp(base.value + Timedelta('5us').value)
self.assertEqual(result, Timestamp(str(base) + ".000005"))
self.assertEqual(result.microsecond, 5)
- result = Timestamp(base.value + pd.Timedelta('5ns').value)
+ result = Timestamp(base.value + Timedelta('5ns').value)
self.assertEqual(result, Timestamp(str(base) + ".000000005"))
self.assertEqual(result.nanosecond, 5)
self.assertEqual(result.microsecond, 0)
- result = Timestamp(base.value + pd.Timedelta('6ms 5us').value)
+ result = Timestamp(base.value + Timedelta('6ms 5us').value)
self.assertEqual(result, Timestamp(str(base) + ".006005"))
self.assertEqual(result.microsecond, 5 + 6 * 1000)
- result = Timestamp(base.value + pd.Timedelta('200ms 5us').value)
+ result = Timestamp(base.value + Timedelta('200ms 5us').value)
self.assertEqual(result, Timestamp(str(base) + ".200005"))
self.assertEqual(result.microsecond, 5 + 200 * 1000)
@@ -1004,9 +908,9 @@ def test_compare_invalid(self):
self.assertTrue(val != np.int64(1))
# ops testing
- df = DataFrame(randn(5, 2))
+ df = DataFrame(np.random.randn(5, 2))
a = df[0]
- b = Series(randn(5))
+ b = Series(np.random.randn(5))
b.name = Timestamp('2000-01-01')
tm.assert_series_equal(a / b, 1 / (b / a))
@@ -1149,8 +1053,8 @@ def test_timestamp_compare_series(self):
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
- s[0] = pd.Timestamp('nat')
- s[3] = pd.Timestamp('nat')
+ s[0] = Timestamp('nat')
+ s[3] = Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
@@ -1194,18 +1098,6 @@ def test_is_leap_year(self):
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
self.assertFalse(dt.is_leap_year)
- self.assertFalse(pd.NaT.is_leap_year)
- self.assertIsInstance(pd.NaT.is_leap_year, bool)
-
- def test_round_nat(self):
- # GH14940
- ts = Timestamp('nat')
- print(dir(ts))
- for method in ["round", "floor", "ceil"]:
- round_method = getattr(ts, method)
- for freq in ["s", "5s", "min", "5min", "h", "5h"]:
- self.assertIs(round_method(freq), ts)
-
class TestTimestampNsOperations(tm.TestCase):
@@ -1293,95 +1185,6 @@ def test_nanosecond_timestamp(self):
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
- def test_nat_arithmetic(self):
- # GH 6873
- i = 2
- f = 1.5
-
- for (left, right) in [(pd.NaT, i), (pd.NaT, f), (pd.NaT, np.nan)]:
- self.assertIs(left / right, pd.NaT)
- self.assertIs(left * right, pd.NaT)
- self.assertIs(right * left, pd.NaT)
- with tm.assertRaises(TypeError):
- right / left
-
- # Timestamp / datetime
- t = Timestamp('2014-01-01')
- dt = datetime(2014, 1, 1)
- for (left, right) in [(pd.NaT, pd.NaT), (pd.NaT, t), (pd.NaT, dt)]:
- # NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
- self.assertIs(right + left, pd.NaT)
- self.assertIs(left + right, pd.NaT)
- self.assertIs(left - right, pd.NaT)
- self.assertIs(right - left, pd.NaT)
-
- # timedelta-like
- # offsets are tested in test_offsets.py
-
- delta = timedelta(3600)
- td = Timedelta('5s')
-
- for (left, right) in [(pd.NaT, delta), (pd.NaT, td)]:
- # NaT + timedelta-like returns NaT
- self.assertIs(right + left, pd.NaT)
- self.assertIs(left + right, pd.NaT)
- self.assertIs(right - left, pd.NaT)
- self.assertIs(left - right, pd.NaT)
-
- # GH 11718
- tm._skip_if_no_pytz()
- import pytz
-
- t_utc = Timestamp('2014-01-01', tz='UTC')
- t_tz = Timestamp('2014-01-01', tz='US/Eastern')
- dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
-
- for (left, right) in [(pd.NaT, t_utc), (pd.NaT, t_tz),
- (pd.NaT, dt_tz)]:
- # NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
- self.assertIs(right + left, pd.NaT)
- self.assertIs(left + right, pd.NaT)
- self.assertIs(left - right, pd.NaT)
- self.assertIs(right - left, pd.NaT)
-
- # int addition / subtraction
- for (left, right) in [(pd.NaT, 2), (pd.NaT, 0), (pd.NaT, -3)]:
- self.assertIs(right + left, pd.NaT)
- self.assertIs(left + right, pd.NaT)
- self.assertIs(left - right, pd.NaT)
- self.assertIs(right - left, pd.NaT)
-
- def test_nat_arithmetic_index(self):
- # GH 11718
-
- # datetime
- tm._skip_if_no_pytz()
-
- dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
- exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x')
- self.assert_index_equal(dti + pd.NaT, exp)
- self.assert_index_equal(pd.NaT + dti, exp)
-
- dti_tz = pd.DatetimeIndex(['2011-01-01', '2011-01-02'],
- tz='US/Eastern', name='x')
- exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x', tz='US/Eastern')
- self.assert_index_equal(dti_tz + pd.NaT, exp)
- self.assert_index_equal(pd.NaT + dti_tz, exp)
-
- exp = pd.TimedeltaIndex([pd.NaT, pd.NaT], name='x')
- for (left, right) in [(pd.NaT, dti), (pd.NaT, dti_tz)]:
- self.assert_index_equal(left - right, exp)
- self.assert_index_equal(right - left, exp)
-
- # timedelta
- tdi = pd.TimedeltaIndex(['1 day', '2 day'], name='x')
- exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x')
- for (left, right) in [(pd.NaT, tdi)]:
- self.assert_index_equal(left + right, exp)
- self.assert_index_equal(right + left, exp)
- self.assert_index_equal(left - right, exp)
- self.assert_index_equal(right - left, exp)
-
class TestTimestampOps(tm.TestCase):
@@ -1722,22 +1525,3 @@ def test_to_datetime_bijective(self):
self.assertEqual(
Timestamp(Timestamp.min.to_pydatetime()).value / 1000,
Timestamp.min.value / 1000)
-
-
-class TestTslib(tm.TestCase):
-
- def test_round(self):
- stamp = Timestamp('2000-01-05 05:09:15.13')
-
- def _check_round(freq, expected):
- result = stamp.round(freq=freq)
- self.assertEqual(result, expected)
-
- for freq, expected in [('D', Timestamp('2000-01-05 00:00:00')),
- ('H', Timestamp('2000-01-05 05:00:00')),
- ('S', Timestamp('2000-01-05 05:09:15'))]:
- _check_round(freq, expected)
-
- msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
- with self.assertRaisesRegexp(ValueError, msg):
- stamp.round('foo')
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 4c697c7e52bb8..89f972a33a630 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -8,10 +8,8 @@
from pandas.types.common import is_integer_dtype, is_list_like
from pandas import (Index, Series, DataFrame, bdate_range,
- date_range, period_range, timedelta_range)
-from pandas.tseries.period import PeriodIndex
-from pandas.tseries.index import Timestamp, DatetimeIndex
-from pandas.tseries.tdi import TimedeltaIndex
+ date_range, period_range, timedelta_range,
+ PeriodIndex, Timestamp, DatetimeIndex, TimedeltaIndex)
import pandas.core.common as com
from pandas.util.testing import assert_series_equal
@@ -27,21 +25,13 @@ def test_dt_namespace_accessor(self):
# GH 7207, 11128
# test .dt namespace accessor
- ok_for_base = ['year', 'month', 'day', 'hour', 'minute', 'second',
- 'weekofyear', 'week', 'dayofweek', 'weekday',
- 'dayofyear', 'quarter', 'freq', 'days_in_month',
- 'daysinmonth', 'is_leap_year']
- ok_for_period = ok_for_base + ['qyear', 'start_time', 'end_time']
+ ok_for_period = PeriodIndex._datetimelike_ops
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
- ok_for_dt = ok_for_base + ['date', 'time', 'microsecond', 'nanosecond',
- 'is_month_start', 'is_month_end',
- 'is_quarter_start', 'is_quarter_end',
- 'is_year_start', 'is_year_end', 'tz',
- 'weekday_name']
+ ok_for_dt = DatetimeIndex._datetimelike_ops
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil', 'weekday_name']
- ok_for_td = ['days', 'seconds', 'microseconds', 'nanoseconds']
+ ok_for_td = TimedeltaIndex._datetimelike_ops
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 68db0d19344b9..032e3a186b84a 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -219,7 +219,7 @@ def check_ops_properties(self, props, filter=None, ignore_failures=False):
self.assertEqual(result, expected)
# freq raises AttributeError on an Int64Index because its not
- # defined we mostly care about Series hwere anyhow
+ # defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 8fd3c6324d48c..b4072d04dfd81 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -17,9 +17,11 @@
import pandas as pd
import pandas.compat as compat
import pandas.util.testing as tm
-from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
- Timestamp, CategoricalIndex, DatetimeIndex,
- isnull, NaT)
+from pandas import (Categorical, Index, Series, DataFrame,
+ Timestamp, CategoricalIndex, isnull,
+ date_range, DatetimeIndex,
+ period_range, PeriodIndex,
+ timedelta_range, TimedeltaIndex, NaT)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
@@ -4299,9 +4301,6 @@ def test_str_accessor_api_for_categorical(self):
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.tseries.common import Properties
- from pandas.tseries.index import date_range, DatetimeIndex
- from pandas.tseries.period import period_range, PeriodIndex
- from pandas.tseries.tdi import timedelta_range, TimedeltaIndex
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
@@ -4312,10 +4311,14 @@ def test_dt_accessor_api_for_categorical(self):
s_tdr = Series(timedelta_range('1 days', '10 days'))
c_tdr = s_tdr.astype("category")
+ # only testing field (like .day)
+ # and bool (is_month_start)
+ get_ops = lambda x: x._datetimelike_ops
+
test_data = [
- ("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr),
- ("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr),
- ("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)]
+ ("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
+ ("Period", get_ops(PeriodIndex), s_pr, c_pr),
+ ("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr)]
self.assertIsInstance(c_dr.dt, Properties)
@@ -4325,12 +4328,13 @@ def test_dt_accessor_api_for_categorical(self):
('round', ("D",), {}),
('floor', ("D",), {}),
('ceil', ("D",), {}),
+ ('asfreq', ("D",), {}),
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
- _ignore_names = ['tz_localize']
+ _ignore_names = ['tz_localize', 'components']
for name, attr_names, s, c in test_data:
func_names = [f
@@ -4352,7 +4356,7 @@ def test_dt_accessor_api_for_categorical(self):
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
- tm.assert_numpy_array_equal(res, exp)
+ tm.assert_almost_equal(res, exp)
for attr in attr_names:
try:
@@ -4367,7 +4371,7 @@ def test_dt_accessor_api_for_categorical(self):
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
- tm.assert_numpy_array_equal(res, exp)
+ tm.assert_almost_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assertRaisesRegexp(
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index f9fd27176487c..7940efc7e1b59 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -168,8 +168,7 @@ def to_pydatetime(self):
typ='property')
DatetimeProperties._add_delegate_accessors(
delegate=DatetimeIndex,
- accessors=["to_period", "tz_localize", "tz_convert",
- "normalize", "strftime", "round", "floor", "ceil"],
+ accessors=DatetimeIndex._datetimelike_methods,
typ='method')
@@ -208,7 +207,7 @@ def components(self):
typ='property')
TimedeltaProperties._add_delegate_accessors(
delegate=TimedeltaIndex,
- accessors=["to_pytimedelta", "total_seconds", "round", "floor", "ceil"],
+ accessors=TimedeltaIndex._datetimelike_methods,
typ='method')
@@ -230,9 +229,10 @@ class PeriodProperties(Properties):
delegate=PeriodIndex,
accessors=PeriodIndex._datetimelike_ops,
typ='property')
-PeriodProperties._add_delegate_accessors(delegate=PeriodIndex,
- accessors=["strftime"],
- typ='method')
+PeriodProperties._add_delegate_accessors(
+ delegate=PeriodIndex,
+ accessors=PeriodIndex._datetimelike_methods,
+ typ='method')
class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 11d2d29597fc0..1992e177556cc 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -64,25 +64,26 @@ def f(self):
if self.tz is not utc:
values = self._local_timestamps()
- # boolean accessors -> return array
- if field in ['is_month_start', 'is_month_end',
- 'is_quarter_start', 'is_quarter_end',
- 'is_year_start', 'is_year_end']:
- month_kw = (self.freq.kwds.get('startingMonth',
- self.freq.kwds.get('month', 12))
- if self.freq else 12)
-
- result = libts.get_start_end_field(values, field, self.freqstr,
- month_kw)
- return self._maybe_mask_results(result, convert='float64')
- elif field in ['is_leap_year']:
- # no need to mask NaT
- return libts.get_date_field(values, field)
-
- # non-boolean accessors -> return Index
- elif field in ['weekday_name']:
+ if field in self._bool_ops:
+ if field in ['is_month_start', 'is_month_end',
+ 'is_quarter_start', 'is_quarter_end',
+ 'is_year_start', 'is_year_end']:
+ month_kw = (self.freq.kwds.get('startingMonth',
+ self.freq.kwds.get('month', 12))
+ if self.freq else 12)
+
+ result = libts.get_start_end_field(values, field, self.freqstr,
+ month_kw)
+ else:
+ result = libts.get_date_field(values, field)
+
+ # these return a boolean by-definition
+ return result
+
+ if field in self._object_ops:
result = libts.get_date_name_field(values, field)
result = self._maybe_mask_results(result)
+
else:
result = libts.get_date_field(values, field)
result = self._maybe_mask_results(result, convert='float64')
@@ -232,14 +233,24 @@ def _join_i8_wrapper(joinf, **kwargs):
offset = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
- _datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
- 'weekofyear', 'week', 'dayofweek', 'weekday',
- 'dayofyear', 'quarter', 'days_in_month',
- 'daysinmonth', 'date', 'time', 'microsecond',
- 'nanosecond', 'is_month_start', 'is_month_end',
- 'is_quarter_start', 'is_quarter_end', 'is_year_start',
- 'is_year_end', 'tz', 'freq', 'weekday_name',
- 'is_leap_year']
+
+ # define my properties & methods for delegation
+ _bool_ops = ['is_month_start', 'is_month_end',
+ 'is_quarter_start', 'is_quarter_end', 'is_year_start',
+ 'is_year_end', 'is_leap_year']
+ _object_ops = ['weekday_name', 'freq', 'tz']
+ _field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
+ 'weekofyear', 'week', 'weekday', 'dayofweek',
+ 'dayofyear', 'quarter', 'days_in_month',
+ 'daysinmonth', 'microsecond',
+ 'nanosecond']
+ _other_ops = ['date', 'time']
+ _datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
+ _datetimelike_methods = ['to_period', 'tz_localize',
+ 'tz_convert',
+ 'normalize', 'strftime', 'round', 'floor',
+ 'ceil']
+
_is_numeric_dtype = False
_infer_as_myclass = True
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index c279d5a9342e8..1e1496bbe9c27 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -174,12 +174,18 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
- _datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
- 'weekofyear', 'week', 'dayofweek', 'weekday',
- 'dayofyear', 'quarter', 'qyear', 'freq',
- 'days_in_month', 'daysinmonth',
- 'to_timestamp', 'asfreq', 'start_time', 'end_time',
- 'is_leap_year']
+
+ # define my properties & methods for delegation
+ _other_ops = []
+ _bool_ops = ['is_leap_year']
+ _object_ops = ['start_time', 'end_time', 'freq']
+ _field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
+ 'weekofyear', 'weekday', 'week', 'dayofweek',
+ 'dayofyear', 'quarter', 'qyear',
+ 'days_in_month', 'daysinmonth']
+ _datetimelike_ops = _field_ops + _object_ops + _bool_ops
+ _datetimelike_methods = ['strftime', 'to_timestamp', 'asfreq']
+
_is_numeric_dtype = False
_infer_as_myclass = True
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index 55333890640c1..5d062dd38f9fc 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -127,8 +127,15 @@ def _join_i8_wrapper(joinf, **kwargs):
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
- _datetimelike_ops = ['days', 'seconds', 'microseconds', 'nanoseconds',
- 'freq', 'components']
+
+ # define my properties & methods for delegation
+ _other_ops = []
+ _bool_ops = []
+ _object_ops = ['freq']
+ _field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
+ _datetimelike_ops = _field_ops + _object_ops + _bool_ops
+ _datetimelike_methods = ["to_pytimedelta", "total_seconds",
+ "round", "floor", "ceil"]
__eq__ = _td_index_cmp('__eq__')
__ne__ = _td_index_cmp('__ne__', nat_result=True)
| closes #15781
PR
```
In [1]: idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
In [2]: idx.is_month_start
Out[2]: array([ True, False, False, False], dtype=bool)
In [3]: idx.is_leap_year
Out[3]: array([ True, False, False, True], dtype=bool)
```
0.19.2
```
In [1]: idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
In [3]: idx.is_month_start
Out[3]: array([ 1., nan, nan, 0.])
In [2]: idx.is_leap_year
Out[2]: array([ True, False, False, True], dtype=bool)
```
mimics ``is_leap_year`` to return a boolean (inlcuding for ``NaT``). | https://api.github.com/repos/pandas-dev/pandas/pulls/15782 | 2017-03-22T14:47:23Z | 2017-03-27T19:22:23Z | 2017-03-27T19:22:23Z | 2017-03-27T19:23:35Z |
travis deduping on prs | diff --git a/.travis.yml b/.travis.yml
index 67b37f1d58931..270f8c2fc76c3 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -177,15 +177,14 @@ matrix:
- USE_CACHE=true
before_install:
+ - echo "Checking to see if this build is outdated"
+ - ci/travis_fast_finish.py || { echo "Failing outdated build to end it."; exit 1; }
- echo "before_install"
- source ci/travis_process_gbq_encryption.sh
- - echo $VIRTUAL_ENV
- export PATH="$HOME/miniconda3/bin:$PATH"
- df -h
- - date
- pwd
- uname -a
- - python -V
- git --version
- git tag
- ci/before_install_travis.sh
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index c940083f5ae9e..66633c0592748 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -1,18 +1,6 @@
#!/bin/bash
-# There are 2 distinct pieces that get zipped and cached
-# - The venv site-packages dir including the installed dependencies
-# - The pandas build artifacts, using the build cache support via
-# scripts/use_build_cache.py
-#
-# if the user opted in to use the cache and we're on a whitelisted fork
-# - if the server doesn't hold a cached version of venv/pandas build,
-# do things the slow way, and put the results on the cache server
-# for the next time.
-# - if the cache files are available, instal some necessaries via apt
-# (no compiling needed), then directly goto script and collect 200$.
-#
-
+# edit the locale file if needed
function edit_init()
{
if [ -n "$LOCALE_OVERRIDE" ]; then
@@ -26,15 +14,18 @@ function edit_init()
fi
}
+echo
echo "[install_travis]"
edit_init
home_dir=$(pwd)
-echo "[home_dir: $home_dir]"
+echo
+echo "[home_dir]: $home_dir"
# install miniconda
MINICONDA_DIR="$HOME/miniconda3"
+echo
echo "[Using clean Miniconda install]"
if [ -d "$MINICONDA_DIR" ]; then
@@ -49,14 +40,17 @@ else
fi
time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1
+echo
echo "[show conda]"
which conda
+echo
echo "[update conda]"
conda config --set ssl_verify false || exit 1
conda config --set always_yes true --set changeps1 false || exit 1
conda update -q conda
+echo
echo "[add channels]"
# add the pandas channel to take priority
# to add extra packages
@@ -73,26 +67,28 @@ fi
conda info -a || exit 1
# set the compiler cache to work
+echo
if [ "$USE_CACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then
echo "[Using ccache]"
export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH
gcc=$(which gcc)
- echo "[gcc: $gcc]"
+ echo "[gcc]: $gcc"
ccache=$(which ccache)
- echo "[ccache: $ccache]"
+ echo "[ccache]: $ccache"
export CC='ccache gcc'
elif [ "$USE_CACHE" ] && [ "${TRAVIS_OS_NAME}" == "osx" ]; then
echo "[Using ccache]"
time brew install ccache
export PATH=/usr/local/opt/ccache/libexec:$PATH
gcc=$(which gcc)
- echo "[gcc: $gcc]"
+ echo "[gcc]: $gcc"
ccache=$(which ccache)
- echo "[ccache: $ccache]"
+ echo "[ccache]: $ccache"
else
echo "[Not using ccache]"
fi
+echo
echo "[create env]"
# may have installation instructions for this build
@@ -106,6 +102,7 @@ else
fi
# build deps
+echo
echo "[build installs]"
REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.build"
if [ -e ${REQ} ]; then
@@ -113,6 +110,7 @@ if [ -e ${REQ} ]; then
fi
# may have addtl installation instructions for this build
+echo
echo "[build addtl installs]"
REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.build.sh"
if [ -e ${REQ} ]; then
@@ -132,6 +130,7 @@ if [ "$COVERAGE" ]; then
pip install coverage pytest-cov
fi
+echo
if [ "$BUILD_TEST" ]; then
# build & install testing
@@ -151,6 +150,7 @@ else
fi
# we may have run installations
+echo
echo "[conda installs]"
REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.run"
if [ -e ${REQ} ]; then
@@ -158,6 +158,7 @@ if [ -e ${REQ} ]; then
fi
# we may have additional pip installs
+echo
echo "[pip installs]"
REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.pip"
if [ -e ${REQ} ]; then
@@ -165,6 +166,7 @@ if [ -e ${REQ} ]; then
fi
# may have addtl installation instructions for this build
+echo
echo "[addtl installs]"
REQ="ci/requirements-${PYTHON_VERSION}${JOB_TAG}.sh"
if [ -e ${REQ} ]; then
@@ -176,14 +178,17 @@ if [ -z "$BUILD_TEST" ]; then
# remove any installed pandas package
# w/o removing anything else
+ echo
echo "[removing installed pandas]"
conda remove pandas --force
# install our pandas
+ echo
echo "[running setup.py develop]"
python setup.py develop || exit 1
fi
+echo
echo "[done]"
exit 0
diff --git a/ci/travis_fast_finish.py b/ci/travis_fast_finish.py
new file mode 100755
index 0000000000000..c2e2a9159918b
--- /dev/null
+++ b/ci/travis_fast_finish.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+# script to cancel previous travis builds for the same PR
+# originally from
+# https://github.com/conda-forge/staged-recipes/pull/2257
+
+try:
+ from future_builtins import (
+ map,
+ filter,
+ )
+except ImportError:
+ pass
+
+import codecs
+import contextlib
+import json
+import os
+
+try:
+ from urllib.request import (
+ Request,
+ urlopen,
+ )
+except ImportError:
+ from urllib2 import (
+ Request,
+ urlopen,
+ )
+
+
+def check_latest_pr_build(repo, pr, build_num):
+ # Not a PR so it is latest.
+ if pr is None:
+ return True
+
+ headers = {
+ "Accept": "application/vnd.travis-ci.2+json",
+ }
+ url = "https://api.travis-ci.org/repos/{repo}/builds?event_type=pull_request"
+
+ request = Request(url.format(repo=repo), headers=headers)
+ with contextlib.closing(urlopen(request)) as response:
+ reader = codecs.getreader("utf-8")
+ data = json.load(reader(response))
+
+ # Parse the response to get a list of build numbers for this PR.
+ builds = data["builds"]
+ pr_builds = filter(lambda b: b["pull_request_number"] == pr, builds)
+ pr_build_nums = sorted(map(lambda b: int(b["number"]), pr_builds))
+
+ print("build_num: {}".format(build_num))
+ print("pr_build_nums: {}".format(','.join([str(n) for n in pr_build_nums])))
+
+ # Check if our build number is the latest (largest)
+ # out of all of the builds for this PR.
+ if build_num < max(pr_build_nums):
+ return False
+ else:
+ return True
+
+
+def main():
+ repo = os.environ["TRAVIS_REPO_SLUG"]
+
+ pr = os.environ["TRAVIS_PULL_REQUEST"]
+ pr = None if pr == "false" else int(pr)
+ build_num = int(os.environ["TRAVIS_BUILD_NUMBER"])
+
+ print("checking for fast_finish: {}-{}-{}".format(repo, pr, build_num))
+
+ return int(check_latest_pr_build(repo, pr, build_num) is False)
+
+
+if __name__ == "__main__":
+ import sys
+ sys.exit(main())
| closes #12438 | https://api.github.com/repos/pandas-dev/pandas/pulls/15780 | 2017-03-22T12:52:37Z | 2017-03-22T13:58:05Z | null | 2017-03-22T15:38:39Z |
Fix scalar iloc | diff --git a/doc/source/whatsnew/v0.20.2.txt b/doc/source/whatsnew/v0.20.2.txt
index 03579dab0d6a3..743307113fa27 100644
--- a/doc/source/whatsnew/v0.20.2.txt
+++ b/doc/source/whatsnew/v0.20.2.txt
@@ -46,7 +46,7 @@ Indexing
^^^^^^^^
- Bug in ``DataFrame.reset_index(level=)`` with single level index (:issue:`16263`)
-
+- Bug in ``DataFrame.iloc`` with duplicate labels (:issue:`15686`)
I/O
^^^
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 19d3792f73de7..62ab0ea54c28d 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1026,3 +1026,24 @@ def find_common_type(types):
return np.object
return np.find_common_type(types, [])
+
+
+def _maybe_convert_indexer(indexer, until):
+ """
+ Convert slice, tuple, list or scalar "indexer" to 1-d array of indices,
+ using "until" as maximum for upwards open slices.
+ """
+
+ if is_scalar(indexer):
+ return np.array([indexer], dtype=int)
+
+ if isinstance(indexer, np.ndarray):
+ if indexer.dtype == bool:
+ return np.where(indexer)[0]
+ return indexer
+
+ if isinstance(indexer, slice):
+ stop = until if indexer.stop is None else indexer.stop
+ return np.arange(stop, dtype=int)[indexer]
+
+ return np.array(indexer, dtype=int)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
old mode 100755
new mode 100644
index a01e3dc46dfe9..5dc10d232c479
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -16,6 +16,7 @@
_is_unorderable_exception,
_ensure_platform_int)
from pandas.core.dtypes.missing import isnull, _infer_fill_value
+from pandas.core.dtypes.cast import _maybe_convert_indexer
from pandas.core.index import Index, MultiIndex
@@ -81,6 +82,24 @@ def __getitem__(self, arg):
IndexSlice = _IndexSlice()
+class InfoCleaner:
+ """
+ A context manager which temporarily removes labels on the "info" axis,
+ replacing them with a RangeIndex, and then puts them back in place.
+ Used to unambiguously index by position.
+ """
+ def __init__(self, obj):
+ self._obj = obj
+ self._info_axis = self._obj._AXIS_NAMES[self._obj._info_axis_number]
+
+ def __enter__(self):
+ self._old_col = getattr(self._obj, self._info_axis)
+ setattr(self._obj, self._info_axis, range(len(self._old_col)))
+
+ def __exit__(self, *args):
+ setattr(self._obj, self._info_axis, self._old_col)
+
+
class IndexingError(Exception):
pass
@@ -492,29 +511,10 @@ def _setitem_with_indexer(self, indexer, value):
else:
lplane_indexer = 0
- def setter(item, v):
- s = self.obj[item]
- pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
-
- # perform the equivalent of a setitem on the info axis
- # as we have a null slice or a slice with full bounds
- # which means essentially reassign to the columns of a
- # multi-dim object
- # GH6149 (null slice), GH10408 (full bounds)
- if (isinstance(pi, tuple) and
- all(is_null_slice(idx) or
- is_full_slice(idx, len(self.obj))
- for idx in pi)):
- s = v
- else:
- # set the item, possibly having a dtype change
- s._consolidate_inplace()
- s = s.copy()
- s._data = s._data.setitem(indexer=pi, value=v)
- s._maybe_update_cacher(clear=True)
-
- # reset the sliced object if unique
- self.obj[item] = s
+ setter_kwargs = {'items': labels,
+ 'indexer': indexer,
+ 'pi': plane_indexer[0] if lplane_indexer == 1
+ else plane_indexer}
def can_do_equal_len():
""" return True if we have an equal len settable """
@@ -542,7 +542,7 @@ def can_do_equal_len():
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, MultiIndex)
- for item in labels:
+ for idx, item in enumerate(labels):
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
@@ -551,7 +551,7 @@ def can_do_equal_len():
else:
v = np.nan
- setter(item, v)
+ self._setter(idx, v, force_loc=True, **setter_kwargs)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
@@ -563,14 +563,15 @@ def can_do_equal_len():
raise ValueError('Must have equal len keys and value '
'when setting with an ndarray')
- for i, item in enumerate(labels):
+ for i in range(len(labels)):
# setting with a list, recoerces
- setter(item, value[:, i].tolist())
+ self._setter(i, value[:, i].tolist(), force_loc=True,
+ **setter_kwargs)
# we have an equal len list/ndarray
elif can_do_equal_len():
- setter(labels[0], value)
+ self._setter(0, value, **setter_kwargs)
# per label values
else:
@@ -579,13 +580,12 @@ def can_do_equal_len():
raise ValueError('Must have equal len keys and value '
'when setting with an iterable')
- for item, v in zip(labels, value):
- setter(item, v)
+ for i, v in zip(range(len(labels)), value):
+ self._setter(i, v, **setter_kwargs)
else:
-
# scalar
- for item in labels:
- setter(item, value)
+ for idx in range(len(labels)):
+ self._setter(idx, value, **setter_kwargs)
else:
if isinstance(indexer, tuple):
@@ -619,6 +619,47 @@ def can_do_equal_len():
value=value)
self.obj._maybe_update_cacher(clear=True)
+ def _setter(self, idx, v, items, pi, **kwargs):
+ """
+ Set a single value on the underlying object. Label-based.
+
+ Parameters
+ ----------
+ idx : int
+ The index of the desired element inside "items"
+
+ v : any
+ The value to assign to the specified location
+
+ items: list
+ A list of labels
+
+ pi: tuple or list-like
+ Components of original indexer preceding the info axis
+ """
+ item = items[idx]
+ s = self.obj[item]
+
+ # perform the equivalent of a setitem on the info axis
+ # as we have a null slice or a slice with full bounds
+ # which means essentially reassign to the columns of a
+ # multi-dim object
+ # GH6149 (null slice), GH10408 (full bounds)
+ if (isinstance(pi, tuple) and
+ all(is_null_slice(ix) or
+ is_full_slice(ix, len(self.obj))
+ for ix in pi)):
+ s = v
+ else:
+ # set the item, possibly having a dtype change
+ s._consolidate_inplace()
+ s = s.copy()
+ s._data = s._data.setitem(indexer=pi, value=v)
+ s._maybe_update_cacher(clear=True)
+
+ # reset the sliced object if unique
+ self.obj[item] = s
+
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
@@ -1766,6 +1807,37 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False):
raise ValueError("Can only index by location with a [%s]" %
self._valid_types)
+ def _setter(self, idx, v, indexer, force_loc=False, **kwargs):
+ """
+ Set a single value on the underlying object. Position-based by default.
+
+ Parameters
+ ----------
+ idx : int
+ The index of the desired element
+
+ v : any
+ The value to assign to the specified location
+
+ indexer: list
+ The original indexer
+
+ force_loc: bool
+ If True, use location-based indexing.
+
+ Other keyword arguments are forwarded to _NDFrameIndexer._setter()
+ """
+
+ if force_loc:
+ super(_iLocIndexer, self)._setter(idx, v, **kwargs)
+ else:
+ info_axis = self.obj._info_axis_number
+ max_idx = len(self.obj._get_axis(info_axis))
+ kwargs['items'] = _maybe_convert_indexer(indexer[info_axis],
+ max_idx)
+ with InfoCleaner(self.obj):
+ super(_iLocIndexer, self)._setter(idx, v, **kwargs)
+
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index af4b9e1f0cc25..2a7cf3e285d44 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -288,15 +288,33 @@ def test_iloc_setitem_dups(self):
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
- df = concat([df1, df2], axis=1)
+ df_orig = concat([df1, df2], axis=1)
+ df = df_orig.copy()
+ # GH 15686
+ # iloc with mask, duplicated index and multiple blocks
expected = df.fillna(3)
- expected['A'] = expected['A'].astype('float64')
+ expected.iloc[:, 0] = expected.iloc[:, 0].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
+ # GH 15686
+ # iloc with scalar, duplicated index and multiple blocks
+ df = df_orig.copy()
+ expected = df.fillna(15)
+ df.iloc[0, 0] = 15
+ tm.assert_frame_equal(df, expected)
+
+ # GH 15686
+ # iloc with repeated value, duplicated index and multiple blocks
+ df = df_orig.copy()
+ expected = concat([DataFrame([{'A': 15, 'B': 1}, {'A': 15, 'B': 2}]),
+ df2], axis=1)
+ df.iloc[:, 0] = 15
+ tm.assert_frame_equal(df, expected)
+
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
@@ -314,6 +332,17 @@ def test_iloc_setitem_dups(self):
drop=True)
tm.assert_frame_equal(df, expected)
+ @pytest.mark.xfail(reason="BlockManager.setitem() broken")
+ def test_iloc_setitem_dups_mixed_df(self):
+ # GH 12991
+ df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
+ df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
+ df = concat([df1, df2], axis=1)
+
+ expected = df.fillna(15)
+ df.iloc[0, 0] = 15
+ tm.assert_frame_equal(df, expected)
+
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
| - [x] closes #15686
- [x] tests added / passed
- [x] passes ``git diff master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry
Unless I'm missing anything, all code for indexers (inside ``pandas/core/indexing.py``) resorts to one of the following for setting values:
- if we can work directly on ``BlockManager.setitem`` (simple case, e.g. no multiple dtypes involved), do so
- otherwise, rely on methods provided by the object (mainly ``__setitem__``), _which are label based_
This makes it impossible, in the "non-simple case", to index positionally in the correct way with duplicated labels (by the way, it's also plain ugly).
I can see three (four) possible solutions:
1. temporarily strip an object from (some of) its labels - what this PR does. Arguably the simplest approach (doesn't require changing objects/``BlockManager`` code), arguably not very elegant (although we could already expect some efficiency gains compared to current master, since this removes some equality checks on labels)
2. add methods to the different objects that do position-based setting. Not trivial as it sounds, I'm afraid - such methods should talk directly the the ``BlockManager``s, hence for instance taking care of dtype changes
3. seriously improving ``BlockManager.setitem`` so that it is able to take care of multiple dtypes. Probably the best approach (would allow to enormously simplify indexing code), and the one requiring most work
4. (completely rewrite all setting code in ``indexing.py`` currently relying on ``self.obj.__setitem__`` so it directly talks to ``BlockManager.setitem``)
Notice that ``BlockManager.setitem`` must be fixed in order to fix #12991, but the fix for that bug is significantly simpler than 3. (it is just a matter of shifting indices, not of working with different dtypes).
The current PR... works, and does clean a bit the ``_setter`` interface (i.e. concerning the difference between ``iloc`` and ``loc``), which might be a good thing even in case we later decide to drop the less elegant ``InfoCleaner``. My preferred approach would be to apply it, then start working on 3., then come back to the indexing code and simplify it by profiting of 3. But if you prefer to start directly with 2. or 3., I can have a try in that direction. I don't think I'm willing to try 4. because current indexing code is already too complicated. | https://api.github.com/repos/pandas-dev/pandas/pulls/15778 | 2017-03-22T10:22:51Z | 2017-08-01T22:56:27Z | null | 2017-08-07T22:26:15Z |
COMPAT: 32-bit skips | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index b1e6bd7520c69..e9122f7a17359 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -121,7 +121,6 @@ def test_reindex_base(self):
idx.get_indexer(idx, method='invalid')
def test_ndarray_compat_properties(self):
-
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 1739211982b10..4fbadfca06ede 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -1,3 +1,5 @@
+import pytest
+
import numpy as np
from numpy.random import randn
from datetime import timedelta
@@ -6,7 +8,7 @@
from pandas.util import testing as tm
from pandas import (PeriodIndex, period_range, notnull, DatetimeIndex, NaT,
Index, Period, Int64Index, Series, DataFrame, date_range,
- offsets)
+ offsets, compat)
from ..datetimelike import DatetimeLike
@@ -626,6 +628,11 @@ def test_shift_nat(self):
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
+ def test_ndarray_compat_properties(self):
+ if compat.is_platform_32bit():
+ pytest.skip("skipping on 32bit")
+ super(TestPeriodIndex, self).test_ndarray_compat_properties()
+
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 7a3cc3e2c3cd7..ce925f756edb7 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -648,7 +648,9 @@ def test_value_counts_uint64(self):
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
- tm.assert_series_equal(result, expected)
+ # 32-bit linux has a different ordering
+ if not compat.is_platform_32bit():
+ tm.assert_series_equal(result, expected)
class TestDuplicated(tm.TestCase):
| closes #14183
| https://api.github.com/repos/pandas-dev/pandas/pulls/15776 | 2017-03-22T00:33:54Z | 2017-03-22T11:53:46Z | 2017-03-22T11:53:46Z | 2017-03-22T11:54:43Z |
CLN: separate out groupby algos | diff --git a/pandas/_libs/algos.pxd b/pandas/_libs/algos.pxd
new file mode 100644
index 0000000000000..6d80e6f0073eb
--- /dev/null
+++ b/pandas/_libs/algos.pxd
@@ -0,0 +1,13 @@
+from util cimport numeric
+from numpy cimport float64_t, double_t
+
+cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k) nogil
+
+cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil:
+ cdef numeric t
+
+ # cython doesn't allow pointer dereference so use array syntax
+ t = a[0]
+ a[0] = b[0]
+ b[0] = t
+ return 0
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 7d3ce3280ec1e..897a60e0c2f21 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -96,22 +96,94 @@ class NegInfinity(object):
__ge__ = lambda self, other: self is other
-cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil except -1:
- cdef numeric t
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def is_lexsorted(list list_of_arrays):
+ cdef:
+ int i
+ Py_ssize_t n, nlevels
+ int64_t k, cur, pre
+ ndarray arr
+ bint result = True
+
+ nlevels = len(list_of_arrays)
+ n = len(list_of_arrays[0])
+
+ cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*))
+ for i in range(nlevels):
+ arr = list_of_arrays[i]
+ vecs[i] = <int64_t*> arr.data
+
+ # Assume uniqueness??
+ with nogil:
+ for i in range(n):
+ for k in range(nlevels):
+ cur = vecs[k][i]
+ pre = vecs[k][i -1]
+ if cur == pre:
+ continue
+ elif cur > pre:
+ break
+ else:
+ result = False
+ break
+ free(vecs)
+ return result
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def groupsort_indexer(ndarray[int64_t] index, Py_ssize_t ngroups):
+ """
+ compute a 1-d indexer that is an ordering of the passed index,
+ ordered by the groups. This is a reverse of the label
+ factorization process.
+
+ Parameters
+ ----------
+ index: int64 ndarray
+ mappings from group -> position
+ ngroups: int64
+ number of groups
+
+ return a tuple of (1-d indexer ordered by groups, group counts)
+ """
+
+ cdef:
+ Py_ssize_t i, loc, label, n
+ ndarray[int64_t] counts, where, result
+
+ counts = np.zeros(ngroups + 1, dtype=np.int64)
+ n = len(index)
+ result = np.zeros(n, dtype=np.int64)
+ where = np.zeros(ngroups + 1, dtype=np.int64)
+
+ with nogil:
+
+ # count group sizes, location 0 for NA
+ for i in range(n):
+ counts[index[i] + 1] += 1
- # cython doesn't allow pointer dereference so use array syntax
- t = a[0]
- a[0] = b[0]
- b[0] = t
- return 0
+ # mark the start of each contiguous group of like-indexed data
+ for i in range(1, ngroups + 1):
+ where[i] = where[i - 1] + counts[i - 1]
+
+ # this is our indexer
+ for i in range(n):
+ label = index[i] + 1
+ result[where[label]] = i
+ where[label] += 1
+
+ return result, counts
@cython.boundscheck(False)
@cython.wraparound(False)
-cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k):
+cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k) nogil:
cdef:
- Py_ssize_t i, j, l, m, n = a.size
+ Py_ssize_t i, j, l, m, n = a.shape[0]
numeric x
+
with nogil:
l = 0
m = n - 1
@@ -132,32 +204,6 @@ cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k):
if j < k: l = i
if k < i: m = j
- return a[k]
-
-
-cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n):
- cdef:
- Py_ssize_t i, j, l, m
- double_t x, t
-
- l = 0
- m = n -1
- while (l<m):
- x = a[k]
- i = l
- j = m
-
- while 1:
- while a[i] < x: i += 1
- while x < a[j]: j -= 1
- if i <= j:
- swap(&a[i], &a[j])
- i += 1; j -= 1
-
- if i > j: break
-
- if j < k: l = i
- if k < i: m = j
return a[k]
@@ -181,6 +227,8 @@ cpdef numeric median(numeric[:] arr):
# -------------- Min, Max subsequence
+@cython.boundscheck(False)
+@cython.wraparound(False)
def max_subseq(ndarray[double_t] arr):
cdef:
Py_ssize_t i=0, s=0, e=0, T, n
@@ -195,21 +243,24 @@ def max_subseq(ndarray[double_t] arr):
S = m
T = 0
- for i in range(1, n):
- # S = max { S + A[i], A[i] )
- if (S > 0):
- S = S + arr[i]
- else:
- S = arr[i]
- T = i
- if S > m:
- s = T
- e = i
- m = S
+ with nogil:
+ for i in range(1, n):
+ # S = max { S + A[i], A[i] )
+ if (S > 0):
+ S = S + arr[i]
+ else:
+ S = arr[i]
+ T = i
+ if S > m:
+ s = T
+ e = i
+ m = S
return (s, e, m)
+@cython.boundscheck(False)
+@cython.wraparound(False)
def min_subseq(ndarray[double_t] arr):
cdef:
Py_ssize_t s, e
@@ -225,9 +276,10 @@ def min_subseq(ndarray[double_t] arr):
@cython.boundscheck(False)
@cython.wraparound(False)
-def nancorr(ndarray[float64_t, ndim=2] mat, cov=False, minp=None):
+def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None):
cdef:
Py_ssize_t i, j, xi, yi, N, K
+ bint minpv
ndarray[float64_t, ndim=2] result
ndarray[uint8_t, ndim=2] mask
int64_t nobs = 0
@@ -236,46 +288,49 @@ def nancorr(ndarray[float64_t, ndim=2] mat, cov=False, minp=None):
N, K = (<object> mat).shape
if minp is None:
- minp = 1
+ minpv = 1
+ else:
+ minpv = <int>minp
result = np.empty((K, K), dtype=np.float64)
mask = np.isfinite(mat).view(np.uint8)
- for xi in range(K):
- for yi in range(xi + 1):
- nobs = sumxx = sumyy = sumx = sumy = 0
- for i in range(N):
- if mask[i, xi] and mask[i, yi]:
- vx = mat[i, xi]
- vy = mat[i, yi]
- nobs += 1
- sumx += vx
- sumy += vy
-
- if nobs < minp:
- result[xi, yi] = result[yi, xi] = np.NaN
- else:
- meanx = sumx / nobs
- meany = sumy / nobs
-
- # now the cov numerator
- sumx = 0
-
+ with nogil:
+ for xi in range(K):
+ for yi in range(xi + 1):
+ nobs = sumxx = sumyy = sumx = sumy = 0
for i in range(N):
if mask[i, xi] and mask[i, yi]:
- vx = mat[i, xi] - meanx
- vy = mat[i, yi] - meany
+ vx = mat[i, xi]
+ vy = mat[i, yi]
+ nobs += 1
+ sumx += vx
+ sumy += vy
+
+ if nobs < minpv:
+ result[xi, yi] = result[yi, xi] = NaN
+ else:
+ meanx = sumx / nobs
+ meany = sumy / nobs
- sumx += vx * vy
- sumxx += vx * vx
- sumyy += vy * vy
+ # now the cov numerator
+ sumx = 0
- divisor = (nobs - 1.0) if cov else sqrt(sumxx * sumyy)
+ for i in range(N):
+ if mask[i, xi] and mask[i, yi]:
+ vx = mat[i, xi] - meanx
+ vy = mat[i, yi] - meany
- if divisor != 0:
- result[xi, yi] = result[yi, xi] = sumx / divisor
- else:
- result[xi, yi] = result[yi, xi] = np.NaN
+ sumx += vx * vy
+ sumxx += vx * vx
+ sumyy += vy * vy
+
+ divisor = (nobs - 1.0) if cov else sqrt(sumxx * sumyy)
+
+ if divisor != 0:
+ result[xi, yi] = result[yi, xi] = sumx / divisor
+ else:
+ result[xi, yi] = result[yi, xi] = NaN
return result
@@ -308,7 +363,7 @@ def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
nobs += 1
if nobs < minp:
- result[xi, yi] = result[yi, xi] = np.NaN
+ result[xi, yi] = result[yi, xi] = NaN
else:
maskedx = np.empty(nobs, dtype=np.float64)
maskedy = np.empty(nobs, dtype=np.float64)
@@ -339,326 +394,11 @@ def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
if divisor != 0:
result[xi, yi] = result[yi, xi] = sumx / divisor
else:
- result[xi, yi] = result[yi, xi] = np.NaN
+ result[xi, yi] = result[yi, xi] = NaN
return result
-#----------------------------------------------------------------------
-# group operations
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def is_lexsorted(list list_of_arrays):
- cdef:
- int i
- Py_ssize_t n, nlevels
- int64_t k, cur, pre
- ndarray arr
-
- nlevels = len(list_of_arrays)
- n = len(list_of_arrays[0])
-
- cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*))
- for i from 0 <= i < nlevels:
- arr = list_of_arrays[i]
- vecs[i] = <int64_t*> arr.data
-
- # Assume uniqueness??
- for i from 1 <= i < n:
- for k from 0 <= k < nlevels:
- cur = vecs[k][i]
- pre = vecs[k][i -1]
- if cur == pre:
- continue
- elif cur > pre:
- break
- else:
- return False
- free(vecs)
- return True
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def groupsort_indexer(ndarray[int64_t] index, Py_ssize_t ngroups):
- """
- compute a 1-d indexer that is an ordering of the passed index,
- ordered by the groups. This is a reverse of the label
- factorization process.
-
- Parameters
- ----------
- index: int64 ndarray
- mappings from group -> position
- ngroups: int64
- number of groups
-
- return a tuple of (1-d indexer ordered by groups, group counts)
- """
-
- cdef:
- Py_ssize_t i, loc, label, n
- ndarray[int64_t] counts, where, result
-
- counts = np.zeros(ngroups + 1, dtype=np.int64)
- n = len(index)
- result = np.zeros(n, dtype=np.int64)
- where = np.zeros(ngroups + 1, dtype=np.int64)
-
- with nogil:
-
- # count group sizes, location 0 for NA
- for i from 0 <= i < n:
- counts[index[i] + 1] += 1
-
- # mark the start of each contiguous group of like-indexed data
- for i from 1 <= i < ngroups + 1:
- where[i] = where[i - 1] + counts[i - 1]
-
- # this is our indexer
- for i from 0 <= i < n:
- label = index[i] + 1
- result[where[label]] = i
- where[label] += 1
-
- return result, counts
-
-# TODO: aggregate multiple columns in single pass
-#----------------------------------------------------------------------
-# first, nth, last
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_nth_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] labels,
- int64_t rank):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab
- object val
- float64_t count
- ndarray[int64_t, ndim=2] nobs
- ndarray[object, ndim=2] resx
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty((<object> out).shape, dtype=object)
-
- N, K = (<object> values).shape
-
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- if nobs[lab, j] == rank:
- resx[lab, j] = val
-
- for i in range(len(counts)):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = <object> nan
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_nth_bin_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] bins, int64_t rank):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- object val
- float64_t count
- ndarray[object, ndim=2] resx
- ndarray[float64_t, ndim=2] nobs
-
- nobs = np.zeros((<object> out).shape, dtype=np.float64)
- resx = np.empty((<object> out).shape, dtype=object)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if nobs[b, j] == rank:
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = nan
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_last_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] labels):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, lab
- object val
- float64_t count
- ndarray[object, ndim=2] resx
- ndarray[int64_t, ndim=2] nobs
-
- nobs = np.zeros((<object> out).shape, dtype=np.int64)
- resx = np.empty((<object> out).shape, dtype=object)
-
- N, K = (<object> values).shape
-
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- resx[lab, j] = val
-
- for i in range(len(counts)):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = nan
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_last_bin_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] bins):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- object val
- float64_t count
- ndarray[object, ndim=2] resx
- ndarray[float64_t, ndim=2] nobs
-
- nobs = np.zeros((<object> out).shape, dtype=np.float64)
- resx = np.empty((<object> out).shape, dtype=object)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = nan
- else:
- out[i, j] = resx[i, j]
-
-cdef inline float64_t _median_linear(float64_t* a, int n):
- cdef int i, j, na_count = 0
- cdef float64_t result
- cdef float64_t* tmp
-
- if n == 0:
- return NaN
-
- # count NAs
- for i in range(n):
- if a[i] != a[i]:
- na_count += 1
-
- if na_count:
- if na_count == n:
- return NaN
-
- tmp = <float64_t*> malloc((n - na_count) * sizeof(float64_t))
-
- j = 0
- for i in range(n):
- if a[i] == a[i]:
- tmp[j] = a[i]
- j += 1
-
- a = tmp
- n -= na_count
-
- if n % 2:
- result = kth_smallest_c( a, n / 2, n)
- else:
- result = (kth_smallest_c(a, n / 2, n) +
- kth_smallest_c(a, n / 2 - 1, n)) / 2
-
- if na_count:
- free(a)
-
- return result
-
-
# generated from template
include "algos_common_helper.pxi"
-include "algos_groupby_helper.pxi"
include "algos_rank_helper.pxi"
include "algos_take_helper.pxi"
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
new file mode 100644
index 0000000000000..c6ff602cfef1c
--- /dev/null
+++ b/pandas/_libs/groupby.pyx
@@ -0,0 +1,291 @@
+# cython: profile=False
+
+from numpy cimport *
+cimport numpy as np
+import numpy as np
+
+cimport cython
+
+import_array()
+
+cimport util
+
+from numpy cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
+ uint32_t, uint64_t, float16_t, float32_t, float64_t)
+
+from libc.stdlib cimport malloc, free
+
+from util cimport numeric, get_nat
+from algos cimport swap
+from algos import take_2d_axis1_float64_float64, groupsort_indexer
+
+cdef int64_t iNaT = get_nat()
+
+cdef double NaN = <double> np.NaN
+cdef double nan = NaN
+
+
+# TODO: aggregate multiple columns in single pass
+#----------------------------------------------------------------------
+# first, nth, last
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_nth_object(ndarray[object, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[object, ndim=2] values,
+ ndarray[int64_t] labels,
+ int64_t rank):
+ """
+ Only aggregates on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, lab
+ object val
+ float64_t count
+ ndarray[int64_t, ndim=2] nobs
+ ndarray[object, ndim=2] resx
+
+ nobs = np.zeros((<object> out).shape, dtype=np.int64)
+ resx = np.empty((<object> out).shape, dtype=object)
+
+ N, K = (<object> values).shape
+
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if val == val:
+ nobs[lab, j] += 1
+ if nobs[lab, j] == rank:
+ resx[lab, j] = val
+
+ for i in range(len(counts)):
+ for j in range(K):
+ if nobs[i, j] == 0:
+ out[i, j] = <object> nan
+ else:
+ out[i, j] = resx[i, j]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_nth_bin_object(ndarray[object, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[object, ndim=2] values,
+ ndarray[int64_t] bins, int64_t rank):
+ """
+ Only aggregates on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, ngroups, b
+ object val
+ float64_t count
+ ndarray[object, ndim=2] resx
+ ndarray[float64_t, ndim=2] nobs
+
+ nobs = np.zeros((<object> out).shape, dtype=np.float64)
+ resx = np.empty((<object> out).shape, dtype=object)
+
+ if len(bins) == 0:
+ return
+ if bins[len(bins) - 1] == len(values):
+ ngroups = len(bins)
+ else:
+ ngroups = len(bins) + 1
+
+ N, K = (<object> values).shape
+
+ b = 0
+ for i in range(N):
+ while b < ngroups - 1 and i >= bins[b]:
+ b += 1
+
+ counts[b] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if val == val:
+ nobs[b, j] += 1
+ if nobs[b, j] == rank:
+ resx[b, j] = val
+
+ for i in range(ngroups):
+ for j in range(K):
+ if nobs[i, j] == 0:
+ out[i, j] = nan
+ else:
+ out[i, j] = resx[i, j]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_last_object(ndarray[object, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[object, ndim=2] values,
+ ndarray[int64_t] labels):
+ """
+ Only aggregates on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, lab
+ object val
+ float64_t count
+ ndarray[object, ndim=2] resx
+ ndarray[int64_t, ndim=2] nobs
+
+ nobs = np.zeros((<object> out).shape, dtype=np.int64)
+ resx = np.empty((<object> out).shape, dtype=object)
+
+ N, K = (<object> values).shape
+
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if val == val:
+ nobs[lab, j] += 1
+ resx[lab, j] = val
+
+ for i in range(len(counts)):
+ for j in range(K):
+ if nobs[i, j] == 0:
+ out[i, j] = nan
+ else:
+ out[i, j] = resx[i, j]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_last_bin_object(ndarray[object, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[object, ndim=2] values,
+ ndarray[int64_t] bins):
+ """
+ Only aggregates on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, ngroups, b
+ object val
+ float64_t count
+ ndarray[object, ndim=2] resx
+ ndarray[float64_t, ndim=2] nobs
+
+ nobs = np.zeros((<object> out).shape, dtype=np.float64)
+ resx = np.empty((<object> out).shape, dtype=object)
+
+ if len(bins) == 0:
+ return
+ if bins[len(bins) - 1] == len(values):
+ ngroups = len(bins)
+ else:
+ ngroups = len(bins) + 1
+
+ N, K = (<object> values).shape
+
+ b = 0
+ for i in range(N):
+ while b < ngroups - 1 and i >= bins[b]:
+ b += 1
+
+ counts[b] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if val == val:
+ nobs[b, j] += 1
+ resx[b, j] = val
+
+ for i in range(ngroups):
+ for j in range(K):
+ if nobs[i, j] == 0:
+ out[i, j] = nan
+ else:
+ out[i, j] = resx[i, j]
+
+
+cdef inline float64_t _median_linear(float64_t* a, int n) nogil:
+ cdef int i, j, na_count = 0
+ cdef float64_t result
+ cdef float64_t* tmp
+
+ if n == 0:
+ return NaN
+
+ # count NAs
+ for i in range(n):
+ if a[i] != a[i]:
+ na_count += 1
+
+ if na_count:
+ if na_count == n:
+ return NaN
+
+ tmp = <float64_t*> malloc((n - na_count) * sizeof(float64_t))
+
+ j = 0
+ for i in range(n):
+ if a[i] == a[i]:
+ tmp[j] = a[i]
+ j += 1
+
+ a = tmp
+ n -= na_count
+
+ if n % 2:
+ result = kth_smallest_c( a, n / 2, n)
+ else:
+ result = (kth_smallest_c(a, n / 2, n) +
+ kth_smallest_c(a, n / 2 - 1, n)) / 2
+
+ if na_count:
+ free(a)
+
+ return result
+
+
+cdef inline float64_t kth_smallest_c(float64_t* a,
+ Py_ssize_t k,
+ Py_ssize_t n) nogil:
+ cdef:
+ Py_ssize_t i, j, l, m
+ double_t x, t
+
+ l = 0
+ m = n -1
+ while (l<m):
+ x = a[k]
+ i = l
+ j = m
+
+ while 1:
+ while a[i] < x: i += 1
+ while x < a[j]: j -= 1
+ if i <= j:
+ swap(&a[i], &a[j])
+ i += 1; j -= 1
+
+ if i > j: break
+
+ if j < k: l = i
+ if k < i: m = j
+ return a[k]
+
+
+# generated from template
+include "groupby_helper.pxi"
diff --git a/pandas/_libs/algos_groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
similarity index 98%
rename from pandas/_libs/algos_groupby_helper.pxi.in
rename to pandas/_libs/groupby_helper.pxi.in
index e2c263f49b110..d38b677df321c 100644
--- a/pandas/_libs/algos_groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -681,6 +681,8 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
#----------------------------------------------------------------------
+@cython.boundscheck(False)
+@cython.wraparound(False)
def group_median_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float64_t, ndim=2] values,
@@ -704,13 +706,15 @@ def group_median_float64(ndarray[float64_t, ndim=2] out,
take_2d_axis1_float64_float64(values.T, indexer, out=data)
- for i in range(K):
- # exclude NA group
- ptr += _counts[0]
- for j in range(ngroups):
- size = _counts[j + 1]
- out[j, i] = _median_linear(ptr, size)
- ptr += size
+ with nogil:
+
+ for i in range(K):
+ # exclude NA group
+ ptr += _counts[0]
+ for j in range(ngroups):
+ size = _counts[j + 1]
+ out[j, i] = _median_linear(ptr, size)
+ ptr += size
@cython.boundscheck(False)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 4095a14aa5970..308fe18116134 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -60,7 +60,7 @@
import pandas.core.common as com
from pandas.core.config import option_context
-from pandas._libs import lib, algos as libalgos, Timestamp, NaT, iNaT
+from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT
from pandas._libs.lib import count_level_2d
_doc_template = """
@@ -1474,7 +1474,7 @@ def shift(self, periods=1, freq=None, axis=0):
# filled in by Cython
indexer = np.zeros_like(labels)
- libalgos.group_shift_indexer(indexer, labels, ngroups, periods)
+ libgroupby.group_shift_indexer(indexer, labels, ngroups, periods)
output = {}
for name, obj in self._iterate_slices():
@@ -1815,13 +1815,13 @@ def _get_cython_function(self, kind, how, values, is_numeric):
def get_func(fname):
# see if there is a fused-type version of function
# only valid for numeric
- f = getattr(libalgos, fname, None)
+ f = getattr(libgroupby, fname, None)
if f is not None and is_numeric:
return f
# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
- f = getattr(libalgos, "%s_%s" % (fname, dtype_str), None)
+ f = getattr(libgroupby, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
@@ -3118,7 +3118,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
out = _ensure_int64(out)
return Series(out, index=mi, name=self.name)
- # for compat. with libalgos.value_counts need to ensure every
+ # for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype='bool')
for lab in labels[:-1]:
diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py
index 77c5bde332cff..02c7933e020ea 100644
--- a/pandas/tests/groupby/test_bin_groupby.py
+++ b/pandas/tests/groupby/test_bin_groupby.py
@@ -7,8 +7,7 @@
from pandas import Index, isnull
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
-import pandas._libs.lib as lib
-import pandas._libs.algos as algos
+from pandas._libs import lib, groupby
def test_series_grouper():
@@ -92,7 +91,7 @@ def _check(dtype):
labels = _ensure_int64(np.repeat(np.arange(3),
np.diff(np.r_[0, bins])))
- func = getattr(algos, 'group_ohlc_%s' % dtype)
+ func = getattr(groupby, 'group_ohlc_%s' % dtype)
func(out, counts, obj[:, None], labels)
def _ohlc(group):
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 4acf9dd4755f4..3b85fadda6cfe 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -6,7 +6,7 @@
from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range
from pandas.types.common import _ensure_platform_int, is_timedelta64_dtype
from pandas.compat import StringIO
-from pandas._libs import algos
+from pandas._libs import groupby
from .common import MixIn, assert_fp_equal
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -418,8 +418,8 @@ def test_cython_group_transform_algos(self):
dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
np.uint64, np.float32, np.float64]
- ops = [(algos.group_cumprod_float64, np.cumproduct, [np.float64]),
- (algos.group_cumsum, np.cumsum, dtypes)]
+ ops = [(groupby.group_cumprod_float64, np.cumproduct, [np.float64]),
+ (groupby.group_cumsum, np.cumsum, dtypes)]
is_datetimelike = False
for pd_op, np_op, dtypes in ops:
@@ -437,13 +437,13 @@ def test_cython_group_transform_algos(self):
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
- algos.group_cumprod_float64(actual, data, labels, is_datetimelike)
+ groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
self.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
- algos.group_cumsum(actual, data, labels, is_datetimelike)
+ groupby.group_cumsum(actual, data, labels, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
self.assert_numpy_array_equal(actual[:, 0], expected)
@@ -451,8 +451,8 @@ def test_cython_group_transform_algos(self):
is_datetimelike = True
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
- algos.group_cumsum(actual, data.view('int64'), labels,
- is_datetimelike)
+ groupby.group_cumsum(actual, data.view('int64'), labels,
+ is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 7a3cc3e2c3cd7..e8a9397a11769 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -10,7 +10,8 @@
import pandas as pd
from pandas import compat
-from pandas._libs import algos as libalgos, hashtable
+from pandas._libs import (groupby as libgroupby, algos as libalgos,
+ hashtable)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange
import pandas.core.algorithms as algos
@@ -889,7 +890,7 @@ def test_group_var_constant(self):
class TestGroupVarFloat64(tm.TestCase, GroupVarTestMixin):
__test__ = True
- algo = algos.algos.group_var_float64
+ algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
@@ -912,7 +913,7 @@ def test_group_var_large_inputs(self):
class TestGroupVarFloat32(tm.TestCase, GroupVarTestMixin):
__test__ = True
- algo = algos.algos.group_var_float32
+ algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
diff --git a/setup.py b/setup.py
index 3e0a6b41152dc..8e690f05b818c 100755
--- a/setup.py
+++ b/setup.py
@@ -110,8 +110,9 @@ def is_platform_mac():
_pxi_dep_template = {
- 'algos': ['_libs/algos_common_helper.pxi.in', '_libs/algos_groupby_helper.pxi.in',
+ 'algos': ['_libs/algos_common_helper.pxi.in',
'_libs/algos_take_helper.pxi.in', '_libs/algos_rank_helper.pxi.in'],
+ 'groupby': ['_libs/groupby_helper.pxi.in'],
'join': ['_libs/join_helper.pxi.in', '_libs/join_func_helper.pxi.in'],
'reshape': ['_libs/reshape_helper.pxi.in'],
'hashtable': ['_libs/hashtable_class_helper.pxi.in',
@@ -496,8 +497,11 @@ def pxd(name):
'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
'depends': _pxi_dep['index']},
'_libs.algos': {'pyxfile': '_libs/algos',
- 'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
+ 'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'],
'depends': _pxi_dep['algos']},
+ '_libs.groupby': {'pyxfile': '_libs/groupby',
+ 'pxdfiles': ['_libs/src/util', '_libs/algos'],
+ 'depends': _pxi_dep['groupby']},
'_libs.join': {'pyxfile': '_libs/join',
'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
'depends': _pxi_dep['join']},
| - separate out groupby algorithms to separate lib
- release GIL on median
- release GIL on is_lexsorted / fix memory leak
- release GIL on nancorr | https://api.github.com/repos/pandas-dev/pandas/pulls/15775 | 2017-03-21T22:34:24Z | 2017-03-22T12:07:06Z | null | 2017-03-22T12:08:16Z |
BUG: Check that values for "nrows" and "chunksize" are valid | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index eeb568c2e2558..5ac7624856040 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -815,6 +815,7 @@ Bug Fixes
- Bug in ``pd.read_fwf`` where the skiprows parameter was not being respected during column width inference (:issue:`11256`)
- Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`)
- Bug in ``pd.read_csv()`` in which a file containing a row with many columns followed by rows with fewer columns would cause a crash (:issue:`14125`)
+- Added checks in ``pd.read_csv()`` ensuring that values for ``nrows`` and ``chunksize`` are valid (:issue:`15767`)
- Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. (:issue:`15143`)
- Bug in ``.groupby(..).resample()`` when passed the ``on=`` kwarg. (:issue:`15021`)
- Bug in using ``__deepcopy__`` on empty NDFrame objects (:issue:`15370`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 18343670fb39e..a6564a643058d 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -344,24 +344,34 @@
""" % (_parser_params % (_fwf_widths, ''))
-def _validate_nrows(nrows):
+def _validate_integer(name, val, min_val=0):
"""
- Checks whether the 'nrows' parameter for parsing is either
+ Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
+
+ Parameters
+ ----------
+ name : string
+ Parameter name (used for error reporting)
+ val : int or float
+ The value to check
+ min_val : int
+ Minimum allowed value (val < min_val will result in a ValueError)
"""
- msg = "'nrows' must be an integer"
+ msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
+ min_val=min_val)
- if nrows is not None:
- if is_float(nrows):
- if int(nrows) != nrows:
+ if val is not None:
+ if is_float(val):
+ if int(val) != val:
raise ValueError(msg)
- nrows = int(nrows)
- elif not is_integer(nrows):
+ val = int(val)
+ elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
- return nrows
+ return val
def _read(filepath_or_buffer, kwds):
@@ -383,8 +393,8 @@ def _read(filepath_or_buffer, kwds):
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
- chunksize = kwds.get('chunksize', None)
- nrows = _validate_nrows(kwds.get('nrows', None))
+ chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
+ nrows = _validate_integer('nrows', kwds.get('nrows', None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 24d15dcb96fe7..2c8bca490f274 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -384,7 +384,7 @@ def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
- msg = "must be an integer"
+ msg = r"'nrows' must be an integer >=0"
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
@@ -392,6 +392,9 @@ def test_read_nrows(self):
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(self.data1), nrows=-1)
+
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
@@ -402,6 +405,18 @@ def test_read_chunksize(self):
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
+ # with invalid chunksize value:
+ msg = r"'chunksize' must be an integer >=1"
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(self.data1), chunksize=1.3)
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(self.data1), chunksize='foo')
+
+ with tm.assertRaisesRegexp(ValueError, msg):
+ self.read_csv(StringIO(self.data1), chunksize=0)
+
def test_read_chunksize_and_nrows(self):
# gh-15755
| - [x] closes #15767
- [x] tests added / passed
- [x] passes ``git diff master --name-only -- '*.py' | flake8 --diff``
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/15774 | 2017-03-21T22:29:22Z | 2017-03-22T08:03:33Z | 2017-03-22T08:03:32Z | 2017-03-22T08:33:23Z |
CLN: relocate lib.ismember* to hashtable space | diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index fa373905ef08a..0608af8f8504b 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -11,14 +11,14 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
{{py:
# dtype, ttype
-dtypes = [('float64', 'float64'),
- ('uint64', 'uint64'),
- ('object', 'pymap'),
- ('int64', 'int64')]
+dtypes = [('float64', 'float64', 'float64_t'),
+ ('uint64', 'uint64', 'uint64_t'),
+ ('object', 'pymap', 'object'),
+ ('int64', 'int64', 'int64_t')]
}}
-{{for dtype, ttype in dtypes}}
+{{for dtype, ttype, scalar in dtypes}}
@cython.wraparound(False)
@@ -34,9 +34,7 @@ cdef build_count_table_{{dtype}}({{dtype}}_t[:] values,
khiter_t k
Py_ssize_t i, n = len(values)
- {{if dtype != 'object'}}
- {{dtype}}_t val
- {{endif}}
+ {{scalar}} val
int ret = 0
@@ -79,7 +77,7 @@ cdef build_count_table_{{dtype}}({{dtype}}_t[:] values,
{{if dtype == 'object'}}
cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna):
{{else}}
-cpdef value_count_{{dtype}}({{dtype}}_t[:] values, bint dropna):
+cpdef value_count_{{dtype}}({{scalar}}[:] values, bint dropna):
{{endif}}
cdef:
Py_ssize_t i=0
@@ -130,12 +128,11 @@ cpdef value_count_{{dtype}}({{dtype}}_t[:] values, bint dropna):
@cython.boundscheck(False)
{{if dtype == 'object'}}
-
def duplicated_{{dtype}}(ndarray[{{dtype}}] values, object keep='first'):
{{else}}
-def duplicated_{{dtype}}({{dtype}}_t[:] values, object keep='first'):
+def duplicated_{{dtype}}({{scalar}}[:] values, object keep='first'):
{{endif}}
cdef:
int ret = 0
@@ -203,8 +200,87 @@ def duplicated_{{dtype}}({{dtype}}_t[:] values, object keep='first'):
kh_destroy_{{ttype}}(table)
return out
+
+#----------------------------------------------------------------------
+# Membership
+#----------------------------------------------------------------------
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+{{if dtype == 'object'}}
+
+def ismember_{{dtype}}(ndarray[{{scalar}}] arr, ndarray[{{scalar}}] values, bint hasnans=0):
+{{else}}
+
+def ismember_{{dtype}}({{scalar}}[:] arr, {{scalar}}[:] values, bint hasnans=0):
+{{endif}}
+
+ """
+ Return boolean of values in arr on an
+ element by-element basis
+
+ Parameters
+ ----------
+ arr : {{dtype}} ndarray
+ values : {{dtype}} ndarray
+ hasnans : bint, optional
+
+ Returns
+ -------
+ boolean ndarry len of (arr)
+ """
+ cdef:
+ Py_ssize_t i, n, k
+ int ret = 0
+ ndarray[uint8_t] result
+ {{scalar}} val
+ kh_{{ttype}}_t * table = kh_init_{{ttype}}()
+
+
+ # construct the table
+ n = len(values)
+ kh_resize_{{ttype}}(table, min(n, len(values)))
+
+ {{if dtype == 'object'}}
+ for i in range(n):
+ kh_put_{{ttype}}(table, <PyObject*> values[i], &ret)
+ {{else}}
+ with nogil:
+ for i in range(n):
+ kh_put_{{ttype}}(table, values[i], &ret)
+ {{endif}}
+
+ # test membership
+ n = len(arr)
+ result = np.empty(n, dtype=np.uint8)
+
+ {{if dtype == 'object'}}
+ for i in range(n):
+ val = arr[i]
+ k = kh_get_{{ttype}}(table, <PyObject*> val)
+ if k != table.n_buckets:
+ result[i] = 1
+ else:
+ result[i] = hasnans and val != val
+ {{else}}
+ with nogil:
+ for i in range(n):
+ val = arr[i]
+ k = kh_get_{{ttype}}(table, val)
+ if k != table.n_buckets:
+ result[i] = 1
+ else:
+ result[i] = hasnans and val != val
+ {{endif}}
+
+ kh_destroy_{{ttype}}(table)
+ return result.view(np.bool_)
+
{{endfor}}
+
+
#----------------------------------------------------------------------
# Mode Computations
#----------------------------------------------------------------------
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index b4724bc3dd59b..f78040e5a52f2 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -13,6 +13,7 @@ cdef extern from "numpy/arrayobject.h":
cdef enum NPY_TYPES:
NPY_intp "NPY_INTP"
+from libc.stdlib cimport malloc, free
from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem,
PyDict_Contains, PyDict_Keys,
@@ -111,77 +112,6 @@ cpdef map_indices_list(list index):
return result
-from libc.stdlib cimport malloc, free
-
-
-def ismember_nans(float64_t[:] arr, set values, bint hasnans):
- cdef:
- Py_ssize_t i, n
- ndarray[uint8_t] result
- float64_t val
-
- n = len(arr)
- result = np.empty(n, dtype=np.uint8)
- for i in range(n):
- val = arr[i]
- result[i] = val in values or hasnans and isnan(val)
-
- return result.view(np.bool_)
-
-
-def ismember(ndarray arr, set values):
- """
- Checks whether
-
- Parameters
- ----------
- arr : ndarray
- values : set
-
- Returns
- -------
- ismember : ndarray (boolean dtype)
- """
- cdef:
- Py_ssize_t i, n
- ndarray[uint8_t] result
- object val
-
- n = len(arr)
- result = np.empty(n, dtype=np.uint8)
- for i in range(n):
- val = util.get_value_at(arr, i)
- result[i] = val in values
-
- return result.view(np.bool_)
-
-
-def ismember_int64(ndarray[int64_t] arr, set values):
- """
- Checks whether
-
- Parameters
- ----------
- arr : ndarray of int64
- values : set
-
- Returns
- -------
- ismember : ndarray (boolean dtype)
- """
- cdef:
- Py_ssize_t i, n
- ndarray[uint8_t] result
- int64_t v
-
- n = len(arr)
- result = np.empty(n, dtype=np.uint8)
- for i in range(n):
- result[i] = arr[i] in values
-
- return result.view(np.bool_)
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def memory_usage_of_objects(ndarray[object, ndim=1] arr):
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 00a3264e6c74a..9a8d0a779105e 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -12,12 +12,12 @@
from pandas.types.common import (is_unsigned_integer_dtype,
is_signed_integer_dtype,
is_integer_dtype,
- is_int64_dtype,
is_categorical_dtype,
is_extension_type,
is_datetimetz,
is_period_dtype,
is_period_arraylike,
+ is_numeric_dtype,
is_float_dtype,
is_bool_dtype,
needs_i8_conversion,
@@ -197,19 +197,37 @@ def isin(comps, values):
except TypeError:
# object array conversion will fail
pass
- else:
+ elif is_numeric_dtype(comps):
comps = np.asarray(comps)
values = np.asarray(values)
+ else:
+ comps = np.asarray(comps).astype(object)
+ values = np.asarray(values).astype(object)
# GH11232
# work-around for numpy < 1.8 and comparisions on py3
# faster for larger cases to use np.in1d
+ f = lambda x, y: htable.ismember_object(x, values)
if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
- f = lambda x, y: np.in1d(x, np.asarray(list(y)))
- elif is_int64_dtype(comps):
- f = lambda x, y: lib.ismember_int64(x, set(y))
- else:
- f = lambda x, y: lib.ismember(x, set(values))
+ f = lambda x, y: np.in1d(x, y)
+ elif is_integer_dtype(comps):
+ try:
+ values = values.astype('int64', copy=False)
+ comps = comps.astype('int64', copy=False)
+ f = lambda x, y: htable.ismember_int64(x, y)
+ except (TypeError, ValueError):
+ values = values.astype(object)
+ comps = comps.astype(object)
+
+ elif is_float_dtype(comps):
+ try:
+ values = values.astype('float64', copy=False)
+ comps = comps.astype('float64', copy=False)
+ checknull = isnull(values).any()
+ f = lambda x, y: htable.ismember_float64(x, y, checknull)
+ except (TypeError, ValueError):
+ values = values.astype(object)
+ comps = comps.astype(object)
return f(comps, values)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 732d88b47ae2a..b49aa926d1923 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5358,8 +5358,8 @@ def isin(self, values):
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
- lib.ismember(self.values.ravel(),
- set(values)).reshape(self.shape), self.index,
+ algorithms.isin(self.values.ravel(),
+ values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py
index 978492131ca89..e6ae0605d4758 100644
--- a/pandas/indexes/multi.py
+++ b/pandas/indexes/multi.py
@@ -1392,7 +1392,7 @@ def _drop_from_level(self, labels, level):
index = self.levels[i]
values = index.get_indexer(labels)
- mask = ~lib.ismember(self.labels[i], set(values))
+ mask = ~algos.isin(self.labels[i], values)
return self[mask]
@@ -2463,7 +2463,8 @@ def _wrap_joined_index(self, joined, other):
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
- return lib.ismember(np.array(self), set(values))
+ return algos.isin(self.values,
+ MultiIndex.from_tuples(values).values)
else:
num = self._get_level_number(level)
levs = self.levels[num]
diff --git a/pandas/indexes/numeric.py b/pandas/indexes/numeric.py
index 2f897c81975c2..31258c785d9e8 100644
--- a/pandas/indexes/numeric.py
+++ b/pandas/indexes/numeric.py
@@ -1,13 +1,13 @@
import numpy as np
-from pandas._libs import (lib, index as libindex,
+from pandas._libs import (index as libindex,
algos as libalgos, join as libjoin)
from pandas.types.common import (is_dtype_equal, pandas_dtype,
is_float_dtype, is_object_dtype,
is_integer_dtype, is_scalar)
-from pandas.types.missing import isnull
from pandas.core.common import _asarray_tuplesafe, _values_from_object
from pandas import compat
+from pandas.core import algorithms
from pandas.indexes.base import Index, InvalidIndexError, _index_shared_docs
from pandas.util.decorators import Appender, cache_readonly
import pandas.indexes.base as ibase
@@ -379,11 +379,9 @@ def is_unique(self):
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
- value_set = set(values)
if level is not None:
self._validate_index_level(level)
- return lib.ismember_nans(np.array(self), value_set,
- isnull(list(value_set)).any())
+ return algorithms.isin(np.array(self), values)
Float64Index._add_numeric_methods()
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 18343670fb39e..90d72c0bceeb7 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -26,6 +26,7 @@
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.categorical import Categorical
+from pandas.core import algorithms
from pandas.core.common import AbstractMethodError
from pandas.io.date_converters import generic_parser
from pandas.io.common import (get_filepath_or_buffer, _validate_header_arg,
@@ -1388,7 +1389,8 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
try:
values = lib.map_infer(values, conv_f)
except ValueError:
- mask = lib.ismember(values, na_values).view(np.uint8)
+ mask = algorithms.isin(
+ values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
@@ -1436,7 +1438,7 @@ def _infer_types(self, values, na_values, try_num_bool=True):
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
- mask = lib.ismember(values, na_values)
+ mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7199a38bb7a80..c4dc10d8174cc 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1363,14 +1363,17 @@ def test_isin_nan(self):
np.array([False, False]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),
np.array([False, False]))
+
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]),
np.array([False, True]))
+
+ # we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([pd.NaT]),
- np.array([False, True]))
+ np.array([False, False]))
def test_isin_level_kwarg(self):
def check_idx(idx):
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 093331e861fa7..5dc9746c6d6f9 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -13,6 +13,7 @@
from pandas.types.generic import (ABCIndexClass, ABCSeries,
ABCDataFrame)
from pandas.types.missing import notnull
+from pandas.core import algorithms
import pandas.compat as compat
@@ -577,7 +578,7 @@ def calc_with_mask(carg, mask):
# string with NaN-like
try:
- mask = ~lib.ismember(arg, tslib._nat_strings)
+ mask = ~algorithms.isin(arg, list(tslib._nat_strings))
return calc_with_mask(arg, mask)
except:
pass
| - fixes ``.isin`` on 32-bit (hopefully)
- perf about 30% better (but this is a small number anyhow) & releases GIL
- more generic framework | https://api.github.com/repos/pandas-dev/pandas/pulls/15773 | 2017-03-21T20:46:35Z | 2017-03-21T23:20:44Z | null | 2017-03-21T23:23:16Z |
skipna added to groupby numeric ops | diff --git a/pandas/_libs/algos_groupby_helper.pxi.in b/pandas/_libs/algos_groupby_helper.pxi.in
index e2c263f49b110..dd226a8c86e58 100644
--- a/pandas/_libs/algos_groupby_helper.pxi.in
+++ b/pandas/_libs/algos_groupby_helper.pxi.in
@@ -9,26 +9,27 @@ cdef extern from "numpy/npy_math.h":
_int64_max = np.iinfo(np.int64).max
#----------------------------------------------------------------------
-# group_add, group_prod, group_var, group_mean, group_ohlc
+# group_add
#----------------------------------------------------------------------
{{py:
-# name, c_type, dest_type, dest_dtype
-dtypes = [('float64', 'float64_t', 'float64_t', 'np.float64'),
- ('float32', 'float32_t', 'float32_t', 'np.float32')]
+# name, c_type, dest_type, dest_dtype, nan_val
+dtypes = [('float64', 'float64_t', 'float64_t', 'np.float64', 'NAN'),
+ ('float32', 'float32_t', 'float32_t', 'np.float32', 'NAN'),
+ ('int64', 'int64_t', 'int64_t', 'np.int64', 'iNaT')]
def get_dispatch(dtypes):
- for name, c_type, dest_type, dest_dtype in dtypes:
+ for name, c_type, dest_type, dest_dtype, nan_val in dtypes:
dest_type2 = dest_type
dest_type = dest_type.replace('_t', '')
- yield name, c_type, dest_type, dest_type2, dest_dtype
+ yield name, c_type, dest_type, dest_type2, dest_dtype, nan_val
}}
-{{for name, c_type, dest_type, dest_type2, dest_dtype in get_dispatch(dtypes)}}
+{{for name, c_type, dest_type, dest_type2, dest_dtype, nan_val in get_dispatch(dtypes)}}
@cython.wraparound(False)
@@ -36,7 +37,8 @@ def get_dispatch(dtypes):
def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{c_type}}, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint checknull):
"""
Only aggregates on axis=0
"""
@@ -54,25 +56,31 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
N, K = (<object> values).shape
with nogil:
-
if K > 1:
-
for i in range(N):
lab = labels[i]
if lab < 0:
continue
-
counts[lab] += 1
for j in range(K):
val = values[i, j]
- # not nan
- if val == val:
+ # val = nan
+ {{if name == 'int64'}}
+ if val == {{nan_val}}:
+ sumx[lab, j] = {{nan_val}}
+ else:
+ {{else}}
+ if val != val:
+ if checknull:
+ continue
+ else:
+ sumx[lab, j] = NAN
+ else:
+ {{endif}}
nobs[lab, j] += 1
sumx[lab, j] += val
-
else:
-
for i in range(N):
lab = labels[i]
if lab < 0:
@@ -81,25 +89,65 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
counts[lab] += 1
val = values[i, 0]
- # not nan
- if val == val:
+ # val = nan
+ {{if name == 'int64'}}
+ if val == {{nan_val}}:
+ sumx[lab, 0] = {{nan_val}}
+ else:
+ {{else}}
+ if val != val:
+ if checknull:
+ continue
+ else:
+ sumx[lab, 0] = NAN
+ else:
+ {{endif}}
nobs[lab, 0] += 1
sumx[lab, 0] += val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
+ {{if name == 'int64'}}
+ out[i, j] = {{nan_val}}
+ {{else}}
out[i, j] = NAN
+ {{endif}}
else:
out[i, j] = sumx[i, j]
+{{endfor}}
+
+#----------------------------------------------------------------------
+# group_prod, group_var, group_mean, group_ohlc
+#----------------------------------------------------------------------
+
+{{py:
+
+# name, c_type, dest_type, dest_dtype
+dtypes = [('float64', 'float64_t', 'float64_t', 'np.float64'),
+ ('float32', 'float32_t', 'float32_t', 'np.float32')]
+
+def get_dispatch(dtypes):
+
+ for name, c_type, dest_type, dest_dtype in dtypes:
+
+ dest_type2 = dest_type
+ dest_type = dest_type.replace('_t', '')
+
+ yield name, c_type, dest_type, dest_type2, dest_dtype
+}}
+
+{{for name, c_type, dest_type, dest_type2, dest_dtype in get_dispatch(dtypes)}}
+
@cython.wraparound(False)
@cython.boundscheck(False)
def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{c_type}}, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint skipna):
"""
Only aggregates on axis=0
"""
@@ -117,38 +165,63 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
N, K = (<object> values).shape
with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
+ if skipna == False:
+ if K > 1:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
nobs[lab, j] += 1
prodx[lab, j] *= val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
+ else:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- # not nan
- if val == val:
+ counts[lab] += 1
+ val = values[i, 0]
nobs[lab, 0] += 1
prodx[lab, 0] *= val
+ else:
+ if K > 1:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+ #not nan
+ if val == val:
+ nobs[lab, j] += 1
+ prodx[lab, j] *= val
+ else:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ val = values[i, 0]
+ #not nan
+ if val == val:
+ nobs[lab, 0] += 1
+ prodx[lab, 0] *= val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
+ {{if name == 'int64'}}
+ out[i, j] = {{nan_val}}
+ {{else}}
out[i, j] = NAN
+ {{endif}}
else:
out[i, j] = prodx[i, j]
@@ -159,7 +232,8 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint skipna):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
{{dest_type2}} val, ct, oldmean
@@ -176,28 +250,46 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
out[:, :] = 0.0
with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
+ if skipna == False:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- for j in range(K):
- val = values[i, j]
+ counts[lab] += 1
- # not nan
- if val == val:
+ for j in range(K):
+ val = values[i, j]
nobs[lab, j] += 1
oldmean = mean[lab, j]
mean[lab, j] += (val - oldmean) / nobs[lab, j]
out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
+ else:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+
+ for j in range(K):
+ val = values[i, j]
+ #not nan
+ if val == val:
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
for i in range(ncounts):
for j in range(K):
ct = nobs[i, j]
if ct < 2:
+ {{if name == 'int64'}}
+ out[i, j] = {{nan_val}}
+ {{else}}
out[i, j] = NAN
+ {{endif}}
else:
out[i, j] /= (ct - 1)
# add passing bin edges, instead of labels
@@ -208,7 +300,8 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint skipna):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
{{dest_type2}} val, count
@@ -223,39 +316,66 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
N, K = (<object> values).shape
with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
- # not nan
- if val == val:
+ if skipna == False:
+ if K > 1:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
nobs[lab, j] += 1
sumx[lab, j] += val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
+ else:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[lab] += 1
- val = values[i, 0]
- # not nan
- if val == val:
+ counts[lab] += 1
+ val = values[i, 0]
nobs[lab, 0] += 1
sumx[lab, 0] += val
+ else:
+ if K > 1:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+ #not nan
+ if val == val:
+ nobs[lab, j] += 1
+ sumx[lab, j] += val
+ else:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ val = values[i, 0]
+ #not nan
+ if val == val:
+ nobs[lab, 0] += 1
+ sumx[lab, 0] += val
for i in range(ncounts):
for j in range(K):
count = nobs[i, j]
if nobs[i, j] == 0:
+ {{if name == 'int64'}}
+ out[i, j] = {{nan_val}}
+ {{else}}
out[i, j] = NAN
+ {{endif}}
else:
- out[i, j] = sumx[i, j] / count
+ out[i, j] = sumx[i,j] / count
@cython.wraparound(False)
@@ -263,7 +383,8 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint skipna):
"""
Only aggregates on axis=0
"""
@@ -455,19 +576,21 @@ def get_dispatch(dtypes):
def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint checknull):
"""
Only aggregates on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
{{dest_type2}} val, count
- ndarray[{{dest_type2}}, ndim=2] maxx, nobs
+ ndarray[{{dest_type2}}, ndim=2] maxx, nobs, nancount
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
+ nancount = np.zeros_like(out)
maxx = np.empty_like(out)
maxx.fill(-{{inf_val}})
@@ -484,16 +607,11 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
counts[lab] += 1
for j in range(K):
val = values[i, j]
-
- # not nan
- {{if name == 'int64'}}
- if val != {{nan_val}}:
- {{else}}
- if val == val and val != {{nan_val}}:
- {{endif}}
- nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
+ if val != val:
+ nancount[lab, j] += 1
+ nobs[lab, j] += 1
+ if val > maxx[lab, j]:
+ maxx[lab, j] = val
else:
for i in range(N):
lab = labels[i]
@@ -502,13 +620,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
counts[lab] += 1
val = values[i, 0]
-
- # not nan
- {{if name == 'int64'}}
- if val != {{nan_val}}:
- {{else}}
- if val == val and val != {{nan_val}}:
- {{endif}}
+ if val != val:
+ nancount[lab, 0] += 1
nobs[lab, 0] += 1
if val > maxx[lab, 0]:
maxx[lab, 0] = val
@@ -517,6 +630,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = {{nan_val}}
+ elif checknull == False and nancount[i, j] > 0:
+ out[i, j] = {{nan_val}}
else:
out[i, j] = maxx[i, j]
@@ -526,19 +641,21 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
ndarray[int64_t] counts,
ndarray[{{dest_type2}}, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint skipna):
"""
Only aggregates on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
{{dest_type2}} val, count
- ndarray[{{dest_type2}}, ndim=2] minx, nobs
+ ndarray[{{dest_type2}}, ndim=2] minx, nobs, nancount
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
+ nancount = np.zeros_like(out)
minx = np.empty_like(out)
minx.fill({{inf_val}})
@@ -546,15 +663,62 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
N, K = (<object> values).shape
with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
+ if skipna == False:
+ if K > 1:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+ if val != val:
+ nancount[lab, j] += 1
+ nobs[lab, j] += 1
+ if val < minx[lab, j]:
+ minx[lab, j] = val
+ else:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ val = values[i, 0]
+ if val != val:
+ nancount[lab, 0] += 1
+ nobs[lab, 0] += 1
+ if val < minx[lab, 0]:
+ minx[lab, 0] = val
+ else:
+ if K > 1:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ {{if name == 'int64'}}
+ if val != {{nan_val}}:
+ {{else}}
+ if val == val and val != {{nan_val}}:
+ {{endif}}
+ nobs[lab, j] += 1
+ if val < minx[lab, j]:
+ minx[lab, j] = val
+ else:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
+ counts[lab] += 1
+ val = values[i, 0]
# not nan
{{if name == 'int64'}}
@@ -562,32 +726,16 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
{{else}}
if val == val and val != {{nan_val}}:
{{endif}}
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- {{if name == 'int64'}}
- if val != {{nan_val}}:
- {{else}}
- if val == val and val != {{nan_val}}:
- {{endif}}
- nobs[lab, 0] += 1
- if val < minx[lab, 0]:
- minx[lab, 0] = val
+ nobs[lab, 0] += 1
+ if val < minx[lab, 0]:
+ minx[lab, 0] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = {{nan_val}}
+ elif skipna == False and nancount[i, j] > 0:
+ out[i, j] = {{nan_val}}
else:
out[i, j] = minx[i, j]
@@ -684,7 +832,8 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
def group_median_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
+ ndarray[int64_t] labels,
+ bint checknull):
"""
Only aggregates on axis=0
"""
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 4095a14aa5970..1751c873c2b42 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -105,7 +105,7 @@
'cummin', 'cummax'])
-def _groupby_function(name, alias, npfunc, numeric_only=True,
+def _groupby_function(name, alias, npfunc, numeric_only=True, skipna=True,
_convert=False):
_local_template = "Compute %(f)s of group values"
@@ -807,7 +807,7 @@ def _cython_transform(self, how, numeric_only=True):
return self._wrap_transformed_output(output, names)
- def _cython_agg_general(self, how, alt=None, numeric_only=True):
+ def _cython_agg_general(self, how, alt=None, numeric_only=True, skipna=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
@@ -815,7 +815,7 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True):
continue
try:
- result, names = self.grouper.aggregate(obj.values, how)
+ result, names = self.grouper.aggregate(obj.values, how, skipna)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
@@ -1020,7 +1020,7 @@ def mean(self, *args, **kwargs):
For multiple groupings, the result index will be a MultiIndex
"""
- nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
+ nv.validate_groupby_func('mean', args, kwargs, ['numeric_only','skipna'])
try:
return self._cython_agg_general('mean', **kwargs)
except GroupByError:
@@ -1067,7 +1067,7 @@ def std(self, ddof=1, *args, **kwargs):
"""
# TODO: implement at Cython level?
- nv.validate_groupby_func('std', args, kwargs)
+ nv.validate_groupby_func('std', args, kwargs,['skipna'])
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@@ -1083,7 +1083,7 @@ def var(self, ddof=1, *args, **kwargs):
ddof : integer, default 1
degrees of freedom
"""
- nv.validate_groupby_func('var', args, kwargs)
+ nv.validate_groupby_func('var', args, kwargs, ['skipna'])
if ddof == 1:
return self._cython_agg_general('var', **kwargs)
else:
@@ -1093,7 +1093,7 @@ def var(self, ddof=1, *args, **kwargs):
@Substitution(name='groupby')
@Appender(_doc_template)
- def sem(self, ddof=1):
+ def sem(self, ddof=1, **kwargs):
"""
Compute standard error of the mean of groups, excluding missing values
@@ -1105,7 +1105,7 @@ def sem(self, ddof=1):
degrees of freedom
"""
- return self.std(ddof=ddof) / np.sqrt(self.count())
+ return self.std(ddof=ddof, **kwargs) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
@@ -1117,10 +1117,10 @@ def size(self):
result.name = getattr(self, 'name', None)
return result
- sum = _groupby_function('sum', 'add', np.sum)
- prod = _groupby_function('prod', 'prod', np.prod)
- min = _groupby_function('min', 'min', np.min, numeric_only=False)
- max = _groupby_function('max', 'max', np.max, numeric_only=False)
+ sum = _groupby_function('sum', 'add', np.sum, skipna=True)
+ prod = _groupby_function('prod', 'prod', np.prod, skipna=True)
+ min = _groupby_function('min', 'min', np.min, numeric_only=False, skipna=True)
+ max = _groupby_function('max', 'max', np.max, numeric_only=False, skipna=True)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
@@ -1849,7 +1849,7 @@ def wrapper(*args, **kwargs):
(how, dtype_str))
return func, dtype_str
- def _cython_operation(self, kind, values, how, axis):
+ def _cython_operation(self, kind, values, how, axis, skipna):
assert kind in ['transform', 'aggregate']
# can we do this operation with our cython functions
@@ -1933,7 +1933,7 @@ def _cython_operation(self, kind, values, how, axis):
fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
- result, counts, values, labels, func, is_numeric,
+ result, counts, values, labels, skipna, func, is_numeric,
is_datetimelike)
elif kind == 'transform':
result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
@@ -1975,13 +1975,13 @@ def _cython_operation(self, kind, values, how, axis):
return result, names
- def aggregate(self, values, how, axis=0):
- return self._cython_operation('aggregate', values, how, axis)
+ def aggregate(self, values, how, skipna, axis=0):
+ return self._cython_operation('aggregate', values, how, axis, skipna)
def transform(self, values, how, axis=0):
- return self._cython_operation('transform', values, how, axis)
+ return self._cython_operation('transform', values, how, axis, skipna=True)
- def _aggregate(self, result, counts, values, comp_ids, agg_func,
+ def _aggregate(self, result, counts, values, comp_ids, skipna, agg_func,
is_numeric, is_datetimelike):
if values.ndim > 3:
# punting for now
@@ -1991,9 +1991,9 @@ def _aggregate(self, result, counts, values, comp_ids, agg_func,
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
- agg_func(result[:, :, i], counts, chunk, comp_ids)
+ agg_func(result[:, :, i], counts, chunk, comp_ids, skipna)
else:
- agg_func(result, counts, values, comp_ids)
+ agg_func(result, counts, values, comp_ids, skipna)
return result
@@ -3187,9 +3187,9 @@ def _iterate_slices(self):
continue
yield val, slicer(val)
- def _cython_agg_general(self, how, alt=None, numeric_only=True):
+ def _cython_agg_general(self, how, alt=None, numeric_only=True, skipna=True):
new_items, new_blocks = self._cython_agg_blocks(
- how, alt=alt, numeric_only=numeric_only)
+ how, alt=alt, numeric_only=numeric_only, skipna=skipna)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
@@ -3215,7 +3215,7 @@ def _wrap_agged_blocks(self, items, blocks):
_block_agg_axis = 0
- def _cython_agg_blocks(self, how, alt=None, numeric_only=True):
+ def _cython_agg_blocks(self, how, alt=None, numeric_only=True,skipna=None):
# TODO: the actual managing of mgr_locs is a PITA
# here, it should happen via BlockManager.combine
@@ -3232,8 +3232,9 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True):
locs = block.mgr_locs.as_array
try:
result, _ = self.grouper.aggregate(
- block.values, how, axis=agg_axis)
+ block.values, how, skipna=skipna, axis=agg_axis)
except NotImplementedError:
+ continue
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
@@ -3327,6 +3328,9 @@ def aggregate(self, arg, *args, **kwargs):
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
+ if result.empty:
+ for col in result.columns:
+ result[col] = result[col].astype(self.obj[col])
return result._convert(datetime=True)
agg = aggregate
| - [x] closes #15675
- [x] tests added / passed
- [ ] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15772 | 2017-03-21T20:30:36Z | 2017-10-28T00:26:40Z | null | 2023-05-11T01:15:17Z |
CLN/INT: Rename _possibly to _maybe (GH15764) | diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py
index a782287175327..e78806b38c667 100644
--- a/pandas/computation/expr.py
+++ b/pandas/computation/expr.py
@@ -348,7 +348,7 @@ def _rewrite_membership_op(self, node, left, right):
op = self.visit(op_instance)
return op, op_instance, left, right
- def _possibly_transform_eq_ne(self, node, left=None, right=None):
+ def _maybe_transform_eq_ne(self, node, left=None, right=None):
if left is None:
left = self.visit(node.left, side='left')
if right is None:
@@ -357,7 +357,7 @@ def _possibly_transform_eq_ne(self, node, left=None, right=None):
right)
return op, op_class, left, right
- def _possibly_downcast_constants(self, left, right):
+ def _maybe_downcast_constants(self, left, right):
f32 = np.dtype(np.float32)
if left.isscalar and not right.isscalar and right.return_type == f32:
# right is a float32 array, left is a scalar
@@ -370,7 +370,7 @@ def _possibly_downcast_constants(self, left, right):
return left, right
- def _possibly_eval(self, binop, eval_in_python):
+ def _maybe_eval(self, binop, eval_in_python):
# eval `in` and `not in` (for now) in "partial" python space
# things that can be evaluated in "eval" space will be turned into
# temporary variables. for example,
@@ -380,10 +380,10 @@ def _possibly_eval(self, binop, eval_in_python):
return binop.evaluate(self.env, self.engine, self.parser,
self.term_type, eval_in_python)
- def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
- eval_in_python=('in', 'not in'),
- maybe_eval_in_python=('==', '!=', '<', '>',
- '<=', '>=')):
+ def _maybe_evaluate_binop(self, op, op_class, lhs, rhs,
+ eval_in_python=('in', 'not in'),
+ maybe_eval_in_python=('==', '!=', '<', '>',
+ '<=', '>=')):
res = op(lhs, rhs)
if res.has_invalid_return_type:
@@ -397,24 +397,24 @@ def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
getattr(rhs, 'is_datetime', False)):
# all date ops must be done in python bc numexpr doesn't work
# well with NaT
- return self._possibly_eval(res, self.binary_ops)
+ return self._maybe_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
- return self._possibly_eval(res, eval_in_python)
+ return self._maybe_eval(res, eval_in_python)
elif self.engine != 'pytables':
if (getattr(lhs, 'return_type', None) == object or
getattr(rhs, 'return_type', None) == object):
# evaluate "==" and "!=" in python if either of our operands
# has an object return type
- return self._possibly_eval(res, eval_in_python +
- maybe_eval_in_python)
+ return self._maybe_eval(res, eval_in_python +
+ maybe_eval_in_python)
return res
def visit_BinOp(self, node, **kwargs):
- op, op_class, left, right = self._possibly_transform_eq_ne(node)
- left, right = self._possibly_downcast_constants(left, right)
- return self._possibly_evaluate_binop(op, op_class, left, right)
+ op, op_class, left, right = self._maybe_transform_eq_ne(node)
+ left, right = self._maybe_downcast_constants(left, right)
+ return self._maybe_evaluate_binop(op, op_class, left, right)
def visit_Div(self, node, **kwargs):
truediv = self.env.scope['truediv']
@@ -662,9 +662,9 @@ def visitor(x, y):
lhs = self._try_visit_binop(x)
rhs = self._try_visit_binop(y)
- op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
- rhs)
- return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
+ op, op_class, lhs, rhs = self._maybe_transform_eq_ne(
+ node, lhs, rhs)
+ return self._maybe_evaluate_binop(op, node.op, lhs, rhs)
operands = node.values
return reduce(visitor, operands)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 00a3264e6c74a..2fab80c13781d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -7,7 +7,7 @@
import numpy as np
from pandas import compat, _np_version_under1p8
-from pandas.types.cast import _maybe_promote
+from pandas.types.cast import maybe_promote
from pandas.types.generic import ABCSeries, ABCIndex
from pandas.types.common import (is_unsigned_integer_dtype,
is_signed_integer_dtype,
@@ -1279,7 +1279,7 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
- dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
+ dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
@@ -1362,7 +1362,7 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
- dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
+ dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index af51c7f2e2dc1..0e58c18631588 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -10,8 +10,8 @@
from pandas.types.generic import ABCSeries, ABCIndexClass, ABCCategoricalIndex
from pandas.types.missing import isnull, notnull
-from pandas.types.cast import (_possibly_infer_to_datetimelike,
- _coerce_indexer_dtype)
+from pandas.types.cast import (maybe_infer_to_datetimelike,
+ coerce_indexer_dtype)
from pandas.types.dtypes import CategoricalDtype
from pandas.types.common import (_ensure_int64,
_ensure_object,
@@ -237,7 +237,7 @@ def __init__(self, values, categories=None, ordered=False, fastpath=False):
if fastpath:
# fast path
- self._codes = _coerce_indexer_dtype(values, categories)
+ self._codes = coerce_indexer_dtype(values, categories)
self._categories = self._validate_categories(
categories, fastpath=isinstance(categories, ABCIndexClass))
self._ordered = ordered
@@ -266,8 +266,7 @@ def __init__(self, values, categories=None, ordered=False, fastpath=False):
# correctly no need here this is an issue because _sanitize_array
# also coerces np.nan to a string under certain versions of numpy
# as well
- values = _possibly_infer_to_datetimelike(values,
- convert_dates=True)
+ values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
@@ -324,7 +323,7 @@ def __init__(self, values, categories=None, ordered=False, fastpath=False):
self.set_ordered(ordered or False, inplace=True)
self._categories = categories
- self._codes = _coerce_indexer_dtype(codes, categories)
+ self._codes = coerce_indexer_dtype(codes, categories)
@property
def _constructor(self):
@@ -877,7 +876,7 @@ def add_categories(self, new_categories, inplace=False):
new_categories = list(self._categories) + list(new_categories)
cat = self if inplace else self.copy()
cat._categories = self._validate_categories(new_categories)
- cat._codes = _coerce_indexer_dtype(cat._codes, new_categories)
+ cat._codes = coerce_indexer_dtype(cat._codes, new_categories)
if not inplace:
return cat
@@ -961,7 +960,7 @@ def remove_unused_categories(self, inplace=False):
idx, inv = idx[1:], inv - 1
cat._categories = cat.categories.take(idx)
- cat._codes = _coerce_indexer_dtype(inv, self._categories)
+ cat._codes = coerce_indexer_dtype(inv, self._categories)
if not inplace:
return cat
@@ -1065,8 +1064,8 @@ def __setstate__(self, state):
state['_categories'] = self._validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
- state['_codes'] = _coerce_indexer_dtype(state.pop('labels'),
- state['_categories'])
+ state['_codes'] = coerce_indexer_dtype(
+ state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
@@ -2062,7 +2061,7 @@ def _get_codes_for_values(values, categories):
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
- return _coerce_indexer_dtype(t.lookup(vals), cats)
+ return coerce_indexer_dtype(t.lookup(vals), cats)
def _convert_to_list_like(list_like):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 732d88b47ae2a..25cdf4aed46d3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -23,15 +23,15 @@
import numpy as np
import numpy.ma as ma
-from pandas.types.cast import (_maybe_upcast, _infer_dtype_from_scalar,
- _possibly_cast_to_datetime,
- _possibly_infer_to_datetimelike,
- _possibly_convert_platform,
- _possibly_downcast_to_dtype,
- _invalidate_string_dtypes,
- _coerce_to_dtypes,
- _maybe_upcast_putmask,
- _find_common_type)
+from pandas.types.cast import (maybe_upcast, infer_dtype_from_scalar,
+ maybe_cast_to_datetime,
+ maybe_infer_to_datetimelike,
+ maybe_convert_platform,
+ maybe_downcast_to_dtype,
+ invalidate_string_dtypes,
+ coerce_to_dtypes,
+ maybe_upcast_putmask,
+ find_common_type)
from pandas.types.common import (is_categorical_dtype,
is_object_dtype,
is_extension_type,
@@ -275,7 +275,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
else:
mask = ma.getmaskarray(data)
if mask.any():
- data, fill_value = _maybe_upcast(data, copy=True)
+ data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
@@ -335,7 +335,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
if isinstance(data, compat.string_types) and dtype is None:
dtype = np.object_
if dtype is None:
- dtype, data = _infer_dtype_from_scalar(data)
+ dtype, data = infer_dtype_from_scalar(data)
values = np.empty((len(index), len(columns)), dtype=dtype)
values.fill(data)
@@ -469,7 +469,7 @@ def _get_axes(N, K, index=index, columns=columns):
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
- values = _possibly_infer_to_datetimelike(values)
+ values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@@ -2359,7 +2359,7 @@ def select_dtypes(self, include=None, exclude=None):
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
- _invalidate_string_dtypes(dtypes)
+ invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
@@ -2659,7 +2659,7 @@ def reindexer(value):
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
- value = _possibly_convert_platform(value)
+ value = maybe_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
@@ -2671,13 +2671,13 @@ def reindexer(value):
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
- value = _possibly_infer_to_datetimelike(value)
+ value = maybe_infer_to_datetimelike(value)
else:
# upcast the scalar
- dtype, value = _infer_dtype_from_scalar(value)
+ dtype, value = infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
- value = _possibly_cast_to_datetime(value, dtype)
+ value = maybe_cast_to_datetime(value, dtype)
# return internal types directly
if is_extension_type(value):
@@ -3000,8 +3000,8 @@ def _maybe_casted_values(index, labels=None):
else:
values = values.take(labels)
if mask.any():
- values, changed = _maybe_upcast_putmask(values, mask,
- np.nan)
+ values, changed = maybe_upcast_putmask(
+ values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
@@ -3722,7 +3722,7 @@ def combine(self, other, func, fill_value=None, overwrite=True):
# if we have different dtypes, possibily promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
- new_dtype = _find_common_type([this_dtype, other_dtype])
+ new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
@@ -3743,13 +3743,13 @@ def combine(self, other, func, fill_value=None, overwrite=True):
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# ToDo: This conversion should be handled in
- # _possibly_cast_to_datetime but the change affects lot...
+ # _maybe_cast_to_datetime but the change affects lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
- arr = _possibly_cast_to_datetime(arr, new_dtype)
+ arr = maybe_cast_to_datetime(arr, new_dtype)
else:
- arr = _possibly_downcast_to_dtype(arr, this_dtype)
+ arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
@@ -5003,7 +5003,7 @@ def f(x):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
- result = _coerce_to_dtypes(result, self.dtypes)
+ result = coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
@@ -5505,7 +5505,7 @@ def _prep_ndarray(values, copy=True):
return np.empty((0, 0), dtype=object)
def convert(v):
- return _possibly_convert_platform(v)
+ return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
@@ -5601,7 +5601,7 @@ def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
- arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)
+ arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
@@ -5699,7 +5699,7 @@ def _convert_object_array(content, columns, coerce_float=False, dtype=None):
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
- arr = _possibly_cast_to_datetime(arr, dtype)
+ arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1db9677659ca3..87052800b8fb5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -23,7 +23,7 @@
is_list_like,
is_dict_like,
is_re_compilable)
-from pandas.types.cast import _maybe_promote, _maybe_upcast_putmask
+from pandas.types.cast import maybe_promote, maybe_upcast_putmask
from pandas.types.missing import isnull, notnull
from pandas.types.generic import ABCSeries, ABCPanel
@@ -4956,10 +4956,10 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
# or not try_quick
if not try_quick:
- dtype, fill_value = _maybe_promote(other.dtype)
+ dtype, fill_value = maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
- _maybe_upcast_putmask(new_other, icond, other)
+ maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 4095a14aa5970..0a63981290df3 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -32,7 +32,7 @@
_ensure_object,
_ensure_categorical,
_ensure_float)
-from pandas.types.cast import _possibly_downcast_to_dtype
+from pandas.types.cast import maybe_downcast_to_dtype
from pandas.types.missing import isnull, notnull, _maybe_fill
from pandas.core.common import (_values_from_object, AbstractMethodError,
@@ -783,7 +783,7 @@ def _try_cast(self, result, obj, numeric_only=False):
if not is_scalar(result):
if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
- result = _possibly_downcast_to_dtype(result, dtype)
+ result = maybe_downcast_to_dtype(result, dtype)
return result
@@ -2914,7 +2914,7 @@ def transform(self, func, *args, **kwargs):
# the cython take a different path (and casting)
dtype = self._selected_obj.dtype
if is_numeric_dtype(dtype):
- result = _possibly_downcast_to_dtype(result, dtype)
+ result = maybe_downcast_to_dtype(result, dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6487c2108028e..8db801f8e7212 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -29,15 +29,15 @@
is_re_compilable,
is_scalar,
_get_dtype)
-from pandas.types.cast import (_possibly_downcast_to_dtype,
- _maybe_convert_string_to_object,
- _maybe_upcast,
- _maybe_convert_scalar, _maybe_promote,
- _infer_dtype_from_scalar,
- _soft_convert_objects,
- _possibly_convert_objects,
- _astype_nansafe,
- _find_common_type)
+from pandas.types.cast import (maybe_downcast_to_dtype,
+ maybe_convert_string_to_object,
+ maybe_upcast,
+ maybe_convert_scalar, maybe_promote,
+ infer_dtype_from_scalar,
+ soft_convert_objects,
+ maybe_convert_objects,
+ astype_nansafe,
+ find_common_type)
from pandas.types.missing import (isnull, array_equivalent,
_is_na_compat,
is_null_datelike_scalar)
@@ -429,7 +429,7 @@ def downcast(self, dtypes=None, mgr=None):
if dtypes is None:
dtypes = 'infer'
- nv = _possibly_downcast_to_dtype(values, dtypes)
+ nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv, fastpath=True)
# ndim > 1
@@ -455,7 +455,7 @@ def downcast(self, dtypes=None, mgr=None):
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
- nv = _possibly_downcast_to_dtype(values[i], dtype)
+ nv = maybe_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(self.make_block(nv, fastpath=True, placement=[rl]))
@@ -514,7 +514,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
- values = _astype_nansafe(values.ravel(), dtype, copy=True)
+ values = astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, dtype=dtype,
@@ -578,7 +578,7 @@ def _try_cast_result(self, result, dtype=None):
return result
# may need to change the dtype here
- return _possibly_downcast_to_dtype(result, dtype)
+ return maybe_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
@@ -684,7 +684,7 @@ def setitem(self, indexer, value, mgr=None):
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
- dtype, _ = _maybe_promote(arr_value.dtype)
+ dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
@@ -758,7 +758,7 @@ def _is_empty_indexer(indexer):
value.dtype):
dtype = value.dtype
elif is_scalar(value):
- dtype, _ = _infer_dtype_from_scalar(value)
+ dtype, _ = infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
@@ -871,7 +871,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
n = np.array(new)
# type of the new block
- dtype, _ = _maybe_promote(n.dtype)
+ dtype, _ = maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
@@ -1066,7 +1066,7 @@ def shift(self, periods, axis=0, mgr=None):
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
- new_values, fill_value = _maybe_upcast(self.values)
+ new_values, fill_value = maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
@@ -1250,8 +1250,8 @@ def where(self, other, cond, align=True, raise_on_error=True,
raise ValueError("where must have a condition that is ndarray "
"like")
- other = _maybe_convert_string_to_object(other)
- other = _maybe_convert_scalar(other)
+ other = maybe_convert_string_to_object(other)
+ other = maybe_convert_scalar(other)
# our where function
def func(cond, values, other):
@@ -1864,10 +1864,10 @@ def convert(self, *args, **kwargs):
new_style |= kw in kwargs
if new_style:
- fn = _soft_convert_objects
+ fn = soft_convert_objects
fn_inputs = new_inputs
else:
- fn = _possibly_convert_objects
+ fn = maybe_convert_objects
fn_inputs = ['convert_dates', 'convert_numeric',
'convert_timedeltas']
fn_inputs += ['copy']
@@ -2643,7 +2643,7 @@ def shift(self, periods, axis=0, mgr=None):
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
- new_values, fill_value = _maybe_upcast(new_values)
+ new_values, fill_value = maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
@@ -3239,13 +3239,12 @@ def replace_list(self, src_list, dest_list, inplace=False, regex=False,
def comp(s):
if isnull(s):
return isnull(values)
- return _possibly_compare(values, getattr(s, 'asm8', s),
- operator.eq)
+ return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq)
def _cast_scalar(block, scalar):
- dtype, val = _infer_dtype_from_scalar(scalar, pandas_dtype=True)
+ dtype, val = infer_dtype_from_scalar(scalar, pandas_dtype=True)
if not is_dtype_equal(block.dtype, dtype):
- dtype = _find_common_type([block.dtype, dtype])
+ dtype = find_common_type([block.dtype, dtype])
block = block.astype(dtype)
# use original value
val = scalar
@@ -3920,7 +3919,7 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
- _, fill_value = _maybe_promote(blk.dtype)
+ _, fill_value = maybe_promote(blk.dtype)
fill_tuple = (fill_value, )
return [blk.take_nd(slobj, axis=0,
@@ -3978,7 +3977,7 @@ def _make_na_block(self, placement, fill_value=None):
block_shape = list(self.shape)
block_shape[0] = len(placement)
- dtype, fill_value = _infer_dtype_from_scalar(fill_value)
+ dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
@@ -4497,7 +4496,7 @@ def _interleaved_dtype(blocks):
if not len(blocks):
return None
- dtype = _find_common_type([b.dtype for b in blocks])
+ dtype = find_common_type([b.dtype for b in blocks])
# only numpy compat
if isinstance(dtype, ExtensionDtype):
@@ -4587,7 +4586,7 @@ def _vstack(to_stack, dtype):
return np.vstack(to_stack)
-def _possibly_compare(a, b, op):
+def _maybe_compare(a, b, op):
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
@@ -4637,7 +4636,7 @@ def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
- dtype, fill_value = _maybe_promote(values.dtype)
+ dtype, fill_value = maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
@@ -4786,7 +4785,7 @@ def _putmask_smart(v, m, n):
pass
# change the dtype
- dtype, _ = _maybe_promote(n.dtype)
+ dtype, _ = maybe_promote(n.dtype)
if is_extension_type(v.dtype) and is_object_dtype(dtype):
nv = v.get_values(dtype)
@@ -5142,8 +5141,8 @@ def dtype(self):
if not self.needs_filling:
return self.block.dtype
else:
- return _get_dtype(_maybe_promote(self.block.dtype,
- self.block.fill_value)[0])
+ return _get_dtype(maybe_promote(self.block.dtype,
+ self.block.fill_value)[0])
return self._dtype
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index bb6c9b4546d0f..6ec94e69740a2 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -20,7 +20,7 @@
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype,
is_int_or_datetime_dtype, is_any_int_dtype)
-from pandas.types.cast import _int64_max, _maybe_upcast_putmask
+from pandas.types.cast import _int64_max, maybe_upcast_putmask
from pandas.types.missing import isnull, notnull
from pandas.core.common import _values_from_object
@@ -200,7 +200,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
# promote if needed
else:
- values, changed = _maybe_upcast_putmask(values, mask, fill_value)
+ values, changed = maybe_upcast_putmask(values, mask, fill_value)
elif copy:
values = values.copy()
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index fe83f8a352851..5dac8a7e4d2da 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -33,7 +33,7 @@
is_list_like,
is_scalar,
_ensure_object)
-from pandas.types.cast import _maybe_upcast_putmask, _find_common_type
+from pandas.types.cast import maybe_upcast_putmask, find_common_type
from pandas.types.generic import ABCSeries, ABCIndex, ABCPeriodIndex
# -----------------------------------------------------------------------------
@@ -657,7 +657,7 @@ def na_op(x, y):
raise_on_error=True, **eval_kwargs)
except TypeError:
if isinstance(y, (np.ndarray, ABCSeries, pd.Index)):
- dtype = _find_common_type([x.dtype, y.dtype])
+ dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
mask = notnull(x) & notnull(y)
result[mask] = op(x[mask], _values_from_object(y[mask]))
@@ -670,7 +670,7 @@ def na_op(x, y):
"{op}".format(typ=type(x).__name__,
op=str_rep))
- result, changed = _maybe_upcast_putmask(result, ~mask, np.nan)
+ result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
@@ -1204,7 +1204,7 @@ def na_op(x, y):
"objects of type {x} and {y}".format(
op=name, x=type(x), y=type(y)))
- result, changed = _maybe_upcast_putmask(result, ~mask, np.nan)
+ result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
@@ -1329,7 +1329,7 @@ def na_op(x, y):
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
- result, changed = _maybe_upcast_putmask(result, ~mask, np.nan)
+ result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 5c7b66a2d1356..50ddc24ac9656 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -6,8 +6,8 @@
import numpy as np
-from pandas.types.cast import (_infer_dtype_from_scalar,
- _possibly_cast_item)
+from pandas.types.cast import (infer_dtype_from_scalar,
+ maybe_cast_item)
from pandas.types.common import (is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.types.missing import notnull
@@ -165,7 +165,7 @@ def _init_data(self, data, copy, dtype, **kwargs):
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
if dtype is None:
- dtype, data = _infer_dtype_from_scalar(data)
+ dtype, data = infer_dtype_from_scalar(data)
values = np.empty([len(x) for x in passed_axes], dtype=dtype)
values.fill(data)
mgr = self._init_matrix(values, passed_axes, dtype=dtype,
@@ -533,11 +533,11 @@ def set_value(self, *args, **kwargs):
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
- likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
+ likely_dtype, args[-1] = infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
- _possibly_cast_item(result, args[0], likely_dtype)
+ maybe_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
@@ -568,7 +568,7 @@ def __setitem__(self, key, value):
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
- dtype, value = _infer_dtype_from_scalar(value)
+ dtype, value = infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 1e685ae6895ad..2822d98b7c906 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -10,7 +10,7 @@
from pandas.types.common import (_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion)
-from pandas.types.cast import _maybe_promote
+from pandas.types.cast import maybe_promote
from pandas.types.missing import notnull
import pandas.types.concat as _concat
@@ -202,7 +202,7 @@ def get_new_values(self):
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
- dtype, fill_value = _maybe_promote(values.dtype, self.fill_value)
+ dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 4c51ced1845fe..0913592e055cd 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -29,9 +29,9 @@
_is_unorderable_exception,
_ensure_platform_int)
from pandas.types.generic import ABCSparseArray, ABCDataFrame
-from pandas.types.cast import (_maybe_upcast, _infer_dtype_from_scalar,
- _possibly_convert_platform,
- _possibly_cast_to_datetime, _possibly_castable)
+from pandas.types.cast import (maybe_upcast, infer_dtype_from_scalar,
+ maybe_convert_platform,
+ maybe_cast_to_datetime, maybe_castable)
from pandas.types.missing import isnull, notnull
from pandas.core.common import (is_bool_indexer,
@@ -2794,7 +2794,7 @@ def _sanitize_array(data, index, dtype=None, copy=False,
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
- data, fill_value = _maybe_upcast(data, copy=True)
+ data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
@@ -2803,11 +2803,11 @@ def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
- if _possibly_castable(arr) and not copy and dtype is None:
+ if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
- subarr = _possibly_cast_to_datetime(arr, dtype)
+ subarr = maybe_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
@@ -2863,9 +2863,9 @@ def _try_cast(arr, take_fast_path):
subarr = lib.maybe_convert_objects(subarr)
else:
- subarr = _possibly_convert_platform(data)
+ subarr = maybe_convert_platform(data)
- subarr = _possibly_cast_to_datetime(subarr, dtype)
+ subarr = maybe_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
@@ -2894,10 +2894,10 @@ def create_from_value(value, index, dtype):
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
- dtype, value = _infer_dtype_from_scalar(value)
+ dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
- value = _possibly_cast_to_datetime(value, dtype)
+ value = maybe_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py
index d262ecd818f1d..54f73a2466286 100644
--- a/pandas/indexes/base.py
+++ b/pandas/indexes/base.py
@@ -2445,7 +2445,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
- pself, ptarget = self._possibly_promote(target)
+ pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
@@ -2572,7 +2572,7 @@ def _filter_indexer_tolerance(self, target, indexer, tolerance):
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
- pself, ptarget = self._possibly_promote(target)
+ pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
@@ -2595,7 +2595,7 @@ def get_indexer_for(self, target, **kwargs):
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
- def _possibly_promote(self, other):
+ def _maybe_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
diff --git a/pandas/indexes/frozen.py b/pandas/indexes/frozen.py
index e043ba64bbad7..97a1a3ea99e65 100644
--- a/pandas/indexes/frozen.py
+++ b/pandas/indexes/frozen.py
@@ -10,7 +10,7 @@
import numpy as np
from pandas.core.base import PandasObject
-from pandas.types.cast import _coerce_indexer_dtype
+from pandas.types.cast import coerce_indexer_dtype
from pandas.formats.printing import pprint_thing
@@ -119,7 +119,7 @@ def __unicode__(self):
def _ensure_frozen(array_like, categories, copy=False):
- array_like = _coerce_indexer_dtype(array_like, categories)
+ array_like = coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 18343670fb39e..461006b5cd2fa 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -21,7 +21,7 @@
is_object_dtype, is_string_dtype,
is_scalar, is_categorical_dtype)
from pandas.types.missing import isnull
-from pandas.types.cast import _astype_nansafe
+from pandas.types.cast import astype_nansafe
from pandas.core.index import Index, MultiIndex, RangeIndex
from pandas.core.series import Series
from pandas.core.frame import DataFrame
@@ -1486,11 +1486,11 @@ def _cast_types(self, values, cast_type, column):
# c-parser which parses all categories
# as strings
if not is_object_dtype(values):
- values = _astype_nansafe(values, str)
+ values = astype_nansafe(values, str)
values = Categorical(values)
else:
try:
- values = _astype_nansafe(values, cast_type, copy=True)
+ values = astype_nansafe(values, cast_type, copy=True)
except ValueError:
raise ValueError("Unable to convert column %s to "
"type %s" % (column, cast_type))
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index 5f4c07971d37e..f149e724c19c3 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -22,8 +22,8 @@
is_list_like,
is_string_dtype,
is_scalar, is_dtype_equal)
-from pandas.types.cast import (_possibly_convert_platform, _maybe_promote,
- _astype_nansafe, _find_common_type)
+from pandas.types.cast import (maybe_convert_platform, maybe_promote,
+ astype_nansafe, find_common_type)
from pandas.types.missing import isnull, notnull, na_value_for_dtype
from pandas.sparse import libsparse as splib
@@ -93,7 +93,7 @@ def _sparse_array_op(left, right, op, name, series=False):
# dtype used to find corresponding sparse method
if not is_dtype_equal(left.dtype, right.dtype):
- dtype = _find_common_type([left.dtype, right.dtype])
+ dtype = find_common_type([left.dtype, right.dtype])
left = left.astype(dtype)
right = right.astype(dtype)
else:
@@ -370,7 +370,7 @@ def fill_value(self, value):
if not is_scalar(value):
raise ValueError('fill_value must be a scalar')
# if the specified value triggers type promotion, raise ValueError
- new_dtype, fill_value = _maybe_promote(self.dtype, value)
+ new_dtype, fill_value = maybe_promote(self.dtype, value)
if is_dtype_equal(self.dtype, new_dtype):
self._fill_value = fill_value
else:
@@ -532,7 +532,7 @@ def __setslice__(self, i, j, value):
def astype(self, dtype=None, copy=True):
dtype = np.dtype(dtype)
- sp_values = _astype_nansafe(self.sp_values, dtype, copy=copy)
+ sp_values = astype_nansafe(self.sp_values, dtype, copy=copy)
try:
if is_bool_dtype(dtype):
# to avoid np.bool_ dtype
@@ -736,7 +736,7 @@ def _sanitize_values(arr):
pass
elif is_list_like(arr) and len(arr) > 0:
- arr = _possibly_convert_platform(arr)
+ arr = maybe_convert_platform(arr)
else:
arr = np.asarray(arr)
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index a21f64f524a0a..41f301f263374 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -11,7 +11,7 @@
import numpy as np
from pandas.types.missing import isnull, notnull
-from pandas.types.cast import _maybe_upcast, _find_common_type
+from pandas.types.cast import maybe_upcast, find_common_type
from pandas.types.common import _ensure_platform_int, is_scipy_sparse
from pandas.core.common import _try_sort
@@ -250,7 +250,7 @@ def to_coo(self):
except ImportError:
raise ImportError('Scipy is not installed')
- dtype = _find_common_type(self.dtypes)
+ dtype = find_common_type(self.dtypes)
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
@@ -635,7 +635,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
new = new.values
# convert integer to float if necessary. need to do a lot
# more than that, handle boolean etc also
- new, fill_value = _maybe_upcast(new, fill_value=fill_value)
+ new, fill_value = maybe_upcast(new, fill_value=fill_value)
np.putmask(new, mask, fill_value)
new_series[col] = new
diff --git a/pandas/tests/types/test_cast.py b/pandas/tests/types/test_cast.py
index d7b086daea1e3..dd4ea3bb02be9 100644
--- a/pandas/tests/types/test_cast.py
+++ b/pandas/tests/types/test_cast.py
@@ -9,33 +9,33 @@
import numpy as np
from pandas import Timedelta, Timestamp, DatetimeIndex
-from pandas.types.cast import (_possibly_downcast_to_dtype,
- _possibly_convert_objects,
- _infer_dtype_from_scalar,
- _maybe_convert_string_to_object,
- _maybe_convert_scalar,
- _find_common_type)
+from pandas.types.cast import (maybe_downcast_to_dtype,
+ maybe_convert_objects,
+ infer_dtype_from_scalar,
+ maybe_convert_string_to_object,
+ maybe_convert_scalar,
+ find_common_type)
from pandas.types.dtypes import (CategoricalDtype,
DatetimeTZDtype, PeriodDtype)
from pandas.util import testing as tm
-class TestPossiblyDowncast(tm.TestCase):
+class TestMaybeDowncast(tm.TestCase):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
- result = _possibly_downcast_to_dtype(arr, 'infer')
+ result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
- result = _possibly_downcast_to_dtype(arr, 'infer')
+ result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
- result = _possibly_downcast_to_dtype(arr, 'infer')
+ result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
@@ -44,41 +44,41 @@ def test_downcast_conv(self):
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
- result = _possibly_downcast_to_dtype(arr, 'infer')
+ result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
- result = _possibly_downcast_to_dtype(arr, 'infer')
+ result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
- result = _possibly_downcast_to_dtype(arr, 'int64')
+ result = maybe_downcast_to_dtype(arr, 'int64')
tm.assert_almost_equal(result, np.array([], dtype=np.int64))
assert result.dtype == np.int64
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
- res = _possibly_downcast_to_dtype(arr, 'datetime64[ns]')
+ res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
- res = _possibly_downcast_to_dtype(arr, 'timedelta64[ns]')
+ res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
- res = _possibly_downcast_to_dtype(exp, exp.dtype)
+ res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
- res = _possibly_downcast_to_dtype(exp.asi8, exp.dtype)
+ res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
@@ -91,121 +91,121 @@ def test_infer_dtype_from_scalar(self):
for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
np.int32, np.uint64, np.int64]:
data = dtypec(12)
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, type(data))
data = 12
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, np.int64)
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, dtypec)
data = np.float(12)
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, np.float64)
for data in [True, False]:
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, np.bool_)
for data in [np.complex64(1), np.complex128(1)]:
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, np.complex_)
import datetime
for data in [np.datetime64(1, 'ns'), Timestamp(1),
datetime.datetime(2000, 1, 1, 0, 0)]:
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, 'M8[ns]')
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
datetime.timedelta(1)]:
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, 'm8[ns]')
for data in [datetime.date(2000, 1, 1),
Timestamp(1, tz='US/Eastern'), 'foo']:
- dtype, val = _infer_dtype_from_scalar(data)
+ dtype, val = infer_dtype_from_scalar(data)
self.assertEqual(dtype, np.object_)
class TestMaybe(tm.TestCase):
def test_maybe_convert_string_to_array(self):
- result = _maybe_convert_string_to_object('x')
+ result = maybe_convert_string_to_object('x')
tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
self.assertTrue(result.dtype == object)
- result = _maybe_convert_string_to_object(1)
+ result = maybe_convert_string_to_object(1)
self.assertEqual(result, 1)
arr = np.array(['x', 'y'], dtype=str)
- result = _maybe_convert_string_to_object(arr)
+ result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
self.assertTrue(result.dtype == object)
# unicode
arr = np.array(['x', 'y']).astype('U')
- result = _maybe_convert_string_to_object(arr)
+ result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
self.assertTrue(result.dtype == object)
# object
arr = np.array(['x', 2], dtype=object)
- result = _maybe_convert_string_to_object(arr)
+ result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
self.assertTrue(result.dtype == object)
def test_maybe_convert_scalar(self):
# pass thru
- result = _maybe_convert_scalar('x')
+ result = maybe_convert_scalar('x')
self.assertEqual(result, 'x')
- result = _maybe_convert_scalar(np.array([1]))
+ result = maybe_convert_scalar(np.array([1]))
self.assertEqual(result, np.array([1]))
# leave scalar dtype
- result = _maybe_convert_scalar(np.int64(1))
+ result = maybe_convert_scalar(np.int64(1))
self.assertEqual(result, np.int64(1))
- result = _maybe_convert_scalar(np.int32(1))
+ result = maybe_convert_scalar(np.int32(1))
self.assertEqual(result, np.int32(1))
- result = _maybe_convert_scalar(np.float32(1))
+ result = maybe_convert_scalar(np.float32(1))
self.assertEqual(result, np.float32(1))
- result = _maybe_convert_scalar(np.int64(1))
+ result = maybe_convert_scalar(np.int64(1))
self.assertEqual(result, np.float64(1))
# coerce
- result = _maybe_convert_scalar(1)
+ result = maybe_convert_scalar(1)
self.assertEqual(result, np.int64(1))
- result = _maybe_convert_scalar(1.0)
+ result = maybe_convert_scalar(1.0)
self.assertEqual(result, np.float64(1))
- result = _maybe_convert_scalar(Timestamp('20130101'))
+ result = maybe_convert_scalar(Timestamp('20130101'))
self.assertEqual(result, Timestamp('20130101').value)
- result = _maybe_convert_scalar(datetime(2013, 1, 1))
+ result = maybe_convert_scalar(datetime(2013, 1, 1))
self.assertEqual(result, Timestamp('20130101').value)
- result = _maybe_convert_scalar(Timedelta('1 day 1 min'))
+ result = maybe_convert_scalar(Timedelta('1 day 1 min'))
self.assertEqual(result, Timedelta('1 day 1 min').value)
class TestConvert(tm.TestCase):
- def test_possibly_convert_objects_copy(self):
+ def test_maybe_convert_objects_copy(self):
values = np.array([1, 2])
- out = _possibly_convert_objects(values, copy=False)
+ out = maybe_convert_objects(values, copy=False)
self.assertTrue(values is out)
- out = _possibly_convert_objects(values, copy=True)
+ out = maybe_convert_objects(values, copy=True)
self.assertTrue(values is not out)
values = np.array(['apply', 'banana'])
- out = _possibly_convert_objects(values, copy=False)
+ out = maybe_convert_objects(values, copy=False)
self.assertTrue(values is out)
- out = _possibly_convert_objects(values, copy=True)
+ out = maybe_convert_objects(values, copy=True)
self.assertTrue(values is not out)
@@ -267,34 +267,34 @@ def test_numpy_dtypes(self):
((np.dtype('datetime64[ns]'), np.int64), np.object)
)
for src, common in testcases:
- self.assertEqual(_find_common_type(src), common)
+ self.assertEqual(find_common_type(src), common)
with tm.assertRaises(ValueError):
# empty
- _find_common_type([])
+ find_common_type([])
def test_categorical_dtype(self):
dtype = CategoricalDtype()
- self.assertEqual(_find_common_type([dtype]), 'category')
- self.assertEqual(_find_common_type([dtype, dtype]), 'category')
- self.assertEqual(_find_common_type([np.object, dtype]), np.object)
+ self.assertEqual(find_common_type([dtype]), 'category')
+ self.assertEqual(find_common_type([dtype, dtype]), 'category')
+ self.assertEqual(find_common_type([np.object, dtype]), np.object)
def test_datetimetz_dtype(self):
dtype = DatetimeTZDtype(unit='ns', tz='US/Eastern')
- self.assertEqual(_find_common_type([dtype, dtype]),
+ self.assertEqual(find_common_type([dtype, dtype]),
'datetime64[ns, US/Eastern]')
for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
np.dtype('datetime64[ns]'), np.object, np.int64]:
- self.assertEqual(_find_common_type([dtype, dtype2]), np.object)
- self.assertEqual(_find_common_type([dtype2, dtype]), np.object)
+ self.assertEqual(find_common_type([dtype, dtype2]), np.object)
+ self.assertEqual(find_common_type([dtype2, dtype]), np.object)
def test_period_dtype(self):
dtype = PeriodDtype(freq='D')
- self.assertEqual(_find_common_type([dtype, dtype]), 'period[D]')
+ self.assertEqual(find_common_type([dtype, dtype]), 'period[D]')
for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
PeriodDtype(freq='2D'), PeriodDtype(freq='H'),
np.dtype('datetime64[ns]'), np.object, np.int64]:
- self.assertEqual(_find_common_type([dtype, dtype2]), np.object)
- self.assertEqual(_find_common_type([dtype2, dtype]), np.object)
+ self.assertEqual(find_common_type([dtype, dtype2]), np.object)
+ self.assertEqual(find_common_type([dtype2, dtype]), np.object)
diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index bf78a9dfb65cc..263d2f16a4216 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -9,7 +9,7 @@
is_decimal,
is_scalar as isscalar)
-from pandas.types.cast import _possibly_downcast_to_dtype
+from pandas.types.cast import maybe_downcast_to_dtype
import pandas as pd
from pandas.compat import reduce
@@ -226,8 +226,7 @@ def to_numeric(arg, errors='raise', downcast=None):
# from smallest to largest
for dtype in typecodes:
if np.dtype(dtype).itemsize <= values.dtype.itemsize:
- values = _possibly_downcast_to_dtype(
- values, dtype)
+ values = maybe_downcast_to_dtype(values, dtype)
# successful conversion
if values.dtype == dtype:
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index f80618ef34373..983c1a4cd9de9 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1329,7 +1329,7 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
# try to find a the dates
return (lhs_mask & rhs_mask).nonzero()[0]
- def _possibly_promote(self, other):
+ def _maybe_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index f47d80a31b174..13d844bb6a399 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -623,7 +623,7 @@ def intersection(self, other):
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
- def _possibly_promote(self, other):
+ def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
diff --git a/pandas/types/cast.py b/pandas/types/cast.py
index 0e26cd085db5a..91c7d287d6d46 100644
--- a/pandas/types/cast.py
+++ b/pandas/types/cast.py
@@ -32,7 +32,7 @@
_int64_max = np.iinfo(np.int64).max
-def _possibly_convert_platform(values):
+def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
@@ -45,7 +45,7 @@ def _possibly_convert_platform(values):
return values
-def _possibly_downcast_to_dtype(result, dtype):
+def maybe_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
@@ -142,7 +142,7 @@ def trans(x): # noqa
return result
-def _maybe_upcast_putmask(result, mask, other):
+def maybe_upcast_putmask(result, mask, other):
"""
A safe version of putmask that potentially upcasts the result
@@ -193,7 +193,7 @@ def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
- r, _ = _maybe_upcast(result, fill_value=other, copy=True)
+ r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
@@ -203,7 +203,7 @@ def changeit():
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
- new_dtype, _ = _maybe_promote(result.dtype, other)
+ new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
@@ -227,7 +227,7 @@ def changeit():
return result, False
-def _maybe_promote(dtype, fill_value=np.nan):
+def maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
@@ -312,7 +312,7 @@ def _maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
-def _infer_dtype_from_scalar(val, pandas_dtype=False):
+def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
@@ -387,7 +387,7 @@ def _infer_dtype_from_scalar(val, pandas_dtype=False):
return dtype, val
-def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
+def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explict type promotion and coercion
Parameters
@@ -404,7 +404,7 @@ def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
else:
if dtype is None:
dtype = values.dtype
- new_dtype, fill_value = _maybe_promote(dtype, fill_value)
+ new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
@@ -413,7 +413,7 @@ def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
return values, fill_value
-def _possibly_cast_item(obj, item, dtype):
+def maybe_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
@@ -423,7 +423,7 @@ def _possibly_cast_item(obj, item, dtype):
raise ValueError("Unexpected dtype encountered: %s" % dtype)
-def _invalidate_string_dtypes(dtype_set):
+def invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
@@ -432,7 +432,7 @@ def _invalidate_string_dtypes(dtype_set):
raise TypeError("string dtypes are not allowed, use 'object' instead")
-def _maybe_convert_string_to_object(values):
+def maybe_convert_string_to_object(values):
"""
Convert string-like and string-like array to convert object dtype.
@@ -446,13 +446,13 @@ def _maybe_convert_string_to_object(values):
return values
-def _maybe_convert_scalar(values):
+def maybe_convert_scalar(values):
"""
Convert a python scalar to the appropriate numpy dtype if possible
This avoids numpy directly converting according to platform preferences
"""
if is_scalar(values):
- dtype, values = _infer_dtype_from_scalar(values)
+ dtype, values = infer_dtype_from_scalar(values)
try:
values = dtype(values)
except TypeError:
@@ -460,7 +460,7 @@ def _maybe_convert_scalar(values):
return values
-def _coerce_indexer_dtype(indexer, categories):
+def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
l = len(categories)
if l < _int8_max:
@@ -472,7 +472,7 @@ def _coerce_indexer_dtype(indexer, categories):
return _ensure_int64(indexer)
-def _coerce_to_dtypes(result, dtypes):
+def coerce_to_dtypes(result, dtypes):
"""
given a dtypes and a result set, coerce the result elements to the
dtypes
@@ -507,7 +507,7 @@ def conv(r, dtype):
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
-def _astype_nansafe(arr, dtype, copy=True):
+def astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
@@ -564,8 +564,8 @@ def _astype_nansafe(arr, dtype, copy=True):
return arr.view(dtype)
-def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True,
- convert_timedeltas=True, copy=True):
+def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
+ convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
@@ -579,8 +579,8 @@ def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True,
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
- new_values = _possibly_cast_to_datetime(values, 'M8[ns]',
- errors='coerce')
+ new_values = maybe_cast_to_datetime(
+ values, 'M8[ns]', errors='coerce')
# if we are all nans then leave me alone
if not isnull(new_values).all():
@@ -627,8 +627,8 @@ def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True,
return values
-def _soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
- coerce=False, copy=True):
+def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
+ coerce=False, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
conversion_count = sum((datetime, numeric, timedelta))
@@ -683,7 +683,7 @@ def _soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
return values
-def _possibly_castable(arr):
+def maybe_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
@@ -695,7 +695,7 @@ def _possibly_castable(arr):
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
-def _possibly_infer_to_datetimelike(value, convert_dates=False):
+def maybe_infer_to_datetimelike(value, convert_dates=False):
"""
we might have a array (or single object) that is datetime like,
and no dtype is passed don't change the value unless we find a
@@ -788,7 +788,7 @@ def _try_timedelta(v):
return value
-def _possibly_cast_to_datetime(value, dtype, errors='raise'):
+def maybe_cast_to_datetime(value, dtype, errors='raise'):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
@@ -886,12 +886,12 @@ def _possibly_cast_to_datetime(value, dtype, errors='raise'):
# conversion
elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
- value = _possibly_infer_to_datetimelike(value)
+ value = maybe_infer_to_datetimelike(value)
return value
-def _find_common_type(types):
+def find_common_type(types):
"""
Find a common data type among the given dtypes.
| Also rename "private" functions in pandas.type.cast
- [x] closes #15764
- [x] tests passed
- [x] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff`` | https://api.github.com/repos/pandas-dev/pandas/pulls/15771 | 2017-03-21T19:42:54Z | 2017-03-22T11:57:12Z | null | 2017-03-22T12:24:35Z |
DOC: Ensure basic flake8 diff checks only Python | diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 918d427ee4f4c..9281c51059087 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,4 +1,4 @@
- [ ] closes #xxxx
- [ ] tests added / passed
- - [ ] passes ``git diff upstream/master | flake8 --diff``
+ - [ ] passes ``git diff upstream/master --name-only -- '*.py' | flake8 --diff``
- [ ] whatsnew entry
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 7ad5916a8809d..5e551a7fd5349 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -518,7 +518,7 @@ Travis-CI will run the `flake8 <http://pypi.python.org/pypi/flake8>`_ tool
and report any stylistic errors in your code. Therefore, it is helpful before
submitting code to run the check yourself on the diff::
- git diff master | flake8 --diff
+ git diff master --name-only -- '*.py' | flake8 --diff
This command will catch any stylistic errors in your changes specifically, but
be beware it may not catch all of them. For example, if you delete the only
| Let's make sure all of the `flake8` commands in our documentation check only Python files (will be applicable to majority of contributions and PR's).
Follow-up to #15749 | https://api.github.com/repos/pandas-dev/pandas/pulls/15769 | 2017-03-21T17:56:49Z | 2017-03-21T18:00:11Z | 2017-03-21T18:00:11Z | 2017-03-21T18:00:58Z |
BUG: Enforce correct encoding in stata | diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index e0d15c218ec85..b57b6a3898fd4 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -917,6 +917,8 @@ Bug Fixes
- Avoid use of ``np.finfo()`` during ``import pandas`` removed to mitigate deadlock on Python GIL misuse (:issue:`14641`)
- Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`)
+- Bug in ``StataReader`` and ``StataWriter`` which allows invalid encodings (:issue:`15723`)
+
- Bug in ``pd.concat()`` in which concatting with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`)
- Bug in ``groupby.agg()`` incorrectly localizing timezone on ``datetime`` (:issue:`15426`, :issue:`10668`, :issue:`13046`)
@@ -931,3 +933,4 @@ Bug Fixes
- Bug in ``pd.melt()`` where passing a tuple value for ``value_vars`` caused a ``TypeError`` (:issue:`15348`)
- Bug in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`)
- Bug in ``pd.read_msgpack`` which did not allow to load dataframe with an index of type ``CategoricalIndex`` (:issue:`15487`)
+
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index af4bc6a6b7ddb..1d2951da68086 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -33,6 +33,9 @@
from pandas._libs.lib import max_len_string_array, infer_dtype
from pandas._libs.tslib import NaT, Timestamp
+VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
+ 'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
+
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
@@ -45,7 +48,7 @@
_encoding_params = """\
encoding : string, None or encoding
- Encoding used to parse the files. None defaults to iso-8859-1."""
+ Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
index : identifier of index column
@@ -816,9 +819,14 @@ def get_base_missing_value(cls, dtype):
class StataParser(object):
- _default_encoding = 'iso-8859-1'
+ _default_encoding = 'latin-1'
def __init__(self, encoding):
+ if encoding is not None:
+ if encoding not in VALID_ENCODINGS:
+ raise ValueError('Unknown encoding. Only latin-1 and ascii '
+ 'supported.')
+
self._encoding = encoding
# type code.
@@ -936,7 +944,7 @@ def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
- encoding='iso-8859-1', chunksize=None):
+ encoding='latin-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
@@ -949,6 +957,10 @@ def __init__(self, path_or_buf, convert_dates=True,
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
+ if encoding is not None:
+ if encoding not in VALID_ENCODINGS:
+ raise ValueError('Unknown encoding. Only latin-1 and ascii '
+ 'supported.')
self._encoding = encoding
self._chunksize = chunksize
@@ -1362,7 +1374,8 @@ def _read_value_labels(self):
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
- self.GSO = {0: ''}
+ # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
+ self.GSO = {'0': ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
@@ -1387,7 +1400,8 @@ def _read_strls(self):
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
- self.GSO[v_o] = va
+ # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
+ self.GSO[str(v_o)] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
@@ -1623,7 +1637,8 @@ def _insert_strls(self, data):
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
- data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
+ # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
+ data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
@@ -1855,7 +1870,7 @@ class StataWriter(StataParser):
write_index : bool
Write the index to Stata dataset.
encoding : str
- Default is latin-1. Unicode is not supported
+ Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 5188adf54b887..064f7de971919 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1276,3 +1276,10 @@ def test_out_of_range_float(self):
original.to_stata(path)
tm.assertTrue('ColumnTooBig' in cm.exception)
tm.assertTrue('infinity' in cm.exception)
+
+ # GH15723, validate encoding
+ def test_invalid_encoding(self):
+ original = self.read_csv(self.csv3)
+ with tm.assertRaises(ValueError):
+ with tm.ensure_clean() as path:
+ original.to_stata(path, encoding='utf-8')
| Ensure StataReader and StataWriter have the correct encoding.
Standardized default encoding to 'latin-1'
closes #15723
- [x] closes #15723
- [x] tests added / passed
- [x] passes ``git diff upstream/master | flake8 --diff``
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/15768 | 2017-03-21T17:14:27Z | 2017-03-21T21:54:22Z | null | 2018-04-22T21:12:04Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.