title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
CLN: cleanup up platform / python version checks. fix GB10151 | diff --git a/bench/bench_sparse.py b/bench/bench_sparse.py
index 7dc2db05cfe20..0aa705118d970 100644
--- a/bench/bench_sparse.py
+++ b/bench/bench_sparse.py
@@ -1,4 +1,3 @@
-import sys
import numpy as np
from pandas import *
@@ -30,7 +29,7 @@
s1_dense = s1.to_dense()
s2_dense = s2.to_dense()
-if 'linux' in sys.platform:
+if compat.is_platform_linux():
pth = '/home/wesm/code/pandas/example'
else:
pth = '/Users/wesm/code/pandas/example'
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 8f82e2eaea711..61bc40e34b5a3 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -163,9 +163,7 @@ def test_floor_division(self):
self.check_floor_division(lhs, '//', rhs)
def test_pow(self):
- import platform
- if platform.system() == 'Windows':
- raise nose.SkipTest('not testing pow on Windows')
+ tm._skip_if_windows()
# odd failure on win32 platform, so skip
for lhs, rhs in product(self.lhses, self.rhses):
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
index fcd5515419537..303ecdbf0ec6e 100644
--- a/pandas/io/tests/test_json/test_ujson.py
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -36,7 +36,7 @@ def _skip_if_python_ver(skip_major, skip_minor=None):
raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
-json_unicode = (json.dumps if sys.version_info[0] >= 3
+json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
class UltraJSONTests(TestCase):
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 3dae9f383db8f..e6aee76df4e74 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -163,7 +163,7 @@ def test_empty_string(self):
def test_read_csv(self):
if not compat.PY3:
- if 'win' in sys.platform:
+ if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index e9c39127d1032..ace3e4c5e18dd 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -2067,9 +2067,8 @@ def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read back in a new timezone
- import platform
- if platform.system() == "Windows":
- raise nose.SkipTest("timezone setting not supported on windows")
+ # timezone setting not supported on windows
+ tm._skip_if_windows()
import datetime
import time
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 9ac7083289461..f7121fa54a5b1 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -2,7 +2,6 @@
import collections
from datetime import datetime
import re
-import sys
import nose
from nose.tools import assert_equal
@@ -447,7 +446,7 @@ def __hash__(self):
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
- if sys.version_info[0] == 2:
+ if compat.PY2:
class OldStyleClass():
pass
c = OldStyleClass()
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index b38dba5008905..b733cacc01e05 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -990,7 +990,7 @@ def test_to_html_truncate(self):
</table>
<p>20 rows × 20 columns</p>
</div>'''.format(div_style)
- if sys.version_info[0] < 3:
+ if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
@@ -1106,7 +1106,7 @@ def test_to_html_truncate_multi_index(self):
</table>
<p>8 rows × 8 columns</p>
</div>'''.format(div_style)
- if sys.version_info[0] < 3:
+ if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
@@ -1216,7 +1216,7 @@ def test_to_html_truncate_multi_index_sparse_off(self):
</table>
<p>8 rows × 8 columns</p>
</div>'''.format(div_style)
- if sys.version_info[0] < 3:
+ if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index a69db34bdd2df..32f20337c109a 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1022,7 +1022,7 @@ def test_format(self):
# windows has different precision on datetime.datetime.now (it doesn't include us
# since the default for Timestamp shows these but Index formating does not
# we are skipping
- if not is_platform_windows:
+ if not is_platform_windows():
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
diff --git a/pandas/tools/tests/test_util.py b/pandas/tools/tests/test_util.py
index 9480ea7ee5bf8..1adf47e946a96 100644
--- a/pandas/tools/tests/test_util.py
+++ b/pandas/tools/tests/test_util.py
@@ -43,8 +43,7 @@ def setUpClass(cls):
if not cls.locales:
raise nose.SkipTest("No locales found")
- if os.name == 'nt': # we're on windows
- raise nose.SkipTest("Running on Windows")
+ tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index ee30b188bce4d..86e0f7162c545 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -1,7 +1,6 @@
from datetime import datetime
from pandas.compat import range
import nose
-import sys
import numpy as np
from pandas.core.index import Index
@@ -16,11 +15,6 @@
import pandas.util.testing as tm
-def _skip_if_windows_python_3():
- if sys.version_info > (3,) and sys.platform == 'win32':
- raise nose.SkipTest("not used on python 3/win32")
-
-
def eq_gen_range(kwargs, expected):
rng = generate_range(**kwargs)
assert(np.array_equal(list(rng), expected))
@@ -459,7 +453,7 @@ def test_month_range_union_tz_pytz(self):
early_dr.union(late_dr)
def test_month_range_union_tz_dateutil(self):
- _skip_if_windows_python_3()
+ tm._skip_if_windows_python_3()
tm._skip_if_no_dateutil()
from pandas.tslib import _dateutil_gettz as timezone
tz = timezone('US/Eastern')
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 48fe84ccfb0d5..d167982b5b0bd 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -46,14 +46,6 @@ def _skip_if_has_locale():
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
-def _skip_if_windows_python_3():
- if sys.version_info > (3,) and sys.platform == 'win32':
- raise nose.SkipTest("not used on python 3/win32")
-
-def _skip_if_not_windows_python_3():
- if sys.version_info < (3,) or sys.platform != 'win32':
- raise nose.SkipTest("only run on python 3/win32")
-
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
@@ -417,7 +409,7 @@ def test_timestamp_to_datetime_explicit_pytz(self):
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_timestamp_to_datetime_explicit_dateutil(self):
- _skip_if_windows_python_3()
+ tm._skip_if_windows_python_3()
tm._skip_if_no_dateutil()
from pandas.tslib import _dateutil_gettz as gettz
rng = date_range('20090415', '20090519',
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index c5107046a3f1c..aa655efc08ca5 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -1,7 +1,5 @@
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta, tzinfo, date
-import sys
-import os
import nose
import numpy as np
@@ -837,8 +835,8 @@ def localize(self, tz, x):
return x.replace(tzinfo=tz)
def test_utc_with_system_utc(self):
- if sys.platform == 'win32':
- raise nose.SkipTest('Skipped on win32 due to dateutil bug.')
+ # Skipped on win32 due to dateutil bug
+ tm._skip_if_windows()
from pandas.tslib import maybe_get_tz
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 8bb6741f73475..3096bddcf6247 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -28,7 +28,7 @@
import pandas.compat as compat
from pandas.compat import(
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
- raise_with_traceback, httplib
+ raise_with_traceback, httplib, is_platform_windows
)
from pandas.computation import expressions as expr
@@ -223,6 +223,17 @@ def _skip_if_no_dateutil():
raise nose.SkipTest("dateutil not installed")
+def _skip_if_windows_python_3():
+ if compat.PY3 and is_platform_windows():
+ import nose
+ raise nose.SkipTest("not used on python 3/win32")
+
+def _skip_if_windows():
+ if is_platform_windows():
+ import nose
+ raise nose.SkipTest("Running on Windows")
+
+
def _skip_if_no_cday():
from pandas.core.datetools import cday
if cday is None:
| Notes:
- I did not use `is_platform_xxx` on `pandas/util/clipboard.py` because this module is a fork. So I guess better not deviate from the original, right?
- I added the helper `_skip_if_windows` function in `pandas/util/testing.py`, I added an underscore before `skip` to maintain consistency with other `skip` functions (although none of them should have this underscore...)
> I think we could use an update on the wiki on the same
I didnt find any references on the wiki or docs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10542 | 2015-07-10T20:04:16Z | 2015-07-11T16:47:31Z | 2015-07-11T16:47:31Z | 2015-07-11T20:07:32Z |
ENH: Update exception message to resolve #10515 | diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index b28e0587264d4..5baef2e4f0225 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -1074,7 +1074,8 @@ cdef class TextReader:
na_filter, na_hashset)
if user_dtype and na_count is not None:
if na_count > 0:
- raise Exception('Integer column has NA values')
+ raise Exception("Integer column has NA values in "
+ "column {column}".format(column=i))
if result is not None and dtype[1:] != 'i8':
result = result.astype(dtype)
| Made a quick change to the `Exception` raised in `_convert_with_dtype` per the issue raised in #10515.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10541 | 2015-07-10T02:57:13Z | 2015-07-10T12:57:06Z | 2015-07-10T12:57:06Z | 2015-07-10T12:57:14Z |
BUG: get_dummies not returning SparseDataFrame | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 206c5e2e22711..82fb2b4fe45e7 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -377,16 +377,12 @@ Bug Fixes
- Bug in ``Series.plot(kind='hist')`` Y Label not informative (:issue:`10485`)
-
-
-
-
-
-
-
-
- Bug in operator equal on Index not being consistent with Series (:issue:`9947`)
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
+
+
+- Bug in `get_dummies` with `sparse=True` not returning SparseDataFrame (:issue:`10531`)
+
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index fd786fa30f842..99767ab199843 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -957,13 +957,15 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
- Whether the returned DataFrame should be sparse or not.
+ Whether the dummy columns should be sparse or not. Returns
+ SparseDataFrame if `data` is a Series or if all columns are included.
+ Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.16.1
Returns
-------
- dummies : DataFrame
+ dummies : DataFrame or SparseDataFrame
Examples
--------
@@ -1042,8 +1044,11 @@ def check_len(item, name):
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
- result = data.drop(columns_to_encode, axis=1)
- with_dummies = [result]
+ if set(columns_to_encode) == set(data.columns):
+ with_dummies = []
+ else:
+ with_dummies = [data.drop(columns_to_encode, axis=1)]
+
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py
index 346c9e2598985..f1670ce885d6c 100644
--- a/pandas/tests/test_reshape.py
+++ b/pandas/tests/test_reshape.py
@@ -8,6 +8,7 @@
import nose
from pandas import DataFrame, Series
+from pandas.core.sparse import SparseDataFrame
import pandas as pd
from numpy import nan
@@ -171,6 +172,33 @@ def test_basic(self):
expected.index = list('ABC')
assert_frame_equal(get_dummies(s_series_index, sparse=self.sparse), expected)
+ def test_basic_types(self):
+ # GH 10531
+ s_list = list('abc')
+ s_series = Series(s_list)
+ s_df = DataFrame({'a': [0, 1, 0, 1, 2],
+ 'b': ['A', 'A', 'B', 'C', 'C'],
+ 'c': [2, 3, 3, 3, 2]})
+
+ if not self.sparse:
+ exp_df_type = DataFrame
+ exp_blk_type = pd.core.internals.FloatBlock
+ else:
+ exp_df_type = SparseDataFrame
+ exp_blk_type = pd.core.internals.SparseBlock
+
+ self.assertEqual(type(get_dummies(s_list, sparse=self.sparse)), exp_df_type)
+ self.assertEqual(type(get_dummies(s_series, sparse=self.sparse)), exp_df_type)
+
+ r = get_dummies(s_df, sparse=self.sparse, columns=s_df.columns)
+ self.assertEqual(type(r), exp_df_type)
+
+ r = get_dummies(s_df, sparse=self.sparse, columns=['a'])
+ self.assertEqual(type(r[['a_0']]._data.blocks[0]), exp_blk_type)
+ self.assertEqual(type(r[['a_1']]._data.blocks[0]), exp_blk_type)
+ self.assertEqual(type(r[['a_2']]._data.blocks[0]), exp_blk_type)
+
+
def test_just_na(self):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
| Fixes #10531 .
| https://api.github.com/repos/pandas-dev/pandas/pulls/10535 | 2015-07-09T18:14:28Z | 2015-07-21T10:34:24Z | null | 2015-07-21T10:34:24Z |
BUG : read_csv() twice decodes stream on URL file #10424 | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 65cfdff1df14b..75f7a542525f5 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -94,7 +94,7 @@ def maybe_read_encoded_stream(reader, encoding=None):
else:
errors = 'replace'
encoding = 'utf-8'
- reader = StringIO(reader.read().decode(encoding, errors))
+ reader = BytesIO(reader.read())
else:
encoding = None
return reader, encoding
| As described in #10424 , reading non-utf-8 csv files from URL leads to decoding problems, i.e. a decoding may first be made in io.common.get_filepath_or_buffer() when file is URL.
Modification done makes this function read stream from URL without decoding (this is done later, at the same place as for local files).
It's also used by io.common.read_stata(), io.common.read_json() and io.common.read_msgpack(). Similar problems reading stata and msgpack URL files may also be solved using this modification.
Thanks for reviewing.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10529 | 2015-07-08T11:53:41Z | 2015-10-25T15:29:12Z | null | 2022-10-13T00:16:41Z |
BUG: GH9618 in read_msgpack where DataFrame has duplicate column names | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 5bd0f46dd0b18..ddd97c8d1b199 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -384,4 +384,6 @@ Bug Fixes
- Bug in operator equal on Index not being consistent with Series (:issue:`9947`)
-- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
\ No newline at end of file
+- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
+
+- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index f5e000449f232..847a7c4f90216 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -357,6 +357,7 @@ def encode(obj):
'klass': obj.__class__.__name__,
'axes': data.axes,
'blocks': [{'items': data.items.take(b.mgr_locs),
+ 'locs': b.mgr_locs.as_array,
'values': convert(b.values),
'shape': b.values.shape,
'dtype': b.dtype.num,
@@ -485,9 +486,15 @@ def decode(obj):
def create_block(b):
values = unconvert(b['values'], dtype_for(b['dtype']),
b['compress']).reshape(b['shape'])
+
+ # locs handles duplicate column names, and should be used instead of items; see GH 9618
+ if 'locs' in b:
+ placement = b['locs']
+ else:
+ placement = axes[0].get_indexer(b['items'])
return make_block(values=values,
klass=getattr(internals, b['klass']),
- placement=axes[0].get_indexer(b['items']))
+ placement=placement)
blocks = [create_block(b) for b in obj['blocks']]
return globals()[obj['klass']](BlockManager(blocks, axes))
diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_linux_2.7.10.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_linux_2.7.10.msgpack
new file mode 100644
index 0000000000000..6bf1b9b9afaaa
Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_linux_2.7.10.msgpack differ
diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_linux_3.4.3.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_linux_3.4.3.msgpack
new file mode 100644
index 0000000000000..6607570797846
Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_linux_3.4.3.msgpack differ
diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_linux_2.7.10.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_linux_2.7.10.pickle
new file mode 100644
index 0000000000000..60101c2f1e95e
Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_linux_2.7.10.pickle differ
diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_linux_3.4.3.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_linux_3.4.3.pickle
new file mode 100644
index 0000000000000..6d5451f96e20d
Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_linux_3.4.3.pickle differ
diff --git a/pandas/io/tests/generate_legacy_pickles.py b/pandas/io/tests/generate_legacy_pickles.py
deleted file mode 100644
index 2d93ecf38a76d..0000000000000
--- a/pandas/io/tests/generate_legacy_pickles.py
+++ /dev/null
@@ -1,167 +0,0 @@
-""" self-contained to write legacy pickle files """
-from __future__ import print_function
-
-
-def _create_sp_series():
-
- import numpy as np
- from pandas import SparseSeries
-
- nan = np.nan
-
- # nan-based
- arr = np.arange(15, dtype=np.float64)
- arr[7:12] = nan
- arr[-1:] = nan
-
- bseries = SparseSeries(arr, kind='block')
- bseries.name = 'bseries'
- return bseries
-
-def _create_sp_tsseries():
-
- import numpy as np
- from pandas import bdate_range, SparseTimeSeries
-
- nan = np.nan
-
- # nan-based
- arr = np.arange(15, dtype=np.float64)
- arr[7:12] = nan
- arr[-1:] = nan
-
- date_index = bdate_range('1/1/2011', periods=len(arr))
- bseries = SparseTimeSeries(arr, index=date_index, kind='block')
- bseries.name = 'btsseries'
- return bseries
-
-def _create_sp_frame():
- import numpy as np
- from pandas import bdate_range, SparseDataFrame
-
- nan = np.nan
-
- data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
- 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
- 'C': np.arange(10).astype(np.int64),
- 'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
-
- dates = bdate_range('1/1/2011', periods=10)
- return SparseDataFrame(data, index=dates)
-
-def create_data():
- """ create the pickle data """
-
- from distutils.version import LooseVersion
- import numpy as np
- import pandas
- from pandas import (Series,TimeSeries,DataFrame,Panel,
- SparseSeries,SparseTimeSeries,SparseDataFrame,SparsePanel,
- Index,MultiIndex,PeriodIndex,
- date_range,period_range,bdate_range,Timestamp,Categorical)
- nan = np.nan
-
- data = {
- 'A': [0., 1., 2., 3., np.nan],
- 'B': [0, 1, 0, 1, 0],
- 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
- 'D': date_range('1/1/2009', periods=5),
- 'E' : [0., 1, Timestamp('20100101'),'foo',2.],
- }
-
- index = dict(int = Index(np.arange(10)),
- date = date_range('20130101',periods=10),
- period = period_range('2013-01-01', freq='M', periods=10))
-
- mi = dict(reg2 = MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']])),
- names=['first', 'second']))
- series = dict(float = Series(data['A']),
- int = Series(data['B']),
- mixed = Series(data['E']),
- ts = TimeSeries(np.arange(10).astype(np.int64),index=date_range('20130101',periods=10)),
- mi = Series(np.arange(5).astype(np.float64),index=MultiIndex.from_tuples(tuple(zip(*[[1,1,2,2,2],
- [3,4,3,4,5]])),
- names=['one','two'])),
- dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']),
- cat=Series(Categorical(['foo', 'bar', 'baz'])))
-
- frame = dict(float = DataFrame(dict(A = series['float'], B = series['float'] + 1)),
- int = DataFrame(dict(A = series['int'] , B = series['int'] + 1)),
- mixed = DataFrame(dict([ (k,data[k]) for k in ['A','B','C','D']])),
- mi = DataFrame(dict(A = np.arange(5).astype(np.float64), B = np.arange(5).astype(np.int64)),
- index=MultiIndex.from_tuples(tuple(zip(*[['bar','bar','baz','baz','baz'],
- ['one','two','one','two','three']])),
- names=['first','second'])),
- dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
- columns=['A', 'B', 'A']),
- cat_onecol=DataFrame(dict(A=Categorical(['foo', 'bar']))),
- cat_and_float=DataFrame(dict(A=Categorical(['foo', 'bar', 'baz']),
- B=np.arange(3).astype(np.int64))),
- )
- panel = dict(float = Panel(dict(ItemA = frame['float'], ItemB = frame['float']+1)),
- dup = Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64),
- items=['A', 'B', 'A']))
-
- if LooseVersion(pandas.__version__) >= '0.14.1':
- # Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
- # panels if their columns/items were non-unique.
- mixed_dup_df = DataFrame(data)
- mixed_dup_df.columns = list("ABCDA")
-
- mixed_dup_panel = Panel(dict(ItemA=frame['float'], ItemB=frame['int']))
- mixed_dup_panel.items = ['ItemA', 'ItemA']
-
- frame['mixed_dup'] = mixed_dup_df
- panel['mixed_dup'] = mixed_dup_panel
-
- return dict( series = series,
- frame = frame,
- panel = panel,
- index = index,
- mi = mi,
- sp_series = dict(float = _create_sp_series(),
- ts = _create_sp_tsseries()),
- sp_frame = dict(float = _create_sp_frame())
- )
-
-def write_legacy_pickles():
-
- # force our cwd to be the first searched
- import sys
- sys.path.insert(0,'.')
-
- import os, os.path
- import numpy as np
- import pandas
- import pandas.util.testing as tm
- import platform as pl
-
- # make sure we are < 0.13 compat (in py3)
- try:
- from pandas.compat import zip, cPickle as pickle
- except:
- import pickle
-
- version = pandas.__version__
- if len(sys.argv) != 2:
- exit("Specify output directory: generate_legacy_pickles.py <output_dir>")
-
- output_dir = str(sys.argv[1])
-
- print("This script generates a pickle file for the current arch, system, and python version")
- print(" pandas version: {0}".format(version))
- print(" output dir : {0}".format(output_dir))
-
- # construct a reasonable platform name
- f = '_'.join([ str(version), str(pl.machine()), str(pl.system().lower()), str(pl.python_version()) ])
- pth = '{0}.pickle'.format(f)
-
- fh = open(os.path.join(output_dir,pth),'wb')
- pickle.dump(create_data(),fh,pickle.HIGHEST_PROTOCOL)
- fh.close()
-
- print("created pickle file: %s" % pth)
-
-if __name__ == '__main__':
- write_legacy_pickles()
diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py
new file mode 100644
index 0000000000000..e7cc89fcc0b61
--- /dev/null
+++ b/pandas/io/tests/generate_legacy_storage_files.py
@@ -0,0 +1,205 @@
+""" self-contained to write legacy storage (pickle/msgpack) files """
+from __future__ import print_function
+from distutils.version import LooseVersion
+from pandas import (Series, TimeSeries, DataFrame, Panel,
+ SparseSeries, SparseTimeSeries, SparseDataFrame, SparsePanel,
+ Index, MultiIndex, PeriodIndex, bdate_range, to_msgpack,
+ date_range, period_range, bdate_range, Timestamp, Categorical)
+import os
+import sys
+import numpy as np
+import pandas
+import pandas.util.testing as tm
+import platform as pl
+
+
+def _create_sp_series():
+ nan = np.nan
+
+ # nan-based
+ arr = np.arange(15, dtype=np.float64)
+ arr[7:12] = nan
+ arr[-1:] = nan
+
+ bseries = SparseSeries(arr, kind='block')
+ bseries.name = 'bseries'
+ return bseries
+
+
+def _create_sp_tsseries():
+ nan = np.nan
+
+ # nan-based
+ arr = np.arange(15, dtype=np.float64)
+ arr[7:12] = nan
+ arr[-1:] = nan
+
+ date_index = bdate_range('1/1/2011', periods=len(arr))
+ bseries = SparseTimeSeries(arr, index=date_index, kind='block')
+ bseries.name = 'btsseries'
+ return bseries
+
+
+def _create_sp_frame():
+ nan = np.nan
+
+ data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
+ 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
+ 'C': np.arange(10).astype(np.int64),
+ 'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
+
+ dates = bdate_range('1/1/2011', periods=10)
+ return SparseDataFrame(data, index=dates)
+
+
+def create_data():
+ """ create the pickle/msgpack data """
+
+ data = {
+ 'A': [0., 1., 2., 3., np.nan],
+ 'B': [0, 1, 0, 1, 0],
+ 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
+ 'D': date_range('1/1/2009', periods=5),
+ 'E': [0., 1, Timestamp('20100101'), 'foo', 2.]
+ }
+
+ index = dict(int=Index(np.arange(10)),
+ date=date_range('20130101', periods=10),
+ period=period_range('2013-01-01', freq='M', periods=10))
+
+ mi = dict(reg2=MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
+ ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']])),
+ names=['first', 'second']))
+ series = dict(float=Series(data['A']),
+ int=Series(data['B']),
+ mixed=Series(data['E']),
+ ts=TimeSeries(np.arange(10).astype(np.int64), index=date_range('20130101',periods=10)),
+ mi=Series(np.arange(5).astype(np.float64),
+ index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
+ names=['one', 'two'])),
+ dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']),
+ cat=Series(Categorical(['foo', 'bar', 'baz'])))
+
+ mixed_dup_df = DataFrame(data)
+ mixed_dup_df.columns = list("ABCDA")
+ frame = dict(float=DataFrame(dict(A=series['float'], B=series['float'] + 1)),
+ int=DataFrame(dict(A=series['int'], B=series['int'] + 1)),
+ mixed=DataFrame(dict([(k, data[k]) for k in ['A', 'B', 'C', 'D']])),
+ mi=DataFrame(dict(A=np.arange(5).astype(np.float64), B=np.arange(5).astype(np.int64)),
+ index=MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'baz'],
+ ['one', 'two', 'one', 'two', 'three']])),
+ names=['first', 'second'])),
+ dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
+ columns=['A', 'B', 'A']),
+ cat_onecol=DataFrame(dict(A=Categorical(['foo', 'bar']))),
+ cat_and_float=DataFrame(dict(A=Categorical(['foo', 'bar', 'baz']),
+ B=np.arange(3).astype(np.int64))),
+ mixed_dup=mixed_dup_df)
+
+ mixed_dup_panel = Panel(dict(ItemA=frame['float'], ItemB=frame['int']))
+ mixed_dup_panel.items = ['ItemA', 'ItemA']
+ panel = dict(float=Panel(dict(ItemA=frame['float'], ItemB=frame['float'] + 1)),
+ dup=Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64),
+ items=['A', 'B', 'A']),
+ mixed_dup=mixed_dup_panel)
+
+ return dict(series=series,
+ frame=frame,
+ panel=panel,
+ index=index,
+ mi=mi,
+ sp_series=dict(float=_create_sp_series(),
+ ts=_create_sp_tsseries()),
+ sp_frame=dict(float=_create_sp_frame()))
+
+
+def create_pickle_data():
+ data = create_data()
+
+ # Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
+ # panels if their columns/items were non-unique.
+ if LooseVersion(pandas.__version__) < '0.14.1':
+ del data['frame']['mixed_dup']
+ del data['panel']['mixed_dup']
+ return data
+
+
+def create_msgpack_data():
+ data = create_data()
+ if LooseVersion(pandas.__version__) < '0.17.0':
+ del data['frame']['mixed_dup']
+ del data['panel']['mixed_dup']
+ del data['frame']['dup']
+ del data['panel']['dup']
+ # Not supported
+ del data['sp_series']
+ del data['sp_frame']
+ del data['series']['cat']
+ del data['frame']['cat_onecol']
+ del data['frame']['cat_and_float']
+ return data
+
+
+def platform_name():
+ return '_'.join([str(pandas.__version__), str(pl.machine()), str(pl.system().lower()), str(pl.python_version())])
+
+
+def write_legacy_pickles(output_dir):
+
+ # make sure we are < 0.13 compat (in py3)
+ try:
+ from pandas.compat import zip, cPickle as pickle
+ except:
+ import pickle
+
+ version = pandas.__version__
+
+ print("This script generates a storage file for the current arch, system, and python version")
+ print(" pandas version: {0}".format(version))
+ print(" output dir : {0}".format(output_dir))
+ print(" storage format: pickle")
+
+ pth = '{0}.pickle'.format(platform_name())
+
+ fh = open(os.path.join(output_dir, pth), 'wb')
+ pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
+ fh.close()
+
+ print("created pickle file: %s" % pth)
+
+
+def write_legacy_msgpack(output_dir):
+
+ version = pandas.__version__
+
+ print("This script generates a storage file for the current arch, system, and python version")
+ print(" pandas version: {0}".format(version))
+ print(" output dir : {0}".format(output_dir))
+ print(" storage format: msgpack")
+
+ pth = '{0}.msgpack'.format(platform_name())
+ to_msgpack(os.path.join(output_dir, pth), create_msgpack_data())
+
+ print("created msgpack file: %s" % pth)
+
+
+def write_legacy_file():
+ # force our cwd to be the first searched
+ sys.path.insert(0, '.')
+
+ if len(sys.argv) != 3:
+ exit("Specify output directory and storage type: generate_legacy_storage_files.py <output_dir> <storage_type>")
+
+ output_dir = str(sys.argv[1])
+ storage_type = str(sys.argv[2])
+
+ if storage_type == 'pickle':
+ write_legacy_pickles(output_dir=output_dir)
+ elif storage_type == 'msgpack':
+ write_legacy_msgpack(output_dir=output_dir)
+ else:
+ exit("storage_type must be one of {'pickle', 'msgpack'}")
+
+
+if __name__ == '__main__':
+ write_legacy_file()
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 9f1fd41e90413..33b7cc79083db 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -1,5 +1,6 @@
import nose
+import os
import datetime
import numpy as np
import sys
@@ -11,7 +12,7 @@
date_range, period_range, Index, SparseSeries, SparseDataFrame,
SparsePanel)
import pandas.util.testing as tm
-from pandas.util.testing import ensure_clean
+from pandas.util.testing import ensure_clean, assert_index_equal
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas.tests.test_panel import assert_panel_equal
@@ -39,6 +40,8 @@ def check_arbitrary(a, b):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
+ elif isinstance(a, Index):
+ assert_index_equal(a, b)
else:
assert(a == b)
@@ -396,6 +399,24 @@ def tests_datetimeindex_freq_issue(self):
result = self.encode_decode(df)
assert_frame_equal(result, df)
+ def test_dataframe_duplicate_column_names(self):
+
+ # GH 9618
+ expected_1 = DataFrame(columns=['a', 'a'])
+ expected_2 = DataFrame(columns=[1]*100)
+ expected_2.loc[0] = np.random.randn(100)
+ expected_3 = DataFrame(columns=[1, 1])
+ expected_3.loc[0] = ['abc', np.nan]
+
+ result_1 = self.encode_decode(expected_1)
+ result_2 = self.encode_decode(expected_2)
+ result_3 = self.encode_decode(expected_3)
+
+ assert_frame_equal(result_1, expected_1)
+ assert_frame_equal(result_2, expected_2)
+ assert_frame_equal(result_3, expected_3)
+
+
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
@@ -496,6 +517,58 @@ def test_compression_blosc(self):
assert_frame_equal(self.frame[k], i_rec[k])
+class TestMsgpack():
+ """
+ How to add msgpack tests:
+
+ 1. Install pandas version intended to output the msgpack.
+
+ 2. Execute "generate_legacy_storage_files.py" to create the msgpack.
+ $ python generate_legacy_storage_files.py <output_dir> msgpack
+
+ 3. Move the created pickle to "data/legacy_msgpack/<version>" directory.
+
+ NOTE: TestMsgpack can't be a subclass of tm.Testcase to use test generator.
+ http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
+ """
+ def setUp(self):
+ from pandas.io.tests.generate_legacy_storage_files import create_msgpack_data
+ self.data = create_msgpack_data()
+ self.path = u('__%s__.msgpack' % tm.rands(10))
+
+ def compare(self, vf):
+ data = read_msgpack(vf)
+ for typ, dv in data.items():
+ for dt, result in dv.items():
+ try:
+ expected = self.data[typ][dt]
+ except KeyError:
+ continue
+ check_arbitrary(result, expected)
+
+ return data
+
+ def read_msgpacks(self, version):
+
+ pth = tm.get_data_path('legacy_msgpack/{0}'.format(str(version)))
+ n = 0
+ for f in os.listdir(pth):
+ vf = os.path.join(pth, f)
+ self.compare(vf)
+ n += 1
+ assert n > 0, 'Msgpack files are not tested'
+
+ def test_msgpack(self):
+ msgpack_path = tm.get_data_path('legacy_msgpack')
+ n = 0
+ for v in os.listdir(msgpack_path):
+ pth = os.path.join(msgpack_path, v)
+ if os.path.isdir(pth):
+ yield self.read_msgpacks, v
+ n += 1
+ assert n > 0, 'Msgpack files are not tested'
+
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py
index d1396463f3b23..e691fac215002 100644
--- a/pandas/io/tests/test_pickle.py
+++ b/pandas/io/tests/test_pickle.py
@@ -24,8 +24,8 @@ class TestPickle():
1. Install pandas version intended to output the pickle.
- 2. Execute "generate_legacy_pkcles.py" to create the pickle.
- $ python generate_legacy_pickles.py <version> <output_dir>
+ 2. Execute "generate_legacy_storage_files.py" to create the pickle.
+ $ python generate_legacy_storage_files.py <output_dir> pickle
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
@@ -35,8 +35,8 @@ class TestPickle():
_multiprocess_can_split_ = True
def setUp(self):
- from pandas.io.tests.generate_legacy_pickles import create_data
- self.data = create_data()
+ from pandas.io.tests.generate_legacy_storage_files import create_pickle_data
+ self.data = create_pickle_data()
self.path = u('__%s__.pickle' % tm.rands(10))
def compare_element(self, typ, result, expected):
diff --git a/setup.py b/setup.py
index f20b0ac0a5fb5..30c5d1052d9b3 100755
--- a/setup.py
+++ b/setup.py
@@ -537,6 +537,7 @@ def pxd(name):
],
package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
'tests/data/legacy_pickle/*/*.pickle',
+ 'tests/data/legacy_msgpack/*/*.msgpack',
'tests/data/*.csv*',
'tests/data/*.dta',
'tests/data/*.txt',
| To close #9618
Note I modified `encode`, so it's not backward compatible.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10527 | 2015-07-08T04:04:17Z | 2015-07-18T13:30:43Z | 2015-07-18T13:30:43Z | 2015-07-18T13:30:47Z |
Fix a typo 'does' -> 'do' | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 0f3a4adb0b33a..4c94ab66d3de2 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1055,7 +1055,7 @@ def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
- raise TypeError("Indexes does not support mutable operations")
+ raise TypeError("Indexes do not support mutable operations")
def __getitem__(self, key):
"""
| This came up during the SciPy 2015 tutorial.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10525 | 2015-07-07T18:38:34Z | 2015-07-07T19:18:50Z | 2015-07-07T19:18:50Z | 2015-07-08T04:44:49Z |
Fix and tests for issue #10154 inconsistent behavior with invalid dates | diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index a078aba2269bb..c95281d49586a 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -11,7 +11,7 @@
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
- Int64Index, to_datetime, bdate_range, Float64Index, TimedeltaIndex)
+ Int64Index, to_datetime, bdate_range, Float64Index, TimedeltaIndex, NaT)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
@@ -4461,6 +4461,27 @@ def test_second(self):
self.assertIsInstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
+class TestDaysInMonth(tm.TestCase):
+ # tests for issue #10154
+ # Not all tests pass there are other issues, see comments on lines
+ def test_day_not_in_month_coerce_true_NaT(self):
+ self.assertTrue(isnull(to_datetime('2015-02-29', coerce=True)))
+ self.assertTrue(isnull(to_datetime('2015-02-29', format="%Y-%m-%d", coerce=True))) # this test fails
+ self.assertTrue(isnull(to_datetime('2015-02-32', format="%Y-%m-%d", coerce=True)))
+ self.assertTrue(isnull(to_datetime('2015-04-31', format="%Y-%m-%d", coerce=True))) # this test fails
+
+ def test_day_not_in_month_coerce_false_raise(self):
+ self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise', coerce=False)
+ self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise', format="%Y-%m-%d", coerce=False)
+ self.assertRaises(ValueError, to_datetime, '2015-02-32', errors='raise', format="%Y-%m-%d", coerce=False)
+ self.assertRaises(ValueError, to_datetime, '2015-04-31', errors='raise', format="%Y-%m-%d", coerce=False)
+
+ def test_day_not_in_month_coerce_false_ignore(self):
+ self.assertEqual(to_datetime('2015-02-29', errors='ignore', coerce=False), '2015-02-29')
+ self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='ignore', format="%Y-%m-%d", coerce=False)
+ self.assertRaises(ValueError, to_datetime, '2015-02-32', errors='ignore', format="%Y-%m-%d", coerce=False)
+ self.assertRaises(ValueError, to_datetime, '2015-04-31', errors='ignore', format="%Y-%m-%d", coerce=False)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index b3a6059db384f..27cd5e89220a9 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -2760,17 +2760,23 @@ def array_strptime(ndarray[object] values, object fmt, bint exact=True, bint coe
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the wk
# calculation.
- if julian == -1:
- # Need to add 1 to result since first day of the year is 1, not 0.
- julian = datetime_date(year, month, day).toordinal() - \
- datetime_date(year, 1, 1).toordinal() + 1
- else: # Assume that if they bothered to include Julian day it will
- # be accurate.
- datetime_result = datetime_date.fromordinal(
- (julian - 1) + datetime_date(year, 1, 1).toordinal())
- year = datetime_result.year
- month = datetime_result.month
- day = datetime_result.day
+ try:
+ if julian == -1:
+ # Need to add 1 to result since first day of the year is 1, not 0.
+ julian = datetime_date(year, month, day).toordinal() - \
+ datetime_date(year, 1, 1).toordinal() + 1
+ else: # Assume that if they bothered to include Julian day it will
+ # be accurate.
+ datetime_result = datetime_date.fromordinal(
+ (julian - 1) + datetime_date(year, 1, 1).toordinal())
+ year = datetime_result.year
+ month = datetime_result.month
+ day = datetime_result.day
+ except ValueError:
+ if coerce:
+ iresult[i] = iNaT
+ continue
+ raise
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
| closes #10154
This addresses the original issue and another discovered with the test cases.
Please review the tests to be sure you agree with the assertions.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10520 | 2015-07-06T21:15:58Z | 2015-07-07T10:19:46Z | null | 2015-07-07T10:19:46Z |
DOC: Clarified PyTables "natural" names | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 7a4318fb02cfc..0c15c91b4690b 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3179,9 +3179,10 @@ Notes & Caveats
.. warning::
``PyTables`` will show a ``NaturalNameWarning`` if a column name
- cannot be used as an attribute selector. Generally identifiers that
- have spaces, start with numbers, or ``_``, or have ``-`` embedded are not considered
- *natural*. These types of identifiers cannot be used in a ``where`` clause
+ cannot be used as an attribute selector.
+ *Natural* identifiers contain only letters, numbers, and underscores,
+ and may not begin with a number.
+ Other identifiers cannot be used in a ``where`` clause
and are generally a bad idea.
DataTypes
| The HDF5 documentation "Notes & Caveats" defines a PyTables NaturalNameWarning vaguely, and incorrectly states that a natural name may not begin with an underscore. I clarified the definition of a natural identifier. The relevant regex is defined [here](https://github.com/PyTables/PyTables/blob/2fde39957c7b5263dd4d0b11aa797900250f16bc/tables/path.py#L39) in the PyTables source.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10518 | 2015-07-06T18:27:37Z | 2015-07-07T15:53:55Z | 2015-07-07T15:53:55Z | 2015-07-07T15:53:59Z |
Read Stata version 118 files closes #9882 | diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index db9362c5c821e..76cea11e4efd8 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -4,7 +4,7 @@
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
-an once again improved version.
+a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://statsmodels.sourceforge.net/devel/
@@ -29,6 +29,8 @@
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
+_version_error = "Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)"
+
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
@@ -827,7 +829,7 @@ def __init__(self, encoding):
self.TYPE_MAP_XML = \
dict(
[
- (32768, 'L'),
+ (32768, 'Q'), # Not really a Q, unclear how to handle byteswap
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
@@ -876,7 +878,7 @@ def __init__(self, encoding):
'l': 'i4',
'f': 'f4',
'd': 'f8',
- 'L': 'u8'
+ 'Q': 'u8'
}
# Reserved words cannot be used as variable names
@@ -949,6 +951,7 @@ def __init__(self, path_or_buf, convert_dates=True,
self._read_header()
+
def __enter__(self):
""" enter context manager """
return self
@@ -956,233 +959,316 @@ def __enter__(self):
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
-
+
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
-
+
+
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
- # format 117 or higher (XML like)
- self.path_or_buf.read(27) # stata_dta><header><release>
- self.format_version = int(self.path_or_buf.read(3))
- if self.format_version not in [117]:
- raise ValueError("Version of given Stata file is not 104, "
- "105, 108, 113 (Stata 8/9), 114 (Stata "
- "10/11), 115 (Stata 12) or 117 (Stata 13)")
- self.path_or_buf.read(21) # </release><byteorder>
- self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
- self.path_or_buf.read(15) # </byteorder><K>
- self.nvar = struct.unpack(self.byteorder + 'H',
- self.path_or_buf.read(2))[0]
- self.path_or_buf.read(7) # </K><N>
- self.nobs = struct.unpack(self.byteorder + 'I',
- self.path_or_buf.read(4))[0]
- self.path_or_buf.read(11) # </N><label>
+ self._read_new_header(first_char)
+ else:
+ self._read_old_header(first_char)
+
+ self.has_string_data = len([x for x in self.typlist
+ if type(x) is int]) > 0
+
+ # calculate size of a data record
+ self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
+
+ # remove format details from %td
+ self.fmtlist = ["%td" if x.startswith("%td") else x for x in self.fmtlist]
+
+
+ def _read_new_header(self, first_char):
+ # The first part of the header is common to 117 and 118.
+ self.path_or_buf.read(27) # stata_dta><header><release>
+ self.format_version = int(self.path_or_buf.read(3))
+ if self.format_version not in [117, 118]:
+ raise ValueError(_version_error)
+ self.path_or_buf.read(21) # </release><byteorder>
+ self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
+ self.path_or_buf.read(15) # </byteorder><K>
+ self.nvar = struct.unpack(self.byteorder + 'H',
+ self.path_or_buf.read(2))[0]
+ self.path_or_buf.read(7) # </K><N>
+
+ self.nobs = self._get_nobs()
+ self.path_or_buf.read(11) # </N><label>
+ self.data_label = self._get_data_label()
+ self.path_or_buf.read(19) # </label><timestamp>
+ self.time_stamp = self._get_time_stamp()
+ self.path_or_buf.read(26) # </timestamp></header><map>
+ self.path_or_buf.read(8) # 0x0000000000000000
+ self.path_or_buf.read(8) # position of <map>
+
+ self._seek_vartypes = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
+ self._seek_varnames = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
+ self._seek_sortlist = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
+ self._seek_formats = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
+ self._seek_value_label_names = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
+
+ # Requires version-specific treatment
+ self._seek_variable_labels = self._get_seek_variable_labels()
+
+ self.path_or_buf.read(8) # <characteristics>
+ self.data_location = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
+ self.seek_strls = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
+ self.seek_value_labels = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
+
+ self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
+
+ self.path_or_buf.seek(self._seek_varnames)
+ self.varlist = self._get_varlist()
+
+ self.path_or_buf.seek(self._seek_sortlist)
+ self.srtlist = struct.unpack(
+ self.byteorder + ('h' * (self.nvar + 1)),
+ self.path_or_buf.read(2 * (self.nvar + 1))
+ )[:-1]
+
+ self.path_or_buf.seek(self._seek_formats)
+ self.fmtlist = self._get_fmtlist()
+
+ self.path_or_buf.seek(self._seek_value_label_names)
+ self.lbllist = self._get_lbllist()
+
+ self.path_or_buf.seek(self._seek_variable_labels)
+ self.vlblist = self._get_vlblist()
+
+
+ # Get data type information, works for versions 117-118.
+ def _get_dtypes(self, seek_vartypes):
+
+ self.path_or_buf.seek(seek_vartypes)
+ raw_typlist = [struct.unpack(self.byteorder + 'H',
+ self.path_or_buf.read(2))[0]
+ for i in range(self.nvar)]
+
+ def f(typ):
+ if typ <= 2045:
+ return typ
+ try:
+ return self.TYPE_MAP_XML[typ]
+ except KeyError:
+ raise ValueError("cannot convert stata types [{0}]".
+ format(typ))
+
+ typlist = [f(x) for x in raw_typlist]
+
+ def f(typ):
+ if typ <= 2045:
+ return str(typ)
+ try:
+ return self.DTYPE_MAP_XML[typ]
+ except KeyError:
+ raise ValueError("cannot convert stata dtype [{0}]"
+ .format(typ))
+
+ dtyplist = [f(x) for x in raw_typlist]
+
+ return typlist, dtyplist
+
+
+ def _get_varlist(self):
+ if self.format_version == 117:
+ b = 33
+ elif self.format_version == 118:
+ b = 129
+
+ return [self._null_terminate(self.path_or_buf.read(b))
+ for i in range(self.nvar)]
+
+
+ # Returns the format list
+ def _get_fmtlist(self):
+ if self.format_version == 118:
+ b = 57
+ elif self.format_version > 113:
+ b = 49
+ elif self.format_version > 104:
+ b = 12
+ else:
+ b = 7
+
+ return [self._null_terminate(self.path_or_buf.read(b))
+ for i in range(self.nvar)]
+
+
+ # Returns the label list
+ def _get_lbllist(self):
+ if self.format_version >= 118:
+ b = 129
+ elif self.format_version > 108:
+ b = 33
+ else:
+ b = 9
+ return [self._null_terminate(self.path_or_buf.read(b))
+ for i in range(self.nvar)]
+
+
+ def _get_vlblist(self):
+ if self.format_version == 118:
+ vlblist = [self._decode(self.path_or_buf.read(321))
+ for i in range(self.nvar)]
+ elif self.format_version > 105:
+ vlblist = [self._null_terminate(self.path_or_buf.read(81))
+ for i in range(self.nvar)]
+ else:
+ vlblist = [self._null_terminate(self.path_or_buf.read(32))
+ for i in range(self.nvar)]
+ return vlblist
+
+
+ def _get_nobs(self):
+ if self.format_version == 118:
+ return struct.unpack(self.byteorder + 'Q',
+ self.path_or_buf.read(8))[0]
+ else:
+ return struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0]
+
+
+ def _get_data_label(self):
+ if self.format_version == 118:
+ strlen = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0]
+ return self._decode(self.path_or_buf.read(strlen))
+ elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
- self.data_label = self._null_terminate(self.path_or_buf.read(strlen))
- self.path_or_buf.read(19) # </label><timestamp>
+ return self._null_terminate(self.path_or_buf.read(strlen))
+ elif self.format_version > 105:
+ return self._null_terminate(self.path_or_buf.read(81))
+ else:
+ return self._null_terminate(self.path_or_buf.read(32))
+
+
+ def _get_time_stamp(self):
+ if self.format_version == 118:
+ strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
+ return self.path_or_buf.read(strlen).decode("utf-8")
+ elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
- self.time_stamp = self._null_terminate(self.path_or_buf.read(strlen))
- self.path_or_buf.read(26) # </timestamp></header><map>
- self.path_or_buf.read(8) # 0x0000000000000000
- self.path_or_buf.read(8) # position of <map>
- seek_vartypes = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
- seek_varnames = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
- seek_sortlist = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
- seek_formats = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
- seek_value_label_names = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
+ return self._null_terminate(self.path_or_buf.read(strlen))
+ elif self.format_version > 104:
+ return self._null_terminate(self.path_or_buf.read(18))
+ else:
+ raise ValueError()
+
+
+ def _get_seek_variable_labels(self):
+ if self.format_version == 117:
+ self.path_or_buf.read(8) # <variable_lables>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
- self.path_or_buf.read(8) # <variable_lables>, throw away
- seek_variable_labels = seek_value_label_names + (33*self.nvar) + 20 + 17
- # Below is the original, correct code (per Stata sta format doc,
- # although this is not followed in actual 117 dtas)
- #seek_variable_labels = struct.unpack(
- # self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17
- self.path_or_buf.read(8) # <characteristics>
- self.data_location = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
- self.seek_strls = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
- self.seek_value_labels = struct.unpack(
- self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
- #self.path_or_buf.read(8) # </stata_dta>
- #self.path_or_buf.read(8) # EOF
- self.path_or_buf.seek(seek_vartypes)
- typlist = [struct.unpack(self.byteorder + 'H',
- self.path_or_buf.read(2))[0]
- for i in range(self.nvar)]
- self.typlist = [None]*self.nvar
- try:
- i = 0
- for typ in typlist:
- if typ <= 2045:
- self.typlist[i] = typ
- #elif typ == 32768:
- # raise ValueError("Long strings are not supported")
- else:
- self.typlist[i] = self.TYPE_MAP_XML[typ]
- i += 1
- except:
- raise ValueError("cannot convert stata types [{0}]"
- .format(','.join(typlist)))
- self.dtyplist = [None]*self.nvar
- try:
- i = 0
- for typ in typlist:
- if typ <= 2045:
- self.dtyplist[i] = str(typ)
- else:
- self.dtyplist[i] = self.DTYPE_MAP_XML[typ]
- i += 1
- except:
- raise ValueError("cannot convert stata dtypes [{0}]"
- .format(','.join(typlist)))
+ return self._seek_value_label_names + (33*self.nvar) + 20 + 17
+ elif self.format_version == 118:
+ return struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17
+ else:
+ raise ValueError()
- self.path_or_buf.seek(seek_varnames)
- self.varlist = [self._null_terminate(self.path_or_buf.read(33))
- for i in range(self.nvar)]
- self.path_or_buf.seek(seek_sortlist)
- self.srtlist = struct.unpack(
- self.byteorder + ('h' * (self.nvar + 1)),
- self.path_or_buf.read(2 * (self.nvar + 1))
- )[:-1]
+ def _read_old_header(self, first_char):
+ self.format_version = struct.unpack('b', first_char)[0]
+ if self.format_version not in [104, 105, 108, 113, 114, 115]:
+ raise ValueError(_version_error)
+ self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[0] == 0x1 and '>' or '<'
+ self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
+ self.path_or_buf.read(1) # unused
- self.path_or_buf.seek(seek_formats)
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
- for i in range(self.nvar)]
+ self.nvar = struct.unpack(self.byteorder + 'H',
+ self.path_or_buf.read(2))[0]
+ self.nobs = self._get_nobs()
- self.path_or_buf.seek(seek_value_label_names)
- self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
- for i in range(self.nvar)]
+ self.data_label = self._get_data_label()
- self.path_or_buf.seek(seek_variable_labels)
- self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
- for i in range(self.nvar)]
+ self.time_stamp = self._get_time_stamp()
+
+ # descriptors
+ if self.format_version > 108:
+ typlist = [ord(self.path_or_buf.read(1))
+ for i in range(self.nvar)]
else:
- # header
- self.format_version = struct.unpack('b', first_char)[0]
- if self.format_version not in [104, 105, 108, 113, 114, 115]:
- raise ValueError("Version of given Stata file is not 104, "
- "105, 108, 113 (Stata 8/9), 114 (Stata "
- "10/11), 115 (Stata 12) or 117 (Stata 13)")
- self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[0] == 0x1 and '>' or '<'
- self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
- self.path_or_buf.read(1) # unused
-
- self.nvar = struct.unpack(self.byteorder + 'H',
- self.path_or_buf.read(2))[0]
- self.nobs = struct.unpack(self.byteorder + 'I',
- self.path_or_buf.read(4))[0]
- if self.format_version > 105:
- self.data_label = self._null_terminate(self.path_or_buf.read(81))
- else:
- self.data_label = self._null_terminate(self.path_or_buf.read(32))
- if self.format_version > 104:
- self.time_stamp = self._null_terminate(self.path_or_buf.read(18))
-
- # descriptors
- if self.format_version > 108:
- typlist = [ord(self.path_or_buf.read(1))
- for i in range(self.nvar)]
- else:
- typlist = [
- self.OLD_TYPE_MAPPING[
- self._decode_bytes(self.path_or_buf.read(1))
- ] for i in range(self.nvar)
- ]
+ typlist = [
+ self.OLD_TYPE_MAPPING[
+ self._decode_bytes(self.path_or_buf.read(1))
+ ] for i in range(self.nvar)
+ ]
- try:
- self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
- except:
- raise ValueError("cannot convert stata types [{0}]"
- .format(','.join(typlist)))
- try:
- self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
- except:
- raise ValueError("cannot convert stata dtypes [{0}]"
- .format(','.join(typlist)))
+ try:
+ self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
+ except:
+ raise ValueError("cannot convert stata types [{0}]"
+ .format(','.join(typlist)))
+ try:
+ self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
+ except:
+ raise ValueError("cannot convert stata dtypes [{0}]"
+ .format(','.join(typlist)))
- if self.format_version > 108:
- self.varlist = [self._null_terminate(self.path_or_buf.read(33))
- for i in range(self.nvar)]
- else:
- self.varlist = [self._null_terminate(self.path_or_buf.read(9))
- for i in range(self.nvar)]
- self.srtlist = struct.unpack(
- self.byteorder + ('h' * (self.nvar + 1)),
- self.path_or_buf.read(2 * (self.nvar + 1))
- )[:-1]
- if self.format_version > 113:
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
- for i in range(self.nvar)]
- elif self.format_version > 104:
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(12))
- for i in range(self.nvar)]
- else:
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(7))
- for i in range(self.nvar)]
- if self.format_version > 108:
- self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
- for i in range(self.nvar)]
- else:
- self.lbllist = [self._null_terminate(self.path_or_buf.read(9))
- for i in range(self.nvar)]
- if self.format_version > 105:
- self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
- for i in range(self.nvar)]
- else:
- self.vlblist = [self._null_terminate(self.path_or_buf.read(32))
- for i in range(self.nvar)]
+ if self.format_version > 108:
+ self.varlist = [self._null_terminate(self.path_or_buf.read(33))
+ for i in range(self.nvar)]
+ else:
+ self.varlist = [self._null_terminate(self.path_or_buf.read(9))
+ for i in range(self.nvar)]
+ self.srtlist = struct.unpack(
+ self.byteorder + ('h' * (self.nvar + 1)),
+ self.path_or_buf.read(2 * (self.nvar + 1))
+ )[:-1]
- # ignore expansion fields (Format 105 and later)
- # When reading, read five bytes; the last four bytes now tell you
- # the size of the next read, which you discard. You then continue
- # like this until you read 5 bytes of zeros.
+ self.fmtlist = self._get_fmtlist()
- if self.format_version > 104:
- while True:
- data_type = struct.unpack(self.byteorder + 'b',
- self.path_or_buf.read(1))[0]
- if self.format_version > 108:
- data_len = struct.unpack(self.byteorder + 'i',
- self.path_or_buf.read(4))[0]
- else:
- data_len = struct.unpack(self.byteorder + 'h',
- self.path_or_buf.read(2))[0]
- if data_type == 0:
- break
- self.path_or_buf.read(data_len)
+ self.lbllist = self._get_lbllist()
- # necessary data to continue parsing
- self.data_location = self.path_or_buf.tell()
+ self.vlblist = self._get_vlblist()
- self.has_string_data = len([x for x in self.typlist
- if type(x) is int]) > 0
+ # ignore expansion fields (Format 105 and later)
+ # When reading, read five bytes; the last four bytes now tell you
+ # the size of the next read, which you discard. You then continue
+ # like this until you read 5 bytes of zeros.
- # calculate size of a data record
- self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
+ if self.format_version > 104:
+ while True:
+ data_type = struct.unpack(self.byteorder + 'b',
+ self.path_or_buf.read(1))[0]
+ if self.format_version > 108:
+ data_len = struct.unpack(self.byteorder + 'i',
+ self.path_or_buf.read(4))[0]
+ else:
+ data_len = struct.unpack(self.byteorder + 'h',
+ self.path_or_buf.read(2))[0]
+ if data_type == 0:
+ break
+ self.path_or_buf.read(data_len)
- # remove format details from %td
- self.fmtlist = ["%td" if x.startswith("%td") else x for x in self.fmtlist]
+ # necessary data to continue parsing
+ self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt
or struct.calcsize(self.byteorder + fmt))
+
+ def _decode(self, s):
+ s = s.partition(b"\0")[0]
+ return s.decode('utf-8')
+
+
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None: # have bytes not strings,
# so must decode
@@ -1220,7 +1306,10 @@ def _read_value_labels(self):
slength = self.path_or_buf.read(4)
if not slength:
break # end of variable label table (format < 117)
- labname = self._null_terminate(self.path_or_buf.read(33))
+ if self.format_version <= 117:
+ labname = self._null_terminate(self.path_or_buf.read(33))
+ else:
+ labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
@@ -1238,28 +1327,45 @@ def _read_value_labels(self):
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
- self.value_label_dict[labname][val[i]] = (
- self._null_terminate(txt[off[i]:])
- )
-
+ if self.format_version <= 117:
+ self.value_label_dict[labname][val[i]] = (
+ self._null_terminate(txt[off[i]:])
+ )
+ else:
+ self.value_label_dict[labname][val[i]] = (
+ self._decode(txt[off[i]:])
+ )
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
+
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
- self.GSO = dict()
+ self.GSO = {0 : ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
- v_o = struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0]
+ if self.format_version == 117:
+ v_o = struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0]
+ else:
+ buf = self.path_or_buf.read(12)
+ # Only tested on little endian file on little endian machine.
+ if self.byteorder == '<':
+ buf = buf[0:2] + buf[4:10]
+ else:
+ buf = buf[0:2] + buf[6:]
+ v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
- va = va[0:-1].decode(self._encoding or self._default_encoding)
+ encoding = 'utf-8'
+ if self.format_version == 117:
+ encoding = self._encoding or self._default_encoding
+ va = va[0:-1].decode(encoding)
self.GSO[v_o] = va
# legacy
@@ -1366,6 +1472,7 @@ def read(self, nrows=None, convert_dates=None,
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
+
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
@@ -1486,7 +1593,7 @@ def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
- if typ != 'L':
+ if typ != 'Q':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
@@ -1772,7 +1879,7 @@ def _write(self, to_write):
self._file.write(to_write)
def _prepare_categoricals(self, data):
- """Check for categorigal columns, retain categorical information for
+ """Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [com.is_categorical_dtype(data[col]) for col in data]
diff --git a/pandas/io/tests/data/stata14_118.dta b/pandas/io/tests/data/stata14_118.dta
new file mode 100644
index 0000000000000..1fc65f1a6e988
Binary files /dev/null and b/pandas/io/tests/data/stata14_118.dta differ
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 4b2781c9dceb6..cc9ab977241f9 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# pylint: disable=E1101
from datetime import datetime
@@ -22,6 +23,7 @@
from pandas.tslib import NaT
from pandas import compat
+
class TestStata(tm.TestCase):
def setUp(self):
@@ -77,6 +79,8 @@ def setUp(self):
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
+ self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
+
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
@@ -244,6 +248,36 @@ def test_read_dta12(self):
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
+
+ def test_read_dta18(self):
+ parsed_118 = self.read_dta(self.dta22_118)
+ parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
+ expected = DataFrame.from_records(
+ [['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
+ ['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan],
+ ['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0],
+ ['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4],
+ ['', '', '', 0, 0.3332999, 'option a', 1/3.]
+ ],
+ columns=['Things', 'Cities', 'Unicode_Cities_Strl', 'Ints', 'Floats', 'Bytes', 'Longs'])
+ expected["Floats"] = expected["Floats"].astype(np.float32)
+ for col in parsed_118.columns:
+ tm.assert_almost_equal(parsed_118[col], expected[col])
+
+ rdr = StataReader(self.dta22_118)
+ vl = rdr.variable_labels()
+ vl_expected = {u'Unicode_Cities_Strl': u'Here are some strls with Ünicode chars',
+ u'Longs': u'long data',
+ u'Things': u'Here are some things',
+ u'Bytes': u'byte data',
+ u'Ints': u'int data',
+ u'Cities': u'Here are some cities',
+ u'Floats': u'float data'}
+ tm.assert_dict_equal(vl, vl_expected)
+
+ self.assertEqual(rdr.data_label, u'This is a Ünicode data label')
+
+
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
| closes #9882
This extends the Stata reader to handle version 118 (stata 14) format files.
It correctly handles the test file posted by @makmanalp. I don't have stata14 now to generate additional test files, but I'm not sure that it's necessary.
There is one point I'm not sure about, relating to the way that strls are indexed by a weird composite of a short and a 6 byte integer. The stata docs aren't clear on how this 8 byte composite should (or shouldn't) be byteswapped when the file and local endianness don't match. We currently byteswap it in this case (inheriting from our dta117 code) but I'm not sure that this is correct.
I can confirm that it works when the file and local machine are both little endian, which covers the most common situation.
I haven't looked at the writer much, it seems to always write version 114 files.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10516 | 2015-07-06T11:37:01Z | 2015-07-23T11:37:59Z | 2015-07-23T11:37:58Z | 2015-11-12T23:43:50Z |
BUG: display.precision options seems off-by-one (GH10451) | diff --git a/doc/source/options.rst b/doc/source/options.rst
index 7e140b1b2deaf..26871a11473de 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -227,7 +227,7 @@ can specify the option ``df.info(null_counts=True)`` to override on showing a pa
df.info()
pd.reset_option('max_info_rows')
-``display.precision`` sets the output display precision. This is only a
+``display.precision`` sets the output display precision in terms of decimal places. This is only a
suggestion.
.. ipython:: python
@@ -368,9 +368,11 @@ display.notebook_repr_html True When True, IPython notebook will
pandas objects (if it is available).
display.pprint_nest_depth 3 Controls the number of nested levels
to process when pretty-printing
-display.precision 7 Floating point output precision
- (number of significant digits). This is
- only a suggestion
+display.precision 6 Floating point output precision in
+ terms of number of places after the
+ decimal, for regular formatting as well
+ as scientific notation. Similar to
+ numpy's ``precision`` print option
display.show_dimensions truncate Whether to print out dimensions
at the end of DataFrame repr.
If 'truncate' is specified, only
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index fe5e7371bddf6..9c8cae6b6af72 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -391,6 +391,42 @@ New behavior:
See :ref:`documentation <io.hdf5>` for more details.
+Changes to ``display.precision`` option
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``display.precision`` option has been clarified to refer to decimal places (:issue:`10451`).
+
+Earlier versions of pandas would format floating point numbers to have one less decimal place than the value in
+``display.precision``.
+
+.. code-block:: python
+
+ In [1]: pd.set_option('display.precision', 2)
+
+ In [2]: pd.DataFrame({'x': [123.456789]})
+ Out[2]:
+ x
+ 0 123.5
+
+If interpreting precision as "significant figures" this did work for scientific notation but that same interpretation
+did not work for values with standard formatting. It was also out of step with how numpy handles formatting.
+
+Going forward the value of ``display.precision`` will directly control the number of places after the decimal, for
+regular formatting as well as scientific notation, similar to how numpy's ``precision`` print option works.
+
+.. ipython:: python
+
+ pd.set_option('display.precision', 2)
+ pd.DataFrame({'x': [123.456789]})
+
+To preserve output behavior with prior versions the default value of ``display.precision`` has been reduced to ``6``
+from ``7``.
+
+.. ipython:: python
+ :suppress:
+ pd.set_option('display.precision', 6)
+
+
.. _whatsnew_0170.api_breaking.other:
Other API Changes
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index a56d3b93d87da..03eaa45582bef 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -236,7 +236,7 @@ def mpl_style_cb(key):
return val
with cf.config_prefix('display'):
- cf.register_option('precision', 7, pc_precision_doc, validator=is_int)
+ cf.register_option('precision', 6, pc_precision_doc, validator=is_int)
cf.register_option('float_format', None, float_format_doc)
cf.register_option('column_space', 12, validator=is_int)
cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 6a05f819908af..4115788e4dd90 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -2014,7 +2014,7 @@ def _format_strings(self):
if self.formatter is not None:
fmt_values = [self.formatter(x) for x in self.values]
else:
- fmt_str = '%% .%df' % (self.digits - 1)
+ fmt_str = '%% .%df' % self.digits
fmt_values = self._format_with(fmt_str)
if len(fmt_values) > 0:
@@ -2022,20 +2022,20 @@ def _format_strings(self):
else:
maxlen = 0
- too_long = maxlen > self.digits + 5
+ too_long = maxlen > self.digits + 6
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
has_large_values = (abs_vals > 1e8).any()
- has_small_values = ((abs_vals < 10 ** (-self.digits+1)) &
+ has_small_values = ((abs_vals < 10 ** (-self.digits)) &
(abs_vals > 0)).any()
if too_long and has_large_values:
- fmt_str = '%% .%de' % (self.digits - 1)
+ fmt_str = '%% .%de' % self.digits
fmt_values = self._format_with(fmt_str)
elif has_small_values:
- fmt_str = '%% .%de' % (self.digits - 1)
+ fmt_str = '%% .%de' % self.digits
fmt_values = self._format_with(fmt_str)
return fmt_values
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index b733cacc01e05..b94f4046630e4 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -1523,7 +1523,7 @@ def test_to_string_no_index(self):
def test_to_string_float_formatting(self):
self.reset_display_options()
- fmt.set_option('display.precision', 6, 'display.column_space',
+ fmt.set_option('display.precision', 5, 'display.column_space',
12, 'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6,
@@ -1554,7 +1554,7 @@ def test_to_string_float_formatting(self):
self.assertEqual(df_s, expected)
self.reset_display_options()
- self.assertEqual(get_option("display.precision"), 7)
+ self.assertEqual(get_option("display.precision"), 6)
df = DataFrame({'x': [1e9, 0.2512]})
df_s = df.to_string()
@@ -3055,7 +3055,7 @@ def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
- with pd.option_context('display.precision', 7):
+ with pd.option_context('display.precision', 6):
# DataFrame example from issue #9764
d=pd.DataFrame({'col1':[9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7, 5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6, 4.999e-6, 5e-6, 5.0001e-6, 6e-6]})
@@ -3070,6 +3070,17 @@ def test_output_significant_digits(self):
for (start, stop), v in expected_output.items():
self.assertEqual(str(d[start:stop]), v)
+ def test_too_long(self):
+ # GH 10451
+ with pd.option_context('display.precision', 4):
+ # need both a number > 1e8 and something that normally formats to having length > display.precision + 6
+ df = pd.DataFrame(dict(x=[12345.6789]))
+ self.assertEqual(str(df), ' x\n0 12345.6789')
+ df = pd.DataFrame(dict(x=[2e8]))
+ self.assertEqual(str(df), ' x\n0 200000000')
+ df = pd.DataFrame(dict(x=[12345.6789, 2e8]))
+ self.assertEqual(str(df), ' x\n0 1.2346e+04\n1 2.0000e+08')
+
class TestRepr_timedelta64(tm.TestCase):
| Closes #10451
I made a call here in response to my questions on #10451 and hopefully people like it. I made it clear that "precision" refers to places after the decimal, not significant figures, and changed the default value to match so that for many pandas users no change would be detected. I updated the Options docs and also What's New. For tests I basically updated the precision setting to the new semantics, so that the expected strings wouldn't need to change.
The one question I have is the code that computes `too_long`. This compares the longest formatted string against what looks to be an arbitrary constant of "number of digits + 5". Changing the 5 to a 4 or 6 doesn't trip up any unit tests. If it's desired, this could be increased by 1 as the "new" digits value is effectively 1 less than what the old value in terms of its effects on formatting, so the 5 would need to change to a 6 to maintain behavior. I could write tests on this as well.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10513 | 2015-07-05T23:26:35Z | 2015-08-02T21:26:30Z | 2015-08-02T21:26:30Z | 2015-08-02T21:31:20Z |
TST: Deprecate assert_numpy_array_equivalent | diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index d455d9d0d8679..4f998319d922d 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -220,7 +220,7 @@ def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
expected = _eval_single_bin(
lhs_new, binop, rhs_new, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
skip_these = _scalar_skip
@@ -240,7 +240,7 @@ def check_operands(left, right, cmp_op):
for ex in (ex1, ex2, ex3):
result = pd.eval(ex, engine=self.engine,
parser=self.parser)
- tm.assert_numpy_array_equivalent(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def check_simple_cmp_op(self, lhs, cmp1, rhs):
ex = 'lhs {0} rhs'.format(cmp1)
@@ -251,13 +251,13 @@ def check_simple_cmp_op(self, lhs, cmp1, rhs):
else:
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def check_binary_arith_op(self, lhs, arith1, rhs):
ex = 'lhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = _eval_single_bin(lhs, arith1, rhs, self.engine)
- tm.assert_numpy_array_equivalent(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
ex = 'lhs {0} rhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
nlhs = _eval_single_bin(lhs, arith1, rhs,
@@ -273,7 +273,7 @@ def check_alignment(self, result, nlhs, ghs, op):
pass
else:
expected = self.ne.evaluate('nlhs {0} ghs'.format(op))
- tm.assert_numpy_array_equivalent(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
# modulus, pow, and floor division require special casing
@@ -291,7 +291,7 @@ def check_floor_division(self, lhs, arith1, rhs):
if self.engine == 'python':
res = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs // rhs
- tm.assert_numpy_array_equivalent(res, expected)
+ tm.assert_numpy_array_equal(res, expected)
else:
self.assertRaises(TypeError, pd.eval, ex, local_dict={'lhs': lhs,
'rhs': rhs},
@@ -325,8 +325,8 @@ def check_pow(self, lhs, arith1, rhs):
if (np.isscalar(lhs) and np.isscalar(rhs) and
_is_py3_complex_incompat(result, expected)):
- self.assertRaises(AssertionError, tm.assert_numpy_array_equivalent, result,
- expected)
+ self.assertRaises(AssertionError, tm.assert_numpy_array_equal,
+ result, expected)
else:
assert_allclose(result, expected)
@@ -345,12 +345,12 @@ def check_single_invert_op(self, lhs, cmp1, rhs):
elb = np.array([bool(el)])
expected = ~elb
result = pd.eval('~elb', engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(expected, result)
+ tm.assert_numpy_array_equal(expected, result)
for engine in self.current_engines:
tm.skip_if_no_ne(engine)
- tm.assert_numpy_array_equivalent(result, pd.eval('~elb', engine=engine,
- parser=self.parser))
+ tm.assert_numpy_array_equal(result, pd.eval('~elb', engine=engine,
+ parser=self.parser))
def check_compound_invert_op(self, lhs, cmp1, rhs):
skip_these = 'in', 'not in'
@@ -370,13 +370,13 @@ def check_compound_invert_op(self, lhs, cmp1, rhs):
else:
expected = ~expected
result = pd.eval(ex, engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(expected, result)
+ tm.assert_numpy_array_equal(expected, result)
# make sure the other engines work the same as this one
for engine in self.current_engines:
tm.skip_if_no_ne(engine)
ev = pd.eval(ex, engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(ev, result)
+ tm.assert_numpy_array_equal(ev, result)
def ex(self, op, var_name='lhs'):
return '{0}{1}'.format(op, var_name)
@@ -639,17 +639,17 @@ def test_identical(self):
x = np.array([1])
result = pd.eval('x', engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(result, np.array([1]))
+ tm.assert_numpy_array_equal(result, np.array([1]))
self.assertEqual(result.shape, (1, ))
x = np.array([1.5])
result = pd.eval('x', engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(result, np.array([1.5]))
+ tm.assert_numpy_array_equal(result, np.array([1.5]))
self.assertEqual(result.shape, (1, ))
x = np.array([False])
result = pd.eval('x', engine=self.engine, parser=self.parser)
- tm.assert_numpy_array_equivalent(result, np.array([False]))
+ tm.assert_numpy_array_equal(result, np.array([False]))
self.assertEqual(result.shape, (1, ))
@@ -707,7 +707,7 @@ def check_alignment(self, result, nlhs, ghs, op):
pass
else:
expected = eval('nlhs {0} ghs'.format(op))
- tm.assert_numpy_array_equivalent(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
class TestEvalPythonPandas(TestEvalPythonPython):
@@ -1118,10 +1118,10 @@ def test_truediv(self):
if PY3:
res = self.eval(ex, truediv=False)
- tm.assert_numpy_array_equivalent(res, np.array([1.0]))
+ tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval(ex, truediv=True)
- tm.assert_numpy_array_equivalent(res, np.array([1.0]))
+ tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
@@ -1140,10 +1140,10 @@ def test_truediv(self):
self.assertEqual(res, expec)
else:
res = self.eval(ex, truediv=False)
- tm.assert_numpy_array_equivalent(res, np.array([1]))
+ tm.assert_numpy_array_equal(res, np.array([1]))
res = self.eval(ex, truediv=True)
- tm.assert_numpy_array_equivalent(res, np.array([1.0]))
+ tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
@@ -1446,8 +1446,8 @@ class TestScope(object):
def check_global_scope(self, e, engine, parser):
tm.skip_if_no_ne(engine)
- tm.assert_numpy_array_equivalent(_var_s * 2, pd.eval(e, engine=engine,
- parser=parser))
+ tm.assert_numpy_array_equal(_var_s * 2, pd.eval(e, engine=engine,
+ parser=parser))
def test_global_scope(self):
e = '_var_s * 2'
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index 63ed26ea7d931..ddc588d218312 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -14,7 +14,6 @@
from pandas.util.testing import (assert_series_equal, assert_produces_warning,
network, assert_frame_equal)
import pandas.util.testing as tm
-from numpy.testing import assert_array_equal
if compat.PY3:
from urllib.error import HTTPError
@@ -533,7 +532,7 @@ def test_fred_part2(self):
[848.3],
[933.3]]
result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5]
- assert_array_equal(result.values, np.array(expected))
+ tm.assert_numpy_array_equal(result.values, np.array(expected))
@network
def test_invalid_series(self):
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
index cb99c1705c5eb..43e1c5c89dd5e 100644
--- a/pandas/io/tests/test_json/test_ujson.py
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -21,8 +21,7 @@
import pandas.compat as compat
import numpy as np
-from numpy.testing import (assert_array_equal,
- assert_array_almost_equal_nulp,
+from numpy.testing import (assert_array_almost_equal_nulp,
assert_approx_equal)
import pytz
import dateutil
@@ -166,7 +165,7 @@ def test_encodeArrayOfNestedArrays(self):
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = np.array(input)
- assert_array_equal(input, ujson.decode(output, numpy=True, dtype=input.dtype))
+ tm.assert_numpy_array_equal(input, ujson.decode(output, numpy=True, dtype=input.dtype))
def test_encodeArrayOfDoubles(self):
input = [ 31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
@@ -174,7 +173,7 @@ def test_encodeArrayOfDoubles(self):
self.assertEqual(input, json.loads(output))
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
- assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+ tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
def test_doublePrecisionTest(self):
input = 30.012345678901234
@@ -271,7 +270,7 @@ def test_encodeArrayInArray(self):
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
- assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+ tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeIntConversion(self):
@@ -307,7 +306,7 @@ def test_encodeListConversion(self):
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
- assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+ tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeDictConversion(self):
@@ -676,8 +675,8 @@ def test_encodeListLongConversion(self):
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
- assert_array_equal(np.array(input), ujson.decode(output, numpy=True,
- dtype=np.int64))
+ tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True,
+ dtype=np.int64))
pass
def test_encodeLongConversion(self):
@@ -755,7 +754,7 @@ def test_loadFile(self):
f = StringIO("[1,2,3,4]")
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = StringIO("[1,2,3,4]")
- assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
+ tm.assert_numpy_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileLikeObject(self):
class filelike:
@@ -768,7 +767,7 @@ def read(self):
f = filelike()
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = filelike()
- assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
+ tm.assert_numpy_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileArgsError(self):
try:
@@ -906,7 +905,7 @@ def testBoolArray(self):
inpt = np.array([True, False, True, True, False, True, False , False],
dtype=np.bool)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
- assert_array_equal(inpt, outp)
+ tm.assert_numpy_array_equal(inpt, outp)
def testInt(self):
num = np.int(2562010)
@@ -943,7 +942,7 @@ def testIntArray(self):
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
- assert_array_equal(inpt, outp)
+ tm.assert_numpy_array_equal(inpt, outp)
def testIntMax(self):
num = np.int(np.iinfo(np.int).max)
@@ -1008,26 +1007,26 @@ def testArrays(self):
arr = np.arange(100);
arr = arr.reshape((10, 10))
- assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
- assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+ tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((5, 5, 4))
- assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
- assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+ tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((100, 1))
- assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
- assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+ tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = np.arange(96);
arr = arr.reshape((2, 2, 2, 2, 3, 2))
- assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
- assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+ tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
l = ['a', list(), dict(), dict(), list(),
42, 97.8, ['a', 'b'], {'key': 'val'}]
arr = np.array(l)
- assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
arr = np.arange(100.202, 200.202, 1, dtype=np.float32);
arr = arr.reshape((5, 5, 4))
@@ -1158,19 +1157,19 @@ def testDataFrame(self):
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df)))
self.assertTrue((df == outp).values.all())
- assert_array_equal(df.columns, outp.columns)
- assert_array_equal(df.index, outp.index)
+ tm.assert_numpy_array_equal(df.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
outp = DataFrame(**dec)
self.assertTrue((df == outp).values.all())
- assert_array_equal(df.columns, outp.columns)
- assert_array_equal(df.index, outp.index)
+ tm.assert_numpy_array_equal(df.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
outp.index = df.index
self.assertTrue((df == outp).values.all())
- assert_array_equal(df.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.columns, outp.columns)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
outp.index = df.index
@@ -1178,8 +1177,8 @@ def testDataFrame(self):
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
self.assertTrue((df.transpose() == outp).values.all())
- assert_array_equal(df.transpose().columns, outp.columns)
- assert_array_equal(df.transpose().index, outp.index)
+ tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
+ tm.assert_numpy_array_equal(df.transpose().index, outp.index)
def testDataFrameNumpy(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
@@ -1187,20 +1186,20 @@ def testDataFrameNumpy(self):
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
self.assertTrue((df == outp).values.all())
- assert_array_equal(df.columns, outp.columns)
- assert_array_equal(df.index, outp.index)
+ tm.assert_numpy_array_equal(df.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
numpy=True))
outp = DataFrame(**dec)
self.assertTrue((df == outp).values.all())
- assert_array_equal(df.columns, outp.columns)
- assert_array_equal(df.index, outp.index)
+ tm.assert_numpy_array_equal(df.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"), numpy=True))
self.assertTrue((df.transpose() == outp).values.all())
- assert_array_equal(df.transpose().columns, outp.columns)
- assert_array_equal(df.transpose().index, outp.index)
+ tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
+ tm.assert_numpy_array_equal(df.transpose().index, outp.index)
def testDataFrameNested(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
@@ -1233,18 +1232,18 @@ def testDataFrameNumpyLabelled(self):
# column indexed
outp = DataFrame(*ujson.decode(ujson.encode(df), numpy=True, labelled=True))
self.assertTrue((df.T == outp).values.all())
- assert_array_equal(df.T.columns, outp.columns)
- assert_array_equal(df.T.index, outp.index)
+ tm.assert_numpy_array_equal(df.T.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.T.index, outp.index)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"), numpy=True, labelled=True))
outp.index = df.index
self.assertTrue((df == outp).values.all())
- assert_array_equal(df.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.columns, outp.columns)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"), numpy=True, labelled=True))
self.assertTrue((df == outp).values.all())
- assert_array_equal(df.columns, outp.columns)
- assert_array_equal(df.index, outp.index)
+ tm.assert_numpy_array_equal(df.columns, outp.columns)
+ tm.assert_numpy_array_equal(df.index, outp.index)
def testSeries(self):
s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 64f923be8ad77..facbff5e047db 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -33,7 +33,6 @@
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
-from numpy.testing import assert_array_equal
import pandas.parser
@@ -747,7 +746,7 @@ def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A','N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A',''])
- assert_array_equal (_NA_VALUES, parsers._NA_VALUES)
+ self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index 788e4bd7ef80a..cadf008fb40fb 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -13,7 +13,7 @@
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal, assertRaisesRegexp,
- assert_array_equal, assert_attr_equal)
+ assert_numpy_array_equal, assert_attr_equal)
from numpy.testing import assert_equal
from pandas import Series, DataFrame, bdate_range, Panel, MultiIndex
@@ -575,7 +575,7 @@ def _compare_with_series(sps, new_index):
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
- np.testing.assert_array_equal(self.bseries.sp_values, 1.)
+ tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1., 10))
def test_sparse_reindex(self):
length = 10
@@ -899,7 +899,7 @@ def _check_results_to_coo(results, check):
(A, il, jl) = results
(A_result, il_result, jl_result) = check
# convert to dense and compare
- assert_array_equal(A.todense(), A_result.todense())
+ assert_numpy_array_equal(A.todense(), A_result.todense())
# or compare directly as difference of sparse
# assert(abs(A - A_result).max() < 1e-12) # max is failing in python
# 2.6
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 1741676abf773..bb6cb5a444dd9 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -539,7 +539,7 @@ def _check_ndarray(self, func, static_comp, window=50,
result = func(arr, 20, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]
- self.assert_numpy_array_equivalent(result, expected)
+ self.assert_numpy_array_equal(result, expected)
if test_stable:
result = func(self.arr + 1e9, window)
@@ -1684,7 +1684,7 @@ def test_pairwise_stats_column_names_order(self):
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
+ self.assert_numpy_array_equal(result, results[0])
# DataFrame with itself, pairwise=True
for f in [lambda x: mom.expanding_cov(x, pairwise=True),
@@ -1701,7 +1701,7 @@ def test_pairwise_stats_column_names_order(self):
assert_index_equal(result.minor_axis, df.columns)
for i, result in enumerate(results):
if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
+ self.assert_numpy_array_equal(result, results[0])
# DataFrame with itself, pairwise=False
for f in [lambda x: mom.expanding_cov(x, pairwise=False),
@@ -1717,7 +1717,7 @@ def test_pairwise_stats_column_names_order(self):
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
+ self.assert_numpy_array_equal(result, results[0])
# DataFrame with another DataFrame, pairwise=True
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
@@ -1734,7 +1734,7 @@ def test_pairwise_stats_column_names_order(self):
assert_index_equal(result.minor_axis, df2.columns)
for i, result in enumerate(results):
if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
+ self.assert_numpy_array_equal(result, results[0])
# DataFrame with another DataFrame, pairwise=False
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
@@ -1769,7 +1769,7 @@ def test_pairwise_stats_column_names_order(self):
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
- self.assert_numpy_array_equivalent(result, results[0])
+ self.assert_numpy_array_equal(result, results[0])
def test_rolling_skew_edge_cases(self):
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 91b03aceaa14c..cb5687acf3a34 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -305,7 +305,7 @@ def test_group_var_generic_1d(self):
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
- tm.assert_array_equal(counts, expected_counts)
+ tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
@@ -321,7 +321,7 @@ def test_group_var_generic_1d_flat_labels(self):
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
- tm.assert_array_equal(counts, expected_counts)
+ tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
@@ -337,7 +337,7 @@ def test_group_var_generic_2d_all_finite(self):
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
- tm.assert_array_equal(counts, expected_counts)
+ tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
@@ -356,7 +356,7 @@ def test_group_var_generic_2d_some_nan(self):
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
- tm.assert_array_equal(counts, expected_counts)
+ tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
@@ -421,12 +421,12 @@ def test_unique_label_indices():
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
- tm.assert_array_equal(left, right)
+ tm.assert_numpy_array_equal(left, right)
a[np.random.choice(len(a), 10)] = -1
left= unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
- tm.assert_array_equal(left, right)
+ tm.assert_numpy_array_equal(left, right)
if __name__ == '__main__':
import nose
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index db23b13edd42b..d47e7dbe751c7 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -553,7 +553,7 @@ def test_value_counts_inferred(self):
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
- self.assert_numpy_array_equivalent(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
+ self.assert_numpy_array_equal(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
self.assertEqual(s.nunique(), 3)
s = klass({})
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 86831a8485786..1ca23c124e250 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -9,7 +9,6 @@
import operator
import numpy as np
-from numpy.testing import assert_array_equal
from pandas.core.api import DataFrame, Panel
from pandas.computation import expressions as expr
@@ -271,7 +270,7 @@ def testit():
result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
- assert_array_equal(result,expected.values)
+ tm.assert_numpy_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate')
self.assertFalse(result)
@@ -306,7 +305,7 @@ def testit():
result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
- assert_array_equal(result,expected.values)
+ tm.assert_numpy_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate')
self.assertFalse(result)
@@ -331,7 +330,7 @@ def testit():
c.fill(cond)
result = expr.where(c, f.values, f.values+1)
expected = np.where(c, f.values, f.values+1)
- assert_array_equal(result,expected)
+ tm.assert_numpy_array_equal(result,expected)
expr.set_use_numexpr(False)
testit()
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 2a7022da4fdc4..9ab004eb31a99 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -24,7 +24,6 @@
from numpy.random import randn
import numpy as np
import numpy.ma as ma
-from numpy.testing import assert_array_equal
import numpy.ma.mrecords as mrecords
import pandas.core.nanops as nanops
@@ -40,6 +39,7 @@
from pandas.util.misc import is_little_endian
from pandas.util.testing import (assert_almost_equal,
+ assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp,
@@ -125,7 +125,7 @@ def test_getitem(self):
df['@awesome_domain'] = ad
self.assertRaises(KeyError, df.__getitem__, 'df["$10"]')
res = df['@awesome_domain']
- assert_array_equal(ad, res.values)
+ assert_numpy_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
@@ -4664,13 +4664,13 @@ def test_from_records_empty(self):
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
- assert_array_equal(df.index, Index([1], name='id'))
+ assert_numpy_array_equal(df.index, Index([1], name='id'))
self.assertEqual(df.index.name, 'id')
- assert_array_equal(df.columns, Index(['value']))
+ assert_numpy_array_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
- assert_array_equal(df.index, Index([], name='id'))
+ assert_numpy_array_equal(df.index, Index([], name='id'))
self.assertEqual(df.index.name, 'id')
def test_from_records_with_datetimes(self):
@@ -6108,7 +6108,7 @@ def test_boolean_comparison(self):
assert_frame_equal(result,expected)
result = df.values>b
- assert_array_equal(result,expected.values)
+ assert_numpy_array_equal(result,expected.values)
result = df>l
assert_frame_equal(result,expected)
@@ -6120,7 +6120,7 @@ def test_boolean_comparison(self):
assert_frame_equal(result,expected)
result = df.values>b_r
- assert_array_equal(result,expected.values)
+ assert_numpy_array_equal(result,expected.values)
self.assertRaises(ValueError, df.__gt__, b_c)
self.assertRaises(ValueError, df.values.__gt__, b_c)
@@ -6140,7 +6140,7 @@ def test_boolean_comparison(self):
assert_frame_equal(result,expected)
result = df.values == b_r
- assert_array_equal(result,expected.values)
+ assert_numpy_array_equal(result,expected.values)
self.assertRaises(ValueError, lambda : df == b_c)
self.assertFalse((df.values == b_c))
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 800c6f83f4902..da481d71aa59c 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -25,7 +25,7 @@
from numpy import random
from numpy.random import rand, randn
-from numpy.testing import assert_array_equal, assert_allclose
+from numpy.testing import assert_allclose
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
@@ -646,11 +646,11 @@ def test_bar_log(self):
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
- assert_array_equal(ax.yaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([200, 500]).plot(log=True, kind='barh')
- assert_array_equal(ax.xaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
@@ -660,13 +660,13 @@ def test_bar_log(self):
expected = np.hstack((1.0e-04, expected, 1.0e+01))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
- assert_array_equal(ax.get_ylim(), (0.001, 0.10000000000000001))
- assert_array_equal(ax.yaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax.get_ylim(), (0.001, 0.10000000000000001))
+ tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
- assert_array_equal(ax.get_xlim(), (0.001, 0.10000000000000001))
- assert_array_equal(ax.xaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax.get_xlim(), (0.001, 0.10000000000000001))
+ tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
@@ -2154,7 +2154,7 @@ def test_bar_log_no_subplots(self):
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True, log=True)
- assert_array_equal(ax.yaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
@@ -2166,8 +2166,8 @@ def test_bar_log_subplots(self):
Series([300, 500])]).plot(log=True, kind='bar',
subplots=True)
- assert_array_equal(ax[0].yaxis.get_ticklocs(), expected)
- assert_array_equal(ax[1].yaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
@@ -2178,7 +2178,7 @@ def test_boxplot(self):
ax = _check_plot_works(df.plot, kind='box')
self._check_text_labels(ax.get_xticklabels(), labels)
- assert_array_equal(ax.xaxis.get_ticklocs(),
+ tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
self.assertEqual(len(ax.lines),
self.bp_n_objects * len(numeric_cols))
@@ -2205,7 +2205,7 @@ def test_boxplot(self):
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
- assert_array_equal(ax.xaxis.get_ticklocs(), positions)
+ tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
@@ -2231,7 +2231,7 @@ def test_boxplot_vertical(self):
positions = np.array([3, 2, 8])
ax = df.plot(kind='box', positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
- assert_array_equal(ax.yaxis.get_ticklocs(), positions)
+ tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
@@ -2888,7 +2888,7 @@ def test_unordered_ts(self):
xticks = ax.lines[0].get_xdata()
self.assertTrue(xticks[0] < xticks[1])
ydata = ax.lines[0].get_ydata()
- assert_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
+ tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f1df0d711b5d0..c0b5bcd55d873 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3735,7 +3735,7 @@ def test_groupby_categorical_no_compress(self):
result = data.groupby("b").mean()
result = result["a"].values
exp = np.array([1,2,4,np.nan])
- self.assert_numpy_array_equivalent(result, exp)
+ self.assert_numpy_array_equal(result, exp)
def test_groupby_non_arithmetic_agg_types(self):
# GH9311, GH6620
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 81c6366b4cb41..87e06bad7fbe1 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -16,7 +16,7 @@
CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex)
from pandas.core.index import InvalidIndexError, NumericIndex
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
- assert_copy, assert_numpy_array_equivalent, assert_numpy_array_equal)
+ assert_copy)
from pandas import compat
from pandas.compat import long, is_platform_windows
@@ -100,7 +100,7 @@ def test_reindex_base(self):
expected = np.arange(idx.size)
actual = idx.get_indexer(idx)
- assert_numpy_array_equivalent(expected, actual)
+ tm.assert_numpy_array_equal(expected, actual)
with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):
idx.get_indexer(idx, method='invalid')
@@ -247,7 +247,7 @@ def test_argsort(self):
result = ind.argsort()
expected = np.array(ind).argsort()
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def test_pickle(self):
for ind in self.indices.values():
@@ -357,7 +357,7 @@ def test_difference_base(self):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
self.assertEqual(result.__class__, answer.__class__)
- self.assert_numpy_array_equal(result.asi8, answer.asi8)
+ tm.assert_numpy_array_equal(result.asi8, answer.asi8)
else:
result = first.difference(case)
self.assertTrue(tm.equalContents(result, answer))
@@ -443,8 +443,8 @@ def test_equals_op(self):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
- assert_numpy_array_equivalent(index_a == index_a, expected1)
- assert_numpy_array_equivalent(index_a == index_c, expected2)
+ tm.assert_numpy_array_equal(index_a == index_a, expected1)
+ tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
@@ -453,8 +453,8 @@ def test_equals_op(self):
array_d = np.array(index_a[0:1])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
index_a == array_b
- assert_numpy_array_equivalent(index_a == array_a, expected1)
- assert_numpy_array_equivalent(index_a == array_c, expected2)
+ tm.assert_numpy_array_equal(index_a == array_a, expected1)
+ tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
@@ -463,8 +463,8 @@ def test_equals_op(self):
series_d = Series(array_d)
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
index_a == series_b
- assert_numpy_array_equivalent(index_a == series_a, expected1)
- assert_numpy_array_equivalent(index_a == series_c, expected2)
+ tm.assert_numpy_array_equal(index_a == series_a, expected1)
+ tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
@@ -486,8 +486,8 @@ def test_equals_op(self):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
- assert_numpy_array_equivalent(index_a == item, expected3)
- assert_numpy_array_equivalent(series_a == item, expected3)
+ tm.assert_numpy_array_equal(index_a == item, expected3)
+ tm.assert_numpy_array_equal(series_a == item, expected3)
class TestIndex(Base, tm.TestCase):
@@ -534,14 +534,14 @@ def test_constructor(self):
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
- self.assert_numpy_array_equal(self.strIndex, index)
+ tm.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assertIsInstance(index, Index)
self.assertEqual(index.name, 'name')
- assert_numpy_array_equivalent(arr, index)
+ tm.assert_numpy_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
@@ -598,7 +598,7 @@ def __array__(self, dtype=None):
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
- assert_numpy_array_equivalent(rs, xp)
+ tm.assert_numpy_array_equal(rs, xp)
tm.assertIsInstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
@@ -789,7 +789,7 @@ def _check(op):
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
- self.assert_numpy_array_equal(arr_result, index_result)
+ tm.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
@@ -847,10 +847,10 @@ def test_shift(self):
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
- self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
+ tm.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
- self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
+ tm.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
@@ -1203,11 +1203,11 @@ def test_get_indexer_nearest(self):
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
- self.assert_numpy_array_equivalent(actual, [0, 5, 9])
+ tm.assert_numpy_array_equal(actual, [0, 5, 9])
for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9], [0, 2, 9]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
- self.assert_numpy_array_equivalent(actual, expected)
+ tm.assert_numpy_array_equal(actual, expected)
with tm.assertRaisesRegexp(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
@@ -1218,22 +1218,22 @@ def test_get_indexer_nearest_decreasing(self):
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
- self.assert_numpy_array_equivalent(actual, [9, 4, 0])
+ tm.assert_numpy_array_equal(actual, [9, 4, 0])
for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1], [9, 7, 0]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
- self.assert_numpy_array_equivalent(actual, expected)
+ tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings(self):
idx = pd.Index(['b', 'c'])
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')
expected = [-1, 0, 1, 1]
- self.assert_numpy_array_equivalent(actual, expected)
+ tm.assert_numpy_array_equal(actual, expected)
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')
expected = [0, 0, 1, -1]
- self.assert_numpy_array_equivalent(actual, expected)
+ tm.assert_numpy_array_equal(actual, expected)
with tm.assertRaises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
@@ -1432,7 +1432,7 @@ def test_isin(self):
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
@@ -1441,20 +1441,20 @@ def test_isin(self):
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
- self.assert_numpy_array_equal(
+ tm.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
- self.assert_numpy_array_equal(
+ tm.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
- self.assert_numpy_array_equal(
+ tm.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
- self.assert_numpy_array_equal(
+ tm.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
- self.assert_numpy_array_equal(
+ tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
- self.assert_numpy_array_equal(
+ tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
- self.assert_numpy_array_equal(
+ tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
@@ -1462,8 +1462,8 @@ def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
- self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
- self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
+ tm.assert_numpy_array_equal(expected, idx.isin(values, level=0))
+ tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
@@ -1473,7 +1473,7 @@ def check_idx(idx):
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
- self.assert_numpy_array_equal(expected,
+ tm.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
@@ -1489,7 +1489,7 @@ def test_boolean_cmp(self):
idx = Index(values)
res = (idx == values)
- self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
+ tm.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
@@ -1539,7 +1539,7 @@ def test_str_attribute(self):
# test boolean case, should return np.array instead of boolean Index
idx = Index(['a1', 'a2', 'b1', 'b2'])
expected = np.array([True, True, False, False])
- self.assert_numpy_array_equivalent(idx.str.startswith('a'), expected)
+ tm.assert_numpy_array_equal(idx.str.startswith('a'), expected)
self.assertIsInstance(idx.str.startswith('a'), np.ndarray)
s = Series(range(4), index=idx)
expected = Series(range(2), index=['a1', 'a2'])
@@ -1646,12 +1646,12 @@ def test_equals_op_multiindex(self):
# test comparisons of multiindex
from pandas.compat import StringIO
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
- assert_numpy_array_equivalent(df.index == df.index, np.array([True, True]))
+ tm.assert_numpy_array_equal(df.index == df.index, np.array([True, True]))
mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])
- assert_numpy_array_equivalent(df.index == mi1, np.array([True, True]))
+ tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True]))
mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])
- assert_numpy_array_equivalent(df.index == mi2, np.array([True, False]))
+ tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False]))
mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == mi3
@@ -1659,7 +1659,8 @@ def test_equals_op_multiindex(self):
index_a = Index(['foo', 'bar', 'baz'])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == index_a
- assert_numpy_array_equivalent(index_a == mi3, np.array([False, False, False]))
+ tm.assert_numpy_array_equal(index_a == mi3, np.array([False, False, False]))
+
class TestCategoricalIndex(Base, tm.TestCase):
_holder = CategoricalIndex
@@ -1689,39 +1690,39 @@ def test_construction(self):
# empty
result = CategoricalIndex(categories=categories)
self.assertTrue(result.categories.equals(Index(categories)))
- self.assert_numpy_array_equal(result.codes,np.array([],dtype='int8'))
+ tm.assert_numpy_array_equal(result.codes, np.array([],dtype='int8'))
self.assertFalse(result.ordered)
# passing categories
result = CategoricalIndex(list('aabbca'),categories=categories)
self.assertTrue(result.categories.equals(Index(categories)))
- self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
+ tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
c = pd.Categorical(list('aabbca'))
result = CategoricalIndex(c)
self.assertTrue(result.categories.equals(Index(list('abc'))))
- self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
+ tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
self.assertFalse(result.ordered)
result = CategoricalIndex(c,categories=categories)
self.assertTrue(result.categories.equals(Index(categories)))
- self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
+ tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
self.assertFalse(result.ordered)
ci = CategoricalIndex(c,categories=list('abcd'))
result = CategoricalIndex(ci)
self.assertTrue(result.categories.equals(Index(categories)))
- self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
+ tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8'))
self.assertFalse(result.ordered)
result = CategoricalIndex(ci, categories=list('ab'))
self.assertTrue(result.categories.equals(Index(list('ab'))))
- self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8'))
+ tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8'))
self.assertFalse(result.ordered)
result = CategoricalIndex(ci, categories=list('ab'), ordered=True)
self.assertTrue(result.categories.equals(Index(list('ab'))))
- self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8'))
+ tm.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8'))
self.assertTrue(result.ordered)
# turn me to an Index
@@ -1914,7 +1915,7 @@ def test_reindex_base(self):
expected = np.array([4,0,1,5,2,3])
actual = idx.get_indexer(idx)
- assert_numpy_array_equivalent(expected, actual)
+ tm.assert_numpy_array_equal(expected, actual)
with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):
idx.get_indexer(idx, method='invalid')
@@ -1929,7 +1930,7 @@ def test_reindexing(self):
expected = oidx.get_indexer_non_unique(finder)[0]
actual = ci.get_indexer(finder)
- assert_numpy_array_equivalent(expected, actual)
+ tm.assert_numpy_array_equal(expected, actual)
def test_duplicates(self):
@@ -1976,13 +1977,13 @@ def test_repr_roundtrip(self):
def test_isin(self):
ci = CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b',np.nan])
- self.assert_numpy_array_equal(ci.isin(['c']),np.array([False,False,False,True,False,False]))
- self.assert_numpy_array_equal(ci.isin(['c','a','b']),np.array([True]*5 + [False]))
- self.assert_numpy_array_equal(ci.isin(['c','a','b',np.nan]),np.array([True]*6))
+ tm.assert_numpy_array_equal(ci.isin(['c']),np.array([False,False,False,True,False,False]))
+ tm.assert_numpy_array_equal(ci.isin(['c','a','b']),np.array([True]*5 + [False]))
+ tm.assert_numpy_array_equal(ci.isin(['c','a','b',np.nan]),np.array([True]*6))
# mismatched categorical -> coerced to ndarray so doesn't matter
- self.assert_numpy_array_equal(ci.isin(ci.set_categories(list('abcdefghi'))),np.array([True]*6))
- self.assert_numpy_array_equal(ci.isin(ci.set_categories(list('defghi'))),np.array([False]*5 + [True]))
+ tm.assert_numpy_array_equal(ci.isin(ci.set_categories(list('abcdefghi'))),np.array([True]*6))
+ tm.assert_numpy_array_equal(ci.isin(ci.set_categories(list('defghi'))),np.array([False]*5 + [True]))
def test_identical(self):
@@ -2231,12 +2232,12 @@ def test_equals(self):
def test_get_indexer(self):
idx = Float64Index([0.0, 1.0, 2.0])
- self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])
target = [-0.1, 0.5, 1.1]
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
def test_get_loc(self):
idx = Float64Index([0.0, 1.0, 2.0])
@@ -2274,16 +2275,16 @@ def test_doesnt_contain_all_the_things(self):
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
- assert_numpy_array_equivalent(i.isin([1.0]), np.array([True, False]))
- assert_numpy_array_equivalent(i.isin([2.0, np.pi]),
- np.array([False, False]))
- assert_numpy_array_equivalent(i.isin([np.nan]),
- np.array([False, True]))
- assert_numpy_array_equivalent(i.isin([1.0, np.nan]),
- np.array([True, True]))
+ tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False]))
+ tm.assert_numpy_array_equal(i.isin([2.0, np.pi]),
+ np.array([False, False]))
+ tm.assert_numpy_array_equal(i.isin([np.nan]),
+ np.array([False, True]))
+ tm.assert_numpy_array_equal(i.isin([1.0, np.nan]),
+ np.array([True, True]))
i = Float64Index([1.0, 2.0])
- assert_numpy_array_equivalent(i.isin([np.nan]),
- np.array([False, False]))
+ tm.assert_numpy_array_equal(i.isin([np.nan]),
+ np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
@@ -2313,11 +2314,11 @@ def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
- self.assert_numpy_array_equal(index, expected)
+ tm.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
- self.assert_numpy_array_equal(index, expected)
+ tm.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
@@ -2325,7 +2326,7 @@ def test_constructor(self):
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
- self.assert_numpy_array_equal(new_index, self.index)
+ tm.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
@@ -2438,19 +2439,19 @@ def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
- self.assert_numpy_array_equal(indexer, expected)
+ tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
- self.assert_numpy_array_equal(indexer, expected)
+ tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
- self.assert_numpy_array_equal(indexer, expected)
+ tm.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
@@ -2471,8 +2472,8 @@ def test_join_outer(self):
tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
@@ -2484,8 +2485,8 @@ def test_join_outer(self):
dtype=np.int64)
tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
@@ -2507,8 +2508,8 @@ def test_join_inner(self):
tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
@@ -2520,8 +2521,8 @@ def test_join_inner(self):
eridx = np.array([1, 4])
tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
@@ -2537,7 +2538,7 @@ def test_join_left(self):
tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
@@ -2547,7 +2548,7 @@ def test_join_left(self):
tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
@@ -2558,8 +2559,8 @@ def test_join_left(self):
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
@@ -2575,7 +2576,7 @@ def test_join_right(self):
tm.assertIsInstance(other, Int64Index)
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
@@ -2586,7 +2587,7 @@ def test_join_right(self):
dtype=np.int64)
tm.assertIsInstance(other, Int64Index)
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
@@ -2598,8 +2599,8 @@ def test_join_right(self):
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
- self.assert_numpy_array_equal(lidx, elidx)
- self.assert_numpy_array_equal(ridx, eridx)
+ tm.assert_numpy_array_equal(lidx, elidx)
+ tm.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
@@ -2645,10 +2646,10 @@ def test_join_non_unique(self):
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
- self.assert_numpy_array_equal(lidx, exp_lidx)
+ tm.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
- self.assert_numpy_array_equal(ridx, exp_ridx)
+ tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
@@ -2660,12 +2661,12 @@ def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
@@ -2683,11 +2684,11 @@ def test_union_noncomparable(self):
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
@@ -2831,19 +2832,19 @@ def test_get_loc(self):
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
- assert_numpy_array_equivalent(idx.get_loc(time(12)), [12])
- assert_numpy_array_equivalent(idx.get_loc(time(12, 30)), [])
+ tm.assert_numpy_array_equal(idx.get_loc(time(12)), [12])
+ tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)), [])
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
- self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
def test_roundtrip_pickle_with_tz(self):
@@ -2873,7 +2874,7 @@ def test_time_loc(self): # GH8667
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
- tm.assert_numpy_array_equivalent(ts.index.get_loc(key), i)
+ tm.assert_numpy_array_equal(ts.index.get_loc(key), i)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
@@ -2956,13 +2957,13 @@ def test_get_loc(self):
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
- self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
with self.assertRaisesRegexp(ValueError, 'different freq'):
idx.asfreq('D').get_indexer(idx)
@@ -3000,12 +3001,12 @@ def test_get_loc(self):
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
- self.assert_numpy_array_equivalent(idx.get_indexer(idx), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'pad'), [-1, 0, 1])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'backfill'), [0, 1, 2])
- self.assert_numpy_array_equivalent(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
+ tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
def test_numeric_compat(self):
@@ -3585,7 +3586,7 @@ def test_from_product(self):
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
- assert_numpy_array_equivalent(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
@@ -3595,7 +3596,7 @@ def test_from_product_datetimeindex(self):
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
- assert_numpy_array_equivalent(mi.values, etalon)
+ tm.assert_numpy_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
@@ -3605,9 +3606,9 @@ def test_values_boxed(self):
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
- assert_numpy_array_equivalent(mi.values, pd.lib.list_to_object_array(tuples))
+ tm.assert_numpy_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
- assert_numpy_array_equivalent(mi.values[:4], mi[:4].values)
+ tm.assert_numpy_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
@@ -3624,13 +3625,13 @@ def test_append(self):
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
# GH 10460
index = MultiIndex(levels=[CategoricalIndex(['A', 'B']),
@@ -3647,28 +3648,28 @@ def test_get_level_values_na(self):
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
- assert_numpy_array_equivalent(values.values.astype(float), expected)
+ tm.assert_numpy_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
- assert_numpy_array_equivalent(values.values.astype(float), expected)
+ tm.assert_numpy_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
- assert_numpy_array_equivalent(values.values.astype(float), expected)
+ tm.assert_numpy_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
- assert_numpy_array_equivalent(values.values, expected)
+ tm.assert_numpy_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
- assert_numpy_array_equivalent(values.values, expected.values)
+ tm.assert_numpy_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
@@ -4318,7 +4319,7 @@ def test_from_tuples(self):
def test_argsort(self):
result = self.index.argsort()
expected = self.index._tuple_index.argsort()
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
@@ -4464,9 +4465,9 @@ def test_insert(self):
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
- self.assert_numpy_array_equal(new_index.levels[0],
+ tm.assert_numpy_array_equal(new_index.levels[0],
list(self.index.levels[0]) + ['abc'])
- self.assert_numpy_array_equal(new_index.levels[1],
+ tm.assert_numpy_array_equal(new_index.levels[1],
list(self.index.levels[1]) + ['three'])
self.assertEqual(new_index[0], ('abc', 'three'))
@@ -4542,7 +4543,7 @@ def _check_how(other, how):
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
- self.assert_numpy_array_equal(join_index.values, exp_values)
+ tm.assert_numpy_array_equal(join_index.values, exp_values)
if how in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
@@ -4550,9 +4551,9 @@ def _check_how(other, how):
return_indexers=True)
self.assertTrue(join_index.equals(join_index2))
- self.assert_numpy_array_equal(lidx, lidx2)
- self.assert_numpy_array_equal(ridx, ridx2)
- self.assert_numpy_array_equal(join_index2.values, exp_values)
+ tm.assert_numpy_array_equal(lidx, lidx2)
+ tm.assert_numpy_array_equal(ridx, ridx2)
+ tm.assert_numpy_array_equal(join_index2.values, exp_values)
def _check_all(other):
_check_how(other, 'outer')
@@ -4600,11 +4601,11 @@ def test_reindex_level(self):
self.assertTrue(target.equals(exp_index))
exp_indexer = np.array([0, 2, 4])
- self.assert_numpy_array_equal(indexer, exp_indexer)
+ tm.assert_numpy_array_equal(indexer, exp_indexer)
self.assertTrue(target2.equals(exp_index2))
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
- self.assert_numpy_array_equal(indexer2, exp_indexer2)
+ tm.assert_numpy_array_equal(indexer2, exp_indexer2)
assertRaisesRegexp(TypeError, "Fill method not supported",
self.index.reindex, self.index, method='pad',
@@ -4694,14 +4695,14 @@ def check(nlevels, with_nulls):
for take_last in [False, True]:
left = mi.duplicated(take_last=take_last)
right = pd.lib.duplicated(mi.values, take_last=take_last)
- tm.assert_numpy_array_equivalent(left, right)
+ tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
self.assertFalse(mi.has_duplicates)
self.assertEqual(mi.get_duplicates(), [])
- self.assert_numpy_array_equivalent(mi.duplicated(), np.zeros(2, dtype='bool'))
+ tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
@@ -4712,8 +4713,8 @@ def check(nlevels, with_nulls):
self.assertEqual(len(mi), (n + 1) * (m + 1))
self.assertFalse(mi.has_duplicates)
self.assertEqual(mi.get_duplicates(), [])
- self.assert_numpy_array_equivalent(mi.duplicated(),
- np.zeros(len(mi), dtype='bool'))
+ tm.assert_numpy_array_equal(mi.duplicated(),
+ np.zeros(len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
@@ -4807,7 +4808,7 @@ def test_isin(self):
np.arange(4)])
result = idx.isin(values)
expected = np.array([False, False, True, True])
- self.assert_numpy_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = MultiIndex.from_arrays([[], []])
@@ -4817,9 +4818,9 @@ def test_isin(self):
def test_isin_nan(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
- self.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
+ tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
[False, False])
- self.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
+ tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
[False, False])
def test_isin_level_kwarg(self):
@@ -4830,11 +4831,11 @@ def test_isin_level_kwarg(self):
vals_1 = [2, 3, 10]
expected = np.array([False, False, True, True])
- self.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
- self.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
+ tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
+ tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
- self.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
- self.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
+ tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
+ tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
self.assertRaises(IndexError, idx.isin, vals_0, level=5)
self.assertRaises(IndexError, idx.isin, vals_0, level=-5)
@@ -4844,8 +4845,8 @@ def test_isin_level_kwarg(self):
self.assertRaises(KeyError, idx.isin, vals_1, level='A')
idx.names = ['A', 'B']
- self.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))
- self.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))
+ tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))
+ tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))
self.assertRaises(KeyError, idx.isin, vals_1, level='C')
@@ -4916,6 +4917,7 @@ def test_equals_operator(self):
# GH9785
self.assertTrue((self.index == self.index).all())
+
def test_get_combined_index():
from pandas.core.index import _get_combined_index
result = _get_combined_index([])
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 624fa11ac908a..d8fde1c2eaa98 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -813,7 +813,7 @@ def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from 0.12
def check(result, expected):
- self.assert_numpy_array_equal(result,expected)
+ tm.assert_numpy_array_equal(result,expected)
tm.assertIsInstance(result, np.ndarray)
@@ -4765,7 +4765,7 @@ def test_coercion_with_setitem(self):
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
@@ -4777,7 +4777,7 @@ def test_coercion_with_loc_setitem(self):
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
@@ -4789,7 +4789,7 @@ def test_coercion_with_setitem_and_series(self):
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
@@ -4801,7 +4801,7 @@ def test_coercion_with_loc_and_series(self):
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
@@ -4828,7 +4828,7 @@ def test_coercion_with_loc(self):
expected_dataframe = DataFrame({'foo': expected_result})
assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_dataframe['foo'].values,
expected_dataframe['foo'].values, strict_nan=True)
@@ -4840,7 +4840,7 @@ def test_coercion_with_setitem_and_dataframe(self):
expected_dataframe = DataFrame({'foo': expected_result})
assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_dataframe['foo'].values,
expected_dataframe['foo'].values, strict_nan=True)
@@ -4852,7 +4852,7 @@ def test_none_coercion_loc_and_dataframe(self):
expected_dataframe = DataFrame({'foo': expected_result})
assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_dataframe['foo'].values,
expected_dataframe['foo'].values, strict_nan=True)
@@ -4872,7 +4872,7 @@ def test_none_coercion_mixed_dtypes(self):
for column in expected_dataframe.columns:
assert_attr_equal('dtype', start_dataframe[column], expected_dataframe[column])
- self.assert_numpy_array_equivalent(
+ tm.assert_numpy_array_equal(
start_dataframe[column].values,
expected_dataframe[column].values, strict_nan=True)
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 5a1eb719270c4..7c51641b8e5da 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -658,7 +658,9 @@ def test_interleave_non_unique_cols(self):
df_unique = df.copy()
df_unique.columns = ['x', 'y']
- np.testing.assert_array_equal(df_unique.values, df.values)
+ self.assertEqual(df_unique.values.shape, df.values.shape)
+ tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
+ tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
@@ -1066,7 +1068,7 @@ def test_slice_iter(self):
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
- np.testing.assert_array_equal(
+ tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array,
np.asarray(asarray))
diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py
index f1670ce885d6c..2961301366188 100644
--- a/pandas/tests/test_reshape.py
+++ b/pandas/tests/test_reshape.py
@@ -15,7 +15,6 @@
import numpy as np
from pandas.util.testing import assert_frame_equal
-from numpy.testing import assert_array_equal
from pandas.core.reshape import (melt, lreshape, get_dummies,
wide_to_long)
@@ -234,7 +233,7 @@ def test_include_na(self):
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse)
exp_just_na = DataFrame(Series(1.0,index=[0]),columns=[nan])
- assert_array_equal(res_just_na.values, exp_just_na.values)
+ tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self): # See GH 6885 - get_dummies chokes on unicode values
import unicodedata
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index cb6659af9eca5..4beba4ee3751c 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1562,7 +1562,7 @@ def test_reshape_non_2d(self):
a = Series([1, 2, 3, 4])
result = a.reshape(2, 2)
expected = a.values.reshape(2, 2)
- np.testing.assert_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
self.assertTrue(type(result) is type(expected))
def test_reshape_2d_return_array(self):
@@ -7070,13 +7070,13 @@ def test_searchsorted_numeric_dtypes_scalar(self):
r = s.searchsorted([30])
e = np.array([2])
- tm.assert_array_equal(r, e)
+ tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4])
- tm.assert_array_equal(r, e)
+ tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
@@ -7090,14 +7090,14 @@ def test_search_sorted_datetime64_list(self):
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2])
- tm.assert_array_equal(r, e)
+ tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2])
- tm.assert_array_equal(r, e)
+ tm.assert_numpy_array_equal(r, e)
def test_to_frame_expanddim(self):
# GH 9762
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index facbd57512257..6a9ad175f42dd 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -11,7 +11,6 @@
from numpy import nan as NA
import numpy as np
-from numpy.testing import assert_array_equal
from numpy.random import randint
from pandas.compat import range, lrange, u, unichr
@@ -53,7 +52,7 @@ def test_iter(self):
# indices of each yielded Series should be equal to the index of
# the original Series
- assert_array_equal(s.index, ds.index)
+ tm.assert_numpy_array_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
@@ -561,7 +560,7 @@ def test_extract(self):
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d')
tm.assert_equal(result.name, 'uno')
- tm.assert_array_equal(result, klass(['A', 'A']))
+ tm.assert_numpy_array_equal(result, klass(['A', 'A']))
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
@@ -918,34 +917,34 @@ def test_index(self):
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
- tm.assert_array_equal(result, klass([4, 3, 1, 0]))
+ tm.assert_numpy_array_equal(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values])
- tm.assert_array_equal(result.values, expected)
+ tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
- tm.assert_array_equal(result, klass([4, 5, 7, 4]))
+ tm.assert_numpy_array_equal(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values])
- tm.assert_array_equal(result.values, expected)
+ tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
- tm.assert_array_equal(result, klass([4, 3, 7, 4]))
+ tm.assert_numpy_array_equal(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values])
- tm.assert_array_equal(result.values, expected)
+ tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
- tm.assert_array_equal(result, klass([4, 5, 7, 4]))
+ tm.assert_numpy_array_equal(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values])
- tm.assert_array_equal(result.values, expected)
+ tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
- tm.assert_array_equal(result, klass([4, 5, 7, 4]))
+ tm.assert_numpy_array_equal(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values])
- tm.assert_array_equal(result.values, expected)
+ tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
- tm.assert_array_equal(result, klass([4, 3, 1, 4]))
+ tm.assert_numpy_array_equal(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values])
- tm.assert_array_equal(result.values, expected)
+ tm.assert_numpy_array_equal(result.values, expected)
with tm.assertRaisesRegexp(ValueError, "substring not found"):
result = s.str.index('DE')
@@ -956,9 +955,9 @@ def test_index(self):
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
- tm.assert_array_equal(result, Series([1, 1, 0, np.nan]))
+ tm.assert_numpy_array_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
- tm.assert_array_equal(result, Series([3, 1, 2, np.nan]))
+ tm.assert_numpy_array_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
@@ -1054,17 +1053,17 @@ def test_translate(self):
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
- tm.assert_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
- tm.assert_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
- tm.assert_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
else:
with tm.assertRaisesRegexp(ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
@@ -1073,7 +1072,7 @@ def test_translate(self):
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
- tm.assert_array_equal(result, expected)
+ tm.assert_numpy_array_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 38296e3a5ff5a..668579911d6d5 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -10,8 +10,7 @@
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assertRaisesRegexp, raise_with_traceback,
- assert_series_equal, assert_frame_equal, assert_isinstance,
- RNGContext
+ assert_series_equal, assert_frame_equal, RNGContext
)
# let's get meta.
@@ -259,7 +258,7 @@ def test_warning(self):
self.assertNotAlmostEquals(1, 2)
with tm.assert_produces_warning(FutureWarning):
- assert_isinstance(Series([1, 2]), Series, msg='xxx')
+ tm.assert_isinstance(Series([1, 2]), Series, msg='xxx')
class TestLocale(tm.TestCase):
diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py
index 4a0218bef6001..eac6973bffb25 100644
--- a/pandas/tools/tests/test_tile.py
+++ b/pandas/tools/tests/test_tile.py
@@ -13,8 +13,6 @@
from pandas.tools.tile import cut, qcut
import pandas.tools.tile as tmod
-from numpy.testing import assert_equal, assert_almost_equal
-
class TestCut(tm.TestCase):
@@ -22,31 +20,31 @@ def test_simple(self):
data = np.ones(5)
result = cut(data, 4, labels=False)
desired = [1, 1, 1, 1, 1]
- assert_equal(result, desired)
+ tm.assert_numpy_array_equal(result, desired)
def test_bins(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
- assert_equal(result.codes, [0, 0, 0, 1, 2, 0])
- assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])
+ tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 1, 2, 0])
+ tm.assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])
def test_right(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
- assert_equal(result.codes, [0, 0, 0, 2, 3, 0, 0])
- assert_almost_equal(bins, [0.1905, 2.575, 4.95, 7.325, 9.7])
+ tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 2, 3, 0, 0])
+ tm.assert_almost_equal(bins, [0.1905, 2.575, 4.95, 7.325, 9.7])
def test_noright(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
- assert_equal(result.codes, [0, 0, 0, 2, 3, 0, 1])
- assert_almost_equal(bins, [0.2, 2.575, 4.95, 7.325, 9.7095])
+ tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 2, 3, 0, 1])
+ tm.assert_almost_equal(bins, [0.2, 2.575, 4.95, 7.325, 9.7095])
def test_arraylike(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
- assert_equal(result.codes, [0, 0, 0, 1, 2, 0])
- assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])
+ tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 1, 2, 0])
+ tm.assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])
def test_bins_not_monotonic(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
@@ -68,7 +66,7 @@ def test_cut_out_of_range_more(self):
s = Series([0, -1, 0, 1, -3])
ind = cut(s, [0, 1], labels=False)
exp = [np.nan, np.nan, np.nan, 0, np.nan]
- assert_almost_equal(ind, exp)
+ tm.assert_almost_equal(ind, exp)
def test_labels(self):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
@@ -122,8 +120,8 @@ def test_inf_handling(self):
ex_categories = ['(-inf, 2]', '(2, 4]', '(4, inf]']
- np.testing.assert_array_equal(result.categories, ex_categories)
- np.testing.assert_array_equal(result_ser.cat.categories, ex_categories)
+ tm.assert_numpy_array_equal(result.categories, ex_categories)
+ tm.assert_numpy_array_equal(result_ser.cat.categories, ex_categories)
self.assertEqual(result[5], '(4, inf]')
self.assertEqual(result[0], '(-inf, 2]')
self.assertEqual(result_ser[5], '(4, inf]')
@@ -134,7 +132,7 @@ def test_qcut(self):
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
- assert_almost_equal(bins, ex_bins)
+ tm.assert_almost_equal(bins, ex_bins)
ex_levels = cut(arr, ex_bins, include_lowest=True)
self.assert_numpy_array_equal(labels, ex_levels)
@@ -252,12 +250,12 @@ def test_series_retbins(self):
# GH 8589
s = Series(np.arange(4))
result, bins = cut(s, 2, retbins=True)
- assert_equal(result.cat.codes.values, [0, 0, 1, 1])
- assert_almost_equal(bins, [-0.003, 1.5, 3])
+ tm.assert_numpy_array_equal(result.cat.codes.values, [0, 0, 1, 1])
+ tm.assert_almost_equal(bins, [-0.003, 1.5, 3])
result, bins = qcut(s, 2, retbins=True)
- assert_equal(result.cat.codes.values, [0, 0, 1, 1])
- assert_almost_equal(bins, [0, 1.5, 3])
+ tm.assert_numpy_array_equal(result.cat.codes.values, [0, 0, 1, 1])
+ tm.assert_almost_equal(bins, [0, 1.5, 3])
def curpath():
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index a597087316f77..b9757c9e1b5d7 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -30,7 +30,6 @@
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import compat
-from numpy.testing import assert_array_equal
class TestPeriodProperties(tm.TestCase):
@@ -1979,7 +1978,7 @@ def test_negative_ordinals(self):
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
- assert_array_equal(idx1,idx2)
+ tm.assert_numpy_array_equal(idx1,idx2)
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
@@ -2393,7 +2392,7 @@ def test_map(self):
result = index.map(lambda x: x.ordinal)
exp = [x.ordinal for x in index]
- assert_array_equal(result, exp)
+ tm.assert_numpy_array_equal(result, exp)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
@@ -2418,7 +2417,7 @@ def test_map_with_string_constructor(self):
self.assertEqual(res.dtype, np.dtype('object').type)
# lastly, values should compare equal
- assert_array_equal(res, expected)
+ tm.assert_numpy_array_equal(res, expected)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 2ba65c07aa114..11c630599e0e7 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -5,7 +5,6 @@
import numpy as np
from numpy.testing.decorators import slow
-from numpy.testing import assert_array_equal
from pandas import Index, Series, DataFrame
@@ -306,7 +305,7 @@ def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
ax = bts.plot()
idx = ax.get_lines()[0].get_xdata()
- assert_array_equal(bts.index.to_period(), PeriodIndex(idx))
+ tm.assert_numpy_array_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
@@ -642,9 +641,9 @@ def test_mixed_freq_irregular_first(self):
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
- assert_array_equal(x1, s2.index.asobject.values)
+ tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
- assert_array_equal(x2, s1.index.asobject.values)
+ tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
@@ -674,9 +673,9 @@ def test_mixed_freq_irregular_first_df(self):
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
- assert_array_equal(x1, s2.index.asobject.values)
+ tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
- assert_array_equal(x2, s1.index.asobject.values)
+ tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
@@ -1044,7 +1043,7 @@ def test_ax_plot(self):
fig = plt.figure()
ax = fig.add_subplot(111)
lines = ax.plot(x, y, label='Y')
- assert_array_equal(DatetimeIndex(lines[0].get_xdata()), x)
+ tm.assert_numpy_array_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
@@ -1063,9 +1062,9 @@ def test_mpl_nopandas(self):
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
- assert_array_equal(np.array([x.toordinal() for x in dates]),
+ tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
line1.get_xydata()[:, 0])
- assert_array_equal(np.array([x.toordinal() for x in dates]),
+ tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
line2.get_xydata()[:, 0])
@slow
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 9703accc42695..85aaf32e4dae2 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -30,7 +30,6 @@
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
from numpy.random import rand
-from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
@@ -849,11 +848,11 @@ def test_string_na_nat_conversion(self):
result2 = to_datetime(strings)
tm.assertIsInstance(result2, DatetimeIndex)
- self.assert_numpy_array_equivalent(result, result2)
+ tm.assert_numpy_array_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
- self.assert_numpy_array_equivalent(result, malformed)
+ tm.assert_numpy_array_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
@@ -936,7 +935,7 @@ def test_nat_vector_field_access(self):
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else np.nan
for x in idx]
- self.assert_numpy_array_equivalent(result, np.array(expected))
+ self.assert_numpy_array_equal(result, np.array(expected))
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
@@ -2758,7 +2757,7 @@ def test_does_not_convert_mixed_integer(self):
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
- assert_array_equal(cols.values, joined.values)
+ tm.assert_numpy_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 97bae51b18248..54f4e70b36cc2 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -21,7 +21,6 @@
from numpy.random import randn, rand
import numpy as np
-from numpy.testing import assert_array_equal
import pandas as pd
from pandas.core.common import (is_sequence, array_equivalent, is_list_like, is_number,
@@ -631,34 +630,14 @@ def assert_categorical_equal(res, exp):
raise AssertionError("ordered not the same")
-def assert_numpy_array_equal(np_array, assert_equal, err_msg=None):
- """Checks that 'np_array' is equal to 'assert_equal'
+def assert_numpy_array_equal(np_array, assert_equal,
+ strict_nan=False, err_msg=None):
+ """Checks that 'np_array' is equivalent to 'assert_equal'.
- Note that the expected array should not contain `np.nan`!
- Two numpy arrays are equal if all
- elements are equal, which is not possible if `np.nan` is such an element!
-
- If the expected array includes `np.nan` use
- `assert_numpy_array_equivalent(...)`.
- """
- if np.array_equal(np_array, assert_equal):
- return
- if err_msg is None:
- err_msg = '{0} is not equal to {1}.'.format(np_array, assert_equal)
- raise AssertionError(err_msg)
-
-
-def assert_numpy_array_equivalent(np_array, assert_equal, strict_nan=False, err_msg=None):
- """Checks that 'np_array' is equivalent to 'assert_equal'
-
- Two numpy arrays are equivalent if the arrays have equal non-NaN elements,
- and `np.nan` in corresponding locations.
-
- If the the expected array does not contain `np.nan`
- `assert_numpy_array_equivalent` is the similar to
- `assert_numpy_array_equal()`. If the expected array includes
- `np.nan` use this
- function.
+ This is similar to ``numpy.testing.assert_array_equal``, but can
+ check equality including ``np.nan``. Two numpy arrays are regarded as
+ equivalent if the arrays have equal non-NaN elements,
+ and `np.nan` in corresponding locations.
"""
if array_equivalent(np_array, assert_equal, strict_nan=strict_nan):
return
@@ -694,7 +673,7 @@ def assert_series_equal(left, right, check_dtype=True,
'[datetimelike_compat=True] {0} is not equal to {1}.'.format(left.values,
right.values))
else:
- assert_numpy_array_equivalent(left.values, right.values)
+ assert_numpy_array_equal(left.values, right.values)
else:
assert_almost_equal(left.values, right.values, check_less_precise)
if check_less_precise:
| Closes #10427.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10512 | 2015-07-04T23:29:46Z | 2015-07-28T21:45:17Z | 2015-07-28T21:45:17Z | 2015-07-28T22:13:59Z |
TST: Simplify genelate_legacy_pickles.py usage | diff --git a/pandas/io/tests/generate_legacy_pickles.py b/pandas/io/tests/generate_legacy_pickles.py
index e1cf541fe6195..2d93ecf38a76d 100644
--- a/pandas/io/tests/generate_legacy_pickles.py
+++ b/pandas/io/tests/generate_legacy_pickles.py
@@ -143,16 +143,14 @@ def write_legacy_pickles():
except:
import pickle
- sys_version = version = pandas.__version__
- if len(sys.argv) < 2:
- exit("{0} <version> <output_dir>".format(sys.argv[0]))
+ version = pandas.__version__
+ if len(sys.argv) != 2:
+ exit("Specify output directory: generate_legacy_pickles.py <output_dir>")
- version = str(sys.argv[1])
- output_dir = str(sys.argv[2])
+ output_dir = str(sys.argv[1])
print("This script generates a pickle file for the current arch, system, and python version")
- print(" system version: {0}".format(sys_version))
- print(" output version: {0}".format(version))
+ print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
# construct a reasonable platform name
| Should not input pandas version manually to avoid any mistake.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10509 | 2015-07-04T11:42:17Z | 2015-07-06T12:41:42Z | 2015-07-06T12:41:42Z | 2015-07-06T12:59:35Z |
BUG: Groupby(sort=False) with datetime-like Categorical raises ValueError | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 322f431a37a79..83e5ec5b1d107 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -303,7 +303,22 @@ Other API Changes
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
- Serialize metadata properties of subclasses of pandas objects (:issue:`10553`).
- ``Categorical.name`` was removed to make `Categorical` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`).
+- ``Categorical.unique`` now returns new ``Categorical`` which ``categories`` and ``codes`` are unique, rather than returnning ``np.array`` (:issue:`10508`)
+ - unordered category: values and categories are sorted by appearance order.
+ - ordered category: values are sorted by appearance order, categories keeps existing order.
+
+.. ipython :: python
+
+ cat = pd.Categorical(['C', 'A', 'B', 'C'], categories=['A', 'B', 'C'], ordered=True)
+ cat
+ cat.unique()
+
+ cat = pd.Categorical(['C', 'A', 'B', 'C'], categories=['A', 'B', 'C'])
+ cat
+ cat.unique()
+
+- ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`)
- ``NaT``'s methods now either raise ``ValueError``, or return ``np.nan`` or ``NaT`` (:issue:`9513`)
=============================== ==============================================================
@@ -365,6 +380,9 @@ Bug Fixes
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
- Bug in ``io.sql.get_schema`` when specifying multiple columns as primary
key (:issue:`10385`).
+
+- Bug in ``groupby(sort=False)`` with datetime-like ``Categorical`` raises ``ValueError`` (:issue:`10505`)
+
- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
- Bug in ``Series.shift`` and ``DataFrame.shift`` not supporting categorical data (:issue:`9416`)
- Bug in ``Series.map`` using categorical ``Series`` raises ``AttributeError`` (:issue:`10324`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 1d1f0d7da80e4..1604705ff824a 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1558,19 +1558,30 @@ def mode(self):
def unique(self):
"""
- Return the unique values.
+ Return the ``Categorical`` which ``categories`` and ``codes`` are unique.
+ Unused categories are NOT returned.
- Unused categories are NOT returned. Unique values are returned in order
- of appearance.
+ - unordered category: values and categories are sorted by appearance
+ order.
+ - ordered category: values are sorted by appearance order, categories
+ keeps existing order.
Returns
-------
- unique values : array
+ unique values : ``Categorical``
"""
+
from pandas.core.nanops import unique1d
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
- return take_1d(self.categories.values, unique_codes)
+ cat = self.copy()
+ # keep nan in codes
+ cat._codes = unique_codes
+ # exclude nan from indexer for categories
+ take_codes = unique_codes[unique_codes != -1]
+ if self.ordered:
+ take_codes = sorted(take_codes)
+ return cat.set_categories(cat.categories.take(take_codes))
def equals(self, other):
"""
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index c01c6104ab904..2ed5774bdbec6 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1959,7 +1959,8 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
# fix bug #GH8868 sort=False being ignored in categorical groupby
else:
- self.grouper = self.grouper.reorder_categories(self.grouper.unique())
+ cat = self.grouper.unique()
+ self.grouper = self.grouper.reorder_categories(cat.categories)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index fdd20af6ab6ce..41c487adc0d6e 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -958,20 +958,59 @@ def test_min_max(self):
self.assertEqual(_max, 1)
def test_unique(self):
- cat = Categorical(["a","b"])
- exp = np.asarray(["a","b"])
+ # categories are reordered based on value when ordered=False
+ cat = Categorical(["a", "b"])
+ exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
- cat = Categorical(["a","b","a","a"], categories=["a","b","c"])
+ cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
+ tm.assert_categorical_equal(res, Categorical(exp))
- # unique should not sort
- cat = Categorical(["b", "b", np.nan, "a"], categories=["a","b","c"])
+ cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
+ exp = np.asarray(["c", "a", "b"])
+ res = cat.unique()
+ self.assert_numpy_array_equal(res, exp)
+ tm.assert_categorical_equal(res, Categorical(exp, categories=['c', 'a', 'b']))
+
+ # nan must be removed
+ cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
+ tm.assert_categorical_equal(res, Categorical(["b", np.nan, "a"], categories=["b", "a"]))
+
+ def test_unique_ordered(self):
+ # keep categories order when ordered=True
+ cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
+ res = cat.unique()
+ exp = np.asarray(['b', 'a'])
+ exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
+ self.assert_numpy_array_equal(res, exp)
+ tm.assert_categorical_equal(res, exp_cat)
+
+ cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True)
+ res = cat.unique()
+ exp = np.asarray(['c', 'b', 'a'])
+ exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
+ self.assert_numpy_array_equal(res, exp)
+ tm.assert_categorical_equal(res, exp_cat)
+
+ cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True)
+ res = cat.unique()
+ exp = np.asarray(['b', 'a'])
+ exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
+ self.assert_numpy_array_equal(res, exp)
+ tm.assert_categorical_equal(res, exp_cat)
+
+ cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'], ordered=True)
+ res = cat.unique()
+ exp = np.asarray(['b', np.nan, 'a'], dtype=object)
+ exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
+ self.assert_numpy_array_equal(res, exp)
+ tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index a73f4e2939578..f1df0d711b5d0 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3413,7 +3413,8 @@ def test_groupby_sort_categorical(self):
col = 'range'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
- assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
+ # when categories is ordered, group is ordered by category's order
+ assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
df['range'] = Categorical(df['range'],ordered=False)
index = Index(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], dtype='object')
@@ -3431,6 +3432,55 @@ def test_groupby_sort_categorical(self):
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
+ def test_groupby_sort_categorical_datetimelike(self):
+ # GH10505
+
+ # use same data as test_groupby_sort_categorical, which category is
+ # corresponding to datetime.month
+ df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
+ datetime(2011, 2, 1), datetime(2011, 5, 1),
+ datetime(2011, 2, 1), datetime(2011, 1, 1),
+ datetime(2011, 5, 1)],
+ 'foo': [10, 8, 5, 6, 4, 1, 7],
+ 'bar': [10, 20, 30, 40, 50, 60, 70]},
+ columns=['dt', 'foo', 'bar'])
+
+ # ordered=True
+ df['dt'] = Categorical(df['dt'], ordered=True)
+ index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 7, 1)]
+ result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
+ result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
+
+ index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 1, 1)]
+ result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
+ columns=['foo', 'bar'])
+ result_nosort.index = CategoricalIndex(index, categories=index,
+ name='dt', ordered=True)
+
+ col = 'dt'
+ assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
+ # when categories is ordered, group is ordered by category's order
+ assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
+
+ # ordered = False
+ df['dt'] = Categorical(df['dt'], ordered=False)
+ index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 7, 1)]
+ result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
+ result_sort.index = CategoricalIndex(index, name='dt')
+
+ index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 1, 1)]
+ result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
+ columns=['foo', 'bar'])
+ result_nosort.index = CategoricalIndex(index, categories=index, name='dt')
+
+ col = 'dt'
+ assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
+ assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
+
def test_groupby_sort_multiindex_series(self):
# series multiindex groupby sort argument was not being passed through _compress_group_index
| Closes #10505.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10508 | 2015-07-04T06:14:30Z | 2015-07-28T15:28:20Z | 2015-07-28T15:28:20Z | 2015-07-28T21:02:20Z |
TST: make assertion messages more understandable | diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index c145c717df4c4..66c2bbde0b3f8 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -178,7 +178,10 @@ def _check_orient(df, orient, dtype=None, numpy=False,
self.assertTrue(df.columns.equals(unser.columns))
elif orient == "values":
# index and cols are not captured in this orientation
- assert_almost_equal(df.values, unser.values)
+ if numpy is True and df.shape == (0, 0):
+ assert unser.shape[0] == 0
+ else:
+ assert_almost_equal(df.values, unser.values)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
@@ -670,15 +673,20 @@ def test_doc_example(self):
def test_misc_example(self):
# parsing unordered input fails
- result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]',numpy=True)
- expected = DataFrame([[1,2],[1,2]],columns=['a','b'])
- with tm.assertRaisesRegexp(AssertionError,
- '\[index\] left \[.+\], right \[.+\]'):
+ result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
+ expected = DataFrame([[1,2], [1,2]], columns=['a', 'b'])
+
+ error_msg = """DataFrame\\.index are different
+
+DataFrame\\.index values are different \\(100\\.0 %\\)
+\\[left\\]: Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
+\\[right\\]: Int64Index\\(\\[0, 1\\], dtype='int64'\\)"""
+ with tm.assertRaisesRegexp(AssertionError, error_msg):
assert_frame_equal(result, expected)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
- expected = DataFrame([[1,2],[1,2]],columns=['a','b'])
- assert_frame_equal(result,expected)
+ expected = DataFrame([[1,2], [1,2]], columns=['a','b'])
+ assert_frame_equal(result, expected)
@network
def test_round_trip_exception_(self):
@@ -739,3 +747,9 @@ def my_handler_raises(obj):
raise TypeError("raisin")
self.assertRaises(TypeError, DataFrame({'a': [1, 2, object()]}).to_json,
default_handler=my_handler_raises)
+
+
+if __name__ == '__main__':
+ import nose
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',
+ '--pdb-failure', '-s'], exit=False)
\ No newline at end of file
diff --git a/pandas/src/testing.pyx b/pandas/src/testing.pyx
index 4977a80acc936..1abc758559e70 100644
--- a/pandas/src/testing.pyx
+++ b/pandas/src/testing.pyx
@@ -55,11 +55,39 @@ cpdef assert_dict_equal(a, b, bint compare_keys=True):
return True
-cpdef assert_almost_equal(a, b, bint check_less_precise=False):
+cpdef assert_almost_equal(a, b, bint check_less_precise=False,
+ obj=None, lobj=None, robj=None):
+ """Check that left and right objects are almost equal.
+
+ Parameters
+ ----------
+ a : object
+ b : object
+ check_less_precise : bool, default False
+ Specify comparison precision.
+ 5 digits (False) or 3 digits (True) after decimal points are compared.
+ obj : str, default None
+ Specify object name being compared, internally used to show appropriate
+ assertion message
+ lobj : str, default None
+ Specify left object name being compared, internally used to show
+ appropriate assertion message
+ robj : str, default None
+ Specify right object name being compared, internally used to show
+ appropriate assertion message
+ """
+
cdef:
int decimal
+ double diff = 0.0
Py_ssize_t i, na, nb
double fa, fb
+ bint is_unequal = False
+
+ if lobj is None:
+ lobj = a
+ if robj is None:
+ robj = b
if isinstance(a, dict) or isinstance(b, dict):
return assert_dict_equal(a, b)
@@ -70,33 +98,62 @@ cpdef assert_almost_equal(a, b, bint check_less_precise=False):
return True
if isiterable(a):
- assert isiterable(b), (
- "First object is iterable, second isn't: %r != %r" % (a, b)
- )
+
+ if not isiterable(b):
+ from pandas.util.testing import raise_assert_detail
+ if obj is None:
+ obj = 'Iterable'
+ msg = "First object is iterable, second isn't"
+ raise_assert_detail(obj, msg, a, b)
+
assert has_length(a) and has_length(b), (
"Can't compare objects without length, one or both is invalid: "
"(%r, %r)" % (a, b)
)
- na, nb = len(a), len(b)
- assert na == nb, (
- "Length of two iterators not the same: %r != %r" % (na, nb)
- )
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
+ if obj is None:
+ obj = 'numpy array'
+ na, nb = a.size, b.size
+ if a.shape != b.shape:
+ from pandas.util.testing import raise_assert_detail
+ raise_assert_detail(obj, '{0} shapes are different'.format(obj),
+ a.shape, b.shape)
try:
if np.array_equal(a, b):
return True
except:
pass
+ else:
+ if obj is None:
+ obj = 'Iterable'
+ na, nb = len(a), len(b)
+
+ if na != nb:
+ from pandas.util.testing import raise_assert_detail
+ raise_assert_detail(obj, '{0} length are different'.format(obj),
+ na, nb)
+
+ for i in xrange(len(a)):
+ try:
+ assert_almost_equal(a[i], b[i], check_less_precise)
+ except AssertionError:
+ is_unequal = True
+ diff += 1
- for i in xrange(na):
- assert_almost_equal(a[i], b[i], check_less_precise)
+ if is_unequal:
+ from pandas.util.testing import raise_assert_detail
+ msg = '{0} values are different ({1} %)'.format(obj, np.round(diff * 100.0 / na, 5))
+ raise_assert_detail(obj, msg, lobj, robj)
return True
+
elif isiterable(b):
- assert False, (
- "Second object is iterable, first isn't: %r != %r" % (a, b)
- )
+ from pandas.util.testing import raise_assert_detail
+ if obj is None:
+ obj = 'Iterable'
+ msg = "Second object is iterable, first isn't"
+ raise_assert_detail(obj, msg, a, b)
if isnull(a):
assert isnull(b), (
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index d6e57e76d0ec9..3c988943301c0 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -3371,7 +3371,10 @@ def test_inplace_mutation_resets_values(self):
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
- exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
+ exp_values = np.empty((6, ), dtype=object)
+ exp_values[:] = [(long(1), 'a')] * 6
+ # must be 1d array of tuples
+ self.assertEqual(exp_values.shape, (6, ))
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
@@ -4772,8 +4775,20 @@ def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'),range(3)],names=['first','second'])
str(mi)
- tm.assert_index_equal(eval(repr(mi)),mi,exact=True)
-
+
+ if compat.PY3:
+ tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
+ else:
+ result = eval(repr(mi))
+ # string coerces to unicode
+ tm.assert_index_equal(result, mi, exact=False)
+ self.assertEqual(mi.get_level_values('first').inferred_type, 'string')
+ self.assertEqual(result.get_level_values('first').inferred_type, 'unicode')
+
+ mi_u = MultiIndex.from_product([list(u'ab'),range(3)],names=['first','second'])
+ result = eval(repr(mi_u))
+ tm.assert_index_equal(result, mi_u, exact=True)
+
# formatting
if compat.PY3:
str(mi)
@@ -4783,7 +4798,19 @@ def test_repr_roundtrip(self):
# long format
mi = MultiIndex.from_product([list('abcdefg'),range(10)],names=['first','second'])
result = str(mi)
- tm.assert_index_equal(eval(repr(mi)),mi,exact=True)
+
+ if compat.PY3:
+ tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
+ else:
+ result = eval(repr(mi))
+ # string coerces to unicode
+ tm.assert_index_equal(result, mi, exact=False)
+ self.assertEqual(mi.get_level_values('first').inferred_type, 'string')
+ self.assertEqual(result.get_level_values('first').inferred_type, 'unicode')
+
+ mi = MultiIndex.from_product([list(u'abcdefg'),range(10)],names=['first','second'])
+ result = eval(repr(mi_u))
+ tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 668579911d6d5..f4fbc19535107 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -10,7 +10,8 @@
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assertRaisesRegexp, raise_with_traceback,
- assert_series_equal, assert_frame_equal, RNGContext
+ assert_index_equal, assert_series_equal, assert_frame_equal,
+ assert_numpy_array_equal, assert_isinstance, RNGContext
)
# let's get meta.
@@ -132,6 +133,275 @@ def test_raise_with_traceback(self):
raise_with_traceback(e, traceback)
+class TestAssertNumpyArrayEqual(tm.TestCase):
+
+ def test_numpy_array_equal_message(self):
+
+ expected = """numpy array are different
+
+numpy array shapes are different
+\\[left\\]: \\(2,\\)
+\\[right\\]: \\(3,\\)"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]))
+
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]))
+
+ # scalar comparison
+ expected = """: 1 != 2"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(1, 2)
+ expected = """expected 2\\.00000 but got 1\\.00000, with decimal 5"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(1, 2)
+
+ # array / scalar array comparison
+ expected = """(numpy array|Iterable) are different
+
+First object is iterable, second isn't
+\\[left\\]: \\[1\\]
+\\[right\\]: 1"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([1]), 1)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(np.array([1]), 1)
+
+ # scalar / array comparison
+ expected = """(numpy array|Iterable) are different
+
+Second object is iterable, first isn't
+\\[left\\]: 1
+\\[right\\]: \\[1\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(1, np.array([1]))
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(1, np.array([1]))
+
+ expected = """numpy array are different
+
+numpy array values are different \\(66\\.66667 %\\)
+\\[left\\]: \\[nan, 2\\.0, 3\\.0\\]
+\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))
+
+ expected = """numpy array are different
+
+numpy array values are different \\(50\\.0 %\\)
+\\[left\\]: \\[1, 2\\]
+\\[right\\]: \\[1, 3\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([1, 2]), np.array([1, 3]))
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(np.array([1, 2]), np.array([1, 3]))
+
+
+ expected = """numpy array are different
+
+numpy array values are different \\(50\\.0 %\\)
+\\[left\\]: \\[1\\.1, 2\\.000001\\]
+\\[right\\]: \\[1\\.1, 2.0\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
+
+ # must pass
+ assert_almost_equal(np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
+
+ expected = """numpy array are different
+
+numpy array values are different \\(16\\.66667 %\\)
+\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
+\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([[1, 2], [3, 4], [5, 6]]),
+ np.array([[1, 3], [3, 4], [5, 6]]))
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(np.array([[1, 2], [3, 4], [5, 6]]),
+ np.array([[1, 3], [3, 4], [5, 6]]))
+
+ expected = """numpy array are different
+
+numpy array values are different \\(25\\.0 %\\)
+\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\]\\]
+\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([[1, 2], [3, 4]]),
+ np.array([[1, 3], [3, 4]]))
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(np.array([[1, 2], [3, 4]]),
+ np.array([[1, 3], [3, 4]]))
+
+ # allow to overwrite message
+ expected = """Index are different
+
+Index shapes are different
+\\[left\\]: \\(2,\\)
+\\[right\\]: \\(3,\\)"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]),
+ obj='Index')
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]),
+ obj='Index')
+
+ def test_assert_almost_equal_iterable_message(self):
+
+ expected = """Iterable are different
+
+Iterable length are different
+\\[left\\]: 2
+\\[right\\]: 3"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal([1, 2], [3, 4, 5])
+
+ expected = """Iterable are different
+
+Iterable values are different \\(50\\.0 %\\)
+\\[left\\]: \\[1, 2\\]
+\\[right\\]: \\[1, 3\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_almost_equal([1, 2], [1, 3])
+
+
+class TestAssertIndexEqual(unittest.TestCase):
+ _multiprocess_can_split_ = True
+
+ def test_index_equal_message(self):
+
+ expected = """Index are different
+
+Index levels are different
+\\[left\\]: 1, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
+\\[right\\]: 2, MultiIndex\\(levels=\\[\\[u?'A', u?'B'\\], \\[1, 2, 3, 4\\]\\],
+ labels=\\[\\[0, 0, 1, 1\\], \\[0, 1, 2, 3\\]\\]\\)"""
+ idx1 = pd.Index([1, 2, 3])
+ idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 3), ('B', 4)])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, exact=False)
+
+
+ expected = """MultiIndex level \\[1\\] are different
+
+MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
+\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
+\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
+ idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2), ('B', 3), ('B', 4)])
+ idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 3), ('B', 4)])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, check_exact=False)
+
+ expected = """Index are different
+
+Index length are different
+\\[left\\]: 3, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
+\\[right\\]: 4, Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
+ idx1 = pd.Index([1, 2, 3])
+ idx2 = pd.Index([1, 2, 3, 4])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, check_exact=False)
+
+ expected = """Index are different
+
+Index classes are different
+\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
+\\[right\\]: Float64Index\\(\\[1\\.0, 2\\.0, 3\\.0\\], dtype='float64'\\)"""
+ idx1 = pd.Index([1, 2, 3])
+ idx2 = pd.Index([1, 2, 3.0])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, exact=True)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, exact=True, check_exact=False)
+
+ expected = """Index are different
+
+Index values are different \\(33\\.33333 %\\)
+\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
+\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0000000001\\], dtype='float64'\\)"""
+ idx1 = pd.Index([1, 2, 3.])
+ idx2 = pd.Index([1, 2, 3.0000000001])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+
+ # must success
+ assert_index_equal(idx1, idx2, check_exact=False)
+
+ expected = """Index are different
+
+Index values are different \\(33\\.33333 %\\)
+\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
+\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0001\\], dtype='float64'\\)"""
+ idx1 = pd.Index([1, 2, 3.])
+ idx2 = pd.Index([1, 2, 3.0001])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, check_exact=False)
+ # must success
+ assert_index_equal(idx1, idx2, check_exact=False, check_less_precise=True)
+
+ expected = """Index are different
+
+Index values are different \\(33\\.33333 %\\)
+\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
+\\[right\\]: Int64Index\\(\\[1, 2, 4\\], dtype='int64'\\)"""
+ idx1 = pd.Index([1, 2, 3])
+ idx2 = pd.Index([1, 2, 4])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, check_less_precise=True)
+
+ expected = """MultiIndex level \\[1\\] are different
+
+MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
+\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
+\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
+ idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2), ('B', 3), ('B', 4)])
+ idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 3), ('B', 4)])
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2, check_exact=False)
+
+ def test_index_equal_metadata_message(self):
+
+ expected = """Index are different
+
+Attribute "names" are different
+\\[left\\]: \\[None\\]
+\\[right\\]: \\[u?'x'\\]"""
+ idx1 = pd.Index([1, 2, 3])
+ idx2 = pd.Index([1, 2, 3], name='x')
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+
+ # same name, should pass
+ assert_index_equal(pd.Index([1, 2, 3], name=np.nan),
+ pd.Index([1, 2, 3], name=np.nan))
+ assert_index_equal(pd.Index([1, 2, 3], name=pd.NaT),
+ pd.Index([1, 2, 3], name=pd.NaT))
+
+
+ expected = """Index are different
+
+Attribute "names" are different
+\\[left\\]: \\[nan\\]
+\\[right\\]: \\[NaT\\]"""
+ idx1 = pd.Index([1, 2, 3], name=np.nan)
+ idx2 = pd.Index([1, 2, 3], name=pd.NaT)
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_index_equal(idx1, idx2)
+
+
class TestAssertSeriesEqual(tm.TestCase):
_multiprocess_can_split_ = True
@@ -191,6 +461,28 @@ def test_multiindex_dtype(self):
{'a':[1.0,2.0],'b':[2.1,1.5],'c':['l1','l2']}, index=['a','b'])
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
+ def test_series_equal_message(self):
+
+ expected = """Series are different
+
+Series length are different
+\\[left\\]: 3, Int64Index\\(\\[0, 1, 2\\], dtype='int64'\\)
+\\[right\\]: 4, Int64Index\\(\\[0, 1, 2, 3\\], dtype='int64'\\)"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3, 4]))
+
+
+ expected = """Series are different
+
+Series values are different \\(33\\.33333 %\\)
+\\[left\\]: \\[1, 2, 3\\]
+\\[right\\]: \\[1, 2, 4\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]))
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]),
+ check_less_precise=True)
+
class TestAssertFrameEqual(tm.TestCase):
_multiprocess_can_split_ = True
@@ -224,6 +516,65 @@ def test_empty_dtypes(self):
self._assert_equal(df1, df2, check_dtype=False)
self._assert_not_equal(df1, df2, check_dtype=True)
+ def test_frame_equal_message(self):
+
+ expected = """DataFrame are different
+
+DataFrame shape \\(number of rows\\) are different
+\\[left\\]: 3, Int64Index\\(\\[0, 1, 2\\], dtype='int64'\\)
+\\[right\\]: 4, Int64Index\\(\\[0, 1, 2, 3\\], dtype='int64'\\)"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_frame_equal(pd.DataFrame({'A':[1, 2, 3]}),
+ pd.DataFrame({'A':[1, 2, 3, 4]}))
+
+
+ expected = """DataFrame are different
+
+DataFrame shape \\(number of columns\\) are different
+\\[left\\]: 2, Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
+\\[right\\]: 1, Index\\(\\[u?'A'\\], dtype='object'\\)"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]}),
+ pd.DataFrame({'A':[1, 2, 3]}))
+
+
+ expected = """DataFrame\\.index are different
+
+DataFrame\\.index values are different \\(33\\.33333 %\\)
+\\[left\\]: Index\\(\\[u?'a', u?'b', u?'c'\\], dtype='object'\\)
+\\[right\\]: Index\\(\\[u?'a', u?'b', u?'d'\\], dtype='object'\\)"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]},
+ index=['a', 'b', 'c']),
+ pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]},
+ index=['a', 'b', 'd']))
+
+ expected = """DataFrame\\.columns are different
+
+DataFrame\\.columns values are different \\(50\\.0 %\\)
+\\[left\\]: Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
+\\[right\\]: Index\\(\\[u?'A', u?'b'\\], dtype='object'\\)"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]},
+ index=['a', 'b', 'c']),
+ pd.DataFrame({'A':[1, 2, 3], 'b':[4, 5, 6]},
+ index=['a', 'b', 'c']))
+
+
+ expected = """DataFrame\\.iloc\\[:, 1\\] are different
+
+DataFrame\\.iloc\\[:, 1\\] values are different \\(33\\.33333 %\\)
+\\[left\\]: \\[4, 5, 6\\]
+\\[right\\]: \\[4, 5, 7\\]"""
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]}),
+ pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 7]}))
+
+ with assertRaisesRegexp(AssertionError, expected):
+ assert_frame_equal(pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 6]}),
+ pd.DataFrame({'A':[1, 2, 3], 'B':[4, 5, 7]}),
+ by_blocks=True)
+
class TestRNGContext(unittest.TestCase):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 979ac007c7500..4b7c8d4540e0f 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -23,8 +23,9 @@
import numpy as np
import pandas as pd
-from pandas.core.common import (is_sequence, array_equivalent, is_list_like, is_number,
- is_datetimelike_v_numeric, is_datetimelike_v_object)
+from pandas.core.common import (is_sequence, array_equivalent, is_list_like,
+ is_datetimelike_v_numeric, is_datetimelike_v_object,
+ is_number, pprint_thing, take_1d)
import pandas.compat as compat
from pandas.compat import(
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
@@ -536,23 +537,128 @@ def assert_equal(a, b, msg=""):
assert a == b, "%s: %r != %r" % (msg.format(a,b), a, b)
-def assert_index_equal(left, right, exact=False, check_names=True):
+def assert_index_equal(left, right, exact=False, check_names=True,
+ check_less_precise=False, check_exact=True, obj='Index'):
+ """Check that left and right Index are equal.
+
+ Parameters
+ ----------
+ left : Index
+ right : Index
+ exact : bool, default False
+ Whether to check the Index class, dtype and inferred_type are identical.
+ check_names : bool, default True
+ Whether to check the names attribute.
+ check_less_precise : bool, default False
+ Specify comparison precision. Only used when check_exact is False.
+ 5 digits (False) or 3 digits (True) after decimal points are compared.
+ check_exact : bool, default True
+ Whether to compare number exactly.
+ obj : str, default 'Index'
+ Specify object name being compared, internally used to show appropriate
+ assertion message
+ """
+
+ def _check_types(l, r, obj='Index'):
+ if exact:
+ if type(l) != type(r):
+ msg = '{0} classes are different'.format(obj)
+ raise_assert_detail(obj, msg, l, r)
+ assert_attr_equal('dtype', l, r, obj=obj)
+ assert_attr_equal('inferred_type', l, r, obj=obj)
+
+ def _get_ilevel_values(index, level):
+ # accept level number only
+ unique = index.levels[level]
+ labels = index.labels[level]
+ filled = take_1d(unique.values, labels, fill_value=unique._na_value)
+ values = unique._simple_new(filled, index.names[level],
+ freq=getattr(unique, 'freq', None),
+ tz=getattr(unique, 'tz', None))
+ return values
+
+ # instance validation
assertIsInstance(left, Index, '[index] ')
assertIsInstance(right, Index, '[index] ')
- if not left.equals(right) or (exact and type(left) != type(right)):
- raise AssertionError("[index] left [{0} {1}], right [{2} {3}]".format(left.dtype,
- left,
- right,
- right.dtype))
+
+ # class / dtype comparison
+ _check_types(left, right)
+
+ # level comparison
+ if left.nlevels != right.nlevels:
+ raise_assert_detail(obj, '{0} levels are different'.format(obj),
+ '{0}, {1}'.format(left.nlevels, left),
+ '{0}, {1}'.format(right.nlevels, right))
+
+ # length comparison
+ if len(left) != len(right):
+ raise_assert_detail(obj, '{0} length are different'.format(obj),
+ '{0}, {1}'.format(len(left), left),
+ '{0}, {1}'.format(len(right), right))
+
+ # MultiIndex special comparison for little-friendly error messages
+ if left.nlevels > 1:
+ for level in range(left.nlevels):
+ # cannot use get_level_values here because it can change dtype
+ llevel = _get_ilevel_values(left, level)
+ rlevel = _get_ilevel_values(right, level)
+
+ lobj = 'MultiIndex level [{0}]'.format(level)
+ assert_index_equal(llevel, rlevel,
+ exact=exact, check_names=check_names,
+ check_less_precise=check_less_precise,
+ check_exact=check_exact, obj=lobj)
+ # get_level_values may change dtype
+ _check_types(left.levels[level], right.levels[level], obj=obj)
+
+ if check_exact:
+ if not left.equals(right):
+ diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
+ msg = '{0} values are different ({1} %)'.format(obj, np.round(diff, 5))
+ raise_assert_detail(obj, msg, left, right)
+ else:
+ assert_almost_equal(left.values, right.values,
+ check_less_precise=check_less_precise,
+ obj=obj, lobj=left, robj=right)
+
+ # metadata comparison
if check_names:
- assert_attr_equal('names', left, right)
+ assert_attr_equal('names', left, right, obj=obj)
+
+def assert_attr_equal(attr, left, right, obj='Attributes'):
+ """checks attributes are equal. Both objects must have attribute.
+
+ Parameters
+ ----------
+ attr : str
+ Attribute name being compared.
+ left : object
+ right : object
+ obj : str, default 'Attributes'
+ Specify object name being compared, internally used to show appropriate
+ assertion message
+ """
-def assert_attr_equal(attr, left, right):
- """checks attributes are equal. Both objects must have attribute."""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
- assert_equal(left_attr,right_attr,"attr is not equal [{0}]" .format(attr))
+
+ if left_attr is right_attr:
+ return True
+ elif (is_number(left_attr) and np.isnan(left_attr) and
+ is_number(right_attr) and np.isnan(right_attr)):
+ # np.nan
+ return True
+
+ result = left_attr == right_attr
+ if not isinstance(result, bool):
+ result = result.all()
+
+ if result:
+ return True
+ else:
+ raise_assert_detail(obj, 'Attribute "{0}" are different'.format(attr),
+ left_attr, right_attr)
def isiterable(obj):
@@ -607,6 +713,7 @@ def assertIsInstance(obj, cls, msg=''):
def assert_isinstance(obj, class_type_or_tuple, msg=''):
return deprecate('assert_isinstance', assertIsInstance)(obj, class_type_or_tuple, msg=msg)
+
def assertNotIsInstance(obj, cls, msg=''):
"""Test that obj is not an instance of cls
(which can be a class or a tuple of classes,
@@ -630,8 +737,23 @@ def assert_categorical_equal(res, exp):
raise AssertionError("ordered not the same")
-def assert_numpy_array_equal(np_array, assert_equal,
- strict_nan=False, err_msg=None):
+def raise_assert_detail(obj, message, left, right):
+ if isinstance(left, np.ndarray):
+ left = pprint_thing(left)
+ if isinstance(right, np.ndarray):
+ right = pprint_thing(right)
+
+ msg = """{0} are different
+
+{1}
+[left]: {2}
+[right]: {3}""".format(obj, message, left, right)
+ raise AssertionError(msg)
+
+
+def assert_numpy_array_equal(left, right,
+ strict_nan=False, err_msg=None,
+ obj='numpy array'):
"""Checks that 'np_array' is equivalent to 'assert_equal'.
This is similar to ``numpy.testing.assert_array_equal``, but can
@@ -639,10 +761,42 @@ def assert_numpy_array_equal(np_array, assert_equal,
equivalent if the arrays have equal non-NaN elements,
and `np.nan` in corresponding locations.
"""
- if array_equivalent(np_array, assert_equal, strict_nan=strict_nan):
+
+ # compare shape and values
+ if array_equivalent(left, right, strict_nan=strict_nan):
return
+
if err_msg is None:
- err_msg = '{0} is not equivalent to {1}.'.format(np_array, assert_equal)
+ # show detailed error
+
+ if np.isscalar(left) and np.isscalar(right):
+ # show scalar comparison error
+ assert_equal(left, right)
+ elif is_list_like(left) and is_list_like(right):
+ # some test cases pass list
+ left = np.asarray(left)
+ right = np.array(right)
+
+ if left.shape != right.shape:
+ raise_assert_detail(obj, '{0} shapes are different'.format(obj),
+ left.shape, right.shape)
+
+ diff = 0
+ for l, r in zip(left, right):
+ # count up differences
+ if not array_equivalent(l, r, strict_nan=strict_nan):
+ diff += 1
+
+ diff = diff * 100.0 / left.size
+ msg = '{0} values are different ({1} %)'.format(obj, np.round(diff, 5))
+ raise_assert_detail(obj, msg, left, right)
+ elif is_list_like(left):
+ msg = "First object is iterable, second isn't"
+ raise_assert_detail(obj, msg, left, right)
+ else:
+ msg = "Second object is iterable, first isn't"
+ raise_assert_detail(obj, msg, left, right)
+
raise AssertionError(err_msg)
@@ -651,17 +805,62 @@ def assert_series_equal(left, right, check_dtype=True,
check_index_type=False,
check_series_type=False,
check_less_precise=False,
- check_exact=False,
check_names=True,
- check_datetimelike_compat=False):
+ check_exact=False,
+ check_datetimelike_compat=False,
+ obj='Series'):
+
+ """Check that left and right Series are equal.
+
+ Parameters
+ ----------
+ left : Series
+ right : Series
+ check_dtype : bool, default True
+ Whether to check the Series dtype is identical.
+ check_index_type : bool, default False
+ Whether to check the Index class, dtype and inferred_type are identical.
+ check_series_type : bool, default False
+ Whether to check the Series class is identical.
+ check_less_precise : bool, default False
+ Specify comparison precision. Only used when check_exact is False.
+ 5 digits (False) or 3 digits (True) after decimal points are compared.
+ check_exact : bool, default False
+ Whether to compare number exactly.
+ check_names : bool, default True
+ Whether to check the Series and Index names attribute.
+ check_dateteimelike_compat : bool, default False
+ Compare datetime-like which is comparable ignoring dtype.
+ obj : str, default 'Series'
+ Specify object name being compared, internally used to show appropriate
+ assertion message
+ """
+
+ # instance validation
+ assertIsInstance(left, Series, '[Series] ')
+ assertIsInstance(right, Series, '[Series] ')
+
if check_series_type:
assertIsInstance(left, type(right))
+
+ # length comparison
+ if len(left) != len(right):
+ raise_assert_detail(obj, 'Series length are different',
+ '{0}, {1}'.format(len(left), left.index),
+ '{0}, {1}'.format(len(right), right.index))
+
+ # index comparison
+ assert_index_equal(left.index, right.index, exact=check_index_type,
+ check_names=check_names,
+ check_less_precise=check_less_precise, check_exact=check_exact,
+ obj='{0}.index'.format(obj))
+
if check_dtype:
assert_attr_equal('dtype', left, right)
+
if check_exact:
- if not np.array_equal(left.values, right.values):
- raise AssertionError('{0} is not equal to {1}.'.format(left.values,
- right.values))
+ assert_numpy_array_equal(left.get_values(), right.get_values(),
+ obj='{0}'.format(obj))
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check the values in that case
@@ -675,27 +874,12 @@ def assert_series_equal(left, right, check_dtype=True,
else:
assert_numpy_array_equal(left.values, right.values)
else:
- assert_almost_equal(left.values, right.values, check_less_precise)
- if check_less_precise:
- assert_almost_equal(
- left.index.values, right.index.values, check_less_precise)
- else:
- assert_index_equal(left.index, right.index, check_names=check_names)
- if check_index_type:
- for level in range(left.index.nlevels):
- lindex = left.index.get_level_values(level)
- rindex = right.index.get_level_values(level)
- assertIsInstance(lindex, type(rindex))
- assert_attr_equal('dtype', lindex, rindex)
- assert_attr_equal('inferred_type', lindex, rindex)
+ assert_almost_equal(left.get_values(), right.get_values(),
+ check_less_precise, obj='{0}'.format(obj))
+
+ # metadata comparison
if check_names:
- if is_number(left.name) and np.isnan(left.name):
- # Series.name can be np.nan in some test cases
- assert is_number(right.name) and np.isnan(right.name)
- elif left.name is pd.NaT:
- assert right.name is pd.NaT
- else:
- assert_attr_equal('name', left, right)
+ assert_attr_equal('name', left, right, obj=obj)
# This could be refactored to use the NDFrame.equals method
@@ -707,19 +891,69 @@ def assert_frame_equal(left, right, check_dtype=True,
check_names=True,
by_blocks=False,
check_exact=False,
- check_datetimelike_compat=False):
+ check_datetimelike_compat=False,
+ obj='DataFrame'):
+
+ """Check that left and right DataFrame are equal.
+
+ Parameters
+ ----------
+ left : DataFrame
+ right : DataFrame
+ check_dtype : bool, default True
+ Whether to check the DataFrame dtype is identical.
+ check_index_type : bool, default False
+ Whether to check the Index class, dtype and inferred_type are identical.
+ check_column_type : bool, default False
+ Whether to check the columns class, dtype and inferred_type are identical.
+ check_frame_type : bool, default False
+ Whether to check the DataFrame class is identical.
+ check_less_precise : bool, default False
+ Specify comparison precision. Only used when check_exact is False.
+ 5 digits (False) or 3 digits (True) after decimal points are compared.
+ check_names : bool, default True
+ Whether to check the Index names attribute.
+ by_blocks : bool, default False
+ Specify how to compare internal data. If False, compare by columns.
+ If True, compare by blocks.
+ check_exact : bool, default False
+ Whether to compare number exactly.
+ check_dateteimelike_compat : bool, default False
+ Compare datetime-like which is comparable ignoring dtype.
+ obj : str, default 'DataFrame'
+ Specify object name being compared, internally used to show appropriate
+ assertion message
+ """
+
+ # instance validation
+ assertIsInstance(left, DataFrame, '[DataFrame] ')
+ assertIsInstance(right, DataFrame, '[DataFrame] ')
+
if check_frame_type:
assertIsInstance(left, type(right))
- assertIsInstance(left, DataFrame)
- assertIsInstance(right, DataFrame)
- if check_less_precise:
- if not by_blocks:
- assert_almost_equal(left.columns, right.columns)
- assert_almost_equal(left.index, right.index)
- else:
- if not by_blocks:
- assert_index_equal(left.columns, right.columns, check_names=check_names)
+ # shape comparison (row)
+ if left.shape[0] != right.shape[0]:
+ raise_assert_detail(obj, 'DataFrame shape (number of rows) are different',
+ '{0}, {1}'.format(left.shape[0], left.index),
+ '{0}, {1}'.format(right.shape[0], right.index))
+ # shape comparison (columns)
+ if left.shape[1] != right.shape[1]:
+ raise_assert_detail(obj, 'DataFrame shape (number of columns) are different',
+ '{0}, {1}'.format(left.shape[1], left.columns),
+ '{0}, {1}'.format(right.shape[1], right.columns))
+
+ # index comparison
+ assert_index_equal(left.index, right.index, exact=check_index_type,
+ check_names=check_names,
+ check_less_precise=check_less_precise, check_exact=check_exact,
+ obj='{0}.index'.format(obj))
+
+ # column comparison
+ assert_index_equal(left.columns, right.columns, exact=check_column_type,
+ check_names=check_names,
+ check_less_precise=check_less_precise, check_exact=check_exact,
+ obj='{0}.columns'.format(obj))
# compare by blocks
if by_blocks:
@@ -728,7 +962,8 @@ def assert_frame_equal(left, right, check_dtype=True,
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
- assert_frame_equal(lblocks[dtype],rblocks[dtype], check_dtype=check_dtype)
+ assert_frame_equal(lblocks[dtype], rblocks[dtype],
+ check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
@@ -742,22 +977,8 @@ def assert_frame_equal(left, right, check_dtype=True,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
- check_datetimelike_compat=check_datetimelike_compat)
-
- if check_index_type:
- for level in range(left.index.nlevels):
- lindex = left.index.get_level_values(level)
- rindex = right.index.get_level_values(level)
- assertIsInstance(lindex, type(rindex))
- assert_attr_equal('dtype', lindex, rindex)
- assert_attr_equal('inferred_type', lindex, rindex)
- if check_column_type:
- assertIsInstance(left.columns, type(right.columns))
- assert_attr_equal('dtype', left.columns, right.columns)
- assert_attr_equal('inferred_type', left.columns, right.columns)
- if check_names:
- assert_attr_equal('names', left.index, right.index)
- assert_attr_equal('names', left.columns, right.columns)
+ check_datetimelike_compat=check_datetimelike_compat,
+ obj='DataFrame.iloc[:, {0}]'.format(i))
def assert_panelnd_equal(left, right,
| Closes #10373. Also, this is based on #10500 and #10501.
The fix also did some refactoring related to `assert_index_equal`. Added `check_exact` and `check_less_precise`, and moved logics from `assert_series_equal` and `assert_frame_equal` for cleanups.
Followings are the list of tested and what the output looks like
## Index
- Shape (size)
- Dtype
- Values
- Metadata
```
tm.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3, 4]))
# AssertionError: Index are different
#
# Index length are different
# [left]: 3, Int64Index([1, 2, 3], dtype='int64')
# [right]: 4, Int64Index([1, 2, 3, 4], dtype='int64')
tm.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 5]))
# AssertionError: Index are different
#
# Index values are different (33.33333 %)
# [left]: Int64Index([1, 2, 3], dtype='int64')
# [right]: Int64Index([1, 2, 5], dtype='int64')
tm.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3], name='x'))
# AssertionError: Index are different
#
# Attribute "names" are different
# [left]: [None]
# [right]: [u'x']
```
## Series
- Shape (size)
- Dtype
- Index (same as above)
- Values
- Metadata (same as above)
```
tm.assert_series_equal(pd.Series([1, 2]), pd.Series([1, 2, 3]))
# AssertionError: Series are different
#
# Series length are different
# [left]: 2, Int64Index([0, 1], dtype='int64')
# [right]: 3, Int64Index([0, 1, 2], dtype='int64')
tm.assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]))
# AssertionError: Series are different
#
# Series values are different (33.33333 %)
# [left]: [1, 2, 3]
# [right]: [1, 2, 4]
```
## DataFrame
- Shape (size)
- Dtype
- Index (same as above)
- Column (almost same as above, but different summary)
- Values
```
tm.assert_frame_equal(pd.DataFrame([[1, 2], [3, 4]]), pd.DataFrame([[1, 2, 3], [4, 5, 6]]))
# AssertionError: DataFrame are different
#
# DataFrame shape (number of columns) are different
# [left]: 2, Int64Index([0, 1], dtype='int64')
# [right]: 3, Int64Index([0, 1, 2], dtype='int64')
tm.assert_frame_equal(pd.DataFrame([[1, 2], [3, 4]]), pd.DataFrame([[1, 2], [3, 4], [5, 6]]))
# AssertionError: DataFrame are different
#
# DataFrame shape (number of rows) are different
# [left]: 2, Int64Index([0, 1], dtype='int64')
# [right]: 3, Int64Index([0, 1, 2], dtype='int64')
tm.assert_frame_equal(pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']), pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']))
# AssertionError: DataFrame.columns are different
#
# DataFrame.columns values are different (100.0 %)
# [left]: Index([u'A', u'B'], dtype='object')
# [right]: Index([u'a', u'b'], dtype='object')
tm.assert_frame_equal(pd.DataFrame([[1, 2], [3, 4]]), pd.DataFrame([[1, 2], [3, 5]]))
# AssertionError: DataFrame.iloc[1, :] are different
#
# DataFrame.iloc[1, :] values are different (50.0 %)
# [left]: [2, 4]
# [right]: [2, 5]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10507 | 2015-07-04T01:05:22Z | 2015-08-10T16:27:46Z | 2015-08-10T16:27:46Z | 2015-08-11T11:08:53Z |
CLN: remove na_fvalues from TextFileReader (read_csv et al) signature (GH10481) | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index dd972150de0fe..8a118b1379a29 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -355,7 +355,6 @@ def parser_f(filepath_or_buffer,
skipfooter=None,
skip_footer=0,
na_values=None,
- na_fvalues=None,
true_values=None,
false_values=None,
delimiter=None,
@@ -431,7 +430,6 @@ def parser_f(filepath_or_buffer,
prefix=prefix,
skiprows=skiprows,
na_values=na_values,
- na_fvalues=na_fvalues,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
| Closes #10481
| https://api.github.com/repos/pandas-dev/pandas/pulls/10502 | 2015-07-03T15:41:16Z | 2015-07-13T13:14:25Z | 2015-07-13T13:14:25Z | 2015-07-13T13:14:25Z |
TST: Fix test for datetime categorical | diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index a327233e09003..e2a447207db82 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3505,11 +3505,13 @@ def test_groupby_datetime_categorical(self):
desc_result = grouped.describe()
idx = cats.codes.argsort()
- ord_labels = np.asarray(cats).take(idx)
+ ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
- expected = ord_data.groupby(ord_labels, sort=False).describe()
+ expected = ord_data.groupby(ord_labels).describe()
expected.index.names = ['myfactor', None]
assert_frame_equal(desc_result, expected)
+ tm.assert_index_equal(desc_result.index, expected.index)
+ tm.assert_index_equal(desc_result.index.get_level_values(0), expected.index.get_level_values(0))
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, name='myfactor', ordered=True)
| Related to #10465, but different part.
Current `test_groupby_datetime_categorical` in `test_groupby.py` is incorrect, the actual result returns `CategoricalIndex` as level 0, otherwise expected result uses `DatetimeIndex` as level 0. Changed to use the same dtype and added explicit comparison.
### Actual Result (current test case)
```
levels = pd.date_range('2014-01-01', periods=4)
codes = np.random.randint(0, 4, size=100)
cats = pd.Categorical.from_codes(codes, levels, name='myfactor', ordered=True)
data = pd.DataFrame(np.random.randn(100, 4))
grouped = data.groupby(cats)
desc_result = grouped.describe()
desc_result.index.get_level_values(0)
# CategoricalIndex([2014-01-01T09:00:00.000000000+0900,
# ...
# 2014-01-04T09:00:00.000000000+0900],
# categories=[2014-01-01 00:00:00, 2014-01-02 00:00:00, 2014-01-03 00:00:00, 2014-01-04 00:00:00],
# ordered=True, name=u'myfactor', dtype='category')
```
### Expected Result (current test case)
It must be `CategoricalIndex`.
```
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, sort=False).describe()
expected.index.names = ['myfactor', None]
expected.index.get_level_values(0)
# DatetimeIndex(['2014-01-01', '2014-01-01', '2014-01-01', '2014-01-01',
# ...
# '2014-01-04', '2014-01-04', '2014-01-04', '2014-01-04'],
# dtype='datetime64[ns]', name=u'myfactor', freq=None, tz=None)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10501 | 2015-07-03T15:15:40Z | 2015-07-06T12:54:05Z | 2015-07-06T12:54:05Z | 2015-07-06T12:59:19Z |
TST: DataFrame.quantile should have Float64Index | diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8c9233c1d687b..8c4efcc35d67f 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -12161,8 +12161,8 @@ def test_quantile(self):
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
- 3: [3.5, 3.75]}, index=["0.5", "0.75"])
- assert_frame_equal(result, expected)
+ 3: [3.5, 3.75]}, index=[0.5, 0.75])
+ assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
| The result of `DataFrame.quantile` should have `Float64Index` as below, but current test case doesn't check it.
```
import pandas as pd
pd.DataFrame([1, 2, 3, 4]).quantile([0.5, 0.75]).index
# Float64Index([0.5, 0.75], dtype='float64')
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10500 | 2015-07-03T15:04:30Z | 2015-07-04T11:29:41Z | 2015-07-04T11:29:41Z | 2015-07-04T12:49:42Z |
Updated to_hdf doc string | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3bf998c1fa5a7..aba38ed4f63af 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -879,7 +879,7 @@ def to_hdf(self, path_or_buf, key, **kwargs):
Parameters
----------
- path_or_buf : the path (string) or buffer to put the store
+ path_or_buf : the path (string) or HDFStore object
key : string
indentifier for the group in the store
mode : optional, {'a', 'w', 'r', 'r+'}, default 'a'
| Updated the docstring to make clear that a HDFStore is required for a buffer, if a path is not directly passed. See issue #10491
| https://api.github.com/repos/pandas-dev/pandas/pulls/10499 | 2015-07-03T13:50:30Z | 2015-07-03T19:35:54Z | 2015-07-03T19:35:54Z | 2015-07-03T19:36:00Z |
Add test and fix for categorical series .shift #10495. | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 09a39a6d9b2f5..bfc76b37510b9 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -132,3 +132,4 @@ Bug Fixes
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
+- Bug in ``Categorical`` ``Series.shift`` (:issue:`10495`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 42d7163e7f741..cac00890bb7d8 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -885,7 +885,13 @@ def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
- new_values, fill_value = com._maybe_upcast(self.values)
+ if isinstance(self.values, Categorical):
+ # hack toward fixing issue 10495
+ values = self.values._codes
+ else:
+ values = self.values
+ new_values, fill_value = com._maybe_upcast(values)
+
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
@@ -906,6 +912,13 @@ def shift(self, periods, axis=0):
if f_ordered:
new_values = new_values.T
+ if isinstance(self.values, Categorical):
+ # hack toward fixing issue 10495
+ new_values[np.isnan(new_values)] = -1
+ new_values = Categorical.from_codes(new_values,
+ categories=self.values.categories)
+
+
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 2c1a4fd43e57f..f5583b2cb587b 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1175,6 +1175,18 @@ def test_comparison_with_unknown_scalars(self):
self.assert_numpy_array_equal(cat == 4 , [False, False, False])
self.assert_numpy_array_equal(cat != 4 , [True, True, True])
+ def test_shift(self):
+ # GH10495
+ # Series.shift should not depend on the dtype being categorical or not
+ values = ['a', 'b', 'c']
+ shifts = [-1, 0, 1]
+ results = [['b', 'c', np.nan], ['a', 'b', 'c'], [np.nan, 'a', 'b']]
+
+ for shift, result in zip(shifts, results):
+ b = pd.Series(pd.Categorical(result, categories=values))
+ a = pd.Series(values, dtype='category').shift(shift)
+ self.assert_series_equal(a, b)
+
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
| #10495
Waiting for Travis and will review further.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10498 | 2015-07-03T13:42:13Z | 2015-07-06T12:45:10Z | null | 2015-07-06T12:45:10Z |
BUG: CategoricalBlock shift GH9416 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 08222ef06d21f..5dabe730c92b0 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -128,6 +128,7 @@ Bug Fixes
- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
+- Bug in ``Series.shift`` and ``DataFrame.shift`` not supporting categorical data (:issue:`9416`)
- Bug in ``Series.map`` using categorical ``Series`` raises ``AttributeError`` (:issue:`10324`)
- Bug in ``MultiIndex.get_level_values`` including ``Categorical`` raises ``AttributeError`` (:issue:`10460`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index edd4a532cf8f5..96d2d283d2a2d 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -820,6 +820,35 @@ def shape(self):
return tuple([len(self._codes)])
+ def shift(self, periods):
+ """
+ Shift Categorical by desired number of periods.
+
+ Parameters
+ ----------
+ periods : int
+ Number of periods to move, can be positive or negative
+
+ Returns
+ -------
+ shifted : Categorical
+ """
+ # since categoricals always have ndim == 1, an axis parameter
+ # doesnt make any sense here.
+ codes = self.codes
+ if codes.ndim > 1:
+ raise NotImplementedError("Categorical with ndim > 1.")
+ if np.prod(codes.shape) and (periods != 0):
+ codes = np.roll(codes, com._ensure_platform_int(periods), axis=0)
+ if periods > 0:
+ codes[:periods] = -1
+ else:
+ codes[periods:] = -1
+
+ return Categorical.from_codes(codes,
+ categories=self.categories,
+ ordered=self.ordered)
+
def __array__(self, dtype=None):
"""
The numpy array interface.
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 42d7163e7f741..0c18ff641c269 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1709,6 +1709,10 @@ def interpolate(self, method='pad', axis=0, inplace=False,
limit=limit),
placement=self.mgr_locs)
+ def shift(self, periods, axis=0):
+ return self.make_block_same_class(values=self.values.shift(periods),
+ placement=self.mgr_locs)
+
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.bb
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 2c1a4fd43e57f..5f3ff794b4900 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1080,6 +1080,26 @@ def test_set_item_nan(self):
exp = np.array([0,1,3,2])
self.assert_numpy_array_equal(cat.codes, exp)
+ def test_shift(self):
+ # GH 9416
+ cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
+
+ # shift forward
+ sp1 = cat.shift(1)
+ xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
+ self.assert_categorical_equal(sp1, xp1)
+ self.assert_categorical_equal(cat[:-1], sp1[1:])
+
+ # shift back
+ sn2 = cat.shift(-2)
+ xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
+ categories=['a', 'b', 'c', 'd'])
+ self.assert_categorical_equal(sn2, xp2)
+ self.assert_categorical_equal(cat[2:], sn2[:-2])
+
+ # shift by zero
+ self.assert_categorical_equal(cat, cat.shift(0))
+
def test_nbytes(self):
cat = pd.Categorical([1,2,3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ae6102751fb41..8bdd493e3d841 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10360,6 +10360,15 @@ def test_shift_bool(self):
columns=['high', 'low'])
assert_frame_equal(rs, xp)
+ def test_shift_categorical(self):
+ # GH 9416
+ s1 = pd.Series(['a', 'b', 'c'], dtype='category')
+ s2 = pd.Series(['A', 'B', 'C'], dtype='category')
+ df = DataFrame({'one': s1, 'two': s2})
+ rs = df.shift(1)
+ xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
+ assert_frame_equal(rs, xp)
+
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 075362e006206..a6a05ef6f479c 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -35,6 +35,7 @@
from pandas.util.testing import (assert_series_equal,
assert_almost_equal,
assert_frame_equal,
+ assert_index_equal,
ensure_clean)
import pandas.util.testing as tm
@@ -5260,6 +5261,25 @@ def test_shift_int(self):
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
+ def test_shift_categorical(self):
+ # GH 9416
+ s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
+
+ assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid())
+
+ sp1 = s.shift(1)
+ assert_index_equal(s.index, sp1.index)
+ self.assertTrue(np.all(sp1.values.codes[:1] == -1))
+ self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:]))
+
+ sn2 = s.shift(-2)
+ assert_index_equal(s.index, sn2.index)
+ self.assertTrue(np.all(sn2.values.codes[-2:] == -1))
+ self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2]))
+
+ assert_index_equal(s.values.categories, sp1.values.categories)
+ assert_index_equal(s.values.categories, sn2.values.categories)
+
def test_truncate(self):
offset = datetools.bday
| Should resolve #9416.
CategoricalBlocks always seem to have ndim=1, even if multiple
categoricals are in a frame with the same categories. This simplifies
the axis shift logic somewhat.
Note that dataframe shift with axis=1 still doesn't work with multiple
categorical columns, since they are each a different block.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10497 | 2015-07-03T13:42:01Z | 2015-07-18T00:03:27Z | 2015-07-18T00:03:27Z | 2015-07-18T00:03:30Z |
ENH: GH10485 'Frequency' label for Series.plot | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 08222ef06d21f..05b69bae42c28 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -143,3 +143,5 @@ Bug Fixes
- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)
- Bug in `groupby.var` which caused variance to be inaccurate for small float values (:issue:`10448`)
+
+- Bug in ``Series.plot(kind='hist')`` Y Label not informative (:issue:`10485`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 2fbb4dfc6fd91..d72bc420b2388 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -1001,12 +1001,12 @@ def test_kde_missing_vals(self):
def test_hist_kwargs(self):
ax = self.ts.plot(kind='hist', bins=5)
self.assertEqual(len(ax.patches), 5)
- self._check_text_labels(ax.yaxis.get_label(), 'Degree')
+ self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
if self.mpl_ge_1_3_1:
ax = self.ts.plot(kind='hist', orientation='horizontal')
- self._check_text_labels(ax.xaxis.get_label(), 'Degree')
+ self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
tm.close()
ax = self.ts.plot(kind='hist', align='left', stacked=True)
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 3265889e4b268..07d7ced02e6ba 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -2005,10 +2005,10 @@ def _make_plot(self):
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
- ax.set_xlabel('Degree')
+ ax.set_xlabel('Frequency')
else:
for ax in self.axes:
- ax.set_ylabel('Degree')
+ ax.set_ylabel('Frequency')
@property
def orientation(self):
| More informative label for histogram.
Conversation in #10485
| https://api.github.com/repos/pandas-dev/pandas/pulls/10493 | 2015-07-02T17:36:05Z | 2015-07-11T06:45:10Z | 2015-07-11T06:45:10Z | 2015-07-11T06:45:10Z |
TST/ERR: GH10369 read_msgpack checks argument type | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 2f5388381a103..09a39a6d9b2f5 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -131,3 +131,4 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
+- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 75ca44fd1ef3e..f5e000449f232 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -141,24 +141,28 @@ def read(fh):
try:
exists = os.path.exists(path_or_buf)
- except (TypeError,ValueError):
+ except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
- # treat as a string-like
- if not hasattr(path_or_buf, 'read'):
-
+ # treat as a binary-like
+ if isinstance(path_or_buf, compat.binary_type):
+ fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
- fh.close()
+ if fh is not None:
+ fh.close()
# a buffer like
- return read(path_or_buf)
+ if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
+ return read(path_or_buf)
+
+ raise ValueError('path_or_buf needs to be a string file path or file-like')
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 92e0d7ba1a338..9f1fd41e90413 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -93,6 +93,17 @@ def test_iterator_with_string_io(self):
for i, result in enumerate(read_msgpack(s,iterator=True)):
tm.assert_frame_equal(result,dfs[i])
+ def test_invalid_arg(self):
+ #GH10369
+ class A(object):
+ def __init__(self):
+ self.read = 0
+
+ tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
+ tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
+ tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
+
+
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
| To close #10369
| https://api.github.com/repos/pandas-dev/pandas/pulls/10490 | 2015-07-02T12:44:12Z | 2015-07-03T11:58:52Z | 2015-07-03T11:58:52Z | 2015-07-03T11:58:57Z |
DOC: Better explain the behaviour of na_values | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 9852822c556dc..185deb4b9cae8 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -115,7 +115,7 @@ They can take a number of arguments:
as the index.
- ``names``: List of column names to use as column names. To replace header
existing in file, explicitly pass ``header=0``.
- - ``na_values``: optional list of strings to recognize as NaN (missing
+ - ``na_values``: optional string or list of strings to recognize as NaN (missing
values), either in addition to or in lieu of the default set.
- ``true_values``: list of strings to recognize as ``True``
- ``false_values``: list of strings to recognize as ``False``
@@ -723,7 +723,8 @@ NA Values
~~~~~~~~~
To control which values are parsed as missing values (which are signified by ``NaN``), specifiy a
-list of strings in ``na_values``. If you specify a number (a ``float``, like ``5.0`` or an ``integer`` like ``5``),
+string in ``na_values``. If you specify a list of strings, then all values in
+it are considered to be missing values. If you specify a number (a ``float``, like ``5.0`` or an ``integer`` like ``5``),
the corresponding equivalent values will also imply a missing value (in this case effectively
``[5.0,5]`` are recognized as ``NaN``.
| According to the suggestion of @jorisvandenbossche from https://github.com/pydata/pandas/pull/10479, changing some wording in the docs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10488 | 2015-07-02T04:17:02Z | 2015-07-02T07:42:55Z | 2015-07-02T07:42:55Z | 2015-07-02T07:42:55Z |
Small style consistency fix | diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index f1fcc822adeaf..06ad8827a5642 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -294,7 +294,7 @@ def _parse_entry(field_value, field_type):
return field_value
-def read_gbq(query, project_id = None, index_col=None, col_order=None, reauth=False):
+def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False):
"""Load data from Google BigQuery.
THIS IS AN EXPERIMENTAL LIBRARY
| Removed unneeded spaces in kwargs related to read_gbq()
| https://api.github.com/repos/pandas-dev/pandas/pulls/10487 | 2015-07-02T03:25:40Z | 2015-07-02T07:11:14Z | 2015-07-02T07:11:13Z | 2015-07-02T07:11:17Z |
Series.from_csv not loading header names | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 2f5388381a103..4e9858371cc86 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -131,3 +131,5 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
+
+- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d1ddd086bf8b7..7158303cd836d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2330,7 +2330,9 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None,
encoding=encoding,
infer_datetime_format=infer_datetime_format)
result = df.icol(0)
- result.index.name = result.name = None
+ if header is None:
+ result.index.name = result.name = None
+
return result
def to_csv(self, path, index=True, sep=",", na_rep='',
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index f3626488301b9..075362e006206 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4979,6 +4979,11 @@ def test_from_csv(self):
self.assertTrue(ts.name is None)
self.assertTrue(ts.index.name is None)
+ # GH10483
+ self.ts.to_csv(path, header=True)
+ ts_h = Series.from_csv(path, header=0)
+ self.assertTrue(ts_h.name == 'ts')
+
self.series.to_csv(path)
series = Series.from_csv(path)
self.assertIsNone(series.name)
@@ -4987,6 +4992,10 @@ def test_from_csv(self):
self.assertTrue(series.name is None)
self.assertTrue(series.index.name is None)
+ self.series.to_csv(path, header=True)
+ series_h = Series.from_csv(path, header=0)
+ self.assertTrue(series_h.name == 'series')
+
outfile = open(path, 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
| Series.from_csv at the moment does not load the series.name and series.index.name when the header keyword argument is stated.
This was introduced in 6078fba9410918baa486ca008cc9e3ba066c03ec. The issue was that `Series.from_csv` uses `DataFrame.from_csv`, which automatically indexes columns (in practice, the index column gets labelled 0 and the values column gets labelled 1). Converting this to a series will then cause the `series.name` to be 1, and the `series.index.name` to be 0. The fix was to explicitly set the names in Series.from_csv to `None`.
This caused the headers to be deleted even if they were provided by setting the header kwarg.
The fix is simple, to check if the headers are provided, and only setting the names to None if they are not.
I included some tests, please let me know if this is enough as I am new to open-source and pandas.
Thanks!
| https://api.github.com/repos/pandas-dev/pandas/pulls/10483 | 2015-07-01T19:47:21Z | 2015-07-07T15:52:31Z | null | 2015-07-07T15:52:31Z |
Fix docstring for na_values in parsers | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index d1e6e5677da0b..dd972150de0fe 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -86,7 +86,7 @@ class ParserWarning(Warning):
should explicitly pass header=None
prefix : string, default None
Prefix to add to column numbers when no header, e.g 'X' for X0, X1, ...
-na_values : list-like or dict, default None
+na_values : str, list-like or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values
true_values : list
| I discovered entirely by accident that the `na_values` in `pd.read_*` can be a string. According to the docstring, it can only be a sequence or a dict. This PR fixes that.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10479 | 2015-07-01T04:52:03Z | 2015-07-01T10:53:21Z | 2015-07-01T10:53:21Z | 2015-07-02T04:17:15Z |
API: add DatetimeBlockTZ #8260 | diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index 239f9aa19f769..dcea59545aae3 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -18,7 +18,7 @@
// If missing or the empty string, the tool will be automatically
// determined by looking for tools on the PATH environment
// variable.
- "environment_type": "",
+ "environment_type": "conda",
// the base URL to show a commit for the project.
"show_commit_url": "https://github.com/pydata/pandas/commit/",
@@ -26,7 +26,7 @@
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
// "pythons": ["2.7", "3.4"],
- "pythons": ["2.7", "3.4"],
+ "pythons": ["2.7"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
@@ -41,7 +41,7 @@
"sqlalchemy": [],
"scipy": [],
"numexpr": [],
- "tables": [],
+ "pytables": [],
"openpyxl": [],
"xlrd": [],
"xlwt": []
diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py
index 187101b1f392b..d22d01f261b27 100644
--- a/asv_bench/benchmarks/binary_ops.py
+++ b/asv_bench/benchmarks/binary_ops.py
@@ -203,34 +203,59 @@ def time_series_timestamp_compare(self):
class timestamp_ops_diff1(object):
goal_time = 0.2
+ N = 1000000
def setup(self):
- self.N = 1000000
- self.s = Series(date_range('20010101', periods=self.N, freq='s'))
+ self.s = self.create()
+
+ def create(self):
+ return Series(date_range('20010101', periods=self.N, freq='s'))
def time_timestamp_ops_diff1(self):
self.s.diff()
+class timestamp_tz_ops_diff1(timestamp_ops_diff1):
+ N = 10000
+
+ def create(self):
+ return Series(date_range('20010101', periods=self.N, freq='s', tz='US/Eastern'))
class timestamp_ops_diff2(object):
goal_time = 0.2
+ N = 1000000
def setup(self):
- self.N = 1000000
- self.s = Series(date_range('20010101', periods=self.N, freq='s'))
+ self.s = self.create()
+
+ def create(self):
+ return Series(date_range('20010101', periods=self.N, freq='s'))
def time_timestamp_ops_diff2(self):
(self.s - self.s.shift())
+class timestamp_tz_ops_diff2(timestamp_ops_diff2):
+ N = 10000
+
+ def create(self):
+ return Series(date_range('20010101', periods=self.N, freq='s', tz='US/Eastern'))
class timestamp_series_compare(object):
goal_time = 0.2
+ N = 1000000
def setup(self):
- self.N = 1000000
self.halfway = ((self.N // 2) - 1)
- self.s = Series(date_range('20010101', periods=self.N, freq='T'))
+ self.s = self.create()
self.ts = self.s[self.halfway]
+ def create(self):
+ return Series(date_range('20010101', periods=self.N, freq='T'))
+
def time_timestamp_series_compare(self):
- (self.ts >= self.s)
\ No newline at end of file
+ (self.ts >= self.s)
+
+class timestamp_tz_series_compare(timestamp_series_compare):
+ N = 10000
+
+ def create(self):
+ return Series(date_range('20010101', periods=self.N, freq='T', tz='US/Eastern'))
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 3ea90447dd44f..bc4b463e52302 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1590,9 +1590,10 @@ dtypes
------
The main types stored in pandas objects are ``float``, ``int``, ``bool``,
-``datetime64[ns]``, ``timedelta[ns]`` and ``object``. In addition these dtypes
-have item sizes, e.g. ``int64`` and ``int32``. A convenient :attr:`~DataFrame.dtypes``
-attribute for DataFrames returns a Series with the data type of each column.
+``datetime64[ns]`` and ``datetime64[ns, tz]`` (in >= 0.17.0), ``timedelta[ns]``, ``category`` (in >= 0.15.0), and ``object``. In addition these dtypes
+have item sizes, e.g. ``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>` for more detail on ``datetime64[ns, tz]`` dtypes.
+
+A convenient :attr:`~DataFrame.dtypes` attribute for DataFrames returns a Series with the data type of each column.
.. ipython:: python
@@ -1814,8 +1815,14 @@ dtypes:
df['tdeltas'] = df.dates.diff()
df['uint64'] = np.arange(3, 6).astype('u8')
df['other_dates'] = pd.date_range('20130101', periods=3).values
+ df['tz_aware_dates'] = pd.date_range('20130101', periods=3, tz='US/Eastern')
df
+And the dtypes
+
+.. ipython:: python
+
+ df.dtypes
:meth:`~DataFrame.select_dtypes` has two parameters ``include`` and ``exclude`` that allow you to
say "give me the columns WITH these dtypes" (``include``) and/or "give the
@@ -1868,7 +1875,7 @@ All numpy dtypes are subclasses of ``numpy.generic``:
.. note::
- Pandas also defines an additional ``category`` dtype, which is not integrated into the normal
+ Pandas also defines the types ``category``, and ``datetime64[ns, tz]``, which are not integrated into the normal
numpy hierarchy and wont show up with the above function.
.. note::
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9580f90c29dcd..23f8ad3686585 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -50,6 +50,7 @@ Highlights include:
- Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
- The sorting API has been revamped to remove some long-time inconsistencies, see :ref:`here <whatsnew_0170.api_breaking.sorting>`
+- Support for a ``datetime64[ns]`` with timezones as a first-class dtype, see :ref:`here <whatsnew_0170.tz>`
- The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats,
previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>`
- The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 9795c082ddb98..dd13e8fabf0e9 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1745,3 +1745,30 @@ constructor as well as ``tz_localize``.
# tz_convert(None) is identical with tz_convert('UTC').tz_localize(None)
didx.tz_convert('UCT').tz_localize(None)
+
+.. _timeseries.timezone_series:
+
+TZ aware Dtypes
+~~~~~~~~~~~~~~~
+
+.. versionadded:: 0.17.0
+
+``Series/DatetimeIndex`` with a timezone naive value are represented with a dtype of ``datetime64[ns]``.
+
+.. ipython:: python
+
+ dr = pd.date_range('20130101',periods=3)
+ dr
+ s = Series(dr)
+ s
+
+``Series/DatetimeIndex`` with a timezone aware value are represented with a dtype of ``datetime64[ns, tz]``.
+
+.. ipython:: python
+
+ dr = pd.date_range('20130101',periods=3,tz='US/Eastern')
+ dr
+ s = Series(dr)
+ s
+
+Both of these ``Series`` can be manipulated via the ``.dt`` accessor, see the :ref:`docs <basics.dt_accessors>` as well.
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e2c3bfd17485c..3eed237843a25 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -30,6 +30,7 @@ Highlights include:
- Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
- The sorting API has been revamped to remove some long-time inconsistencies, see :ref:`here <whatsnew_0170.api_breaking.sorting>`
+- Support for a ``datetime64[ns]`` with timezones as a first-class dtype, see :ref:`here <whatsnew_0170.tz>`
- The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats,
previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>`
- The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even
@@ -417,6 +418,58 @@ To keep the previous behaviour, you can use ``errors='ignore'``:
Furthermore, ``pd.to_timedelta`` has gained a similar API, of ``errors='raise'|'ignore'|'coerce'``, and the ``coerce`` keyword
has been deprecated in favor of ``errors='coerce'``.
+.. _whatsnew_0170.tz:
+
+Datetime with TZ
+~~~~~~~~~~~~~~~~
+
+We are adding an implementation that natively supports datetime with timezones. A ``Series`` or a ``DataFrame`` column previously
+*could* be assigned a datetime with timezones, and would work as an ``object`` dtype. This had performance issues with a large
+number rows. (:issue:`8260`, :issue:`10763`)
+
+The new implementation allows for having a single-timezone across all rows, and operating on it in a performant manner.
+
+.. ipython:: python
+
+ df = DataFrame({'A' : date_range('20130101',periods=3),
+ 'B' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'C' : date_range('20130101',periods=3,tz='CET')})
+ df
+ df.dtypes
+
+.. ipython:: python
+
+ df.B
+ df.B.dt.tz_localize(None)
+
+This uses a new-dtype representation as well, that is very similar in look-and-feel to its numpy cousin ``datetime64[ns]``
+
+.. ipython:: python
+
+ df['B'].dtype
+ type(df['B']).dtype
+
+.. note::
+
+ There is a slightly different string repr for the underlying ``DatetimeIndex`` as a result of the dtype changes, but
+ functionally these are the same.
+
+ .. code-block:: python
+
+ In [1]: pd.date_range('20130101',periods=3,tz='US/Eastern')
+ Out[1]: DatetimeIndex(['2013-01-01 00:00:00-05:00', '2013-01-02 00:00:00-05:00',
+ '2013-01-03 00:00:00-05:00'],
+ dtype='datetime64[ns]', freq='D', tz='US/Eastern')
+
+ In [2]: pd.date_range('20130101',periods=3,tz='US/Eastern').dtype
+ Out[2]: dtype('<M8[ns]')
+
+ .. ipython:: python
+
+ pd.date_range('20130101',periods=3,tz='US/Eastern')
+ pd.date_range('20130101',periods=3,tz='US/Eastern').dtype
+
+
.. _whatsnew_0170.api_breaking.convert_objects:
Changes to convert_objects
@@ -824,6 +877,9 @@ Bug Fixes
- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`)
- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`)
- Bug in ``DataFrame.to_latex()`` the ``column_format`` argument could not be passed (:issue:`9402`)
+- Bug in ``DatetimeIndex`` when localizing with ``NaT`` (:issue:`10477`)
+- Bug in ``Series.dt`` ops in preserving meta-data (:issue:`10477`)
+- Bug in preserving ``NaT`` when passed in an otherwise invalid ``to_datetime`` construction (:issue:`10477`)
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
- Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`)
- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 8f1dab4f8b511..34bf173d63860 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -206,7 +206,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
"""
from pandas.core.series import Series
from pandas.tools.tile import cut
- from pandas.tseries.period import PeriodIndex
+ from pandas import Index, PeriodIndex, DatetimeIndex
name = getattr(values, 'name', None)
values = Series(values).values
@@ -225,11 +225,15 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
dtype = values.dtype
is_period = com.is_period_arraylike(values)
+ is_datetimetz = com.is_datetimetz(values)
- if com.is_datetime_or_timedelta_dtype(dtype) or is_period:
+ if com.is_datetime_or_timedelta_dtype(dtype) or is_period or is_datetimetz:
if is_period:
- values = PeriodIndex(values, name=name)
+ values = PeriodIndex(values)
+ elif is_datetimetz:
+ tz = getattr(values, 'tz', None)
+ values = DatetimeIndex(values).tz_localize(None)
values = values.view(np.int64)
keys, counts = htable.value_count_scalar64(values, dropna)
@@ -239,8 +243,14 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
+ # localize to the original tz if necessary
+ if is_datetimetz:
+ keys = DatetimeIndex(keys).tz_localize(tz)
+
# convert the keys back to the dtype we came in
- keys = keys.astype(dtype)
+ else:
+ keys = keys.astype(dtype)
+
elif com.is_integer_dtype(dtype):
values = com._ensure_int64(values)
@@ -257,7 +267,9 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
- result = Series(counts, index=com._values_from_object(keys), name=name)
+ if not isinstance(keys, Index):
+ keys = Index(keys)
+ result = Series(counts, index=keys, name=name)
if bins is not None:
# TODO: This next line should be more efficient
diff --git a/pandas/core/base.py b/pandas/core/base.py
index fe9bac7f4c68e..d3850be13b6f0 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -364,6 +364,11 @@ def base(self):
""" return the base object if the memory of the underlying data is shared """
return self.values.base
+ @property
+ def _values(self):
+ """ the internal implementation """
+ return self.values
+
def max(self):
""" The maximum value of the object """
return nanops.nanmax(self.values)
@@ -397,6 +402,14 @@ def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return com.isnull(self).any()
+ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
+ filter_type=None, **kwds):
+ """ perform the reduction type operation if we can """
+ func = getattr(self,name,None)
+ if func is None:
+ raise TypeError("{klass} cannot perform the operation {op}".format(klass=self.__class__.__name__,op=name))
+ return func(**kwds)
+
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
@@ -586,7 +599,7 @@ def drop_duplicates(self, keep='first', inplace=False):
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
- keys = com._ensure_object(self.values)
+ keys = com._values_from_object(com._ensure_object(self.values))
duplicated = lib.duplicated(keys, keep=keep)
try:
return self._constructor(duplicated,
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 2f465ded12bd6..9decd5e212cbf 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -12,13 +12,14 @@
import pandas.core.common as com
from pandas.util.decorators import cache_readonly, deprecate_kwarg
-from pandas.core.common import (CategoricalDtype, ABCSeries, ABCIndexClass, ABCCategoricalIndex,
+from pandas.core.common import (ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex,
isnull, notnull, is_dtype_equal,
is_categorical_dtype, is_integer_dtype, is_object_dtype,
_possibly_infer_to_datetimelike, get_dtype_kinds,
is_list_like, is_sequence, is_null_slice, is_bool,
_ensure_platform_int, _ensure_object, _ensure_int64,
_coerce_indexer_dtype, take_1d)
+from pandas.core.dtypes import CategoricalDtype
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option
@@ -85,7 +86,7 @@ def f(self, other):
def maybe_to_categorical(array):
""" coerce to a categorical if a series is given """
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
- return array.values
+ return array._values
return array
_codes_doc = """The category codes of this categorical.
@@ -231,7 +232,7 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
# we are either a Series or a CategoricalIndex
if isinstance(values, (ABCSeries, ABCCategoricalIndex)):
- values = values.values
+ values = values._values
if ordered is None:
ordered = values.ordered
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 77536fb391f93..8ffffae6bd160 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -20,7 +20,7 @@
import pandas.tslib as tslib
from pandas import compat
from pandas.compat import StringIO, BytesIO, range, long, u, zip, map, string_types, iteritems
-
+from pandas.core.dtypes import CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType
from pandas.core.config import get_option
class PandasError(Exception):
@@ -114,77 +114,29 @@ def __instancecheck__(cls, inst):
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
-class CategoricalDtypeType(type):
- """
- the type of CategoricalDtype, this metaclass determines subclass ability
- """
- def __init__(cls, name, bases, attrs):
- pass
-
-class CategoricalDtype(object):
- __meta__ = CategoricalDtypeType
- """
- A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
-
- THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
- """
- name = 'category'
- names = None
- type = CategoricalDtypeType
- subdtype = None
- kind = 'O'
- str = '|O08'
- num = 100
- shape = tuple()
- itemsize = 8
- base = np.dtype('O')
- isbuiltin = 0
- isnative = 0
-
- def __unicode__(self):
- return self.name
-
- def __str__(self):
- """
- Return a string representation for a particular Object
-
- Invoked by str(df) in both py2/py3.
- Yields Bytestring in Py2, Unicode String in py3.
- """
-
- if compat.PY3:
- return self.__unicode__()
- return self.__bytes__()
-
- def __bytes__(self):
- """
- Return a string representation for a particular object.
-
- Invoked by bytes(obj) in py3 only.
- Yields a bytestring in both py2/py3.
- """
- from pandas.core.config import get_option
-
- encoding = get_option("display.encoding")
- return self.__unicode__().encode(encoding, 'replace')
-
- def __repr__(self):
- """
- Return a string representation for a particular object.
+def bind_method(cls, name, func):
+ """Bind a method to class, python 2 and python 3 compatible.
- Yields Bytestring in Py2, Unicode String in py3.
- """
- return str(self)
+ Parameters
+ ----------
- def __hash__(self):
- # make myself hashable
- return hash(str(self))
+ cls : type
+ class to receive bound method
+ name : basestring
+ name of method on class instance
+ func : function
+ function to be bound as method
- def __eq__(self, other):
- if isinstance(other, compat.string_types):
- return other == self.name
- return isinstance(other, CategoricalDtype)
+ Returns
+ -------
+ None
+ """
+ # only python 2 has bound/unbound method issue
+ if not compat.PY3:
+ setattr(cls, name, types.MethodType(func, None, cls))
+ else:
+ setattr(cls, name, func)
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
@@ -764,9 +716,12 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
undefined if allow_fill == False and -1 is present in indexer.
"""
+ # dispatch to internal type takes
if is_categorical(arr):
return arr.take_nd(indexer, fill_value=fill_value,
allow_fill=allow_fill)
+ elif is_datetimetz(arr):
+ return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
@@ -1247,13 +1202,18 @@ def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
copy : if True always make a copy even if no upcast is required
"""
- if dtype is None:
- dtype = values.dtype
- new_dtype, fill_value = _maybe_promote(dtype, fill_value)
- if new_dtype != values.dtype:
- values = values.astype(new_dtype)
- elif copy:
- values = values.copy()
+ if is_internal_type(values):
+ if copy:
+ values = values.copy()
+ else:
+ if dtype is None:
+ dtype = values.dtype
+ new_dtype, fill_value = _maybe_promote(dtype, fill_value)
+ if new_dtype != values.dtype:
+ values = values.astype(new_dtype)
+ elif copy:
+ values = values.copy()
+
return values, fill_value
@@ -1724,7 +1684,7 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
if getattr(x, 'is_all_dates', False):
# GH 5975, scipy.interp1d can't hande datetime64s
- x, new_x = x.values.astype('i8'), new_x.astype('i8')
+ x, new_x = x._values.astype('i8'), new_x.astype('i8')
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
@@ -1831,7 +1791,8 @@ def _invalidate_string_dtypes(dtype_set):
def _get_dtype_from_object(dtype):
- """Get a numpy dtype.type-style object.
+ """Get a numpy dtype.type-style object. This handles the
+ datetime64[ns] and datetime64[ns, TZ] compat
Notes
-----
@@ -1840,6 +1801,10 @@ def _get_dtype_from_object(dtype):
# type object from a dtype
if isinstance(dtype, type) and issubclass(dtype, np.generic):
return dtype
+ elif is_categorical(dtype):
+ return CategoricalDtype().type
+ elif is_datetimetz(dtype):
+ return DatetimeTZDtype(dtype).type
elif isinstance(dtype, np.dtype): # dtype object
try:
_validate_date_like_dtype(dtype)
@@ -1850,15 +1815,16 @@ def _get_dtype_from_object(dtype):
elif isinstance(dtype, compat.string_types):
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
- elif dtype == 'category':
- return CategoricalDtypeType
+
try:
- return _get_dtype_from_object(getattr(np, dtype))
+ return _get_dtype_from_object(getattr(np,dtype))
except AttributeError:
# handles cases like _get_dtype(int)
# i.e., python objects that are valid dtypes (unlike user-defined
# types, in general)
+ # further handle internal types
pass
+
return _get_dtype_from_object(np.dtype(dtype))
@@ -1978,8 +1944,8 @@ def _possibly_convert_platform(values):
if isinstance(values, (list, tuple)):
values = lib.list_to_object_array(values)
if getattr(values, 'dtype', None) == np.object_:
- if hasattr(values, 'values'):
- values = values.values
+ if hasattr(values, '_values'):
+ values = values._values
values = lib.maybe_convert_objects(values)
return values
@@ -1997,18 +1963,21 @@ def _possibly_cast_to_datetime(value, dtype, errors='raise'):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
+ is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
- if is_datetime64 or is_timedelta64:
+ if is_datetime64 or is_datetime64tz or is_timedelta64:
# force the dtype if needed
- if is_datetime64 and dtype != _NS_DTYPE:
+ if is_datetime64 and not is_dtype_equal(dtype,_NS_DTYPE):
if dtype.name == 'datetime64[ns]':
dtype = _NS_DTYPE
else:
raise TypeError(
"cannot convert datetimelike to dtype [%s]" % dtype)
- elif is_timedelta64 and dtype != _TD_DTYPE:
+ elif is_datetime64tz:
+ pass
+ elif is_timedelta64 and not is_dtype_equal(dtype,_TD_DTYPE):
if dtype.name == 'timedelta64[ns]':
dtype = _TD_DTYPE
else:
@@ -2026,15 +1995,28 @@ def _possibly_cast_to_datetime(value, dtype, errors='raise'):
value = tslib.iNaT
# we have an array of datetime or timedeltas & nulls
- elif np.prod(value.shape) and value.dtype != dtype:
+ elif np.prod(value.shape) and not is_dtype_equal(value.dtype, dtype):
try:
if is_datetime64:
- value = to_datetime(value, errors=errors).values
+ value = to_datetime(value, errors=errors)._values
+ elif is_datetime64tz:
+
+ # input has to be UTC at this point, so just localize
+ value = to_datetime(value, errors=errors).tz_localize(dtype.tz)
elif is_timedelta64:
- value = to_timedelta(value, errors=errors).values
+ value = to_timedelta(value, errors=errors)._values
except (AttributeError, ValueError):
pass
+ # coerce datetimelike to object
+ elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
+ if is_object_dtype(dtype):
+ ints = np.asarray(value).view('i8')
+ return tslib.ints_to_pydatetime(ints)
+
+ # we have a non-castable dtype that was passed
+ raise TypeError('Cannot cast datetime64 to %s' % dtype)
+
else:
is_array = isinstance(value, np.ndarray)
@@ -2073,13 +2055,19 @@ def _possibly_infer_to_datetimelike(value, convert_dates=False):
Parameters
----------
- value : np.array
+ value : np.array / Series / Index / list-like
convert_dates : boolean, default False
if True try really hard to convert dates (such as datetime.date), other
leave inferred dtype 'date' alone
"""
+ if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex)):
+ return value
+ elif isinstance(value, ABCSeries):
+ if isinstance(value._values, ABCDatetimeIndex):
+ return value._values
+
v = value
if not is_list_like(v):
v = [v]
@@ -2093,9 +2081,22 @@ def _possibly_infer_to_datetimelike(value, convert_dates=False):
def _try_datetime(v):
# safe coerce to datetime64
try:
- return tslib.array_to_datetime(v, errors='raise').reshape(shape)
+ v = tslib.array_to_datetime(v, errors='raise')
+ except ValueError:
+
+ # we might have a sequence of the same-datetimes with tz's
+ # if so coerce to a DatetimeIndex; if they are not the same, then
+ # these stay as object dtype
+ try:
+ from pandas import to_datetime
+ return to_datetime(v)
+ except:
+ pass
+
except:
- return v
+ pass
+
+ return v.reshape(shape)
def _try_timedelta(v):
# safe coerce to timedelta64
@@ -2103,7 +2104,7 @@ def _try_timedelta(v):
# will try first with a string & object conversion
from pandas.tseries.timedeltas import to_timedelta
try:
- return to_timedelta(v).values.reshape(shape)
+ return to_timedelta(v)._values.reshape(shape)
except:
return v
@@ -2112,9 +2113,9 @@ def _try_timedelta(v):
inferred_type = lib.infer_dtype(sample)
if inferred_type in ['datetime', 'datetime64'] or (convert_dates and inferred_type in ['date']):
- value = _try_datetime(v).reshape(shape)
+ value = _try_datetime(v)
elif inferred_type in ['timedelta', 'timedelta64']:
- value = _try_timedelta(v).reshape(shape)
+ value = _try_timedelta(v)
# its possible to have nulls intermixed within the datetime or timedelta
# these will in general have an inferred_type of 'mixed', so have to try
@@ -2125,9 +2126,9 @@ def _try_timedelta(v):
elif inferred_type in ['mixed']:
if lib.is_possible_datetimelike_array(_ensure_object(v)):
- value = _try_timedelta(v).reshape(shape)
+ value = _try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
- value = _try_datetime(v).reshape(shape)
+ value = _try_datetime(v)
return value
@@ -2448,19 +2449,21 @@ def is_period_arraylike(arr):
def is_datetime_arraylike(arr):
""" return if we are datetime arraylike / DatetimeIndex """
- if isinstance(arr, pd.DatetimeIndex):
+ if isinstance(arr, ABCDatetimeIndex):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return arr.dtype == object and lib.infer_dtype(arr) == 'datetime'
return getattr(arr, 'inferred_type', None) == 'datetime'
def is_datetimelike(arr):
- return arr.dtype in _DATELIKE_DTYPES or isinstance(arr, ABCPeriodIndex)
+ return arr.dtype in _DATELIKE_DTYPES or isinstance(arr, ABCPeriodIndex) or is_datetimetz(arr)
def _coerce_to_dtype(dtype):
""" coerce a string / np.dtype to a dtype """
if is_categorical_dtype(dtype):
dtype = CategoricalDtype()
+ elif is_datetime64tz_dtype(dtype):
+ dtype = DatetimeTZDtype(dtype)
else:
dtype = np.dtype(dtype)
return dtype
@@ -2471,9 +2474,18 @@ def _get_dtype(arr_or_dtype):
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
elif isinstance(arr_or_dtype, CategoricalDtype):
- return CategoricalDtype()
- return arr_or_dtype.dtype
+ return arr_or_dtype
+ elif isinstance(arr_or_dtype, DatetimeTZDtype):
+ return arr_or_dtype
+ elif isinstance(arr_or_dtype, compat.string_types):
+ if is_categorical_dtype(arr_or_dtype):
+ return CategoricalDtype.construct_from_string(arr_or_dtype)
+ elif is_datetime64tz_dtype(arr_or_dtype):
+ return DatetimeTZDtype.construct_from_string(arr_or_dtype)
+ if hasattr(arr_or_dtype, 'dtype'):
+ arr_or_dtype = arr_or_dtype.dtype
+ return np.dtype(arr_or_dtype)
def _get_dtype_type(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
@@ -2482,23 +2494,26 @@ def _get_dtype_type(arr_or_dtype):
return np.dtype(arr_or_dtype).type
elif isinstance(arr_or_dtype, CategoricalDtype):
return CategoricalDtypeType
+ elif isinstance(arr_or_dtype, DatetimeTZDtype):
+ return DatetimeTZDtypeType
elif isinstance(arr_or_dtype, compat.string_types):
if is_categorical_dtype(arr_or_dtype):
return CategoricalDtypeType
+ elif is_datetime64tz_dtype(arr_or_dtype):
+ return DatetimeTZDtypeType
return _get_dtype_type(np.dtype(arr_or_dtype))
try:
return arr_or_dtype.dtype.type
except AttributeError:
- raise ValueError('%r is not a dtype' % arr_or_dtype)
+ return type(None)
def is_dtype_equal(source, target):
""" return a boolean if the dtypes are equal """
- source = _get_dtype_type(source)
- target = _get_dtype_type(target)
-
try:
+ source = _get_dtype(source)
+ target = _get_dtype(target)
return source == target
- except TypeError:
+ except (TypeError, AttributeError):
# invalid comparison
# object == category will hit this
@@ -2524,14 +2539,24 @@ def is_int_or_datetime_dtype(arr_or_dtype):
return (issubclass(tipo, np.integer) or
issubclass(tipo, (np.datetime64, np.timedelta64)))
-
def is_datetime64_dtype(arr_or_dtype):
- tipo = _get_dtype_type(arr_or_dtype)
+ try:
+ tipo = _get_dtype_type(arr_or_dtype)
+ except TypeError:
+ return False
return issubclass(tipo, np.datetime64)
+def is_datetime64tz_dtype(arr_or_dtype):
+ return DatetimeTZDtype.is_dtype(arr_or_dtype)
+
+def is_datetime64_any_dtype(arr_or_dtype):
+ return is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype)
def is_datetime64_ns_dtype(arr_or_dtype):
- tipo = _get_dtype(arr_or_dtype)
+ try:
+ tipo = _get_dtype(arr_or_dtype)
+ except TypeError:
+ return False
return tipo == _NS_DTYPE
def is_timedelta64_dtype(arr_or_dtype):
@@ -2555,9 +2580,10 @@ def is_datetimelike_v_numeric(a, b):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
- f = lambda x: is_integer_dtype(x) or is_float_dtype(x)
- return (needs_i8_conversion(a) and f(b)) or (
- needs_i8_conversion(b) and f(a))
+ is_numeric = lambda x: is_integer_dtype(x) or is_float_dtype(x)
+ is_datetimelike = needs_i8_conversion
+ return (is_datetimelike(a) and is_numeric(b)) or (
+ is_datetimelike(b) and is_numeric(a))
def is_datetimelike_v_object(a, b):
# return if we have an i8 convertible and object comparision
@@ -2566,14 +2592,17 @@ def is_datetimelike_v_object(a, b):
if not hasattr(b, 'dtype'):
b = np.asarray(b)
f = lambda x: is_object_dtype(x)
- return (needs_i8_conversion(a) and f(b)) or (
- needs_i8_conversion(b) and f(a))
+ is_object = lambda x: is_integer_dtype(x) or is_float_dtype(x)
+ is_datetimelike = needs_i8_conversion
+ return (is_datetimelike(a) and is_object(b)) or (
+ is_datetimelike(b) and is_object(a))
-needs_i8_conversion = is_datetime_or_timedelta_dtype
+needs_i8_conversion = lambda arr_or_dtype: is_datetime_or_timedelta_dtype(arr_or_dtype) or \
+ is_datetime64tz_dtype(arr_or_dtype)
def i8_boxer(arr_or_dtype):
""" return the scalar boxer for the dtype """
- if is_datetime64_dtype(arr_or_dtype):
+ if is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype):
return lib.Timestamp
elif is_timedelta64_dtype(arr_or_dtype):
return lambda x: lib.Timedelta(x,unit='ns')
@@ -2603,20 +2632,33 @@ def is_bool_dtype(arr_or_dtype):
return False
return issubclass(tipo, np.bool_)
+def is_sparse(array):
+ """ return if we are a sparse array """
+ return isinstance(array, (ABCSparseArray, ABCSparseSeries))
+
+def is_datetimetz(array):
+ """ return if we are a datetime with tz array """
+ return (isinstance(array, ABCDatetimeIndex) and getattr(array,'tz',None) is not None) or is_datetime64tz_dtype(array)
+
+def is_internal_type(value):
+ """
+ if we are a klass that is preserved by the internals
+ these are internal klasses that we represent (and don't use a np.array)
+ """
+ if is_categorical(value):
+ return True
+ elif is_sparse(value):
+ return True
+ elif is_datetimetz(value):
+ return True
+ return False
+
def is_categorical(array):
""" return if we are a categorical possibility """
- return isinstance(array, ABCCategorical) or isinstance(array.dtype, CategoricalDtype)
+ return isinstance(array, ABCCategorical) or is_categorical_dtype(array)
def is_categorical_dtype(arr_or_dtype):
- if hasattr(arr_or_dtype,'dtype'):
- arr_or_dtype = arr_or_dtype.dtype
-
- if isinstance(arr_or_dtype, CategoricalDtype):
- return True
- try:
- return arr_or_dtype == 'category'
- except:
- return False
+ return CategoricalDtype.is_dtype(arr_or_dtype)
def is_complex_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
@@ -2979,8 +3021,10 @@ def get_dtype_kinds(l):
dtype = arr.dtype
if is_categorical_dtype(dtype):
typ = 'category'
- elif isinstance(arr, ABCSparseArray):
+ elif is_sparse(arr):
typ = 'sparse'
+ elif is_datetimetz(arr):
+ typ = 'datetimetz'
elif is_datetime64_dtype(dtype):
typ = 'datetime'
elif is_timedelta64_dtype(dtype):
@@ -3029,7 +3073,7 @@ def is_nonempty(x):
typs = get_dtype_kinds(to_concat)
# these are mandated to handle empties as well
- if 'datetime' in typs or 'timedelta' in typs:
+ if 'datetime' in typs or 'datetimetz' in typs or 'timedelta' in typs:
from pandas.tseries.common import _concat_compat
return _concat_compat(to_concat, axis=axis)
diff --git a/pandas/core/dtypes.py b/pandas/core/dtypes.py
new file mode 100644
index 0000000000000..b260c2b58fce6
--- /dev/null
+++ b/pandas/core/dtypes.py
@@ -0,0 +1,196 @@
+""" define extension dtypes """
+
+import re
+import numpy as np
+from pandas import compat
+
+class ExtensionDtype(object):
+ """
+ A np.dtype duck-typed class, suitable for holding a custom dtype.
+
+ THIS IS NOT A REAL NUMPY DTYPE
+ """
+ name = None
+ names = None
+ type = None
+ subdtype = None
+ kind = None
+ str = None
+ num = 100
+ shape = tuple()
+ itemsize = 8
+ base = None
+ isbuiltin = 0
+ isnative = 0
+ _metadata = []
+
+ def __unicode__(self):
+ return self.name
+
+ def __str__(self):
+ """
+ Return a string representation for a particular Object
+
+ Invoked by str(df) in both py2/py3.
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+
+ if compat.PY3:
+ return self.__unicode__()
+ return self.__bytes__()
+
+ def __bytes__(self):
+ """
+ Return a string representation for a particular object.
+
+ Invoked by bytes(obj) in py3 only.
+ Yields a bytestring in both py2/py3.
+ """
+ from pandas.core.config import get_option
+
+ encoding = get_option("display.encoding")
+ return self.__unicode__().encode(encoding, 'replace')
+
+ def __repr__(self):
+ """
+ Return a string representation for a particular object.
+
+ Yields Bytestring in Py2, Unicode String in py3.
+ """
+ return str(self)
+
+ def __hash__(self):
+ raise NotImplementedError("sub-classes should implement an __hash__ method")
+
+ def __eq__(self, other):
+ raise NotImplementedError("sub-classes should implement an __eq__ method")
+
+ @classmethod
+ def is_dtype(cls, dtype):
+ """ Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """
+ if hasattr(dtype, 'dtype'):
+ dtype = dtype.dtype
+ if isinstance(dtype, cls):
+ return True
+ try:
+ return cls.construct_from_string(dtype) is not None
+ except:
+ return False
+
+class CategoricalDtypeType(type):
+ """
+ the type of CategoricalDtype, this metaclass determines subclass ability
+ """
+ pass
+
+class CategoricalDtype(ExtensionDtype):
+ __metaclass__ = CategoricalDtypeType
+ """
+ A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
+
+ THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
+ """
+ name = 'category'
+ type = CategoricalDtypeType
+ kind = 'O'
+ str = '|O08'
+ base = np.dtype('O')
+
+ def __hash__(self):
+ # make myself hashable
+ return hash(str(self))
+
+ def __eq__(self, other):
+ if isinstance(other, compat.string_types):
+ return other == self.name
+
+ return isinstance(other, CategoricalDtype)
+
+ @classmethod
+ def construct_from_string(cls, string):
+ """ attempt to construct this type from a string, raise a TypeError if its not possible """
+ try:
+ if string == 'category':
+ return cls()
+ except:
+ pass
+
+ raise TypeError("cannot construct a CategoricalDtype")
+
+class DatetimeTZDtypeType(type):
+ """
+ the type of DatetimeTZDtype, this metaclass determines subclass ability
+ """
+ pass
+
+class DatetimeTZDtype(ExtensionDtype):
+ __metaclass__ = DatetimeTZDtypeType
+ """
+ A np.dtype duck-typed class, suitable for holding a custom datetime with tz dtype.
+
+ THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.datetime64[ns]
+ """
+ type = DatetimeTZDtypeType
+ kind = 'M'
+ str = '|M8[ns]'
+ num = 101
+ base = np.dtype('M8[ns]')
+ _metadata = ['unit','tz']
+ _match = re.compile("datetime64\[(?P<unit>.+), (?P<tz>.+)\]")
+
+ def __init__(self, unit, tz=None):
+ """
+ Parameters
+ ----------
+ unit : string unit that this represents, currently must be 'ns'
+ tz : string tz that this represents
+ """
+
+ if isinstance(unit, DatetimeTZDtype):
+ self.unit, self.tz = unit.unit, unit.tz
+ return
+
+ if tz is None:
+
+ # we were passed a string that we can construct
+ try:
+ m = self._match.search(unit)
+ if m is not None:
+ self.unit = m.groupdict()['unit']
+ self.tz = m.groupdict()['tz']
+ return
+ except:
+ raise ValueError("could not construct DatetimeTZDtype")
+
+ raise ValueError("DatetimeTZDtype constructor must have a tz supplied")
+
+ if unit != 'ns':
+ raise ValueError("DatetimeTZDtype only supports ns units")
+ self.unit = unit
+ self.tz = tz
+
+ @classmethod
+ def construct_from_string(cls, string):
+ """ attempt to construct this type from a string, raise a TypeError if its not possible """
+ try:
+ return cls(unit=string)
+ except ValueError:
+ raise TypeError("could not construct DatetimeTZDtype")
+
+ def __unicode__(self):
+ # format the tz
+ return "datetime64[{unit}, {tz}]".format(unit=self.unit,tz=self.tz)
+
+ @property
+ def name(self):
+ return str(self)
+
+ def __hash__(self):
+ # make myself hashable
+ return hash(str(self))
+
+ def __eq__(self, other):
+ if isinstance(other, compat.string_types):
+ return other == self.name
+
+ return isinstance(other, DatetimeTZDtype) and self.unit == other.unit and self.tz == other.tz
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 818391d6eec23..29f1e1efe9f5d 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -19,6 +19,7 @@
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
+import pandas as pd
import numpy as np
import itertools
@@ -188,7 +189,7 @@ def _get_footer(self):
# level infos are added to the end and in a new line, like it is done for Categoricals
# Only added when we request a name
if name and com.is_categorical_dtype(self.tr_series.dtype):
- level_info = self.tr_series.values._repr_categories_info()
+ level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
@@ -208,7 +209,7 @@ def _get_formatted_index(self):
return fmt_index, have_header
def _get_formatted_values(self):
- return format_array(self.tr_series.values, None,
+ return format_array(self.tr_series._values, None,
float_format=self.float_format,
na_rep=self.na_rep)
@@ -615,7 +616,7 @@ def get_col_type(dtype):
strcols.insert(i, lev3)
if column_format is None:
- dtypes = self.frame.dtypes.values
+ dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.index:
index_format = 'l' * self.frame.index.nlevels
@@ -681,7 +682,7 @@ def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
- frame.iloc[:, i].values,
+ frame.iloc[:, i]._values,
formatter, float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space
)
@@ -720,7 +721,7 @@ def is_numeric_dtype(dtype):
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
- dtypes = self.frame.dtypes.values
+ dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any([l.is_floating for l in columns.levels])
@@ -1401,7 +1402,7 @@ def strftime_with_nulls(x):
series = {}
for k, v in compat.iteritems(values._series):
- series[k] = v.values
+ series[k] = v._values
nlevels = getattr(data_index, 'nlevels', 1)
for j, idx in enumerate(data_index):
@@ -1919,6 +1920,8 @@ def format_array(values, formatter, float_format=None, na_rep='NaN',
fmt_klass = PeriodArrayFormatter
elif com.is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
+ elif com.is_datetimetz(values):
+ fmt_klass = Datetime64TZFormatter
elif com.is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif com.is_timedelta64_dtype(values.dtype):
@@ -1975,6 +1978,8 @@ def _format(x):
if self.na_rep is not None and lib.checknull(x):
if x is None:
return 'None'
+ elif x is pd.NaT:
+ return 'NaT'
return self.na_rep
elif isinstance(x, PandasObject):
return '%s' % x
@@ -1984,7 +1989,7 @@ def _format(x):
vals = self.values
if isinstance(vals, Index):
- vals = vals.values
+ vals = vals._values
is_float = lib.map_infer(vals, com.is_float) & notnull(vals)
leading_space = is_float.any()
@@ -2067,9 +2072,7 @@ class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '% d' % x)
-
fmt_values = [formatter(x) for x in self.values]
-
return fmt_values
@@ -2080,27 +2083,16 @@ def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
self.date_format = date_format
def _format_strings(self):
+ """ we by definition have DO NOT have a TZ """
- # we may have a tz, if so, then need to process element-by-element
- # when DatetimeBlockWithTimezones is a reality this could be fixed
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
- if values.tz is None:
- fmt_values = format_array_from_datetime(values.asi8.ravel(),
- format=_get_format_datetime64_from_values(values, self.date_format),
- na_rep=self.nat_rep).reshape(values.shape)
- fmt_values = fmt_values.tolist()
-
- else:
-
- values = values.asobject
- is_dates_only = _is_dates_only(values)
- formatter = (self.formatter or _get_format_datetime64(is_dates_only, values, date_format=self.date_format))
- fmt_values = [ formatter(x) for x in values ]
-
- return fmt_values
+ fmt_values = format_array_from_datetime(values.asi8.ravel(),
+ format=_get_format_datetime64_from_values(values, self.date_format),
+ na_rep=self.nat_rep).reshape(values.shape)
+ return fmt_values.tolist()
class PeriodArrayFormatter(IntArrayFormatter):
@@ -2179,6 +2171,18 @@ def _get_format_datetime64_from_values(values, date_format):
return date_format
+class Datetime64TZFormatter(Datetime64Formatter):
+
+ def _format_strings(self):
+ """ we by definition have a TZ """
+
+ values = self.values.asobject
+ is_dates_only = _is_dates_only(values)
+ formatter = (self.formatter or _get_format_datetime64(is_dates_only, date_format=self.date_format))
+ fmt_values = [ formatter(x) for x in values ]
+
+ return fmt_values
+
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c5c0f9e82fa94..cb237b93c70ba 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -28,6 +28,7 @@
_infer_dtype_from_scalar, _values_from_object,
is_list_like, _maybe_box_datetimelike,
is_categorical_dtype, is_object_dtype,
+ is_internal_type, is_datetimetz,
_possibly_infer_to_datetimelike, _dict_compat)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
@@ -116,14 +117,14 @@
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
- If True, adds a column to output DataFrame called "_merge" with
- information on the source of each row.
- If string, column with information on source of each row will be added to
- output DataFrame, and column will be named value of string.
- Information column is Categorical-type and takes on a value of "left_only"
- for observations whose merge key only appears in 'left' DataFrame,
- "right_only" for observations whose merge key only appears in 'right'
- DataFrame, and "both" if the observation's merge key is found in both.
+ If True, adds a column to output DataFrame called "_merge" with
+ information on the source of each row.
+ If string, column with information on source of each row will be added to
+ output DataFrame, and column will be named value of string.
+ Information column is Categorical-type and takes on a value of "left_only"
+ for observations whose merge key only appears in 'left' DataFrame,
+ "right_only" for observations whose merge key only appears in 'right'
+ DataFrame, and "both" if the observation's merge key is found in both.
.. versionadded:: 0.17.0
@@ -402,6 +403,9 @@ def _get_axes(N, K, index=index, columns=columns):
index, columns = _get_axes(len(values),1)
return _arrays_to_mgr([ values ], columns, index, columns,
dtype=dtype)
+ elif is_datetimetz(values):
+ return self._init_dict({ 0 : values }, index, columns,
+ dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
@@ -871,6 +875,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None,
-------
df : DataFrame
"""
+
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
@@ -1749,7 +1754,7 @@ def get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
- return _maybe_box_datetimelike(series.values[index])
+ return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
@@ -1779,7 +1784,7 @@ def set_value(self, index, col, value, takeable=False):
series = self._get_item_cache(col)
engine = self.index._engine
- engine.set_value(series.values, index, value)
+ engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
@@ -1831,6 +1836,8 @@ def _ixs(self, i, axis=0):
copy=True
else:
new_values = self._data.fast_xs(i)
+ if lib.isscalar(new_values):
+ return new_values
# if we are a copy, mark as such
copy = isinstance(new_values,np.ndarray) and new_values.base is None
@@ -2423,7 +2430,7 @@ def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
- value = value.values.copy()
+ value = value._values.copy()
else:
# GH 4107
@@ -2475,7 +2482,7 @@ def reindexer(value):
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
- value = _possibly_infer_to_datetimelike(value.ravel()).reshape(value.shape)
+ value = _possibly_infer_to_datetimelike(value)
else:
# upcast the scalar
@@ -2483,8 +2490,8 @@ def reindexer(value):
value = np.repeat(value, len(self.index)).astype(dtype)
value = com._possibly_cast_to_datetime(value, dtype)
- # return unconsolidatables directly
- if isinstance(value, (Categorical, SparseArray)):
+ # return internal types directly
+ if is_internal_type(value):
return value
# broadcast across multiple columns if necessary
@@ -2718,7 +2725,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False,
level = col
names.append(None)
else:
- level = frame[col].values
+ level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
@@ -2782,7 +2789,7 @@ def _maybe_casted_values(index, labels=None):
values = index.asobject.values
elif (isinstance(index, DatetimeIndex) and
index.tz is not None):
- values = index.asobject
+ values = index
else:
values = index.values
if values.dtype == np.object_:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d3a63f9f5d851..1586bb5df5893 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2417,6 +2417,11 @@ def values(self):
"""
return self.as_matrix()
+ @property
+ def _values(self):
+ """ internal implementation """
+ return self.values
+
@property
def _get_values(self):
# compat
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 7a5770d3968ec..ce7aaec2644cc 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2193,9 +2193,9 @@ def _convert_grouper(axis, grouper):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
- return grouper.values
+ return grouper._values
else:
- return grouper.reindex(axis).values
+ return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 14ba2dea0b76c..ef167489435b3 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -19,6 +19,7 @@
deprecate, deprecate_kwarg)
import pandas.core.common as com
from pandas.core.common import (isnull, array_equivalent, is_dtype_equal, is_object_dtype,
+ is_datetimetz,
_values_from_object, is_float, is_integer, is_iterator, is_categorical_dtype,
ABCSeries, ABCCategorical, _ensure_object, _ensure_int64, is_bool_indexer,
is_list_like, is_bool_dtype, is_null_slice, is_integer_dtype)
@@ -115,7 +116,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
from pandas.tseries.period import PeriodIndex
if isinstance(data, (np.ndarray, Index, ABCSeries)):
- if issubclass(data.dtype.type, np.datetime64):
+ if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
@@ -207,7 +208,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
return cls._simple_new(subarr, name)
@classmethod
- def _simple_new(cls, values, name=None, **kwargs):
+ def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
@@ -215,9 +216,12 @@ def _simple_new(cls, values, name=None, **kwargs):
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
- values = np.array(values,copy=False)
- if is_object_dtype(values):
- values = cls(values, name=name, **kwargs).values
+ if values is None and dtype is not None:
+ values = np.empty(0, dtype=dtype)
+ else:
+ values = np.array(values,copy=False)
+ if is_object_dtype(values):
+ values = cls(values, name=name, dtype=dtype, **kwargs)._values
result = object.__new__(cls)
result._data = values
@@ -305,7 +309,7 @@ def repeat(self, n):
--------
numpy.ndarray.repeat
"""
- return self._shallow_copy(self.values.repeat(n))
+ return self._shallow_copy(self._values.repeat(n))
def ravel(self, order='C'):
"""
@@ -315,7 +319,7 @@ def ravel(self, order='C'):
--------
numpy.ndarray.ravel
"""
- return self.values.ravel(order=order)
+ return self._values.ravel(order=order)
# construction helpers
@classmethod
@@ -606,7 +610,7 @@ def _to_embed(self, keep_tz=False):
return an array repr of this object, potentially casting to object
"""
- return self.values
+ return self.values.copy()
def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
@@ -1164,7 +1168,7 @@ def _ensure_compat_append(self, other):
break
to_concat = self._ensure_compat_concat(to_concat)
- to_concat = [x.values if isinstance(x, Index) else x
+ to_concat = [x._values if isinstance(x, Index) else x
for x in to_concat]
return to_concat, name
@@ -1197,10 +1201,15 @@ def _ensure_compat_concat(indexes):
return indexes
- def take(self, indices, axis=0):
+ def take(self, indices, axis=0, allow_fill=True, fill_value=None):
"""
return a new Index of the values selected by the indexer
+ For internal compatibility with numpy arrays.
+
+ # filling must always be None/nan here
+ # but is passed thru internally
+
See also
--------
numpy.ndarray.take
@@ -1470,20 +1479,20 @@ def union(self, other):
if self.is_monotonic and other.is_monotonic:
try:
- result = self._outer_indexer(self.values, other.values)[0]
+ result = self._outer_indexer(self.values, other._values)[0]
except TypeError:
# incomparable objects
result = list(self.values)
# worth making this faster? a very unusual case
value_set = set(self.values)
- result.extend([x for x in other.values if x not in value_set])
+ result.extend([x for x in other._values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
- other_diff = com.take_nd(other.values, indexer,
+ other_diff = com.take_nd(other._values, indexer,
allow_fill=False)
result = com._concat_compat((self.values, other_diff))
@@ -1544,17 +1553,17 @@ def intersection(self, other):
if self.is_monotonic and other.is_monotonic:
try:
- result = self._inner_indexer(self.values, other.values)[0]
+ result = self._inner_indexer(self.values, other._values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
- indexer = self.get_indexer(other.values)
+ indexer = Index(self.values).get_indexer(other._values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
- indexer = self.get_indexer_non_unique(other.values)[0].unique()
+ indexer = Index(self.values).get_indexer_non_unique(other._values)[0].unique()
indexer = indexer[indexer != -1]
taken = self.take(indexer)
@@ -1683,6 +1692,13 @@ def get_value(self, series, key):
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
+
+ # if we have something that is Index-like, then
+ # use this, e.g. DatetimeIndex
+ s = getattr(series,'_values',None)
+ if isinstance(s, Index) and lib.isscalar(key):
+ return s[key]
+
s = _values_from_object(series)
k = _values_from_object(key)
@@ -1808,7 +1824,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
- indexer = self._engine.get_indexer(target.values)
+ indexer = self._engine.get_indexer(target._values)
return com._ensure_platform_int(indexer)
@@ -1820,12 +1836,12 @@ def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad'
else self._engine.get_backfill_indexer)
- indexer = method(target.values, limit)
+ indexer = method(target._values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method, limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(
- target.values, indexer, tolerance)
+ target._values, indexer, tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
@@ -1899,7 +1915,7 @@ def get_indexer_non_unique(self, target):
self = Index(self.asi8)
tgt_values = target.asi8
else:
- tgt_values = target.values
+ tgt_values = target._values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
@@ -2016,7 +2032,7 @@ def reindex(self, target, method=None, level=None, limit=None,
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
- target = self._simple_new(np.empty(0, dtype=self.dtype), **attrs)
+ target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
@@ -2077,7 +2093,7 @@ def _reindex_non_unique(self, target):
missing = com._ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = com._ensure_int64(l[~check])
- cur_labels = self.take(indexer[check]).values
+ cur_labels = self.take(indexer[check])._values
cur_indexer = com._ensure_int64(l[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
@@ -2097,7 +2113,7 @@ def _reindex_non_unique(self, target):
else:
# need to retake to have the same size as the indexer
- indexer = indexer.values
+ indexer = indexer._values
indexer[~check] = 0
# reset the new indexer to account for the new size
@@ -2258,7 +2274,7 @@ def _join_multi(self, other, how, return_indexers=True):
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.tools.merge import _get_join_indexers
- left_idx, right_idx = _get_join_indexers([self.values], [other.values],
+ left_idx, right_idx = _get_join_indexers([self.values], [other._values],
how=how, sort=True)
left_idx = com._ensure_platform_int(left_idx)
@@ -2266,7 +2282,7 @@ def _join_non_unique(self, other, how='left', return_indexers=False):
join_index = self.values.take(left_idx)
mask = left_idx == -1
- np.putmask(join_index, mask, other.values.take(right_idx))
+ np.putmask(join_index, mask, other._values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
@@ -2412,7 +2428,7 @@ def _join_monotonic(self, other, how='left', return_indexers=False):
return ret_index
sv = self.values
- ov = other.values
+ ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
@@ -2679,7 +2695,7 @@ def insert(self, loc, item):
new_index : Index
"""
_self = np.asarray(self)
- item = self._coerce_scalar_to_index(item).values
+ item = self._coerce_scalar_to_index(item)._values
idx = np.concatenate(
(_self[:loc], item, _self[loc:]))
@@ -3056,7 +3072,7 @@ def _is_dtype_compat(self, other):
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
- other = other.values
+ other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories when appending")
else:
@@ -3319,9 +3335,13 @@ def _convert_list_indexer(self, keyarr, kind=None):
return None
- def take(self, indexer, axis=0):
+ def take(self, indexer, axis=0, allow_fill=True, fill_value=None):
"""
- return a new CategoricalIndex of the values selected by the indexer
+ For internal compatibility with numpy arrays.
+
+ # filling must always be None/nan here
+ # but is passed thru internally
+ assert isnull(fill_value)
See also
--------
@@ -3401,9 +3421,9 @@ def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same categories
if isinstance(other, CategoricalIndex):
- other = other.values
+ other = other._values
elif isinstance(other, Index):
- other = self._create_categorical(self, other.values, categories=self.categories, ordered=self.ordered)
+ other = self._create_categorical(self, other._values, categories=self.categories, ordered=self.ordered)
if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)):
if len(self.values) != len(other):
@@ -3426,8 +3446,8 @@ def _evaluate_compare(self, other):
def _delegate_method(self, name, *args, **kwargs):
- """ method delegation to the .values """
- method = getattr(self.values, name)
+ """ method delegation to the ._values """
+ method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
@@ -3680,7 +3700,7 @@ def astype(self, dtype):
raise TypeError('Setting %s dtype to anything other than '
'float64 or object is not supported' %
self.__class__)
- return Index(self.values, name=self.name, dtype=dtype)
+ return Index(self._values, name=self.name, dtype=dtype)
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
@@ -3743,7 +3763,7 @@ def equals(self, other):
other = self._constructor(other)
if not is_dtype_equal(self.dtype,other.dtype) or self.shape != other.shape:
return False
- left, right = self.values, other.values
+ left, right = self._values, other._values
return ((left == right) | (self._isnan & other._isnan)).all()
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
@@ -4293,12 +4313,12 @@ def values(self):
box = hasattr(lev, '_box_values')
# Try to minimize boxing.
if box and len(lev) > len(lab):
- taken = lev._box_values(com.take_1d(lev.values, lab))
+ taken = lev._box_values(com.take_1d(lev._values, lab))
elif box:
- taken = com.take_1d(lev._box_values(lev.values), lab,
+ taken = com.take_1d(lev._box_values(lev._values), lab,
fill_value=_get_na_value(lev.dtype.type))
else:
- taken = com.take_1d(np.asarray(lev.values), lab)
+ taken = com.take_1d(np.asarray(lev._values), lab)
values.append(taken)
self._tuples = lib.fast_zip(values)
@@ -4345,7 +4365,7 @@ def get_value(self, series, key):
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
- new_values = series.values[loc]
+ new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
@@ -4408,7 +4428,7 @@ def get_level_values(self, level):
num = self._get_level_number(level)
unique = self.levels[num] # .values
labels = self.labels[num]
- filled = com.take_1d(unique.values, labels, fill_value=unique._na_value)
+ filled = com.take_1d(unique._values, labels, fill_value=unique._na_value)
values = unique._simple_new(filled, self.names[num],
freq=getattr(unique, 'freq', None),
tz=getattr(unique, 'tz', None))
@@ -4438,7 +4458,7 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
# weird all NA case
formatted = [com.pprint_thing(na if isnull(x) else x,
escape_chars=('\t', '\r', '\n'))
- for x in com.take_1d(lev.values, lab)]
+ for x in com.take_1d(lev._values, lab)]
stringified_levels.append(formatted)
result_levels = []
@@ -4622,7 +4642,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None):
if isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
- tuples = tuples.values
+ tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
@@ -4777,7 +4797,7 @@ def append(self, other):
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
- to_concat = (self.values,) + tuple(k.values for k in other)
+ to_concat = (self.values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
@@ -5065,7 +5085,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
raise NotImplementedError("method='nearest' not implemented yet "
'for MultiIndex; see GitHub issue 9365')
else:
- indexer = self_index._engine.get_indexer(target.values)
+ indexer = self_index._engine.get_indexer(target._values)
return com._ensure_platform_int(indexer)
@@ -5140,7 +5160,7 @@ def _tuple_index(self):
-------
index : Index
"""
- return Index(self.values)
+ return Index(self._values)
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
@@ -5450,7 +5470,7 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
mapper = Series(indexer)
indexer = labels.take(com._ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
- m = result.map(mapper).values
+ m = result.map(mapper)._values
else:
m = np.zeros(len(labels),dtype=bool)
@@ -5576,7 +5596,7 @@ def _update_indexer(idxr, indexer=indexer):
else:
# no matches we are done
- return Int64Index([]).values
+ return Int64Index([])._values
elif is_null_slice(k):
# empty slice
@@ -5592,8 +5612,8 @@ def _update_indexer(idxr, indexer=indexer):
# empty indexer
if indexer is None:
- return Int64Index([]).values
- return indexer.values
+ return Int64Index([])._values
+ return indexer._values
def truncate(self, before=None, after=None):
"""
@@ -5638,7 +5658,7 @@ def equals(self, other):
return True
if not isinstance(other, MultiIndex):
- return array_equivalent(self.values,
+ return array_equivalent(self._values,
_values_from_object(_ensure_index(other)))
if self.nlevels != other.nlevels:
@@ -5648,9 +5668,9 @@ def equals(self, other):
return False
for i in range(self.nlevels):
- svalues = com.take_nd(np.asarray(self.levels[i].values), self.labels[i],
+ svalues = com.take_nd(np.asarray(self.levels[i]._values), self.labels[i],
allow_fill=False)
- ovalues = com.take_nd(np.asarray(other.levels[i].values), other.labels[i],
+ ovalues = com.take_nd(np.asarray(other.levels[i]._values), other.labels[i],
allow_fill=False)
if not array_equivalent(svalues, ovalues):
return False
@@ -5690,7 +5710,7 @@ def union(self, other):
if len(other) == 0 or self.equals(other):
return self
- uniq_tuples = lib.fast_unique_multiple([self.values, other.values])
+ uniq_tuples = lib.fast_unique_multiple([self._values, other._values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
@@ -5712,8 +5732,8 @@ def intersection(self, other):
if self.equals(other):
return self
- self_tuples = self.values
- other_tuples = other.values
+ self_tuples = self._values
+ other_tuples = other._values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
@@ -5742,7 +5762,7 @@ def difference(self, other):
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
- difference = sorted(set(self.values) - set(other.values))
+ difference = sorted(set(self._values) - set(other._values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index b8ee831cdc12c..7cf1942046e75 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -294,9 +294,9 @@ def _setitem_with_indexer(self, indexer, value):
new_index = index.insert(len(index),indexer)
# this preserves dtype of the value
- new_values = Series([value]).values
- if len(self.obj.values):
- new_values = np.concatenate([self.obj.values,
+ new_values = Series([value])._values
+ if len(self.obj._values):
+ new_values = np.concatenate([self.obj._values,
new_values])
self.obj._data = self.obj._constructor(
@@ -548,7 +548,7 @@ def _align_series(self, indexer, ser):
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
all([com.is_sequence(_) for _ in indexer])):
- ser = ser.reindex(obj.axes[0][indexer[0]], copy=True).values
+ ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1:
@@ -570,9 +570,9 @@ def _align_series(self, indexer, ser):
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
- return ser.values.copy()
+ return ser._values.copy()
- return ser.reindex(new_ix).values
+ return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner and is_frame:
@@ -580,8 +580,8 @@ def _align_series(self, indexer, ser):
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
- return ser.values.copy()
- return ser.reindex(ax).values
+ return ser._values.copy()
+ return ser.reindex(ax)._values
# >2 dims
elif single_aligner:
@@ -596,7 +596,7 @@ def _align_series(self, indexer, ser):
broadcast.append((n, len(labels)))
# broadcast along other dims
- ser = ser.values.copy()
+ ser = ser._values.copy()
for (axis, l) in broadcast:
shape = [-1] * (len(broadcast) + 1)
shape[axis] = l
@@ -611,9 +611,9 @@ def _align_series(self, indexer, ser):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
- return ser.values.copy()
+ return ser._values.copy()
- return ser.reindex(ax).values
+ return ser.reindex(ax)._values
raise ValueError('Incompatible indexer with Series')
@@ -659,16 +659,16 @@ def _align_frame(self, indexer, df):
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
- val = df.copy().values
+ val = df.copy()._values
else:
- val = df.reindex(idx, columns=cols).values
+ val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer))
and is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
- val = df.copy().values
+ val = df.copy()._values
else:
# we have a multi-index and are trying to align
@@ -677,7 +677,7 @@ def _align_frame(self, indexer, df):
df.index, MultiIndex) and ax.nlevels != df.index.nlevels:
raise TypeError("cannot align on a multi-index with out specifying the join levels")
- val = df.reindex(index=ax).values
+ val = df.reindex(index=ax)._values
return val
elif np.isscalar(indexer) and is_panel:
@@ -688,9 +688,9 @@ def _align_frame(self, indexer, df):
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
- return df.copy().values
+ return df.copy()._values
- return df.reindex(idx, columns=cols).values
+ return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
@@ -1660,11 +1660,11 @@ def check_bool_indexer(ax, key):
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
- mask = com.isnull(result.values)
+ mask = com.isnull(result._values)
if mask.any():
raise IndexingError('Unalignable boolean Series key provided')
- result = result.astype(bool).values
+ result = result.astype(bool)._values
else:
# is_bool_indexer has already checked for nulls in the case of an
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 5366c5a6b8f80..58ee36142d4fd 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -11,13 +11,18 @@
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
+ is_null_slice, is_dtype_equal,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
+ is_datetime64tz_dtype, is_datetimetz, is_sparse,
array_equivalent, _maybe_convert_string_to_object,
- is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
+ is_categorical, needs_i8_conversion, is_datetimelike_v_numeric,
+ is_internal_type)
+
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
+from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
@@ -48,11 +53,13 @@ class Block(PandasObject):
is_integer = False
is_complex = False
is_datetime = False
+ is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
+ _box_to_block_values = True
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
@@ -109,6 +116,22 @@ def is_categorical_astype(self, dtype):
return False
+ def external_values(self, dtype=None):
+ """ return an outside world format, currently just the ndarray """
+ return self.values
+
+ def internal_values(self, dtype=None):
+ """ return an internal format, currently just the ndarray
+ this should be the pure internal API format """
+ return self.values
+
+ def get_values(self, dtype=None):
+ """
+ return an internal format, currently just the ndarray
+        this is often overridden to handle to_dense like operations
+ """
+ return self.values
+
def to_dense(self):
return self.values.view()
@@ -125,6 +148,18 @@ def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
+ def make_block(self, values, placement=None, ndim=None, **kwargs):
+ """
+ Create a new block, with type inference
+        propagate any values that are not specified
+ """
+ if placement is None:
+ placement = self.mgr_locs
+ if ndim is None:
+ ndim = self.ndim
+
+ return make_block(values, placement=placement, ndim=ndim, **kwargs)
+
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
@@ -248,9 +283,8 @@ def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
- return make_block(new_values,
- ndim=self.ndim, fastpath=True,
- placement=self.mgr_locs)
+ return self.make_block(new_values,
+ fastpath=True)
def get(self, item):
loc = self.items.get_loc(item)
@@ -280,7 +314,7 @@ def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
- result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
+ result = self.make_block(values=_block_shape(result))
return result
@@ -334,8 +368,8 @@ def downcast(self, dtypes=None):
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
- return [make_block(nv, ndim=self.ndim,
- fastpath=True, placement=self.mgr_locs)]
+ return [self.make_block(nv,
+ fastpath=True)]
# ndim > 1
if dtypes is None:
@@ -362,9 +396,9 @@ def downcast(self, dtypes=None):
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
- blocks.append(make_block(nv,
- ndim=self.ndim, fastpath=True,
- placement=[rl]))
+ blocks.append(self.make_block(nv,
+ fastpath=True,
+ placement=[rl]))
return blocks
@@ -382,9 +416,7 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
- return make_block(Categorical(self.values, **kwargs),
- ndim=self.ndim,
- placement=self.mgr_locs)
+ return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
@@ -399,12 +431,20 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
try:
# force the copy here
if values is None:
+
+ if issubclass(dtype.type, (compat.text_type, compat.string_types)):
+ values = self.to_native_types()
+ else:
+ values = self.get_values(dtype=dtype)
+
# _astype_nansafe works fine with 1-d only
- values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
- values = values.reshape(self.values.shape)
+ values = com._astype_nansafe(values.ravel(), dtype, copy=True)
+ values = values.reshape(self.shape)
+
newb = make_block(values,
- ndim=self.ndim, placement=self.mgr_locs,
- fastpath=True, dtype=dtype, klass=klass)
+ placement=self.mgr_locs,
+ dtype=dtype,
+ klass=klass)
except:
if raise_on_error is True:
raise
@@ -484,7 +524,7 @@ def _try_coerce_and_cast_result(self, result, dtype=None):
def _try_fill(self, value):
return value
- def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
+ def to_native_types(self, slicer=None, na_rep='nan', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
@@ -505,9 +545,9 @@ def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
- return make_block(values, ndim=self.ndim,
- klass=self.__class__, fastpath=True,
- placement=self.mgr_locs)
+ return self.make_block(values,
+ klass=self.__class__,
+ fastpath=True)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
@@ -616,9 +656,8 @@ def _is_empty_indexer(indexer):
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
- block = make_block(transf(values),
- ndim=self.ndim, placement=self.mgr_locs,
- fastpath=True)
+ block = self.make_block(transf(values),
+ fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
@@ -723,17 +762,16 @@ def putmask(self, mask, new, align=True, inplace=False,
# Put back the dimension that was taken from it and make
# a block out of the result.
- block = make_block(values=nv[np.newaxis],
- placement=[ref_loc],
- fastpath=True)
+ block = self.make_block(values=nv[np.newaxis],
+ placement=[ref_loc],
+ fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
- new_blocks.append(make_block(values=nv,
- placement=self.mgr_locs,
- fastpath=True))
+ new_blocks.append(self.make_block(values=nv,
+ fastpath=True))
return new_blocks
@@ -743,7 +781,8 @@ def putmask(self, mask, new, align=True, inplace=False,
if transpose:
new_values = new_values.T
- return [make_block(new_values, placement=self.mgr_locs, fastpath=True)]
+ return [self.make_block(new_values,
+ fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
@@ -824,9 +863,9 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
dtype=self.dtype)
values = self._try_coerce_result(values)
- blocks = [make_block(values,
- ndim=self.ndim, klass=self.__class__,
- fastpath=True, placement=self.mgr_locs)]
+ blocks = [self.make_block(values,
+ klass=self.__class__,
+ fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
@@ -865,9 +904,9 @@ def func(x):
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
- blocks = [make_block(interp_values,
- ndim=self.ndim, klass=self.__class__,
- fastpath=True, placement=self.mgr_locs)]
+ blocks = [self.make_block(interp_values,
+ klass=self.__class__,
+ fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
@@ -875,13 +914,22 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
Take values according to indexer and return them as a block.bb
"""
+
+ # com.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
+ # so need to preserve types
+ # sparse is treated like an ndarray, but needs .get_values() shaping
+
+ values = self.values
+ if self.is_sparse:
+ values = self.get_values()
+
if fill_tuple is None:
fill_value = self.fill_value
- new_values = com.take_nd(self.get_values(), indexer, axis=axis,
+ new_values = com.take_nd(values, indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
- new_values = com.take_nd(self.get_values(), indexer, axis=axis,
+ new_values = com.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
@@ -894,26 +942,24 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
else:
new_mgr_locs = self.mgr_locs
- if new_values.dtype != self.dtype:
- return make_block(new_values, new_mgr_locs)
+ if not is_dtype_equal(new_values.dtype, self.dtype):
+ return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
- def get_values(self, dtype=None):
- return self.values
-
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
- return [make_block(values=new_values,
- ndim=self.ndim, fastpath=True,
- placement=self.mgr_locs)]
+ return [self.make_block(values=new_values,
+ fastpath=True)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
+
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
+
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
@@ -934,9 +980,8 @@ def shift(self, periods, axis=0):
if f_ordered:
new_values = new_values.T
- return [make_block(new_values,
- ndim=self.ndim, fastpath=True,
- placement=self.mgr_locs)]
+ return [self.make_block(new_values,
+ fastpath=True)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
@@ -1027,8 +1072,8 @@ def handle_error():
if try_cast:
result = self._try_cast_result(result)
- return [make_block(result, ndim=self.ndim,
- fastpath=True, placement=self.mgr_locs)]
+ return [self.make_block(result,
+ fastpath=True,)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False, axis=0, transpose=False):
@@ -1108,7 +1153,7 @@ def func(c, v, o):
if try_cast:
result = self._try_cast_result(result)
- return make_block(result, ndim=self.ndim, placement=self.mgr_locs)
+ return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
@@ -1121,8 +1166,8 @@ def func(c, v, o):
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
- result_blocks.append(make_block(r.T,
- placement=self.mgr_locs[m]))
+ result_blocks.append(self.make_block(r.T,
+ placement=self.mgr_locs[m]))
return result_blocks
@@ -1139,7 +1184,7 @@ class NonConsolidatableMixIn(object):
_holder = None
def __init__(self, values, placement,
- ndim=None, fastpath=False,):
+ ndim=None, fastpath=False, **kwargs):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
@@ -1159,6 +1204,12 @@ def __init__(self, values, placement,
self.values = values
+ @property
+ def shape(self):
+ if self.ndim == 1:
+ return (len(self.values)),
+ return (len(self.mgr_locs), len(self.values))
+
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
@@ -1170,7 +1221,7 @@ def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
- if col != 0:
+ if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
@@ -1450,13 +1501,13 @@ class ObjectBlock(Block):
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
- placement=None):
+ placement=None, **kwargs):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
- placement=placement)
+ placement=placement, **kwargs)
@property
def is_bool(self):
@@ -1490,8 +1541,8 @@ def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
copy=copy
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
- newb = make_block(values,
- ndim=self.ndim, placement=[rl])
+ newb = self.make_block(values,
+ placement=[rl])
blocks.append(newb)
else:
@@ -1504,8 +1555,7 @@ def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
coerce=coerce,
copy=copy
).reshape(self.values.shape)
- blocks.append(make_block(values,
- ndim=self.ndim, placement=self.mgr_locs))
+ blocks.append(self.make_block(values))
return blocks
@@ -1559,7 +1609,7 @@ def _try_cast(self, element):
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
- np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
+ np.datetime64, np.bool_)) or is_internal_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
@@ -1662,12 +1712,13 @@ def re_replacer(s):
new_values[filt] = f(new_values[filt])
return [self if inplace else
- make_block(new_values,
- fastpath=True, placement=self.mgr_locs)]
+ self.make_block(new_values,
+ fastpath=True)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
+ _verify_integrity = True
_can_hold_na = True
_holder = Categorical
@@ -1690,10 +1741,6 @@ def to_dense(self):
def convert(self, copy=True, **kwargs):
return [self.copy() if copy else self]
- @property
- def shape(self):
- return (len(self.mgr_locs), len(self.values))
-
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
@@ -1791,9 +1838,7 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
if copy:
values = values.copy()
- return make_block(values,
- ndim=self.ndim,
- placement=self.mgr_locs)
+ return self.make_block(values)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
@@ -1893,8 +1938,8 @@ def fillna(self, value, limit=None,
np.putmask(values, mask, value)
return [self if inplace else
- make_block(values,
- fastpath=True, placement=self.mgr_locs)]
+ self.make_block(values,
+ fastpath=True)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
@@ -1902,19 +1947,19 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None,
values = self.values
if slicer is not None:
- values = values[:, slicer]
+ values = values[..., slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
- tz=None,
+ tz=getattr(self.values,'tz',None),
format=format,
na_rep=na_rep).reshape(values.shape)
- return result
+ return np.atleast_2d(result)
def should_store(self, value):
- return issubclass(value.dtype.type, np.datetime64)
+ return issubclass(value.dtype.type, np.datetime64) and not is_datetimetz(value)
def set(self, locs, values, check=False):
"""
@@ -1937,12 +1982,102 @@ def get_values(self, dtype=None):
.reshape(self.values.shape)
return self.values
+class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
+ """ implement a datetime64 block with a tz attribute """
+ __slots__ = ()
+ _holder = DatetimeIndex
+ is_datetimetz = True
+
+ def __init__(self, values, placement, ndim=2,
+ **kwargs):
+
+ if not isinstance(values, self._holder):
+ values = self._holder(values)
+ if values.tz is None:
+ raise ValueError("cannot create a DatetimeTZBlock without a tz")
+
+ super(DatetimeTZBlock, self).__init__(values,
+ placement=placement,
+ ndim=ndim,
+ **kwargs)
+ def external_values(self):
+ """ we internally represent the data as a DatetimeIndex, but for external
+ compat with ndarray, export as a ndarray of Timestamps """
+ return self.values.astype('datetime64[ns]').values
+
+ def get_values(self, dtype=None):
+ # return object dtype as Timestamps with the zones
+ if dtype == object:
+ return lib.map_infer(self.values.ravel(), lambda x: lib.Timestamp(x,tz=self.values.tz))\
+ .reshape(self.values.shape)
+ return self.values
+
+ def _slice(self, slicer):
+ """ return a slice of my values """
+ if isinstance(slicer, tuple):
+ col, loc = slicer
+ if not is_null_slice(col) and col != 0:
+ raise IndexError("{0} only contains one item".format(self))
+ return self.values[loc]
+ return self.values[slicer]
+
+ def _try_coerce_args(self, values, other):
+ """ localize and return i8 for the values """
+ values = values.tz_localize(None).asi8
+
+ if is_null_datelike_scalar(other):
+ other = tslib.iNaT
+ elif isinstance(other, self._holder):
+ if other.tz != self.tz:
+ raise ValueError("incompatible or non tz-aware value")
+ other = other.tz_localize(None).asi8
+ else:
+ other = lib.Timestamp(other)
+ if not getattr(other, 'tz', None):
+ raise ValueError("incompatible or non tz-aware value")
+ other = other.value
+
+ return values, other
+
+ def _try_coerce_result(self, result):
+ """ reverse of try_coerce_args """
+ result = super(DatetimeTZBlock, self)._try_coerce_result(result)
+
+ if isinstance(result, np.ndarray):
+ result = self._holder(result, tz=self.values.tz)
+ elif isinstance(result, (np.integer, np.datetime64)):
+ result = lib.Timestamp(result, tz=self.values.tz)
+ return result
+
+ def shift(self, periods, axis=0):
+ """ shift the block by periods """
+
+ ### think about moving this to the DatetimeIndex. This is a non-freq (number of periods) shift ###
+
+ N = len(self)
+ indexer = np.zeros(N, dtype=int)
+ if periods > 0:
+ indexer[periods:] = np.arange(N - periods)
+ else:
+ indexer[:periods] = np.arange(-periods, N)
+
+ # move to UTC & take
+ new_values = self.values.tz_localize(None).asi8.take(indexer)
+
+ if periods > 0:
+ new_values[:periods] = tslib.iNaT
+ else:
+ new_values[periods:] = tslib.iNaT
+
+ new_values = DatetimeIndex(new_values,tz=self.values.tz)
+ return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
+ _box_to_block_values = False
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@@ -1967,6 +2102,9 @@ def fill_value(self, v):
v = float(v)
self.values.fill_value = v
+ def to_dense(self):
+ return self.values.to_dense().view()
+
@property
def sp_values(self):
return self.values.sp_values
@@ -2001,7 +2139,7 @@ def copy(self, deep=True):
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
- fill_value=None, copy=False, fastpath=True):
+ fill_value=None, copy=False, fastpath=True, **kwargs):
""" return a new block """
if dtype is None:
dtype = self.dtype
@@ -2019,8 +2157,9 @@ def make_block_same_class(self, values, placement,
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
- return make_block(np.empty(values.shape, dtype=dtype),
- placement, fastpath=True,)
+ return self.make_block(np.empty(values.shape, dtype=dtype),
+ placement,
+ fastpath=True)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
@@ -2029,8 +2168,9 @@ def make_block_same_class(self, values, placement,
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
- return make_block(new_values, ndim=self.ndim,
- fastpath=fastpath, placement=placement)
+ return self.make_block(new_values,
+ fastpath=fastpath,
+ placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
@@ -2114,7 +2254,12 @@ def make_block(values, placement, klass=None, ndim=None,
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
- klass = DatetimeBlock
+ if hasattr(values,'tz'):
+ klass = DatetimeTZBlock
+ else:
+ klass = DatetimeBlock
+ elif is_datetimetz(values):
+ klass = DatetimeTZBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
@@ -2410,7 +2555,7 @@ def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
- if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
+ if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
@@ -2830,7 +2975,7 @@ def fast_xs(self, loc):
single block
"""
if len(self.blocks) == 1:
- return self.blocks[0].values[:, loc]
+ return self.blocks[0].iget((slice(None), loc))
items = self.items
@@ -2911,10 +3056,9 @@ def iget(self, i, fastpath=True):
Otherwise return as a ndarray
"""
-
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
- if not fastpath or block.is_sparse or values.ndim != 1:
+ if not fastpath or not block._box_to_block_values or values.ndim != 1:
return values
# fastpath shortcut for select a single-dim from a 2-dim BM
@@ -2984,18 +3128,10 @@ def set(self, item, value, check=False):
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
- value_is_sparse = isinstance(value, SparseArray)
- value_is_cat = is_categorical(value)
- value_is_nonconsolidatable = value_is_sparse or value_is_cat
-
- if value_is_sparse:
- # sparse
- assert self.ndim == 2
+ value_is_internal_type = is_internal_type(value)
- def value_getitem(placement):
- return value
- elif value_is_cat:
- # categorical
+ # categorical/sparse/datetimetz
+ if value_is_internal_type:
def value_getitem(placement):
return value
else:
@@ -3064,7 +3200,7 @@ def value_getitem(placement):
unfit_count = len(unfit_mgr_locs)
new_blocks = []
- if value_is_nonconsolidatable:
+ if value_is_internal_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
@@ -3487,7 +3623,7 @@ def convert(self, **kwargs):
@property
def dtype(self):
- return self._values.dtype
+ return self._block.dtype
@property
def array_dtype(self):
@@ -3509,9 +3645,11 @@ def get_dtypes(self):
def get_ftypes(self):
return np.array([self._block.ftype])
- @property
- def values(self):
- return self._values.view()
+ def external_values(self):
+ return self._block.external_values()
+
+ def internal_values(self):
+ return self._block.internal_values()
def get_values(self):
""" return a dense type view """
@@ -3519,7 +3657,7 @@ def get_values(self):
@property
def itemsize(self):
- return self._values.itemsize
+ return self._block.values.itemsize
@property
def _can_hold_na(self):
@@ -3586,6 +3724,7 @@ def create_block_manager_from_blocks(blocks, axes):
def create_block_manager_from_arrays(arrays, names, axes):
+
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
@@ -3605,6 +3744,7 @@ def form_blocks(arrays, names, axes):
object_items = []
sparse_items = []
datetime_items = []
+ datetime_tz_items = []
cat_items = []
extra_locs = []
@@ -3623,7 +3763,7 @@ def form_blocks(arrays, names, axes):
k = names[name_idx]
v = arrays[name_idx]
- if isinstance(v, (SparseArray, ABCSparseSeries)):
+ if is_sparse(v):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
@@ -3633,10 +3773,12 @@ def form_blocks(arrays, names, axes):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
- if hasattr(v, 'tz') and v.tz is not None:
- object_items.append((i, k, v))
+ if is_datetimetz(v):
+ datetime_tz_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
+ elif is_datetimetz(v):
+ datetime_tz_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
@@ -3669,6 +3811,14 @@ def form_blocks(arrays, names, axes):
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
+ if len(datetime_tz_items):
+ dttz_blocks = [ make_block(array,
+ klass=DatetimeTZBlock,
+ fastpath=True,
+ placement=[i],
+ ) for i, names, array in datetime_tz_items ]
+ blocks.extend(dttz_blocks)
+
if len(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
@@ -3757,7 +3907,7 @@ def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
- return x.values
+ return x._values
else:
return np.asarray(x)
@@ -3801,18 +3951,20 @@ def _lcd_dtype(l):
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
+ have_dt64_tz = len(counts[DatetimeTZBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
have_sparse = len(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
- has_non_numeric = have_dt64 or have_td64 or have_cat
+ has_non_numeric = have_dt64 or have_dt64_tz or have_td64 or have_cat
if (have_object or
- (have_bool and (have_numeric or have_dt64 or have_td64)) or
+ (have_bool and (have_numeric or have_dt64 or have_dt64_tz or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
+ have_dt64_tz or
have_td64):
return np.dtype(object)
elif have_bool:
@@ -4140,6 +4292,8 @@ def get_empty_dtype_and_na(join_units):
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
+ elif com.is_datetimetz(dtype):
+ upcast_cls = 'datetimetz'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
@@ -4174,6 +4328,8 @@ def get_empty_dtype_and_na(join_units):
return np.dtype(np.object_), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
+ elif 'datetimetz' in upcast_classes:
+ return np.dtype('M8[ns]'), tslib.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
@@ -4432,12 +4588,6 @@ def is_null(self):
return True
- @cache_readonly
- def needs_block_conversion(self):
- """ we might need to convert the joined values to a suitable block repr """
- block = self.block
- return block is not None and (block.is_sparse or block.is_categorical)
-
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
@@ -4462,11 +4612,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
return missing_arr
if not self.indexers:
- if self.block.is_categorical:
- # preserve the categoricals for validation in _concat_compat
- return self.block.values
- elif self.block.is_sparse:
- # preserve the sparse array for validation in _concat_compat
+ if not self.block._can_consolidate:
+ # preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 8e3dd3836855c..9b0d6e9db1106 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -9,6 +9,7 @@
import warnings
import numpy as np
import pandas as pd
+import datetime
from pandas import compat, lib, tslib
import pandas.index as _index
from pandas.util.decorators import Appender
@@ -21,8 +22,10 @@
_values_from_object, _maybe_match_name,
needs_i8_conversion, is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype, is_object_dtype,
- is_timedelta64_dtype, is_datetime64_dtype, is_bool_dtype)
+ is_timedelta64_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
+ is_bool_dtype)
from pandas.io.common import PerformanceWarning
+
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
@@ -268,50 +271,61 @@ class _TimeOp(object):
wrap_results = staticmethod(lambda x: x)
dtype = None
- def __init__(self, left, right, name):
- self.name = name
+ def __init__(self, left, right, name, na_op):
# need to make sure that we are aligning the data
if isinstance(left, pd.Series) and isinstance(right, pd.Series):
left, right = left.align(right,copy=False)
- self.left = left
- self.right = right
+ lvalues = self._convert_to_array(left, name=name)
+ rvalues = self._convert_to_array(right, name=name, other=lvalues)
- self.is_offset_lhs = self._is_offset(left)
- self.is_offset_rhs = self._is_offset(right)
+ self.name = name
+ self.na_op = na_op
- lvalues = self._convert_to_array(left, name=name)
- self.is_timedelta_lhs = is_timedelta64_dtype(left)
- self.is_datetime_lhs = is_datetime64_dtype(left)
+ # left
+ self.left = left
+ self.is_offset_lhs = self._is_offset(left)
+ self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
+ self.is_datetime64_lhs = is_datetime64_dtype(lvalues)
+ self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues)
+ self.is_datetime_lhs = self.is_datetime64_lhs or self.is_datetime64tz_lhs
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
- rvalues = self._convert_to_array(right, name=name, other=lvalues)
- self.is_datetime_rhs = is_datetime64_dtype(rvalues)
+ # right
+ self.right = right
+ self.is_offset_rhs = self._is_offset(right)
+ self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
+ self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
+ self.is_datetime_rhs = self.is_datetime64_rhs or self.is_datetime64tz_rhs
self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
- self._validate()
+ self._validate(lvalues, rvalues, name)
+ self.lvalues, self.rvalues = self._convert_for_datetime(lvalues, rvalues)
- self._convert_for_datetime(lvalues, rvalues)
-
- def _validate(self):
+ def _validate(self, lvalues, rvalues, name):
# timedelta and integer mul/div
- if (self.is_timedelta_lhs and self.is_integer_rhs) or\
- (self.is_integer_lhs and self.is_timedelta_rhs):
+ if (self.is_timedelta_lhs and self.is_integer_rhs) or (
+ self.is_integer_lhs and self.is_timedelta_rhs):
- if self.name not in ('__truediv__', '__div__', '__mul__'):
+ if name not in ('__div__', '__truediv__', '__mul__'):
raise TypeError("can only operate on a timedelta and an "
"integer for division, but the operator [%s]"
- "was passed" % self.name)
+ "was passed" % name)
# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:
- if self.name != '__sub__':
+
+ if name not in ('__sub__','__rsub__'):
raise TypeError("can only operate on a datetimes for"
" subtraction, but the operator [%s] was"
- " passed" % self.name)
+ " passed" % name)
+
+ # if tz's must be equal (same or None)
+ if getattr(lvalues,'tz',None) != getattr(rvalues,'tz',None):
+ raise ValueError("Incompatible tz's on datetime subtraction ops")
# 2 timedeltas
elif ((self.is_timedelta_lhs and
@@ -319,29 +333,29 @@ def _validate(self):
(self.is_timedelta_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs))):
- if self.name not in ('__div__', '__truediv__', '__add__',
- '__sub__'):
+ if name not in ('__div__', '__rdiv__', '__truediv__', '__rtruediv__',
+ '__add__', '__radd__', '__sub__', '__rsub__'):
raise TypeError("can only operate on a timedeltas for "
"addition, subtraction, and division, but the"
- " operator [%s] was passed" % self.name)
+ " operator [%s] was passed" % name)
# datetime and timedelta/DateOffset
elif (self.is_datetime_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)):
- if self.name not in ('__add__', '__sub__'):
+ if name not in ('__add__', '__radd__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of"
" a timedelta/DateOffset for addition and subtraction,"
" but the operator [%s] was passed" %
- self.name)
+ name)
elif ((self.is_timedelta_lhs or self.is_offset_lhs)
and self.is_datetime_rhs):
- if self.name != '__add__':
+ if name not in ('__add__', '__radd__'):
raise TypeError("can only operate on a timedelta/DateOffset and"
" a datetime for addition, but the operator"
- " [%s] was passed" % self.name)
+ " [%s] was passed" % name)
else:
raise TypeError('cannot operate on a series with out a rhs '
'of a series/ndarray of type datetime64[ns] '
@@ -351,8 +365,10 @@ def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
+ ovalues = values
if not is_list_like(values):
values = np.array([values])
+
inferred_type = lib.infer_dtype(values)
if inferred_type in ('datetime64', 'datetime', 'date', 'time'):
@@ -366,6 +382,13 @@ def _convert_to_array(self, values, name=None, other=None):
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
+ # datetime with tz
+ elif isinstance(ovalues, datetime.datetime) and hasattr(ovalues,'tz'):
+ values = pd.DatetimeIndex(values)
+ # datetime array with tz
+ elif com.is_datetimetz(values):
+ if isinstance(values, pd.Series):
+ values = values._values
elif not (isinstance(values, (np.ndarray, pd.Series)) and
is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
@@ -400,19 +423,25 @@ def _convert_to_array(self, values, name=None, other=None):
def _convert_for_datetime(self, lvalues, rvalues):
from pandas.tseries.timedeltas import to_timedelta
- mask = None
+
+ mask = isnull(lvalues) | isnull(rvalues)
+
# datetimes require views
if self.is_datetime_lhs or self.is_datetime_rhs:
+
# datetime subtraction means timedelta
if self.is_datetime_lhs and self.is_datetime_rhs:
self.dtype = 'timedelta64[ns]'
+ elif self.is_datetime64tz_lhs:
+ self.dtype = lvalues.dtype
+ elif self.is_datetime64tz_rhs:
+ self.dtype = rvalues.dtype
else:
self.dtype = 'datetime64[ns]'
- mask = isnull(lvalues) | isnull(rvalues)
# if adding single offset try vectorized path
# in DatetimeIndex; otherwise elementwise apply
- if self.is_offset_lhs:
+ def _offset(lvalues, rvalues):
if len(lvalues) == 1:
rvalues = pd.DatetimeIndex(rvalues)
lvalues = lvalues[0]
@@ -420,22 +449,31 @@ def _convert_for_datetime(self, lvalues, rvalues):
warnings.warn("Adding/subtracting array of DateOffsets to Series not vectorized",
PerformanceWarning)
rvalues = rvalues.astype('O')
+
+ # pass thru on the na_op
+ self.na_op = lambda x, y: getattr(x,self.name)(y)
+ return lvalues, rvalues
+
+
+ if self.is_offset_lhs:
+ lvalues, rvalues = _offset(lvalues, rvalues)
elif self.is_offset_rhs:
- if len(rvalues) == 1:
- lvalues = pd.DatetimeIndex(lvalues)
- rvalues = rvalues[0]
- else:
- warnings.warn("Adding/subtracting array of DateOffsets to Series not vectorized",
- PerformanceWarning)
- lvalues = lvalues.astype('O')
+ rvalues, lvalues = _offset(rvalues, lvalues)
else:
+
+ # with tz, convert to UTC
+ if self.is_datetime64tz_lhs:
+ lvalues = lvalues.tz_localize(None)
+ if self.is_datetime64tz_rhs:
+ rvalues = rvalues.tz_localize(None)
+
lvalues = lvalues.view(np.int64)
rvalues = rvalues.view(np.int64)
# otherwise it's a timedelta
else:
+
self.dtype = 'timedelta64[ns]'
- mask = isnull(lvalues) | isnull(rvalues)
# convert Tick DateOffset to underlying delta
if self.is_offset_lhs:
@@ -458,15 +496,20 @@ def _convert_for_datetime(self, lvalues, rvalues):
rvalues = rvalues.astype(np.float64)
# if we need to mask the results
- if mask is not None:
- if mask.any():
- def f(x):
+ if mask.any():
+ def f(x):
+
+ # datetime64[ns]/timedelta64[ns] masking
+ try:
x = np.array(x, dtype=self.dtype)
- np.putmask(x, mask, self.fill_value)
- return x
- self.wrap_results = f
- self.lvalues = lvalues
- self.rvalues = rvalues
+ except TypeError:
+ x = np.array(x, dtype='datetime64[ns]')
+
+ np.putmask(x, mask, self.fill_value)
+ return x
+ self.wrap_results = f
+
+ return lvalues, rvalues
def _is_offset(self, arr_or_obj):
@@ -479,7 +522,7 @@ def _is_offset(self, arr_or_obj):
return False
@classmethod
- def maybe_convert_for_time_op(cls, left, right, name):
+ def maybe_convert_for_time_op(cls, left, right, name, na_op):
"""
if ``left`` and ``right`` are appropriate for datetime arithmetic with
operation ``name``, processes them and returns a ``_TimeOp`` object
@@ -490,15 +533,12 @@ def maybe_convert_for_time_op(cls, left, right, name):
"""
# decide if we can do it
is_timedelta_lhs = is_timedelta64_dtype(left)
- is_datetime_lhs = is_datetime64_dtype(left)
+ is_datetime_lhs = is_datetime64_dtype(left) or is_datetime64tz_dtype(left)
+
if not (is_datetime_lhs or is_timedelta_lhs):
return None
- # rops are allowed. No need for special checks, just strip off
- # r part.
- if name.startswith('__r'):
- name = "__" + name[3:]
- return cls(left, right, name)
+ return cls(left, right, name, na_op)
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None,
@@ -529,12 +569,12 @@ def na_op(x, y):
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
- def wrapper(left, right, name=name):
+ def wrapper(left, right, name=name, na_op=na_op):
if isinstance(right, pd.DataFrame):
return NotImplemented
- time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name)
+ time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name, na_op)
if time_converted is None:
lvalues, rvalues = left, right
@@ -547,6 +587,7 @@ def wrapper(left, right, name=name):
lvalues, rvalues = time_converted.lvalues, time_converted.rvalues
dtype = time_converted.dtype
wrap_results = time_converted.wrap_results
+ na_op = time_converted.na_op
if isinstance(rvalues, pd.Series):
rindex = getattr(rvalues,'index',rvalues)
@@ -616,7 +657,10 @@ def na_op(x, y):
# numpy does not like comparisons vs None
if isscalar(y) and isnull(y):
- y = np.nan
+ if name == '__ne__':
+ return np.ones(len(x), dtype=bool)
+ else:
+ return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
@@ -642,7 +686,7 @@ def na_op(x, y):
result = op(x, y)
if mask is not None and mask.any():
- result[mask] = False
+ result[mask] = masker
return result
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 116ae9f31b5a4..f44c235978b7a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -17,10 +17,12 @@
_default_index, _maybe_upcast,
_asarray_tuplesafe, _infer_dtype_from_scalar,
is_list_like, _values_from_object,
+ is_categorical_dtype, is_datetime64tz_dtype,
+ needs_i8_conversion, i8_boxer,
_possibly_cast_to_datetime, _possibly_castable,
_possibly_convert_platform, _try_sort,
- is_int64_dtype,
- ABCSparseArray, _maybe_match_name,
+ is_int64_dtype, is_internal_type, is_datetimetz,
+ _maybe_match_name, ABCSparseArray,
_coerce_to_dtype, SettingWithCopyError,
_maybe_box_datetimelike, ABCDataFrame,
_dict_compat)
@@ -308,19 +310,43 @@ def ftypes(self):
@property
def values(self):
"""
- Return Series as ndarray
+ Return Series as ndarray or ndarray-like
+ depending on the dtype
Returns
-------
- arr : numpy.ndarray
+ arr : numpy.ndarray or ndarray-like
+
+ Examples
+ --------
+ >>> pd.Series([1, 2, 3]).values
+ array([1, 2, 3])
+
+ >>> pd.Series(list('aabc')).values
+ array(['a', 'a', 'b', 'c'], dtype=object)
+
+ >>> pd.Series(list('aabc')).astype('category').values
+ [a, a, b, c]
+ Categories (3, object): [a, b, c]
+
+ # this is converted to UTC
+ >>> pd.Series(pd.date_range('20130101',periods=3,tz='US/Eastern')).values
+ array(['2013-01-01T00:00:00.000000000-0500',
+ '2013-01-02T00:00:00.000000000-0500',
+ '2013-01-03T00:00:00.000000000-0500'], dtype='datetime64[ns]')
+
"""
- return self._data.values
+ return self._data.external_values()
+
+ @property
+ def _values(self):
+ """ return the internal repr of this data """
+ return self._data.internal_values()
def get_values(self):
""" same as values (but handles sparseness conversions); is a view """
return self._data.get_values()
-
# ops
def ravel(self, order='C'):
"""
@@ -330,7 +356,7 @@ def ravel(self, order='C'):
--------
numpy.ndarray.ravel
"""
- return self.values.ravel(order=order)
+ return self._values.ravel(order=order)
def compress(self, condition, axis=0, out=None, **kwargs):
"""
@@ -366,7 +392,7 @@ def nonzero(self):
--------
numpy.nonzero
"""
- return self.values.nonzero()
+ return self._values.nonzero()
def put(self, *args, **kwargs):
"""
@@ -376,7 +402,7 @@ def put(self, *args, **kwargs):
--------
numpy.ndarray.put
"""
- self.values.put(*args, **kwargs)
+ self._values.put(*args, **kwargs)
def __len__(self):
"""
@@ -385,7 +411,7 @@ def __len__(self):
return len(self._data)
def view(self, dtype=None):
- return self._constructor(self.values.view(dtype),
+ return self._constructor(self._values.view(dtype),
index=self.index).__finalize__(self)
def __array__(self, result=None):
@@ -407,7 +433,7 @@ def __array_prepare__(self, result, context=None):
"""
# nice error message for non-ufunc types
- if context is not None and not isinstance(self.values, np.ndarray):
+ if context is not None and not isinstance(self._values, np.ndarray):
obj = context[1][0]
raise TypeError("{obj} with dtype {dtype} cannot perform "
"the numpy op {op}".format(obj=type(obj).__name__,
@@ -489,7 +515,7 @@ def _ixs(self, i, axis=0):
try:
# dispatch to the values if we need
- values = self.values
+ values = self._values
if isinstance(values, np.ndarray):
return _index.get_value_at(values, i)
else:
@@ -619,7 +645,7 @@ def _get_values_tuple(self, key):
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
- return self._constructor(self.values[indexer],
+ return self._constructor(self._values[indexer],
index=new_index).__finalize__(self)
def _get_values(self, indexer):
@@ -627,7 +653,7 @@ def _get_values(self, indexer):
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
- return self.values[indexer]
+ return self._values[indexer]
def __setitem__(self, key, value):
@@ -638,7 +664,7 @@ def setitem(key, value):
except (SettingWithCopyError):
raise
except (KeyError, ValueError):
- values = self.values
+ values = self._values
if (com.is_integer(key)
and not self.index.inferred_type == 'integer'):
@@ -655,7 +681,7 @@ def setitem(key, value):
value = tslib.iNaT
try:
- self.index._engine.set_value(self.values, key, value)
+ self.index._engine.set_value(self._values, key, value)
return
except (TypeError):
pass
@@ -689,7 +715,7 @@ def setitem(key, value):
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
- values = self.values
+ values = self._values
try:
self.index._engine.set_value(values, key, value)
return
@@ -744,7 +770,7 @@ def _set_labels(self, key, value):
def _set_values(self, key, value):
if isinstance(key, Series):
- key = key.values
+ key = key._values
self._data = self._data.setitem(indexer=key, value=value)
self._maybe_update_cacher()
@@ -760,7 +786,7 @@ def repeat(self, reps):
numpy.ndarray.repeat
"""
new_index = self.index.repeat(reps)
- new_values = self.values.repeat(reps)
+ new_values = self._values.repeat(reps)
return self._constructor(new_values,
index=new_index).__finalize__(self)
@@ -783,7 +809,7 @@ def reshape(self, *args, **kwargs):
# XXX ignoring the "order" keyword.
return self
- return self.values.reshape(shape, **kwargs)
+ return self._values.reshape(shape, **kwargs)
def iget_value(self, i, axis=0):
"""
@@ -824,8 +850,8 @@ def get_value(self, label, takeable=False):
value : scalar value
"""
if takeable is True:
- return _maybe_box_datetimelike(self.values[label])
- return self.index.get_value(self.values, label)
+ return _maybe_box_datetimelike(self._values[label])
+ return self.index.get_value(self._values, label)
def set_value(self, label, value, takeable=False):
"""
@@ -849,9 +875,9 @@ def set_value(self, label, value, takeable=False):
"""
try:
if takeable:
- self.values[label] = value
+ self._values[label] = value
else:
- self.index._engine.set_value(self.values, label, value)
+ self.index._engine.set_value(self._values, label, value)
return self
except KeyError:
@@ -894,7 +920,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
- return self._constructor(self.values.copy(),
+ return self._constructor(self._values.copy(),
index=new_index).__finalize__(self)
elif inplace:
raise TypeError('Cannot reset_index inplace on a Series '
@@ -936,7 +962,7 @@ def _repr_footer(self):
return u('%s%sLength: %d') % (freqstr, namestr, len(self))
# Categorical
- if com.is_categorical_dtype(self.dtype):
+ if is_categorical_dtype(self.dtype):
level_info = self.values._repr_categories_info()
return u('%sLength: %d, dtype: %s\n%s') % (namestr,
len(self),
@@ -1021,14 +1047,13 @@ def _get_repr(
return result
def __iter__(self):
- if com.is_categorical_dtype(self.dtype):
- return iter(self.values)
- elif np.issubdtype(self.dtype, np.datetime64):
- return (lib.Timestamp(x) for x in self.values)
- elif np.issubdtype(self.dtype, np.timedelta64):
- return (lib.Timedelta(x) for x in self.values)
+ """ provide iteration over the values of the Series
+ box values if necessary """
+ if needs_i8_conversion(self.dtype):
+ boxer = i8_boxer(self)
+ return (boxer(x) for x in self._values)
else:
- return iter(self.values)
+ return iter(self._values)
def iteritems(self):
"""
@@ -1118,7 +1143,7 @@ def count(self, level=None):
nobs : int or Series (if level specified)
"""
if level is not None:
- mask = notnull(self.values)
+ mask = notnull(self._values)
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
@@ -1457,8 +1482,8 @@ def searchsorted(self, v, side='left', sorter=None):
if sorter is not None:
sorter = com._ensure_platform_int(sorter)
- return self.values.searchsorted(Series(v).values, side=side,
- sorter=sorter)
+ return self._values.searchsorted(Series(v)._values, side=side,
+ sorter=sorter)
#------------------------------------------------------------------------------
# Combination
@@ -1564,7 +1589,7 @@ def combine(self, other, func, fill_value=nan):
new_values[i] = func(lv, rv)
else:
new_index = self.index
- new_values = func(self.values, other)
+ new_values = func(self._values, other)
new_name = self.name
return self._constructor(new_values, index=new_index, name=new_name)
@@ -1585,7 +1610,7 @@ def combine_first(self, other):
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
name = _maybe_match_name(self, other)
- rs_vals = com._where_compat(isnull(this), other.values, this.values)
+ rs_vals = com._where_compat(isnull(this), other._values, this._values)
return self._constructor(rs_vals, index=new_index).__finalize__(self)
def update(self, other):
@@ -1627,7 +1652,7 @@ def _try_kind_sort(arr):
# uses the argsort default quicksort
return arr.argsort(kind='quicksort')
- arr = self.values
+ arr = self._values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isnull(arr)
@@ -1676,7 +1701,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
new_index, indexer = index.sort_values(return_indexer=True,
ascending=ascending)
- new_values = self.values.take(indexer)
+ new_values = self._values.take(indexer)
return self._constructor(new_values,
index=new_index).__finalize__(self)
@@ -1772,7 +1797,7 @@ def argsort(self, axis=0, kind='quicksort', order=None):
--------
numpy.ndarray.argsort
"""
- values = self.values
+ values = self._values
mask = isnull(values)
if mask.any():
@@ -1813,7 +1838,7 @@ def rank(self, method='average', na_option='keep', ascending=True,
ranks : Series
"""
from pandas.core.algorithms import rank
- ranks = rank(self.values, method=method, na_option=na_option,
+ ranks = rank(self._values, method=method, na_option=na_option,
ascending=ascending, pct=pct)
return self._constructor(ranks, index=self.index).__finalize__(self)
@@ -1927,7 +1952,7 @@ def swaplevel(self, i, j, copy=True):
swapped : Series
"""
new_index = self.index.swaplevel(i, j)
- return self._constructor(self.values, index=new_index,
+ return self._constructor(self._values, index=new_index,
copy=copy).__finalize__(self)
def reorder_levels(self, order):
@@ -2023,7 +2048,7 @@ def map(self, arg, na_action=None):
y : Series
same index as caller
"""
- values = self.values
+ values = self._values
if com.is_datetime64_dtype(values.dtype):
values = lib.map_infer(values, lib.Timestamp)
@@ -2040,7 +2065,7 @@ def map_f(values, f):
arg = self._constructor(arg, index=arg.keys())
indexer = arg.index.get_indexer(values)
- new_values = com.take_1d(arg.values, indexer)
+ new_values = com.take_1d(arg._values, indexer)
return self._constructor(new_values,
index=self.index).__finalize__(self)
else:
@@ -2176,7 +2201,7 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
otherwise delegate to the object
"""
- delegate = self.values
+ delegate = self._values
if isinstance(delegate, np.ndarray):
# Validate that 'axis' is consistent with Series's single axis.
self._get_axis_number(axis)
@@ -2200,12 +2225,12 @@ def _maybe_box(self, func, dropna=False):
"""
if dropna:
- values = self.dropna().values
+ values = self.dropna()._values
else:
- values = self.values
+ values = self._values
- if com.needs_i8_conversion(self):
- boxer = com.i8_boxer(self)
+ if needs_i8_conversion(self):
+ boxer = i8_boxer(self)
if len(values) == 0:
return boxer(tslib.iNaT)
@@ -2303,7 +2328,7 @@ def take(self, indices, axis=0, convert=True, is_copy=False):
indices = com._ensure_platform_int(indices)
new_index = self.index.take(indices)
- new_values = self.values.take(indices)
+ new_values = self._values.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
@@ -2363,12 +2388,12 @@ def isin(self, values):
f = lib.ismember
if com.is_datetime64_dtype(self):
from pandas.tseries.tools import to_datetime
- values = Series(to_datetime(values)).values.view('i8')
+ values = Series(to_datetime(values))._values.view('i8')
comps = comps.view('i8')
f = lib.ismember_int64
elif com.is_timedelta64_dtype(self):
from pandas.tseries.timedeltas import to_timedelta
- values = Series(to_timedelta(values)).values.view('i8')
+ values = Series(to_timedelta(values))._values.view('i8')
comps = comps.view('i8')
f = lib.ismember_int64
elif is_int64_dtype(self):
@@ -2541,7 +2566,7 @@ def first_valid_index(self):
if len(self) == 0:
return None
- mask = isnull(self.values)
+ mask = isnull(self._values)
i = mask.argmin()
if mask[i]:
return None
@@ -2555,7 +2580,7 @@ def last_valid_index(self):
if len(self) == 0:
return None
- mask = isnull(self.values[::-1])
+ mask = isnull(self._values[::-1])
i = mask.argmin()
if mask[i]:
return None
@@ -2587,7 +2612,7 @@ def asof(self, where):
if isinstance(where, compat.string_types):
where = datetools.to_datetime(where)
- values = self.values
+ values = self._values
if not hasattr(where, '__iter__'):
start = self.index[0]
@@ -2627,7 +2652,7 @@ def to_timestamp(self, freq=None, how='start', copy=True):
-------
ts : TimeSeries with DatetimeIndex
"""
- new_values = self.values
+ new_values = self._values
if copy:
new_values = new_values.copy()
@@ -2648,7 +2673,7 @@ def to_period(self, freq=None, copy=True):
-------
ts : TimeSeries with PeriodIndex
"""
- new_values = self.values
+ new_values = self._values
if copy:
new_values = new_values.copy()
@@ -2672,7 +2697,7 @@ def _make_dt_accessor(self):
# Categorical methods
def _make_cat_accessor(self):
- if not com.is_categorical_dtype(self.dtype):
+ if not is_categorical_dtype(self.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
return CategoricalAccessor(self.values, self.index)
@@ -2713,6 +2738,9 @@ def remove_na(series):
def _sanitize_index(data, index, copy=False):
""" sanitize an index type to return an ndarray of the underlying, pass thru a non-Index """
+ if index is None:
+ return data
+
if len(data) != len(index):
raise ValueError('Length of values does not match length of '
'index')
@@ -2754,10 +2782,11 @@ def _try_cast(arr, take_fast_path):
return arr
try:
- arr = _possibly_cast_to_datetime(arr, dtype)
- subarr = np.array(arr, dtype=dtype, copy=copy)
+ subarr = _possibly_cast_to_datetime(arr, dtype)
+ if not is_internal_type(subarr):
+ subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
- if com.is_categorical_dtype(dtype):
+ if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
@@ -2778,15 +2807,7 @@ def _try_cast(arr, take_fast_path):
elif copy:
subarr = data.copy()
else:
- if (com.is_datetime64_dtype(data.dtype) and
- not com.is_datetime64_dtype(dtype)):
- if dtype == object:
- ints = np.asarray(data).view('i8')
- subarr = tslib.ints_to_pydatetime(ints)
- elif raise_cast_failure:
- raise TypeError('Cannot cast datetime64 to %s' % dtype)
- else:
- subarr = _try_cast(data, True)
+ subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path them)
@@ -2823,6 +2844,19 @@ def _try_cast(arr, take_fast_path):
else:
subarr = _try_cast(data, False)
+ def create_from_value(value, index, dtype):
+ # return a new empty value suitable for the dtype
+
+ if is_datetimetz(dtype):
+ subarr = DatetimeIndex([value]*len(index))
+ else:
+ if not isinstance(dtype, (np.dtype, type(np.dtype))):
+ dtype = dtype.dtype
+ subarr = np.empty(len(index), dtype=dtype)
+ subarr.fill(value)
+
+ return subarr
+
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
@@ -2837,8 +2871,7 @@ def _try_cast(arr, take_fast_path):
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
- subarr = np.empty(len(index), dtype=dtype)
- subarr.fill(value)
+ subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
@@ -2849,9 +2882,7 @@ def _try_cast(arr, take_fast_path):
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
- value = subarr[0]
- subarr = np.empty(len(index), dtype=subarr.dtype)
- subarr.fill(value)
+ subarr = create_from_value(subarr[0], index, subarr)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b5a3577b36d4c..5fdc0ce86a0b4 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1790,6 +1790,8 @@ def set_atom(self, block, block_items, existing_col, min_itemsize,
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items, info=info)
+ elif block.is_datetimetz:
+ return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
@@ -1804,50 +1806,14 @@ def set_atom(self, block, block_items, existing_col, min_itemsize,
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
- rvalues = block.values.ravel()
- if getattr(rvalues[0], 'tzinfo', None) is not None:
+ # after 8260
+ # this only would be hit for a mutli-timezone dtype
+ # which is an error
- # if this block has more than one timezone, raise
- try:
- # pytz timezones: compare on zone name (to avoid issues with DST being a different zone to STD).
- zones = [r.tzinfo.zone for r in rvalues]
- except:
- # dateutil timezones: compare on ==
- zones = [r.tzinfo for r in rvalues]
- if any(zones[0] != zone_i for zone_i in zones[1:]):
- raise TypeError(
- "too many timezones in this block, create separate "
- "data columns"
- )
- else:
- if len(set(zones)) != 1:
- raise TypeError(
- "too many timezones in this block, create separate "
- "data columns"
- )
-
- # convert this column to datetime64[ns] utc, and save the tz
- index = DatetimeIndex(rvalues)
- tz = getattr(index, 'tz', None)
- if tz is None:
- raise TypeError(
- "invalid timezone specification")
-
- values = index.tz_convert('UTC').values.view('i8')
-
- # store a converted timezone
- zone = tslib.get_timezone(index.tz)
- if zone is None:
- zone = tslib.tot_seconds(index.tz.utcoffset())
- self.tz = zone
-
- self.update_info(info)
- self.set_atom_datetime64(
- block, values.reshape(block.values.shape))
-
- else:
- raise TypeError(
- "[datetime] is not implemented as a table column")
+ raise TypeError(
+ "too many timezones in this block, create separate "
+ "data columns"
+ )
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
@@ -1976,6 +1942,25 @@ def set_atom_datetime64(self, block, values=None):
values = block.values.view('i8')
self.set_data(values, 'datetime64')
+ def set_atom_datetime64tz(self, block, info, values=None):
+
+ if values is None:
+ values = block.values
+
+ # convert this column to datetime64[ns] utc, and save the tz
+ values = values.tz_convert('UTC').values.view('i8').reshape(block.shape)
+
+ # store a converted timezone
+ zone = tslib.get_timezone(block.values.tz)
+ if zone is None:
+ zone = tslib.tot_seconds(block.values.tz.utcoffset())
+ self.tz = zone
+ self.update_info(info)
+
+ self.kind = 'datetime64'
+ self.typ = self.get_atom_datetime64(block)
+ self.set_data(values, 'datetime64')
+
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
@@ -2037,9 +2022,8 @@ def convert(self, values, nan_rep, encoding):
# we stored as utc, so just set the tz
index = DatetimeIndex(
- self.data.ravel(), tz='UTC').tz_convert(self.tz)
- self.data = np.asarray(
- index.tolist(), dtype=object).reshape(self.data.shape)
+ self.data.ravel(), tz='UTC').tz_convert(tslib.maybe_get_tz(self.tz))
+ self.data = index
else:
self.data = np.asarray(self.data, dtype='M8[ns]')
@@ -4048,7 +4032,7 @@ def read(self, where=None, columns=None, **kwargs):
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
- if values.ndim == 1:
+ if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
block = make_block(values, placement=np.arange(len(cols_)))
diff --git a/pandas/io/tests/data/legacy_hdf/datetimetz_object.h5 b/pandas/io/tests/data/legacy_hdf/datetimetz_object.h5
new file mode 100644
index 0000000000000..8cb4eda470398
Binary files /dev/null and b/pandas/io/tests/data/legacy_hdf/datetimetz_object.h5 differ
diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack
index 000879f4cb2c2..ed606295b0830 100644
Binary files a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack differ
diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_3.4.3.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_3.4.3.msgpack
new file mode 100644
index 0000000000000..7a933b3a96dbb
Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_3.4.3.msgpack differ
diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle
index d45936baa1e00..d279403b3c765 100644
Binary files a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle differ
diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_3.4.3.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_3.4.3.pickle
new file mode 100644
index 0000000000000..e480d5ac21729
Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_3.4.3.pickle differ
diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py
index 0ca5ced1b8d1a..91d0333b3407f 100644
--- a/pandas/io/tests/generate_legacy_storage_files.py
+++ b/pandas/io/tests/generate_legacy_storage_files.py
@@ -83,7 +83,9 @@ def create_data():
index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
names=['one', 'two'])),
dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']),
- cat=Series(Categorical(['foo', 'bar', 'baz'])))
+ cat=Series(Categorical(['foo', 'bar', 'baz'])),
+ dt=Series(date_range('20130101',periods=5)),
+ dt_tz=Series(date_range('20130101',periods=5,tz='US/Eastern')))
if LooseVersion(pandas.__version__) >= '0.17.0':
series['period'] = Series([Period('2000Q1')] * 5)
@@ -101,7 +103,9 @@ def create_data():
cat_onecol=DataFrame(dict(A=Categorical(['foo', 'bar']))),
cat_and_float=DataFrame(dict(A=Categorical(['foo', 'bar', 'baz']),
B=np.arange(3).astype(np.int64))),
- mixed_dup=mixed_dup_df)
+ mixed_dup=mixed_dup_df,
+ dt_mixed_tzs=DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5)),
+ )
mixed_dup_panel = Panel(dict(ItemA=frame['float'], ItemB=frame['int']))
mixed_dup_panel.items = ['ItemA', 'ItemA']
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 1267821086d61..894b699281c80 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -45,7 +45,6 @@ def check_arbitrary(a, b):
else:
assert(a == b)
-
class TestPackers(tm.TestCase):
def setUp(self):
@@ -575,7 +574,7 @@ def check_min_structure(self, data):
for kind in v:
assert kind in data[typ], '"{0}" not found in data["{1}"]'.format(kind, typ)
- def compare(self, vf):
+ def compare(self, vf, version):
data = read_msgpack(vf)
self.check_min_structure(data)
for typ, dv in data.items():
@@ -586,17 +585,42 @@ def compare(self, vf):
expected = self.data[typ][dt]
except KeyError:
continue
- check_arbitrary(result, expected)
+
+ # use a specific comparator
+ # if available
+ comparator = getattr(self,"compare_{typ}_{dt}".format(typ=typ,dt=dt), None)
+ if comparator is not None:
+ comparator(result, expected, typ, version)
+ else:
+ check_arbitrary(result, expected)
return data
+ def compare_series_dt_tz(self, result, expected, typ, version):
+ # 8260
+ # dtype is object < 0.17.0
+ if LooseVersion(version) < '0.17.0':
+ expected = expected.astype(object)
+ tm.assert_series_equal(result, expected)
+ else:
+ tm.assert_series_equal(result, expected)
+
+ def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
+ # 8260
+ # dtype is object < 0.17.0
+ if LooseVersion(version) < '0.17.0':
+ expected = expected.astype(object)
+ tm.assert_frame_equal(result, expected)
+ else:
+ tm.assert_frame_equal(result, expected)
+
def read_msgpacks(self, version):
pth = tm.get_data_path('legacy_msgpack/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
- self.compare(vf)
+ self.compare(vf, version)
n += 1
assert n > 0, 'Msgpack files are not tested'
diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py
index 1ade6ac0f8068..2a4e429e28580 100644
--- a/pandas/io/tests/test_pickle.py
+++ b/pandas/io/tests/test_pickle.py
@@ -8,6 +8,8 @@
import nose
import os
+from distutils.version import LooseVersion
+
import numpy as np
import pandas.util.testing as tm
import pandas as pd
@@ -41,7 +43,7 @@ def setUp(self):
self.data = create_pickle_data()
self.path = u('__%s__.pickle' % tm.rands(10))
- def compare_element(self, typ, result, expected):
+ def compare_element(self, result, expected, typ, version=None):
if isinstance(expected,Index):
tm.assert_index_equal(expected, result)
return
@@ -53,7 +55,7 @@ def compare_element(self, typ, result, expected):
comparator = getattr(tm,"assert_%s_equal" % typ,tm.assert_almost_equal)
comparator(result,expected)
- def compare(self, vf):
+ def compare(self, vf, version):
# py3 compat when reading py2 pickle
try:
@@ -72,9 +74,30 @@ def compare(self, vf):
except (KeyError):
continue
- self.compare_element(typ, result, expected)
+ # use a specific comparator
+ # if available
+ comparator = getattr(self,"compare_{typ}_{dt}".format(typ=typ,dt=dt), self.compare_element)
+ comparator(result, expected, typ, version)
return data
+ def compare_series_dt_tz(self, result, expected, typ, version):
+ # 8260
+ # dtype is object < 0.17.0
+ if LooseVersion(version) < '0.17.0':
+ expected = expected.astype(object)
+ tm.assert_series_equal(result, expected)
+ else:
+ tm.assert_series_equal(result, expected)
+
+ def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
+ # 8260
+ # dtype is object < 0.17.0
+ if LooseVersion(version) < '0.17.0':
+ expected = expected.astype(object)
+ tm.assert_frame_equal(result, expected)
+ else:
+ tm.assert_frame_equal(result, expected)
+
def read_pickles(self, version):
if not is_little_endian():
raise nose.SkipTest("known failure on non-little endian")
@@ -83,7 +106,7 @@ def read_pickles(self, version):
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
- data = self.compare(vf)
+ data = self.compare(vf, version)
if data is None:
continue
@@ -150,14 +173,14 @@ def python_unpickler(path):
# test reading with each unpickler
result = pd.read_pickle(path)
- self.compare_element(typ, result, expected)
+ self.compare_element(result, expected, typ)
if c_unpickler is not None:
result = c_unpickler(path)
- self.compare_element(typ, result, expected)
+ self.compare_element(result, expected, typ)
result = python_unpickler(path)
- self.compare_element(typ, result, expected)
+ self.compare_element(result, expected, typ)
def _validate_timeseries(self, pickled, current):
# GH 7748
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index b8536eeaddebb..5eef48c51d070 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -161,7 +161,7 @@ def tearDown(self):
pass
-class TestHDFStore(Base):
+class TestHDFStore(Base, tm.TestCase):
def test_factory_fun(self):
path = create_tempfile(self.path)
@@ -1980,73 +1980,6 @@ def test_unimplemented_dtypes_table_columns(self):
# this fails because we have a date in the object block......
self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
- def test_append_with_timezones_pytz(self):
-
- from datetime import timedelta
-
- def compare(a,b):
- tm.assert_frame_equal(a,b)
-
- # compare the zones on each element
- for c in a.columns:
- for i in a.index:
- a_e = a[c][i]
- b_e = b[c][i]
- if not (a_e == b_e and a_e.tz == b_e.tz):
- raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e))
-
- # as columns
- with ensure_clean_store(self.path) as store:
-
- _maybe_remove(store, 'df_tz')
- df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
- store.append('df_tz',df,data_columns=['A'])
- result = store['df_tz']
- compare(result,df)
- assert_frame_equal(result,df)
-
- # select with tz aware
- compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
-
- _maybe_remove(store, 'df_tz')
- # ensure we include dates in DST and STD time here.
- df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130603',tz='US/Eastern')),index=range(5))
- store.append('df_tz',df)
- result = store['df_tz']
- compare(result,df)
- assert_frame_equal(result,df)
-
- _maybe_remove(store, 'df_tz')
- df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
- self.assertRaises(TypeError, store.append, 'df_tz', df)
-
- # this is ok
- _maybe_remove(store, 'df_tz')
- store.append('df_tz',df,data_columns=['A','B'])
- result = store['df_tz']
- compare(result,df)
- assert_frame_equal(result,df)
-
- # can't append with diff timezone
- df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
- self.assertRaises(ValueError, store.append, 'df_tz', df)
-
- # as index
- with ensure_clean_store(self.path) as store:
-
- # GH 4098 example
- df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
-
- _maybe_remove(store, 'df')
- store.put('df',df)
- result = store.select('df')
- assert_frame_equal(result,df)
-
- _maybe_remove(store, 'df')
- store.append('df',df)
- result = store.select('df')
- assert_frame_equal(result,df)
-
def test_calendar_roundtrip_issue(self):
# 8591
@@ -2069,128 +2002,6 @@ def test_calendar_roundtrip_issue(self):
result = store.select('table')
assert_series_equal(result, s)
- def test_append_with_timezones_dateutil(self):
-
- from datetime import timedelta
- tm._skip_if_no_dateutil()
-
- # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues.
- from pandas.tslib import maybe_get_tz
- gettz = lambda x: maybe_get_tz('dateutil/' + x)
-
- def compare(a, b):
- tm.assert_frame_equal(a, b)
-
- # compare the zones on each element
- for c in a.columns:
- for i in a.index:
- a_e = a[c][i]
- b_e = b[c][i]
- if not (a_e == b_e and a_e.tz == b_e.tz):
- raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e, b_e))
-
- # as columns
- with ensure_clean_store(self.path) as store:
-
- _maybe_remove(store, 'df_tz')
- df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ]))
- store.append('df_tz', df, data_columns=['A'])
- result = store['df_tz']
- compare(result, df)
- assert_frame_equal(result, df)
-
- # select with tz aware
- compare(store.select('df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]])
-
- _maybe_remove(store, 'df_tz')
- # ensure we include dates in DST and STD time here.
- df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130603', tz=gettz('US/Eastern'))), index=range(5))
- store.append('df_tz', df)
- result = store['df_tz']
- compare(result, df)
- assert_frame_equal(result, df)
-
- _maybe_remove(store, 'df_tz')
- df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('EET'))), index=range(5))
- self.assertRaises(TypeError, store.append, 'df_tz', df)
-
- # this is ok
- _maybe_remove(store, 'df_tz')
- store.append('df_tz', df, data_columns=['A', 'B'])
- result = store['df_tz']
- compare(result, df)
- assert_frame_equal(result, df)
-
- # can't append with diff timezone
- df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('CET'))), index=range(5))
- self.assertRaises(ValueError, store.append, 'df_tz', df)
-
- # as index
- with ensure_clean_store(self.path) as store:
-
- # GH 4098 example
- df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
-
- _maybe_remove(store, 'df')
- store.put('df', df)
- result = store.select('df')
- assert_frame_equal(result, df)
-
- _maybe_remove(store, 'df')
- store.append('df', df)
- result = store.select('df')
- assert_frame_equal(result, df)
-
- def test_store_timezone(self):
- # GH2852
- # issue storing datetime.date with a timezone as it resets when read back in a new timezone
-
- # timezone setting not supported on windows
- tm._skip_if_windows()
-
- import datetime
- import time
- import os
-
- # original method
- with ensure_clean_store(self.path) as store:
-
- today = datetime.date(2013,9,10)
- df = DataFrame([1,2,3], index = [today, today, today])
- store['obj1'] = df
- result = store['obj1']
- assert_frame_equal(result, df)
-
- # with tz setting
- orig_tz = os.environ.get('TZ')
-
- def setTZ(tz):
- if tz is None:
- try:
- del os.environ['TZ']
- except:
- pass
- else:
- os.environ['TZ']=tz
- time.tzset()
-
- try:
-
- with ensure_clean_store(self.path) as store:
-
- setTZ('EST5EDT')
- today = datetime.date(2013,9,10)
- df = DataFrame([1,2,3], index = [today, today, today])
- store['obj1'] = df
-
- setTZ('CST6CDT')
- result = store['obj1']
-
- assert_frame_equal(result, df)
-
- finally:
- setTZ(orig_tz)
-
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
@@ -2875,26 +2686,6 @@ def test_can_serialize_dates(self):
self._check_roundtrip(frame, tm.assert_frame_equal)
- def test_timezones(self):
- rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
- frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
-
- with ensure_clean_store(self.path) as store:
- store['frame'] = frame
- recons = store['frame']
- self.assertTrue(recons.index.equals(rng))
- self.assertEqual(rng.tz, recons.index.tz)
-
- def test_fixed_offset_tz(self):
- rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
- frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
-
- with ensure_clean_store(self.path) as store:
- store['frame'] = frame
- recons = store['frame']
- self.assertTrue(recons.index.equals(rng))
- self.assertEqual(rng.tz, recons.index.tz)
-
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
@@ -4294,35 +4085,25 @@ def f():
def test_pytables_native_read(self):
- try:
- store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r')
+ with ensure_clean_store(tm.get_data_path('legacy_hdf/pytables_native.h5'), mode='r') as store:
d2 = store['detector/readout']
- assert isinstance(d2, DataFrame)
- finally:
- safe_close(store)
+ self.assertIsInstance(d2, DataFrame)
- try:
- store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r')
+ with ensure_clean_store(tm.get_data_path('legacy_hdf/pytables_native2.h5'), mode='r') as store:
str(store)
d1 = store['detector']
- assert isinstance(d1, DataFrame)
- finally:
- safe_close(store)
+ self.assertIsInstance(d1, DataFrame)
def test_legacy_read(self):
- try:
- store = HDFStore(tm.get_data_path('legacy_hdf/legacy.h5'), 'r')
+ with ensure_clean_store(tm.get_data_path('legacy_hdf/legacy.h5'), mode='r') as store:
store['a']
store['b']
store['c']
store['d']
- finally:
- safe_close(store)
def test_legacy_table_read(self):
# legacy table types
- try:
- store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'r')
+ with ensure_clean_store(tm.get_data_path('legacy_hdf/legacy_table.h5'), mode='r') as store:
store.select('df1')
store.select('df2')
store.select('wp1')
@@ -4340,24 +4121,17 @@ def test_legacy_table_read(self):
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
- finally:
- safe_close(store)
-
def test_legacy_0_10_read(self):
# legacy from 0.10
- try:
- store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
+ with ensure_clean_store(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), mode='r') as store:
str(store)
for k in store.keys():
store.select(k)
- finally:
- safe_close(store)
def test_legacy_0_11_read(self):
# legacy from 0.11
- try:
- path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
- store = HDFStore(tm.get_data_path(path), 'r')
+ path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
+ with ensure_clean_store(tm.get_data_path(path), mode='r') as store:
str(store)
assert 'df' in store
assert 'df1' in store
@@ -4368,8 +4142,6 @@ def test_legacy_0_11_read(self):
assert isinstance(df, DataFrame)
assert isinstance(df1, DataFrame)
assert isinstance(mi, DataFrame)
- finally:
- safe_close(store)
def test_copy(self):
@@ -4506,38 +4278,6 @@ def test_tseries_indices_frame(self):
self.assertEqual(type(result.index), type(df.index))
self.assertEqual(result.index.freq, df.index.freq)
- def test_tseries_select_index_column(self):
- # GH7777
- # selecting a UTC datetimeindex column did
- # not preserve UTC tzinfo set before storing
-
- # check that no tz still works
- rng = date_range('1/1/2000', '1/30/2000')
- frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
-
- with ensure_clean_store(self.path) as store:
- store.append('frame', frame)
- result = store.select_column('frame', 'index')
- self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
-
- # check utc
- rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
- frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
-
- with ensure_clean_store(self.path) as store:
- store.append('frame', frame)
- result = store.select_column('frame', 'index')
- self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
-
- # double check non-utc
- rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
- frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
-
- with ensure_clean_store(self.path) as store:
- store.append('frame', frame)
- result = store.select_column('frame', 'index')
- self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
-
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
@@ -4969,6 +4709,249 @@ def test_complex_append(self):
result = store.select('df')
assert_frame_equal(pd.concat([df, df], 0), result)
+class TestTimezones(Base, tm.TestCase):
+
+
+ def _compare_with_tz(self, a, b):
+ tm.assert_frame_equal(a, b)
+
+ # compare the zones on each element
+ for c in a.columns:
+ for i in a.index:
+ a_e = a.loc[i,c]
+ b_e = b.loc[i,c]
+ if not (a_e == b_e and a_e.tz == b_e.tz):
+                    raise AssertionError("invalid tz comparison [%s] [%s]" % (a_e, b_e))
+
+ def test_append_with_timezones_dateutil(self):
+
+ from datetime import timedelta
+ tm._skip_if_no_dateutil()
+
+ # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues.
+ from pandas.tslib import maybe_get_tz
+ gettz = lambda x: maybe_get_tz('dateutil/' + x)
+
+ # as columns
+ with ensure_clean_store(self.path) as store:
+
+ _maybe_remove(store, 'df_tz')
+ df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ]))
+
+ store.append('df_tz', df, data_columns=['A'])
+ result = store['df_tz']
+ self._compare_with_tz(result, df)
+ assert_frame_equal(result, df)
+
+ # select with tz aware
+ expected = df[df.A >= df.A[3]]
+ result = store.select('df_tz', where=Term('A>=df.A[3]'))
+ self._compare_with_tz(result, expected)
+
+ # ensure we include dates in DST and STD time here.
+ _maybe_remove(store, 'df_tz')
+ df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130603', tz=gettz('US/Eastern'))), index=range(5))
+ store.append('df_tz', df)
+ result = store['df_tz']
+ self._compare_with_tz(result, df)
+ assert_frame_equal(result, df)
+
+ df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('EET'))), index=range(5))
+ self.assertRaises(ValueError, store.append, 'df_tz', df)
+
+ # this is ok
+ _maybe_remove(store, 'df_tz')
+ store.append('df_tz', df, data_columns=['A', 'B'])
+ result = store['df_tz']
+ self._compare_with_tz(result, df)
+ assert_frame_equal(result, df)
+
+ # can't append with diff timezone
+ df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('CET'))), index=range(5))
+ self.assertRaises(ValueError, store.append, 'df_tz', df)
+
+ # as index
+ with ensure_clean_store(self.path) as store:
+
+ # GH 4098 example
+ df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
+
+ _maybe_remove(store, 'df')
+ store.put('df', df)
+ result = store.select('df')
+ assert_frame_equal(result, df)
+
+ _maybe_remove(store, 'df')
+ store.append('df', df)
+ result = store.select('df')
+ assert_frame_equal(result, df)
+
+ def test_append_with_timezones_pytz(self):
+
+ from datetime import timedelta
+
+ # as columns
+ with ensure_clean_store(self.path) as store:
+
+ _maybe_remove(store, 'df_tz')
+ df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
+ store.append('df_tz',df,data_columns=['A'])
+ result = store['df_tz']
+ self._compare_with_tz(result,df)
+ assert_frame_equal(result,df)
+
+ # select with tz aware
+ self._compare_with_tz(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
+
+ _maybe_remove(store, 'df_tz')
+ # ensure we include dates in DST and STD time here.
+ df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130603',tz='US/Eastern')),index=range(5))
+ store.append('df_tz',df)
+ result = store['df_tz']
+ self._compare_with_tz(result,df)
+ assert_frame_equal(result,df)
+
+ df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
+ self.assertRaises(ValueError, store.append, 'df_tz', df)
+
+ # this is ok
+ _maybe_remove(store, 'df_tz')
+ store.append('df_tz',df,data_columns=['A','B'])
+ result = store['df_tz']
+ self._compare_with_tz(result,df)
+ assert_frame_equal(result,df)
+
+ # can't append with diff timezone
+ df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
+ self.assertRaises(ValueError, store.append, 'df_tz', df)
+
+ # as index
+ with ensure_clean_store(self.path) as store:
+
+ # GH 4098 example
+ df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
+
+ _maybe_remove(store, 'df')
+ store.put('df',df)
+ result = store.select('df')
+ assert_frame_equal(result,df)
+
+ _maybe_remove(store, 'df')
+ store.append('df',df)
+ result = store.select('df')
+ assert_frame_equal(result,df)
+
+ def test_tseries_select_index_column(self):
+ # GH7777
+ # selecting a UTC datetimeindex column did
+ # not preserve UTC tzinfo set before storing
+
+ # check that no tz still works
+ rng = date_range('1/1/2000', '1/30/2000')
+ frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
+
+ with ensure_clean_store(self.path) as store:
+ store.append('frame', frame)
+ result = store.select_column('frame', 'index')
+ self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
+
+ # check utc
+ rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
+ frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
+
+ with ensure_clean_store(self.path) as store:
+ store.append('frame', frame)
+ result = store.select_column('frame', 'index')
+ self.assertEqual(rng.tz, result.dt.tz)
+
+ # double check non-utc
+ rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
+ frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
+
+ with ensure_clean_store(self.path) as store:
+ store.append('frame', frame)
+ result = store.select_column('frame', 'index')
+ self.assertEqual(rng.tz, result.dt.tz)
+
+ def test_timezones(self):
+ rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
+ frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
+
+ with ensure_clean_store(self.path) as store:
+ store['frame'] = frame
+ recons = store['frame']
+ self.assertTrue(recons.index.equals(rng))
+ self.assertEqual(rng.tz, recons.index.tz)
+
+ def test_fixed_offset_tz(self):
+ rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
+ frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
+
+ with ensure_clean_store(self.path) as store:
+ store['frame'] = frame
+ recons = store['frame']
+ self.assertTrue(recons.index.equals(rng))
+ self.assertEqual(rng.tz, recons.index.tz)
+
+ def test_store_timezone(self):
+ # GH2852
+ # issue storing datetime.date with a timezone as it resets when read back in a new timezone
+
+ import platform
+ if platform.system() == "Windows":
+ raise nose.SkipTest("timezone setting not supported on windows")
+
+ import datetime
+ import time
+ import os
+
+ # original method
+ with ensure_clean_store(self.path) as store:
+
+ today = datetime.date(2013,9,10)
+ df = DataFrame([1,2,3], index = [today, today, today])
+ store['obj1'] = df
+ result = store['obj1']
+ assert_frame_equal(result, df)
+
+ # with tz setting
+ orig_tz = os.environ.get('TZ')
+
+ def setTZ(tz):
+ if tz is None:
+ try:
+ del os.environ['TZ']
+ except:
+ pass
+ else:
+ os.environ['TZ']=tz
+ time.tzset()
+
+ try:
+
+ with ensure_clean_store(self.path) as store:
+
+ setTZ('EST5EDT')
+ today = datetime.date(2013,9,10)
+ df = DataFrame([1,2,3], index = [today, today, today])
+ store['obj1'] = df
+
+ setTZ('CST6CDT')
+ result = store['obj1']
+
+ assert_frame_equal(result, df)
+
+ finally:
+ setTZ(orig_tz)
+
+ def test_legacy_datetimetz_object(self):
+ # legacy from < 0.17.0
+ # 8260
+ expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5))
+ with ensure_clean_store(tm.get_data_path('legacy_hdf/datetimetz_object.h5'), mode='r') as store:
+ result = store['df']
+ assert_frame_equal(result, expected)
+
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 619de8d6bad3b..d61c5f0740a91 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -1718,12 +1718,14 @@ def test_schema_support(self):
tm.assert_frame_equal(res1, res2)
def test_datetime_with_time_zone(self):
+
# Test to see if we read the date column with timezones that
# the timezone information is converted to utc and into a
# np.datetime64 (GH #7139)
+
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.DateColWithTz.dtype.type, np.datetime64),
- "DateColWithTz loaded with incorrect type")
+ "DateColWithTz loaded with incorrect type -> {0}".format(df.DateColWithTz.dtype))
# "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
self.assertEqual(df.DateColWithTz[0], Timestamp('2000-01-01 08:00:00'))
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 07f0c89535a77..75b25c7a81458 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -1732,7 +1732,7 @@ cdef class BlockPlacement:
self._as_array = arr
self._has_array = True
- def __unicode__(self):
+ def __str__(self):
cdef slice s = self._ensure_has_slice()
if s is not None:
v = self._as_slice
@@ -1741,6 +1741,8 @@ cdef class BlockPlacement:
return '%s(%r)' % (self.__class__.__name__, v)
+ __repr__ = __str__
+
def __len__(self):
cdef slice s = self._ensure_has_slice()
if s is not None:
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 83278fe12d641..f1799eb99f720 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -115,7 +115,7 @@ def __init__(self, data=None, index=None, columns=None,
index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
- mgr = df_to_manager(data, columns, index)
+ mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
@@ -181,7 +181,7 @@ def _init_dict(self, data, index, columns, dtype=None):
if c not in sdict:
sdict[c] = sp_maker(nan_vec)
- return df_to_manager(sdict, columns, index)
+ return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
data = _prep_ndarray(data, copy=False)
@@ -233,7 +233,7 @@ def _unpickle_sparse_frame_compat(self, state):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
- self._data = df_to_manager(series_dict, columns, index)
+ self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
@@ -737,7 +737,7 @@ def applymap(self, func):
"""
return self.apply(lambda x: lmap(func, x))
-def df_to_manager(sdf, columns, index):
+def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series, columns, index """
# from BlockManager perspective
diff --git a/pandas/sparse/scipy_sparse.py b/pandas/sparse/scipy_sparse.py
index da079a97873b8..a815ca7545561 100644
--- a/pandas/sparse/scipy_sparse.py
+++ b/pandas/sparse/scipy_sparse.py
@@ -30,7 +30,7 @@ def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the SparseSeries: get the labels and data for non-null entries
- values = ss._data.values._valid_sp_values
+ values = ss._data.internal_values()._valid_sp_values
nonnull_labels = ss.dropna()
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index 420cf509395ce..96d509ed9b7c1 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -219,15 +219,15 @@ def __init__(self, data=None, index=None, sparse_index=None, kind='block',
@property
def values(self):
""" return the array """
- return self._data._values
+ return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
- return self._data._values
+ return self.block.values
def get_values(self):
""" same as values """
- return self._data._values.to_dense().view()
+ return self.block.to_dense().view()
@property
def block(self):
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index 9ee5a753af567..74bd437373c19 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -73,13 +73,11 @@ except AttributeError:
cdef _try_infer_map(v):
""" if its in our map, just return the dtype """
cdef:
- object val_name, val_kind
- val_name = v.dtype.name
- if val_name in _TYPE_MAP:
- return _TYPE_MAP[val_name]
- val_kind = v.dtype.kind
- if val_kind in _TYPE_MAP:
- return _TYPE_MAP[val_kind]
+ object attr, val
+ for attr in ['name','kind','base']:
+ val = getattr(v.dtype,attr)
+ if val in _TYPE_MAP:
+ return _TYPE_MAP[val]
return None
def infer_dtype(object _values):
@@ -99,7 +97,7 @@ def infer_dtype(object _values):
# this will handle ndarray-like
# e.g. categoricals
try:
- values = getattr(_values, 'values', _values)
+ values = getattr(_values, '_values', getattr(_values, 'values', _values))
except:
val = _try_infer_map(_values)
if val is not None:
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 7e8905f4fc0c9..fb255f300ebdd 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, PandasDelegate
+import pandas.core.common as com
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assertIsInstance
from pandas.tseries.common import is_datetimelike
@@ -315,9 +316,10 @@ def test_ndarray_compat_properties(self):
for o in self.objs:
# check that we work
- for p in ['shape', 'dtype', 'base', 'flags', 'T',
+ for p in ['shape', 'dtype', 'flags', 'T',
'strides', 'itemsize', 'nbytes']:
self.assertIsNotNone(getattr(o, p, None))
+ self.assertTrue(hasattr(o, 'base'))
# if we have a datetimelike dtype then needs a view to work
# but the user is responsible for that
@@ -401,22 +403,35 @@ def test_value_counts_unique_nunique(self):
# freq must be specified because repeat makes freq ambiguous
# resets name from Index
- expected_index = pd.Index(o[::-1], name=None)
+ expected_index = pd.Index(o[::-1])
# attach name to klass
- o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq, name='a')
+ o = o.repeat(range(1, len(o) + 1))
+ o.name = 'a'
+
+ elif isinstance(o, DatetimeIndex):
+
+ # resets name from Index
+ expected_index = pd.Index(o[::-1])
+
+ # attach name to klass
+ o = o.repeat(range(1, len(o) + 1))
+ o.name = 'a'
+
# don't test boolean
elif isinstance(o,Index) and o.is_boolean():
continue
elif isinstance(o, Index):
- expected_index = pd.Index(values[::-1], name=None)
- o = klass(np.repeat(values, range(1, len(o) + 1)), name='a')
+ expected_index = pd.Index(values[::-1])
+ o = o.repeat(range(1, len(o) + 1))
+ o.name = 'a'
else:
- expected_index = pd.Index(values[::-1], name=None)
- idx = np.repeat(o.index.values, range(1, len(o) + 1))
+ expected_index = pd.Index(values[::-1])
+ idx = o.index.repeat(range(1, len(o) + 1))
o = klass(np.repeat(values, range(1, len(o) + 1)), index=idx, name='a')
expected_s = Series(range(10, 0, -1), index=expected_index, dtype='int64', name='a')
+
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
self.assertTrue(result.index.name is None)
@@ -447,7 +462,16 @@ def test_value_counts_unique_nunique(self):
continue
# special assign to the numpy array
- if o.values.dtype == 'datetime64[ns]' or isinstance(o, PeriodIndex):
+ if com.is_datetimetz(o):
+ if isinstance(o, DatetimeIndex):
+ v = o.asi8
+ v[0:2] = pd.tslib.iNaT
+ values = o._shallow_copy(v)
+ else:
+ o = o.copy()
+ o[0:2] = pd.tslib.iNaT
+ values = o.values
+ elif o.values.dtype == 'datetime64[ns]' or isinstance(o, PeriodIndex):
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
@@ -558,17 +582,19 @@ def test_value_counts_inferred(self):
self.assertEqual(s.nunique(), 0)
# GH 3002, datetime64[ns]
+ # don't test names though
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG',
'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"],
parse_dates=["dt"])
- s = klass(df['dt'].copy(), name='dt')
+ s = klass(df['dt'].copy())
+ s.name = None
idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z',
'2009-01-01 00:00:00X'])
- expected_s = Series([3, 2, 1], index=idx, name='dt')
+ expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z',
@@ -583,7 +609,7 @@ def test_value_counts_inferred(self):
# with NaT
s = df['dt'].copy()
- s = klass([v for v in s.values] + [pd.NaT], name='dt')
+ s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
@@ -595,6 +621,7 @@ def test_value_counts_inferred(self):
unique = s.unique()
self.assertEqual(unique.dtype, 'datetime64[ns]')
+
# numpy_array_equal cannot compare pd.NaT
self.assert_numpy_array_equal(unique[:3], expected)
self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT)
@@ -753,7 +780,7 @@ def test_duplicated_drop_duplicates(self):
self.assertFalse(result is original)
idx = original.index[list(range(len(original))) + [5, 3]]
- values = original.values[list(range(len(original))) + [5, 3]]
+ values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name='a')
expected = Series([False] * len(original) + [True, True],
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index f687ecbef35cb..9173c0a87f6c2 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1284,24 +1284,6 @@ def setUp(self):
def test_dtypes(self):
- dtype = com.CategoricalDtype()
- hash(dtype)
- self.assertTrue(com.is_categorical_dtype(dtype))
-
- s = Series(self.factor,name='A')
-
- # dtypes
- self.assertTrue(com.is_categorical_dtype(s.dtype))
- self.assertTrue(com.is_categorical_dtype(s))
- self.assertFalse(com.is_categorical_dtype(np.dtype('float64')))
-
- # np.dtype doesn't know about our new dtype
- def f():
- np.dtype(dtype)
- self.assertRaises(TypeError, f)
-
- self.assertFalse(dtype == np.str_)
- self.assertFalse(np.str_ == dtype)
# GH8143
index = ['cat','obj','num']
@@ -1830,16 +1812,14 @@ def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
- 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
- 2011-01-01 13:00:00-05:00]"""
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n 2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
- 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
- 2011-01-01 13:00:00-05:00]"""
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
+ 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
+ 2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
@@ -1859,16 +1839,16 @@ def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
- 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]"""
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
- 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]"""
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
@@ -2048,9 +2028,9 @@ def test_categorical_series_repr_datetime(self):
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
- 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
- 2011-01-01 13:00:00-05:00]"""
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
+ 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
+ 2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
@@ -2074,9 +2054,9 @@ def test_categorical_series_repr_datetime_ordered(self):
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
- 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]"""
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
diff --git a/pandas/tests/test_dtypes.py b/pandas/tests/test_dtypes.py
new file mode 100644
index 0000000000000..54a49de582e56
--- /dev/null
+++ b/pandas/tests/test_dtypes.py
@@ -0,0 +1,142 @@
+# -*- coding: utf-8 -*-
+
+import nose
+import numpy as np
+from pandas import Series, Categorical, date_range
+import pandas.core.common as com
+from pandas.core.common import (CategoricalDtype, is_categorical_dtype, is_categorical,
+ DatetimeTZDtype, is_datetime64tz_dtype, is_datetimetz,
+ is_dtype_equal, is_datetime64_ns_dtype, is_datetime64_dtype)
+import pandas.util.testing as tm
+
+_multiprocess_can_split_ = True
+
+class Base(object):
+
+ def test_hash(self):
+ hash(self.dtype)
+
+ def test_equality_invalid(self):
+ self.assertRaises(self.dtype == 'foo')
+
+ def test_numpy_informed(self):
+
+ # np.dtype doesn't know about our new dtype
+ def f():
+ np.dtype(self.dtype)
+ self.assertRaises(TypeError, f)
+
+ self.assertNotEqual(self.dtype, np.str_)
+ self.assertNotEqual(np.str_, self.dtype)
+
+ def test_pickle(self):
+ result = self.round_trip_pickle(self.dtype)
+ self.assertEqual(result, self.dtype)
+
+class TestCategoricalDtype(Base, tm.TestCase):
+
+ def setUp(self):
+ self.dtype = CategoricalDtype()
+
+ def test_equality(self):
+ self.assertTrue(is_dtype_equal(self.dtype, 'category'))
+ self.assertTrue(is_dtype_equal(self.dtype, CategoricalDtype()))
+ self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
+
+ def test_construction_from_string(self):
+ result = CategoricalDtype.construct_from_string('category')
+ self.assertTrue(is_dtype_equal(self.dtype, result))
+ self.assertRaises(TypeError, lambda : CategoricalDtype.construct_from_string('foo'))
+
+ def test_is_dtype(self):
+ self.assertTrue(CategoricalDtype.is_dtype(self.dtype))
+ self.assertTrue(CategoricalDtype.is_dtype('category'))
+ self.assertTrue(CategoricalDtype.is_dtype(CategoricalDtype()))
+ self.assertFalse(CategoricalDtype.is_dtype('foo'))
+ self.assertFalse(CategoricalDtype.is_dtype(np.float64))
+
+ def test_basic(self):
+
+ self.assertTrue(is_categorical_dtype(self.dtype))
+
+ factor = Categorical.from_array(['a', 'b', 'b', 'a',
+ 'a', 'c', 'c', 'c'])
+
+ s = Series(factor,name='A')
+
+ # dtypes
+ self.assertTrue(is_categorical_dtype(s.dtype))
+ self.assertTrue(is_categorical_dtype(s))
+ self.assertFalse(is_categorical_dtype(np.dtype('float64')))
+
+ self.assertTrue(is_categorical(s.dtype))
+ self.assertTrue(is_categorical(s))
+ self.assertFalse(is_categorical(np.dtype('float64')))
+ self.assertFalse(is_categorical(1.0))
+
+class TestDatetimeTZDtype(Base, tm.TestCase):
+
+ def setUp(self):
+ self.dtype = DatetimeTZDtype('ns','US/Eastern')
+
+ def test_construction(self):
+ self.assertRaises(ValueError, lambda : DatetimeTZDtype('ms','US/Eastern'))
+
+ def test_subclass(self):
+ a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
+ b = DatetimeTZDtype('datetime64[ns, CET]')
+
+ self.assertTrue(issubclass(type(a), type(a)))
+ self.assertTrue(issubclass(type(a), type(b)))
+
+ def test_compat(self):
+ self.assertFalse(is_datetime64_ns_dtype(self.dtype))
+ self.assertFalse(is_datetime64_ns_dtype('datetime64[ns, US/Eastern]'))
+ self.assertFalse(is_datetime64_dtype(self.dtype))
+ self.assertFalse(is_datetime64_dtype('datetime64[ns, US/Eastern]'))
+
+ def test_construction_from_string(self):
+ result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
+ self.assertTrue(is_dtype_equal(self.dtype, result))
+ result = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
+ self.assertTrue(is_dtype_equal(self.dtype, result))
+ self.assertRaises(TypeError, lambda : DatetimeTZDtype.construct_from_string('foo'))
+
+ def test_is_dtype(self):
+ self.assertTrue(DatetimeTZDtype.is_dtype(self.dtype))
+ self.assertTrue(DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]'))
+ self.assertFalse(DatetimeTZDtype.is_dtype('foo'))
+ self.assertTrue(DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns','US/Pacific')))
+ self.assertFalse(DatetimeTZDtype.is_dtype(np.float64))
+
+ def test_equality(self):
+ self.assertTrue(is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]'))
+ self.assertTrue(is_dtype_equal(self.dtype, DatetimeTZDtype('ns','US/Eastern')))
+ self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
+ self.assertFalse(is_dtype_equal(self.dtype, DatetimeTZDtype('ns','CET')))
+ self.assertFalse(is_dtype_equal(DatetimeTZDtype('ns','US/Eastern'), DatetimeTZDtype('ns','US/Pacific')))
+
+ # numpy compat
+ self.assertTrue(is_dtype_equal(np.dtype("M8[ns]"),"datetime64[ns]"))
+
+ def test_basic(self):
+
+ self.assertTrue(is_datetime64tz_dtype(self.dtype))
+
+ dr = date_range('20130101',periods=3,tz='US/Eastern')
+ s = Series(dr,name='A')
+
+ # dtypes
+ self.assertTrue(is_datetime64tz_dtype(s.dtype))
+ self.assertTrue(is_datetime64tz_dtype(s))
+ self.assertFalse(is_datetime64tz_dtype(np.dtype('float64')))
+ self.assertFalse(is_datetime64tz_dtype(1.0))
+
+ self.assertTrue(is_datetimetz(s))
+ self.assertTrue(is_datetimetz(s.dtype))
+ self.assertFalse(is_datetimetz(np.dtype('float64')))
+ self.assertFalse(is_datetimetz(1.0))
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5ecb9fc66b0ea..24de36d95794c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -35,6 +35,7 @@
MultiIndex, DatetimeIndex, Timestamp, date_range,
read_csv, timedelta_range, Timedelta, CategoricalIndex,
option_context)
+from pandas.core.dtypes import DatetimeTZDtype
import pandas as pd
from pandas.parser import CParserError
from pandas.util.misc import is_little_endian
@@ -2254,6 +2255,11 @@ def setUp(self):
self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),
'int32' : np.array([1]*10,dtype='int32'),
}, index=np.arange(10))
+ self.tzframe = DataFrame({'A' : date_range('20130101',periods=3),
+ 'B' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'C' : date_range('20130101',periods=3,tz='CET')})
+ self.tzframe.iloc[1,1] = pd.NaT
+ self.tzframe.iloc[1,2] = pd.NaT
self.ts1 = tm.makeTimeSeries()
self.ts2 = tm.makeTimeSeries()[5:]
@@ -4080,13 +4086,14 @@ def test_constructor_with_datetimes(self):
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
+
df = DataFrame({'End Date': dt}, index=[0])
self.assertEqual(df.iat[0,0],dt)
- assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))
+ assert_series_equal(df.dtypes,Series({'End Date' : 'datetime64[ns, US/Eastern]' }))
df = DataFrame([{'End Date': dt}])
self.assertEqual(df.iat[0,0],dt)
- assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))
+ assert_series_equal(df.dtypes,Series({'End Date' : 'datetime64[ns, US/Eastern]' }))
# tz-aware (UTC and other tz's)
# GH 8411
@@ -4118,6 +4125,183 @@ def test_constructor_with_datetimes(self):
expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True), 'b': i_no_tz })
assert_frame_equal(df, expected)
+ def test_constructor_with_datetime_tz(self):
+
+ # 8260
+ # support datetime64 with tz
+
+ idx = Index(date_range('20130101',periods=3,tz='US/Eastern'),
+ name='foo')
+ dr = date_range('20130110',periods=3)
+
+ # construction
+ df = DataFrame({'A' : idx, 'B' : dr})
+ self.assertTrue(df['A'].dtype,'M8[ns, US/Eastern')
+ self.assertTrue(df['A'].name == 'A')
+ assert_series_equal(df['A'],Series(idx,name='A'))
+ assert_series_equal(df['B'],Series(dr,name='B'))
+
+ # construction from dict
+ df2 = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5))
+ assert_series_equal(df2.dtypes, Series(['datetime64[ns, US/Eastern]', 'datetime64[ns, CET]'], index=['A','B']))
+
+ # dtypes
+ tzframe = DataFrame({'A' : date_range('20130101',periods=3),
+ 'B' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'C' : date_range('20130101',periods=3,tz='CET')})
+ tzframe.iloc[1,1] = pd.NaT
+ tzframe.iloc[1,2] = pd.NaT
+ result = tzframe.dtypes.sort_index()
+ expected = Series([ np.dtype('datetime64[ns]'),
+ DatetimeTZDtype('datetime64[ns, US/Eastern]'),
+ DatetimeTZDtype('datetime64[ns, CET]') ],
+ ['A','B','C'])
+
+ # concat
+ df3 = pd.concat([df2.A.to_frame(),df2.B.to_frame()],axis=1)
+ assert_frame_equal(df2, df3)
+
+ # select_dtypes
+ result = df3.select_dtypes(include=['datetime64[ns]'])
+ expected = df3.reindex(columns=[])
+ assert_frame_equal(result, expected)
+
+ # this will select based on issubclass, and these are the same class
+ result = df3.select_dtypes(include=['datetime64[ns, CET]'])
+ expected = df3
+ assert_frame_equal(result, expected)
+
+ # from index
+ idx2 = date_range('20130101',periods=3,tz='US/Eastern',name='foo')
+ df2 = DataFrame(idx2)
+ assert_series_equal(df2['foo'],Series(idx2,name='foo'))
+ df2 = DataFrame(Series(idx2))
+ assert_series_equal(df2['foo'],Series(idx2,name='foo'))
+
+ idx2 = date_range('20130101',periods=3,tz='US/Eastern')
+ df2 = DataFrame(idx2)
+ assert_series_equal(df2[0],Series(idx2,name=0))
+ df2 = DataFrame(Series(idx2))
+ assert_series_equal(df2[0],Series(idx2,name=0))
+
+ # interleave with object
+ result = self.tzframe.assign(D = 'foo').values
+ expected = np.array([[Timestamp('2013-01-01 00:00:00'),
+ Timestamp('2013-01-02 00:00:00'),
+ Timestamp('2013-01-03 00:00:00')],
+ [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),
+ pd.NaT,
+ Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],
+ [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
+ pd.NaT,
+ Timestamp('2013-01-03 00:00:00+0100', tz='CET')],
+ ['foo','foo','foo']], dtype=object).T
+ self.assert_numpy_array_equal(result, expected)
+
+ # interleave with only datetime64[ns]
+ result = self.tzframe.values
+ expected = np.array([[Timestamp('2013-01-01 00:00:00'),
+ Timestamp('2013-01-02 00:00:00'),
+ Timestamp('2013-01-03 00:00:00')],
+ [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),
+ pd.NaT,
+ Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],
+ [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
+ pd.NaT,
+ Timestamp('2013-01-03 00:00:00+0100', tz='CET')]], dtype=object).T
+ self.assert_numpy_array_equal(result, expected)
+
+ # astype
+ expected = np.array([[Timestamp('2013-01-01 00:00:00'),
+ Timestamp('2013-01-02 00:00:00'),
+ Timestamp('2013-01-03 00:00:00')],
+ [Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern'),
+ pd.NaT,
+ Timestamp('2013-01-03 00:00:00-0500', tz='US/Eastern')],
+ [Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
+ pd.NaT,
+ Timestamp('2013-01-03 00:00:00+0100', tz='CET')]], dtype=object).T
+ result = self.tzframe.astype(object)
+ assert_frame_equal(result, DataFrame(expected, index=self.tzframe.index, columns=self.tzframe.columns))
+
+ result = self.tzframe.astype('datetime64[ns]')
+ expected = DataFrame({'A' : date_range('20130101',periods=3),
+ 'B' : date_range('20130101',periods=3,tz='US/Eastern').tz_convert('UTC').tz_localize(None),
+ 'C' : date_range('20130101',periods=3,tz='CET').tz_convert('UTC').tz_localize(None)})
+ expected.iloc[1,1] = pd.NaT
+ expected.iloc[1,2] = pd.NaT
+ assert_frame_equal(result, expected)
+
+ # str formatting
+ result = self.tzframe.astype(str)
+ expected = np.array([['2013-01-01', '2013-01-01 00:00:00-05:00',
+ '2013-01-01 00:00:00+01:00'],
+ ['2013-01-02', 'NaT', 'NaT'],
+ ['2013-01-03', '2013-01-03 00:00:00-05:00',
+ '2013-01-03 00:00:00+01:00']], dtype=object)
+ self.assert_numpy_array_equal(result, expected)
+
+ result = str(self.tzframe)
+ self.assertTrue('0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00' in result)
+ self.assertTrue('1 2013-01-02 NaT NaT' in result)
+ self.assertTrue('2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00' in result)
+
+ # setitem
+ df['C'] = idx
+ assert_series_equal(df['C'],Series(idx,name='C'))
+
+ df['D'] = 'foo'
+ df['D'] = idx
+ assert_series_equal(df['D'],Series(idx,name='D'))
+ del df['D']
+
+ # assert that A & C are not sharing the same base (e.g. they
+ # are copies)
+ b1 = df._data.blocks[1]
+ b2 = df._data.blocks[2]
+ self.assertTrue(b1.values.equals(b2.values))
+ self.assertFalse(id(b1.values.values.base) == id(b2.values.values.base))
+
+ # with nan
+ df2 = df.copy()
+ df2.iloc[1,1] = pd.NaT
+ df2.iloc[1,2] = pd.NaT
+ result = df2['B']
+ assert_series_equal(notnull(result), Series([True,False,True],name='B'))
+ assert_series_equal(df2.dtypes, df.dtypes)
+
+ # set/reset
+ df = DataFrame({'A' : [0,1,2] }, index=idx)
+ result = df.reset_index()
+ self.assertTrue(result['foo'].dtype,'M8[ns, US/Eastern')
+
+ result = result.set_index('foo')
+ tm.assert_index_equal(df.index,idx)
+
+ # indexing
+ result = df2.iloc[1]
+ expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
+ index=list('ABC'), dtype='object', name=1)
+ assert_series_equal(result, expected)
+ result = df2.loc[1]
+ expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
+ index=list('ABC'), dtype='object', name=1)
+ assert_series_equal(result, expected)
+
+ # indexing - fast_xs
+ df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})
+ result = df.iloc[5]
+ expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', offset='D')
+ self.assertEqual(result, expected)
+
+ result = df.loc[5]
+ self.assertEqual(result, expected)
+
+ # indexing - boolean
+ result = df[df.a > df.a[3]]
+ expected = df.iloc[4:]
+ assert_frame_equal(result, expected)
+
def test_constructor_for_list_with_dtypes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
@@ -4422,11 +4606,11 @@ def test_astype_str(self):
result = df.astype(tt)
expected = DataFrame({
- 'a' : list(map(tt, a.values)),
- 'b' : list(map(tt, b.values)),
- 'c' : list(map(tt, c.values)),
- 'd' : list(map(tt, d.values)),
- 'e' : list(map(tt, e.values)),
+ 'a' : list(map(tt, map(lambda x: Timestamp(x)._date_repr, a._values))),
+ 'b' : list(map(tt, map(Timestamp, b._values))),
+ 'c' : list(map(tt, map(lambda x: Timedelta(x)._repr_base(format='all'), c._values))),
+ 'd' : list(map(tt, d._values)),
+ 'e' : list(map(tt, e._values)),
})
assert_frame_equal(result, expected)
@@ -4450,6 +4634,10 @@ def test_pickle(self):
unpickled = self.round_trip_pickle(self.empty)
repr(unpickled)
+ # tz frame
+ unpickled = self.round_trip_pickle(self.tzframe)
+ assert_frame_equal(self.tzframe, unpickled)
+
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
@@ -6382,6 +6570,17 @@ def test_to_csv_from_csv(self):
assert_frame_equal(df, result, check_index_type=True)
+ # tz, 8260
+ with ensure_clean(pname) as path:
+
+ self.tzframe.to_csv(path)
+ result = pd.read_csv(path, index_col=0, parse_dates=['A'])
+
+ converter = lambda c: pd.to_datetime(result[c]).dt.tz_localize('UTC').dt.tz_convert(self.tzframe[c].dt.tz)
+ result['B'] = converter('B')
+ result['C'] = converter('C')
+ assert_frame_equal(result, self.tzframe)
+
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
@@ -15211,8 +15410,10 @@ def test_dataframe_metadata(self):
self.assertEqual(df[['X']].testattr, 'XXX')
self.assertEqual(df.loc[['a', 'b'], :].testattr, 'XXX')
self.assertEqual(df.iloc[[0, 1], :].testattr, 'XXX')
+
# GH9776
self.assertEqual(df.iloc[0:1, :].testattr, 'XXX')
+
# GH10553
unpickled = self.round_trip_pickle(df)
assert_frame_equal(df, unpickled)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 9b2c1bf1a09ee..6e7a72360ab67 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -573,7 +573,7 @@ def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
- def test_consruction_list_mixed_tuples(self):
+ def test_construction_list_mixed_tuples(self):
# 10697
# if we are constructing from a mixed list of tuples, make sure that we
# are independent of the sorting order
@@ -2861,9 +2861,7 @@ def test_str(self):
if hasattr(idx,'tz'):
if idx.tz is not None:
- self.assertTrue("tz='%s'" % idx.tz in str(idx))
- else:
- self.assertTrue("tz=None" in str(idx))
+ self.assertTrue(idx.tz in str(idx))
if hasattr(idx,'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
@@ -2891,6 +2889,24 @@ def setUp(self):
def create_index(self):
return date_range('20130101', periods=5)
+ def test_construction_with_alt(self):
+
+ i = pd.date_range('20130101',periods=5,freq='H',tz='US/Eastern')
+ i2 = DatetimeIndex(i, dtype=i.dtype)
+ self.assert_index_equal(i, i2)
+
+ i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
+ self.assert_index_equal(i, i2)
+
+ i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
+ self.assert_index_equal(i, i2)
+
+ i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
+ self.assert_index_equal(i, i2)
+
+ # incompat tz/dtype
+ self.assertRaises(ValueError, lambda : DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
+
def test_pickle_compat_construction(self):
pass
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 7c51641b8e5da..61966674bc104 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -6,7 +6,8 @@
import nose
import numpy as np
-from pandas import Index, MultiIndex, DataFrame, Series, Categorical
+import re
+from pandas import Index, MultiIndex, DataFrame, DatetimeIndex, Series, Categorical
from pandas.compat import OrderedDict, lrange
from pandas.sparse.array import SparseArray
from pandas.core.internals import *
@@ -44,7 +45,7 @@ def create_block(typestr, placement, item_shape=None, num_offset=0):
* complex, c16, c8
* bool
* object, string, O
- * datetime, dt, M8[ns]
+ * datetime, dt, M8[ns], M8[ns, tz]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
@@ -74,6 +75,13 @@ def create_block(typestr, placement, item_shape=None, num_offset=0):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
+ elif typestr.startswith('M8[ns'):
+ # datetime with tz
+ m = re.search('M8\[ns,\s*(\w+\/?\w*)\]', typestr)
+ assert m is not None, "incompatible typestr -> {0}".format(typestr)
+ tz = m.groups()[0]
+ assert num_items == 1, "must have only 1 num items for a tz-aware"
+ values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category',):
@@ -401,7 +409,7 @@ def test_get_scalar(self):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item, fastpath=False)[i]
assert_almost_equal(res, exp)
- exp = self.mgr.get(item).values[i]
+ exp = self.mgr.get(item).internal_values()[i]
assert_almost_equal(res, exp)
def test_get(self):
@@ -414,19 +422,19 @@ def test_get(self):
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
- assert_almost_equal(mgr.get('a').values, values[0])
- assert_almost_equal(mgr.get('b').values, values[1])
- assert_almost_equal(mgr.get('c').values, values[2])
+ assert_almost_equal(mgr.get('a').internal_values(), values[0])
+ assert_almost_equal(mgr.get('b').internal_values(), values[1])
+ assert_almost_equal(mgr.get('c').internal_values(), values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3,))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
- assert_almost_equal(mgr.get('a').values, [0] * 3)
- assert_almost_equal(mgr.get('b').values, ['bar'] * 3)
- assert_almost_equal(mgr.get('c').values, [2] * 3)
- assert_almost_equal(mgr.get('d').values, ['foo'] * 3)
+ assert_almost_equal(mgr.get('a').internal_values(), [0] * 3)
+ assert_almost_equal(mgr.get('b').internal_values(), ['bar'] * 3)
+ assert_almost_equal(mgr.get('c').internal_values(), [2] * 3)
+ assert_almost_equal(mgr.get('d').internal_values(), ['foo'] * 3)
def test_insert(self):
self.mgr.insert(0, 'inserted', np.arange(N))
@@ -478,7 +486,6 @@ def test_copy(self):
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
-
# what to test here?
self.assertEqual(mgr.as_matrix().dtype, np.float64)
@@ -510,6 +517,12 @@ def test_as_matrix_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
self.assertEqual(mgr.as_matrix().dtype, 'M8[ns]')
+ def test_as_matrix_datetime_tz(self):
+ mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
+ self.assertEqual(mgr.get('h').dtype, 'datetime64[ns, US/Eastern]')
+ self.assertEqual(mgr.get('g').dtype, 'datetime64[ns, CET]')
+ self.assertEqual(mgr.as_matrix().dtype, 'object')
+
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
@@ -692,10 +705,10 @@ def test_reindex_items(self):
assert_almost_equal(mgr.get('c',fastpath=False), reindexed.get('c',fastpath=False))
assert_almost_equal(mgr.get('a',fastpath=False), reindexed.get('a',fastpath=False))
assert_almost_equal(mgr.get('d',fastpath=False), reindexed.get('d',fastpath=False))
- assert_almost_equal(mgr.get('g').values, reindexed.get('g').values)
- assert_almost_equal(mgr.get('c').values, reindexed.get('c').values)
- assert_almost_equal(mgr.get('a').values, reindexed.get('a').values)
- assert_almost_equal(mgr.get('d').values, reindexed.get('d').values)
+ assert_almost_equal(mgr.get('g').internal_values(), reindexed.get('g').internal_values())
+ assert_almost_equal(mgr.get('c').internal_values(), reindexed.get('c').internal_values())
+ assert_almost_equal(mgr.get('a').internal_values(), reindexed.get('a').internal_values())
+ assert_almost_equal(mgr.get('d').internal_values(), reindexed.get('d').internal_values())
def test_multiindex_xs(self):
mgr = create_mgr('a,b,c: f8; d,e,f: i8')
@@ -721,18 +734,18 @@ def test_get_numeric_data(self):
numeric = mgr.get_numeric_data()
assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool'])
assert_almost_equal(mgr.get('float',fastpath=False), numeric.get('float',fastpath=False))
- assert_almost_equal(mgr.get('float').values, numeric.get('float').values)
+ assert_almost_equal(mgr.get('float').internal_values(), numeric.get('float').internal_values())
# Check sharing
numeric.set('float', np.array([100., 200., 300.]))
assert_almost_equal(mgr.get('float',fastpath=False), np.array([100., 200., 300.]))
- assert_almost_equal(mgr.get('float').values, np.array([100., 200., 300.]))
+ assert_almost_equal(mgr.get('float').internal_values(), np.array([100., 200., 300.]))
numeric2 = mgr.get_numeric_data(copy=True)
assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool'])
numeric2.set('float', np.array([1000., 2000., 3000.]))
assert_almost_equal(mgr.get('float',fastpath=False), np.array([100., 200., 300.]))
- assert_almost_equal(mgr.get('float').values, np.array([100., 200., 300.]))
+ assert_almost_equal(mgr.get('float').internal_values(), np.array([100., 200., 300.]))
def test_get_bool_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
@@ -743,17 +756,17 @@ def test_get_bool_data(self):
bools = mgr.get_bool_data()
assert_almost_equal(bools.items, ['bool'])
assert_almost_equal(mgr.get('bool',fastpath=False), bools.get('bool',fastpath=False))
- assert_almost_equal(mgr.get('bool').values, bools.get('bool').values)
+ assert_almost_equal(mgr.get('bool').internal_values(), bools.get('bool').internal_values())
bools.set('bool', np.array([True, False, True]))
assert_almost_equal(mgr.get('bool',fastpath=False), [True, False, True])
- assert_almost_equal(mgr.get('bool').values, [True, False, True])
+ assert_almost_equal(mgr.get('bool').internal_values(), [True, False, True])
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set('bool', np.array([False, True, False]))
assert_almost_equal(mgr.get('bool',fastpath=False), [True, False, True])
- assert_almost_equal(mgr.get('bool').values, [True, False, True])
+ assert_almost_equal(mgr.get('bool').internal_values(), [True, False, True])
def test_unicode_repr_doesnt_raise(self):
str_repr = repr(create_mgr(u('b,\u05d0: object')))
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 1bce047f3bf96..0f55f79b8b9b9 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2188,6 +2188,21 @@ def test_datetimeindex(self):
self.assertIsInstance(index.levels[0],pd.DatetimeIndex)
self.assertIsInstance(index.levels[1],pd.DatetimeIndex)
+ def test_constructor_with_tz(self):
+
+ index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
+ name='dt1', tz='US/Pacific')
+ columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
+ name='dt2', tz='Asia/Tokyo')
+
+ result = MultiIndex.from_arrays([index, columns])
+ tm.assert_index_equal(result.levels[0], index)
+ tm.assert_index_equal(result.levels[1], columns)
+
+ result = MultiIndex.from_arrays([Series(index), Series(columns)])
+ tm.assert_index_equal(result.levels[0], index)
+ tm.assert_index_equal(result.levels[1], columns)
+
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame({'label':['a', 'a', 'a', 'b', 'b', 'b'],
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 86eafdf7ca2c8..473549d3fb101 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1,6 +1,7 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+import re
import sys
from datetime import datetime, timedelta
import operator
@@ -19,7 +20,7 @@
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,
- date_range, period_range, timedelta_range)
+ date_range, period_range, timedelta_range, _np_version_under1p8)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.period import PeriodIndex
@@ -92,7 +93,7 @@ def test_dt_namespace_accessor(self):
ok_for_td_methods = ['components','to_pytimedelta','total_seconds']
def get_expected(s, name):
- result = getattr(Index(s.values),prop)
+ result = getattr(Index(s._values),prop)
if isinstance(result, np.ndarray):
if com.is_integer_dtype(result):
result = result.astype('int64')
@@ -138,6 +139,30 @@ def compare(s, name):
expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)
tm.assert_series_equal(result, expected)
+ # datetimeindex with tz
+ s = Series(date_range('20130101',periods=5,tz='US/Eastern'))
+ for prop in ok_for_dt:
+
+ # we test freq below
+ if prop != 'freq':
+ compare(s, prop)
+
+ for prop in ok_for_dt_methods:
+ getattr(s.dt,prop)
+
+ result = s.dt.to_pydatetime()
+ self.assertIsInstance(result,np.ndarray)
+ self.assertTrue(result.dtype == object)
+
+ result = s.dt.tz_convert('CET')
+ expected = Series(s._values.tz_convert('CET'),index=s.index)
+ tm.assert_series_equal(result, expected)
+
+ tz_result = result.dt.tz
+ self.assertEqual(str(tz_result), 'CET')
+ freq_result = s.dt.freq
+ self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)
+
# timedeltaindex
for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),
Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),
@@ -157,7 +182,7 @@ def compare(s, name):
result = s.dt.to_pytimedelta()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
-
+
result = s.dt.total_seconds()
self.assertIsInstance(result,pd.Series)
self.assertTrue(result.dtype == 'float64')
@@ -991,6 +1016,86 @@ def test_constructor_dtype_datetime64(self):
dr = date_range('20130101',periods=3,tz='US/Eastern')
self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')
+ # non-convertible
+ s = Series([1479596223000, -1479590, pd.NaT])
+ self.assertTrue(s.dtype == 'object')
+ self.assertTrue(s[2] is pd.NaT)
+ self.assertTrue('NaT' in str(s))
+
+ # if we passed a NaT it remains
+ s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
+ self.assertTrue(s.dtype == 'object')
+ self.assertTrue(s[2] is pd.NaT)
+ self.assertTrue('NaT' in str(s))
+
+ # if we passed a nan it remains
+ s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
+ self.assertTrue(s.dtype == 'object')
+ self.assertTrue(s[2] is np.nan)
+ self.assertTrue('NaN' in str(s))
+
+ def test_constructor_with_datetime_tz(self):
+
+ # 8260
+ # support datetime64 with tz
+
+ dr = date_range('20130101',periods=3,tz='US/Eastern')
+ s = Series(dr)
+ self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]')
+ self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]')
+ self.assertTrue(com.is_datetime64tz_dtype(s.dtype))
+
+ # export
+ result = s.values
+ self.assertIsInstance(result, np.ndarray)
+ self.assertTrue(result.dtype == 'datetime64[ns]')
+ self.assertTrue(dr.equals(pd.DatetimeIndex(result).tz_localize('UTC').tz_convert(tz=s.dt.tz)))
+
+ # indexing
+ result = s.iloc[0]
+ self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))
+ result = s[0]
+ self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))
+
+ result = s[Series([True,True,False],index=s.index)]
+ assert_series_equal(result,s[0:2])
+
+ result = s.iloc[0:1]
+ assert_series_equal(result,Series(dr[0:1]))
+
+ # concat
+ result = pd.concat([s.iloc[0:1],s.iloc[1:]])
+ assert_series_equal(result,s)
+
+ # astype
+ result = s.astype(object)
+ expected = Series(DatetimeIndex(s._values).asobject)
+ assert_series_equal(result, expected)
+
+ # short str
+ self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
+
+ # formatting with NaT
+ result = s.shift()
+ self.assertTrue('datetime64[ns, US/Eastern]' in str(result))
+ self.assertTrue('NaT' in str(result))
+
+ # long str
+ t = Series(date_range('20130101',periods=1000,tz='US/Eastern'))
+ self.assertTrue('datetime64[ns, US/Eastern]' in str(t))
+
+ result = pd.DatetimeIndex(s,freq='infer')
+ tm.assert_index_equal(result, dr)
+
+ # inference
+ s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
+ self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]')
+ self.assertTrue(lib.infer_dtype(s) == 'datetime64')
+
+ s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
+ self.assertTrue(s.dtype == 'object')
+ self.assertTrue(lib.infer_dtype(s) == 'datetime')
+
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
@@ -3519,16 +3624,17 @@ def test_timedelta_assignment(self):
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
- for op in ops:
- try:
- op = getattr(get_ser, op, None)
- if op is not None:
- self.assertRaises(TypeError, op, test_ser)
- except:
- com.pprint_thing("Failed on op %r" % op)
- raise
+
+ # check that we are getting a TypeError
+ # with 'operate' (from core/ops.py) for the ops that are not defined
+ for op_str in ops:
+ op = getattr(get_ser, op_str, None)
+ with tm.assertRaisesRegexp(TypeError, 'operate'):
+ op(test_ser)
+
### timedelta64 ###
td1 = Series([timedelta(minutes=5,seconds=3)]*3)
+ td1.iloc[2] = np.nan
td2 = timedelta(minutes=5,seconds=4)
ops = ['__mul__','__floordiv__','__pow__',
'__rmul__','__rfloordiv__','__rpow__']
@@ -3543,6 +3649,7 @@ def run_ops(ops, get_ser, test_ser):
### datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
+ dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
@@ -3571,6 +3678,66 @@ def run_ops(ops, get_ser, test_ser):
td1 + dt1
dt1 + td1
+ # 8260, 10763
+ # datetime64 with tz
+ ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
+ '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
+ '__rpow__']
+ dt1 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')
+ dt2 = dt1.copy()
+ dt2.iloc[2] = np.nan
+ td1 = Series(timedelta_range('1 days 1 min',periods=5, freq='H'))
+ td2 = td1.copy()
+ td2.iloc[1] = np.nan
+ run_ops(ops, dt1, td1)
+
+ result = dt1 + td1[0]
+ expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ result = dt2 + td2[0]
+ expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ # odd numpy behavior with scalar timedeltas
+ if not _np_version_under1p8:
+ result = td1[0] + dt1
+ expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ result = td2[0] + dt2
+ expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ result = dt1 - td1[0]
+ expected = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+ self.assertRaises(TypeError, lambda: td1[0] - dt1)
+
+ result = dt2 - td2[0]
+ expected = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+ self.assertRaises(TypeError, lambda: td2[0] - dt2)
+
+ result = dt1 + td1
+ expected = (dt1.dt.tz_localize(None) + td1).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ result = dt2 + td2
+ expected = (dt2.dt.tz_localize(None) + td2).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ result = dt1 - td1
+ expected = (dt1.dt.tz_localize(None) - td1).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ result = dt2 - td2
+ expected = (dt2.dt.tz_localize(None) - td2).dt.tz_localize('US/Eastern')
+ assert_series_equal(result, expected)
+
+ self.assertRaises(TypeError, lambda: td1 - dt1)
+ self.assertRaises(TypeError, lambda: td2 - dt2)
+
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
@@ -4842,6 +5009,7 @@ def test_drop_duplicates(self):
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
+
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.duplicated(take_last=True), expected)
@@ -4874,6 +5042,7 @@ def test_drop_duplicates(self):
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
+
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.duplicated(take_last=True), expected)
@@ -5416,6 +5585,16 @@ def test_shift(self):
expected = Series([np.nan,0,1,2,3],index=index)
assert_series_equal(result,expected)
+ # xref 8260
+ # with tz
+ s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')
+ result = s-s.shift()
+ assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))
+
+ # incompat tz
+ s2 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='CET'),name='foo')
+ self.assertRaises(ValueError, lambda : s-s2)
+
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodSeries()
@@ -5914,17 +6093,17 @@ def test_astype_str(self):
for tt in set([str, compat.text_type]):
ts = Series([Timestamp('2010-01-04 00:00:00')])
s = ts.astype(tt)
- expected = Series([tt(ts.values[0])])
+ expected = Series([tt('2010-01-04')])
assert_series_equal(s, expected)
ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])
s = ts.astype(tt)
- expected = Series([tt(ts.values[0])])
+ expected = Series([tt('2010-01-04 00:00:00-05:00')])
assert_series_equal(s, expected)
td = Series([Timedelta(1, unit='d')])
s = td.astype(tt)
- expected = Series([tt(td.values[0])])
+ expected = Series([tt('1 days 00:00:00.000000000')])
assert_series_equal(s, expected)
def test_astype_unicode(self):
@@ -7032,9 +7211,9 @@ def check_replace(to_rep, val, expected):
# test an object with dates + floats + integers + strings
dr = date_range('1/1/2001', '1/10/2001',
freq='D').to_series().reset_index(drop=True)
- r = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])
- assert_series_equal(r, Series([1.0,2,'a'] +
- dr[3:].tolist(),dtype=object))
+ result = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])
+ expected = Series([1.0,2,'a'] + dr[3:].tolist(),dtype=object)
+ assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = Series([True, False, True])
@@ -7113,6 +7292,11 @@ def test_diff(self):
nxp = xp.diff()
assert_series_equal(nrs, nxp)
+ # with tz
+ s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'), name='foo')
+ result = s.diff()
+ assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))
+
def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 146d558ea0815..95c68aaa00b18 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -396,11 +396,11 @@ def _get_merge_keys(self):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
- right_keys.append(right[rk].values)
+ right_keys.append(right[rk]._values)
join_names.append(rk)
else:
if not is_rkey(rk):
- right_keys.append(right[rk].values)
+ right_keys.append(right[rk]._values)
if lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
@@ -409,7 +409,7 @@ def _get_merge_keys(self):
left_drop.append(lk)
else:
right_keys.append(rk)
- left_keys.append(left[lk].values)
+ left_keys.append(left[lk]._values)
join_names.append(lk)
elif _any(self.left_on):
for k in self.left_on:
@@ -417,10 +417,10 @@ def _get_merge_keys(self):
left_keys.append(k)
join_names.append(None)
else:
- left_keys.append(left[k].values)
+ left_keys.append(left[k]._values)
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
- right_keys = [lev.values.take(lab)
+ right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
@@ -431,10 +431,10 @@ def _get_merge_keys(self):
right_keys.append(k)
join_names.append(None)
else:
- right_keys.append(right[k].values)
+ right_keys.append(right[k]._values)
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
- left_keys = [lev.values.take(lab)
+ left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
@@ -952,7 +952,7 @@ def get_result(self):
# stack blocks
if self.axis == 0:
- new_data = com._concat_compat([x.values for x in self.objs])
+ new_data = com._concat_compat([x._values for x in self.objs])
name = com._consensus_name_attr(self.objs)
return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat')
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index 34789a3c52cb7..50ae574c03067 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -579,21 +579,21 @@ def test_pivot_dtaccessor(self):
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9:[2, 5]},
- index=exp_idx, columns=[7, 8, 9])
+ index=exp_idx, columns=Index([7, 8, 9],name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month, columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9:[2, 5]},
- index=[1, 2], columns=[7, 8, 9])
+ index=Index([1, 2],name='dt2'), columns=Index([7, 8, 9],name='dt1'))
tm.assert_frame_equal(result, expected)
- result = pivot_table(df, index=df['dt2'].dt.year,
+ result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
- exp_col = MultiIndex.from_arrays([[7, 7, 8, 8, 9, 9], [1, 2] * 3])
+ exp_col = MultiIndex.from_arrays([[7, 7, 8, 8, 9, 9], [1, 2] * 3],names=['dt1','dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]],dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index a6b289b76af11..5062b7ead1de8 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -178,7 +178,7 @@ def sort_values(self, return_indexer=False, ascending=True):
return self._simple_new(sorted_values, **attribs)
- def take(self, indices, axis=0):
+ def take(self, indices, axis=0, **kwargs):
"""
Analogous to ndarray.take
"""
@@ -343,11 +343,6 @@ def _format_attrs(self):
if freq is not None:
freq = "'%s'" % freq
attrs.append(('freq',freq))
- elif attrib == 'tz':
- tz = self.tz
- if tz is not None:
- tz = "'%s'" % tz
- attrs.append(('tz',tz))
return attrs
@cache_readonly
@@ -451,9 +446,9 @@ def _add_delta_td(self, other):
inc = tslib._delta_to_nanoseconds(other)
mask = self.asi8 == tslib.iNaT
- new_values = (self.asi8 + inc).view(self.dtype)
+ new_values = (self.asi8 + inc).view('i8')
new_values[mask] = tslib.iNaT
- return new_values.view(self.dtype)
+ return new_values.view('i8')
def _add_delta_tdi(self, other):
# add a delta of a TimedeltaIndex
@@ -547,8 +542,7 @@ def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
- return self._simple_new(self.values.repeat(repeats),
- name=self.name)
+ return self._shallow_copy(self.values.repeat(repeats), freq=None)
def summary(self, name=None):
"""
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index 9a282bec2e9e4..ba9f2b8343a3d 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -9,6 +9,8 @@
from pandas import tslib
from pandas.core.common import (_NS_DTYPE, _TD_DTYPE, is_period_arraylike,
is_datetime_arraylike, is_integer_dtype, is_list_like,
+ is_datetime64_dtype, is_datetime64tz_dtype,
+ is_timedelta64_dtype,
get_dtype_kinds)
def is_datetimelike(data):
@@ -43,23 +45,24 @@ def maybe_to_datetimelike(data, copy=False):
raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
index = data.index
- if issubclass(data.dtype.type, np.datetime64):
- return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index)
- elif issubclass(data.dtype.type, np.timedelta64):
- return TimedeltaProperties(TimedeltaIndex(data, copy=copy, freq='infer'), index)
+ if is_datetime64_dtype(data.dtype) or is_datetime64tz_dtype(data.dtype):
+ return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index, name=data.name)
+ elif is_timedelta64_dtype(data.dtype):
+ return TimedeltaProperties(TimedeltaIndex(data, copy=copy, freq='infer'), index, name=data.name)
else:
if is_period_arraylike(data):
- return PeriodProperties(PeriodIndex(data, copy=copy), index)
+ return PeriodProperties(PeriodIndex(data, copy=copy), index, name=data.name)
if is_datetime_arraylike(data):
- return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index)
+ return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index, name=data.name)
raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
class Properties(PandasDelegate):
- def __init__(self, values, index):
+ def __init__(self, values, index, name):
self.values = values
self.index = index
+ self.name = name
def _delegate_property_get(self, name):
from pandas import Series
@@ -74,7 +77,7 @@ def _delegate_property_get(self, name):
return result
# return the result as a Series, which is by definition a copy
- result = Series(result, index=self.index)
+ result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a property of a datetimelike object are not "
@@ -95,7 +98,7 @@ def _delegate_method(self, name, *args, **kwargs):
if not com.is_list_like(result):
return result
- result = Series(result, index=self.index)
+ result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a method of a datetimelike object are not "
@@ -196,7 +199,7 @@ class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties):
def _concat_compat(to_concat, axis=0):
"""
provide concatenation of an datetimelike array of arrays each of which is a single
- M8[ns], or m8[ns] dtype
+    M8[ns], datetime64[ns, tz] or m8[ns] dtype
Parameters
----------
@@ -211,6 +214,10 @@ def _concat_compat(to_concat, axis=0):
def convert_to_pydatetime(x, axis):
# coerce to an object dtype
if x.dtype == _NS_DTYPE:
+
+ if hasattr(x, 'tz'):
+ x = x.asobject
+
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel())
x = x.reshape(shape)
@@ -218,10 +225,19 @@ def convert_to_pydatetime(x, axis):
shape = x.shape
x = tslib.ints_to_pytimedelta(x.view(np.int64).ravel())
x = x.reshape(shape)
+
return x
typs = get_dtype_kinds(to_concat)
+ # datetimetz
+ if 'datetimetz' in typs:
+
+ # we require ALL of the same tz for datetimetz
+ tzs = set([ getattr(x,'tz',None) for x in to_concat ])-set([None])
+ if len(tzs) == 1:
+ return DatetimeIndex(np.concatenate([ x.tz_localize(None).asi8 for x in to_concat ]), tz=list(tzs)[0])
+
# single dtype
if len(typs) == 1:
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index e471e66616711..d7eaab5a5a186 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -15,6 +15,7 @@
import pandas.tslib as tslib
import pandas._period as period
from pandas.tslib import Timedelta
+from pytz import AmbiguousTimeError
class FreqGroup(object):
FR_ANN = 1000
@@ -784,7 +785,7 @@ def _period_str_to_code(freqstr):
if freqstr in _rule_aliases:
new = _rule_aliases[freqstr]
warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, new),
- FutureWarning, stacklevel=6)
+ FutureWarning, stacklevel=3)
freqstr = new
freqstr = _lite_rule_alias.get(freqstr, freqstr)
@@ -793,7 +794,7 @@ def _period_str_to_code(freqstr):
if lower in _rule_aliases:
new = _rule_aliases[lower]
warnings.warn(_LEGACY_FREQ_WARNING.format(lower, new),
- FutureWarning, stacklevel=6)
+ FutureWarning, stacklevel=3)
freqstr = new
freqstr = _lite_rule_alias.get(lower, freqstr)
@@ -833,8 +834,8 @@ def infer_freq(index, warn=True):
import pandas as pd
if isinstance(index, com.ABCSeries):
- values = index.values
- if not (com.is_datetime64_dtype(index.values) or com.is_timedelta64_dtype(index.values) or values.dtype == object):
+ values = index._values
+ if not (com.is_datetime64_dtype(values) or com.is_timedelta64_dtype(values) or values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype on a Series of {0}".format(index.dtype))
index = values
@@ -850,7 +851,11 @@ def infer_freq(index, warn=True):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
index = index.values
- index = pd.DatetimeIndex(index)
+ try:
+ index = pd.DatetimeIndex(index)
+ except AmbiguousTimeError:
+ index = pd.DatetimeIndex(index.asi8)
+
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index c6c66a62b86b5..b1198f9758938 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -7,8 +7,11 @@
import numpy as np
from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE,
_values_from_object, _maybe_box,
+ is_object_dtype, is_datetime64_dtype,
+ is_datetimetz, is_dtype_equal,
ABCSeries, is_integer, is_float,
- is_object_dtype, is_datetime64_dtype)
+ DatetimeTZDtype)
+
from pandas.io.common import PerformanceWarning
from pandas.core.index import Index, Int64Index, Float64Index
import pandas.compat as compat
@@ -114,11 +117,12 @@ def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't have arguments
and breaks __new__ """
- # simply set the tz
# data are already in UTC
+ # so need to localize
tz = d.pop('tz',None)
result = cls.__new__(cls, **d)
- result.tz = tz
+ if tz is not None:
+ result = result.tz_localize('UTC').tz_convert(tz)
return result
class DatetimeIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
@@ -199,7 +203,7 @@ def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
- closed=None, ambiguous='raise', **kwargs):
+ closed=None, ambiguous='raise', dtype=None, **kwargs):
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
@@ -264,9 +268,16 @@ def __new__(cls, data=None,
dayfirst=dayfirst,
yearfirst=yearfirst)
+ if is_datetimetz(data):
+ # extract the data whether a Series or Index
+ if isinstance(data, ABCSeries):
+ data = data._values
+ tz = data.tz
+ data = data.tz_localize(None, ambiguous='infer').values
+
if issubclass(data.dtype.type, np.datetime64):
if isinstance(data, ABCSeries):
- data = data.values
+ data = data._values
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
@@ -290,7 +301,7 @@ def __new__(cls, data=None,
subarr = data.view(_NS_DTYPE)
else:
if isinstance(data, (ABCSeries, Index)):
- values = data.values
+ values = data._values
else:
values = data
@@ -304,7 +315,7 @@ def __new__(cls, data=None,
# make sure that we have a index/ndarray like (and not a Series)
if isinstance(subarr, ABCSeries):
- subarr = subarr.values
+ subarr = subarr._values
if subarr.dtype == np.object_:
subarr = tools._to_datetime(subarr, box=False)
@@ -312,7 +323,8 @@ def __new__(cls, data=None,
# tz aware
subarr = tools._to_datetime(data, box=False, utc=True)
- if not np.issubdtype(subarr.dtype, np.datetime64):
+ # we may not have been able to convert
+ if not (is_datetimetz(subarr) or np.issubdtype(subarr.dtype, np.datetime64)):
raise ValueError('Unable to convert %s to datetime dtype'
% str(data))
@@ -334,6 +346,16 @@ def __new__(cls, data=None,
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
+ # if dtype is provided, coerce here
+ if dtype is not None:
+
+ if not is_dtype_equal(subarr.dtype, dtype):
+
+ if subarr.tz is not None:
+ raise ValueError("cannot localize from non-UTC data")
+ dtype = DatetimeTZDtype.construct_from_string(dtype)
+ subarr = subarr.tz_localize(dtype.tz)
+
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
inferred = subarr.inferred_freq
@@ -498,16 +520,21 @@ def _local_timestamps(self):
return result.take(reverse)
@classmethod
- def _simple_new(cls, values, name=None, freq=None, tz=None, **kwargs):
+ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
"""
if not getattr(values,'dtype',None):
+ # empty, but with dtype compat
+ if values is None:
+ values = np.empty(0, dtype=_NS_DTYPE)
+ return cls(values, name=name, freq=freq, tz=tz, dtype=dtype, **kwargs)
values = np.array(values,copy=False)
+
if is_object_dtype(values):
- return cls(values, name=name, freq=freq, tz=tz, **kwargs).values
+ return cls(values, name=name, freq=freq, tz=tz, dtype=dtype, **kwargs).values
elif not is_datetime64_dtype(values):
values = com._ensure_int64(values).view(_NS_DTYPE)
@@ -690,7 +717,15 @@ def _add_delta(self, delta):
def _add_offset(self, offset):
try:
- return offset.apply_index(self)
+ if self.tz is not None:
+ values = self.tz_localize(None)
+ else:
+ values = self
+ result = offset.apply_index(values)
+ if self.tz is not None:
+ result = result.tz_localize(self.tz)
+ return result
+
except NotImplementedError:
warnings.warn("Non-vectorized DateOffset being applied to Series or DatetimeIndex",
PerformanceWarning)
@@ -716,6 +751,8 @@ def astype(self, dtype):
return self.asobject
elif dtype == _INT64_DTYPE:
return self.asi8.copy()
+ elif dtype == _NS_DTYPE and self.tz is not None:
+ return self.tz_convert('UTC').tz_localize(None)
else: # pragma: no cover
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
@@ -740,7 +777,8 @@ def to_series(self, keep_tz=False):
If the timezone is not set, the resulting
Series will have a datetime64[ns] dtype.
- Otherwise the Series will have an object dtype; the
+
+ Otherwise the Series will have an datetime64[ns, tz] dtype; the
tz will be preserved.
If keep_tz is False:
@@ -762,8 +800,11 @@ def _to_embed(self, keep_tz=False):
This is for internal compat
"""
if keep_tz and self.tz is not None:
- return self.asobject.values
- return self.values
+
+ # preserve the tz & copy
+ return self.copy(deep=True)
+
+ return self.values.copy()
def to_pydatetime(self):
"""
@@ -1477,9 +1518,11 @@ def inferred_type(self):
# sure we can't have ambiguous indexing
return 'datetime64'
- @property
+ @cache_readonly
def dtype(self):
- return _NS_DTYPE
+ if self.tz is None:
+ return _NS_DTYPE
+ return com.DatetimeTZDtype('ns',self.tz)
@property
def is_all_dates(self):
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 22eb1afb7b8bd..4c9726bbcf80d 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -111,32 +111,32 @@ def test_minmax(self):
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
- idx1 = DatetimeIndex([], freq='D')
- idx2 = DatetimeIndex(['2011-01-01'], freq='D')
- idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
- idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
- freq='H', tz='Asia/Tokyo')
- idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
- tz='US/Eastern')
- exp1 = """DatetimeIndex([], dtype='datetime64[ns]', freq='D', tz=None)"""
-
- exp2 = """DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D', tz=None)"""
-
- exp3 = """DatetimeIndex(['2011-01-01', '2011-01-02'], dtype='datetime64[ns]', freq='D', tz=None)"""
-
- exp4 = """DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq='D', tz=None)"""
-
- exp5 = """DatetimeIndex(['2011-01-01 09:00:00+09:00', '2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00'], dtype='datetime64[ns]', freq='H', tz='Asia/Tokyo')"""
-
- exp6 = """DatetimeIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', 'NaT'], dtype='datetime64[ns]', freq=None, tz='US/Eastern')"""
+ idx = []
+ idx.append(DatetimeIndex([], freq='D'))
+ idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
+ idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
+ idx.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
+ idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
+ freq='H', tz='Asia/Tokyo'))
+ idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
+ tz='US/Eastern'))
+ idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
+ tz='UTC'))
+
+ exp = []
+ exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
+ exp.append("""DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')""")
+ exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02'], dtype='datetime64[ns]', freq='D')""")
+ exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq='D')""")
+ exp.append("""DatetimeIndex(['2011-01-01 09:00:00+09:00', '2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='H')""")
+ exp.append("""DatetimeIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', 'NaT'], dtype='datetime64[ns, US/Eastern]', freq=None)""")
+ exp.append("""DatetimeIndex(['2011-01-01 09:00:00+00:00', '2011-01-01 10:00:00+00:00', 'NaT'], dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
- [exp1, exp2, exp3, exp4, exp5, exp6]):
+ for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
- result = getattr(idx, func)()
+ result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
@@ -164,15 +164,15 @@ def test_representation_to_series(self):
2 2011-01-03
dtype: datetime64[ns]"""
- exp5 = """0 2011-01-01 09:00:00+09:00
-1 2011-01-01 10:00:00+09:00
-2 2011-01-01 11:00:00+09:00
-dtype: object"""
+ exp5 = """0 2011-01-01 09:00:00+09:00
+1 2011-01-01 10:00:00+09:00
+2 2011-01-01 11:00:00+09:00
+dtype: datetime64[ns, Asia/Tokyo]"""
- exp6 = """0 2011-01-01 09:00:00-05:00
-1 2011-01-01 10:00:00-05:00
-2 NaN
-dtype: object"""
+ exp6 = """0 2011-01-01 09:00:00-05:00
+1 2011-01-01 10:00:00-05:00
+2 NaT
+dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index a9837e2794d58..84a4c3e08e493 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1101,6 +1101,60 @@ def test_to_datetime_array_of_dt64s(self):
)
)
+ def test_to_datetime_tz(self):
+
+ # xref 8260
+ # uniform returns a DatetimeIndex
+ arr = [pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]
+ result = pd.to_datetime(arr)
+ expected = DatetimeIndex(['2013-01-01 13:00:00','2013-01-02 14:00:00'],tz='US/Pacific')
+ tm.assert_index_equal(result, expected)
+
+ # mixed tzs will raise
+ arr = [pd.Timestamp('2013-01-01 13:00:00', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00', tz='US/Eastern')]
+ self.assertRaises(ValueError, lambda : pd.to_datetime(arr))
+
+ def test_to_datetime_tz_pytz(self):
+
+ # xref 8260
+ tm._skip_if_no_pytz()
+ import pytz
+
+ us_eastern = pytz.timezone('US/Eastern')
+ arr = np.array([us_eastern.localize(datetime(year=2000, month=1, day=1, hour=3, minute=0)),
+ us_eastern.localize(datetime(year=2000, month=6, day=1, hour=3, minute=0))],dtype=object)
+ result = pd.to_datetime(arr, utc=True)
+ expected = DatetimeIndex(['2000-01-01 08:00:00+00:00', '2000-06-01 07:00:00+00:00'], dtype='datetime64[ns, UTC]', freq=None)
+ tm.assert_index_equal(result, expected)
+
+ def test_to_datetime_tz_psycopg2(self):
+
+ # xref 8260
+ try:
+ import psycopg2
+ except ImportError:
+ raise nose.SkipTest("no psycopg2 installed")
+
+ # misc cases
+ arr = np.array([ datetime(2000, 1, 1, 3, 0, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)),
+ datetime(2000, 6, 1, 3, 0, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None))], dtype=object)
+
+ result = pd.to_datetime(arr, errors='coerce', utc=True)
+ expected = DatetimeIndex(['2000-01-01 08:00:00+00:00', '2000-06-01 07:00:00+00:00'], dtype='datetime64[ns, UTC]', freq=None)
+ tm.assert_index_equal(result, expected)
+
+ # dtype coercion
+ i = pd.DatetimeIndex(['2000-01-01 08:00:00+00:00'],tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None))
+ self.assertFalse(com.is_datetime64_ns_dtype(i))
+
+        # tz coercion
+ result = pd.to_datetime(i, errors='coerce')
+ tm.assert_index_equal(result, i)
+
+ result = pd.to_datetime(i, errors='coerce', utc=True)
+ expected = pd.DatetimeIndex(['2000-01-01 13:00:00'])
+ tm.assert_index_equal(result, expected)
+
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
@@ -2139,6 +2193,12 @@ def test_astype(self):
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
+ # with tz
+ rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
+ result = rng.astype('datetime64[ns]')
+ expected = date_range('1/1/2000', periods=10, tz='US/Eastern').tz_convert('UTC').tz_localize(None)
+ tm.assert_index_equal(result, expected)
+
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
@@ -2471,6 +2531,15 @@ def test_datetime64_with_DateOffset(self):
exp = klass(date_range('1999-01-01', '1999-01-31'))
assert_func(result, exp)
+ s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
+ pd.Timestamp('2000-02-15', tz='US/Central')])
+ result = s + pd.offsets.Day()
+ result2 = pd.offsets.Day() + s
+ exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
+ Timestamp('2000-02-16', tz='US/Central')])
+ assert_func(result, exp)
+ assert_func(result2, exp)
+
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')])
result = s + pd.offsets.MonthEnd()
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index b4b5576a5b268..a6e5812158474 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -17,7 +17,7 @@
from pytz import NonExistentTimeError
import pandas.util.testing as tm
-
+from pandas.core.dtypes import DatetimeTZDtype
from pandas.util.testing import assert_frame_equal
from pandas.compat import lrange, zip
@@ -669,7 +669,8 @@ def test_frame_no_datetime64_dtype(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))
e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)
- self.assertEqual(e['B'].dtype, 'O')
+ tz_expected = DatetimeTZDtype('ns',dr_tz.tzinfo)
+ self.assertEqual(e['B'].dtype, tz_expected)
# GH 2810 (with timezones)
datetimes_naive = [ ts.to_pydatetime() for ts in dr ]
@@ -677,8 +678,8 @@ def test_frame_no_datetime64_dtype(self):
df = DataFrame({'dr' : dr, 'dr_tz' : dr_tz,
'datetimes_naive': datetimes_naive,
'datetimes_with_tz' : datetimes_with_tz })
- result = df.get_dtype_counts()
- expected = Series({ 'datetime64[ns]' : 2, 'object' : 2 })
+ result = df.get_dtype_counts().sort_index()
+ expected = Series({ 'datetime64[ns]' : 2, str(tz_expected) : 2 }).sort_index()
tm.assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 85bae42e7a492..fadad91e6842a 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -878,7 +878,7 @@ def test_intraday_conversion_factors(self):
def test_period_ordinal_start_values(self):
# information for 1.1.1970
- self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('Y')))
+ self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('A')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M')))
self.assertEqual(1, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D')))
@@ -943,6 +943,12 @@ def compare_local_to_utc(tz_didx, utc_didx):
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
+ # Check all-NaT array
+ result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
+ tslib.maybe_get_tz('US/Eastern'),
+ tslib.maybe_get_tz('Asia/Tokyo'))
+ self.assert_numpy_array_equal(result, np.array([tslib.iNaT], dtype=np.int64))
+
class TestTimestampOps(tm.TestCase):
def test_timestamp_and_datetime(self):
self.assertEqual((Timestamp(datetime.datetime(2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1)
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index 282e1d603ed84..11200bb2540cd 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -58,7 +58,7 @@ def _convert_listlike(arg, box, unit, name=None):
return arg
elif isinstance(arg, ABCSeries):
from pandas import Series
- values = _convert_listlike(arg.values, box=False, unit=unit)
+ values = _convert_listlike(arg._values, box=False, unit=unit)
return Series(values, index=arg.index, name=arg.name, dtype='m8[ns]')
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, box=box, unit=unit, name=arg.name)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 521679f21dc93..5d98088493bd5 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -292,6 +292,14 @@ def _convert_listlike(arg, box, format, name=None):
pass
return arg
+
+ elif com.is_datetime64tz_dtype(arg):
+ if not isinstance(arg, DatetimeIndex):
+ return DatetimeIndex(arg, tz='utc' if utc else None)
+ if utc:
+ arg = arg.tz_convert(None)
+ return arg
+
elif format is None and com.is_integer_dtype(arg) and unit=='ns':
result = arg.astype('datetime64[ns]')
if box:
@@ -371,7 +379,7 @@ def _convert_listlike(arg, box, format, name=None):
elif isinstance(arg, tslib.Timestamp):
return arg
elif isinstance(arg, Series):
- values = _convert_listlike(arg.values, False, format)
+ values = _convert_listlike(arg._values, False, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, box, format, name=arg.name)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index a8b573ab6788e..7741747103c55 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -1979,12 +1979,15 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
for i in range(n):
val = values[i]
- # set as nan if is even a datetime NaT
+ # set as nan except if its a NaT
if _checknull_with_nat(val):
- oresult[i] = np.nan
- elif util.is_datetime64_object(val):
if val is np_NaT or val.view('i8') == iNaT:
+ oresult[i] = NaT
+ else:
oresult[i] = np.nan
+ elif util.is_datetime64_object(val):
+ if val is np_NaT or val.view('i8') == iNaT:
+ oresult[i] = NaT
else:
oresult[i] = val.item()
else:
@@ -3318,7 +3321,7 @@ except:
def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
cdef:
- ndarray[int64_t] utc_dates, result, trans, deltas
+ ndarray[int64_t] utc_dates, tt, result, trans, deltas
Py_ssize_t i, pos, n = len(vals)
int64_t v, offset
pandas_datetimestruct dts
@@ -3337,27 +3340,38 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
if _is_tzlocal(tz1):
for i in range(n):
v = vals[i]
- pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
- dt = datetime(dts.year, dts.month, dts.day, dts.hour,
- dts.min, dts.sec, dts.us, tz1)
- delta = (int(total_seconds(_get_utcoffset(tz1, dt)))
- * 1000000000)
- utc_dates[i] = v - delta
+ if v == iNaT:
+ utc_dates[i] = iNaT
+ else:
+ pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
+ dt = datetime(dts.year, dts.month, dts.day, dts.hour,
+ dts.min, dts.sec, dts.us, tz1)
+ delta = (int(total_seconds(_get_utcoffset(tz1, dt)))
+ * 1000000000)
+ utc_dates[i] = v - delta
else:
trans, deltas, typ = _get_dst_info(tz1)
+ # all-NaT
+ tt = vals[vals!=iNaT]
+ if not len(tt):
+ return vals
+
trans_len = len(trans)
- pos = trans.searchsorted(vals[0]) - 1
+ pos = trans.searchsorted(tt[0]) - 1
if pos < 0:
raise ValueError('First time before start of DST info')
offset = deltas[pos]
for i in range(n):
v = vals[i]
- while pos + 1 < trans_len and v >= trans[pos + 1]:
- pos += 1
- offset = deltas[pos]
- utc_dates[i] = v - offset
+ if v == iNaT:
+ utc_dates[i] = iNaT
+ else:
+ while pos + 1 < trans_len and v >= trans[pos + 1]:
+ pos += 1
+ offset = deltas[pos]
+ utc_dates[i] = v - offset
else:
utc_dates = vals
@@ -3368,18 +3382,26 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
if _is_tzlocal(tz2):
for i in range(n):
v = utc_dates[i]
- pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
- dt = datetime(dts.year, dts.month, dts.day, dts.hour,
- dts.min, dts.sec, dts.us, tz2)
- delta = int(total_seconds(_get_utcoffset(tz2, dt))) * 1000000000
- result[i] = v + delta
+ if v == iNaT:
+ result[i] = iNaT
+ else:
+ pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
+ dt = datetime(dts.year, dts.month, dts.day, dts.hour,
+ dts.min, dts.sec, dts.us, tz2)
+ delta = int(total_seconds(_get_utcoffset(tz2, dt))) * 1000000000
+ result[i] = v + delta
return result
# Convert UTC to other timezone
trans, deltas, typ = _get_dst_info(tz2)
trans_len = len(trans)
- pos = trans.searchsorted(utc_dates[0]) - 1
+ # use first non-NaT element
+ # if all-NaT, return all-NaT
+ if (result==iNaT).all():
+ return result
+
+ pos = trans.searchsorted(utc_dates[utc_dates!=iNaT][0]) - 1
if pos < 0:
raise ValueError('First time before start of DST info')
@@ -3387,7 +3409,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
offset = deltas[pos]
for i in range(n):
v = utc_dates[i]
- if vals[i] == NPY_NAT:
+ if vals[i] == iNaT:
result[i] = vals[i]
else:
while pos + 1 < trans_len and v >= trans[pos + 1]:
@@ -3434,6 +3456,7 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
dts.min, dts.sec, dts.us, tz2)
delta = int(total_seconds(_get_utcoffset(tz2, dt))) * 1000000000
return utc_date + delta
+
# Convert UTC to other timezone
trans, deltas, typ = _get_dst_info(tz2)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 878bfdf3ac9fd..0dad2da4ab2c5 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -23,9 +23,11 @@
import numpy as np
import pandas as pd
-from pandas.core.common import (is_sequence, array_equivalent, is_list_like,
+from pandas.core.common import (is_sequence, array_equivalent, is_list_like, is_number,
is_datetimelike_v_numeric, is_datetimelike_v_object,
- is_number, pprint_thing, take_1d)
+ is_number, pprint_thing, take_1d,
+ needs_i8_conversion)
+
import pandas.compat as compat
from pandas.compat import(
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
@@ -902,7 +904,7 @@ def assert_series_equal(left, right, check_dtype=True,
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check the values in that case
- if is_datetimelike_v_numeric(left, right) or is_datetimelike_v_object(left, right):
+ if is_datetimelike_v_numeric(left, right) or is_datetimelike_v_object(left, right) or needs_i8_conversion(left) or needs_i8_conversion(right):
# datetimelike may have different objects (e.g. datetime.datetime vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py
index 4c74688ce660e..7c821374a83ab 100644
--- a/vb_suite/binary_ops.py
+++ b/vb_suite/binary_ops.py
@@ -172,3 +172,28 @@
start_date=datetime(2013, 1, 1))
timestamp_ops_diff2 = Benchmark("s-s.shift()", setup,
start_date=datetime(2013, 1, 1))
+
+#----------------------------------------------------------------------
+# timeseries with tz
+
+setup = common_setup + """
+N = 10000
+halfway = N // 2 - 1
+s = Series(date_range('20010101', periods=N, freq='T', tz='US/Eastern'))
+ts = s[halfway]
+"""
+
+timestamp_tz_series_compare = Benchmark("ts >= s", setup,
+ start_date=datetime(2013, 9, 27))
+series_timestamp_tz_compare = Benchmark("s <= ts", setup,
+ start_date=datetime(2012, 2, 21))
+
+setup = common_setup + """
+N = 10000
+s = Series(date_range('20010101', periods=N, freq='s', tz='US/Eastern'))
+"""
+
+timestamp_tz_ops_diff1 = Benchmark("s.diff()", setup,
+ start_date=datetime(2013, 1, 1))
+timestamp_tz_ops_diff2 = Benchmark("s-s.shift()", setup,
+ start_date=datetime(2013, 1, 1))
diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py
index 7e10b333d5c56..15bc89d62305f 100644
--- a/vb_suite/timeseries.py
+++ b/vb_suite/timeseries.py
@@ -110,7 +110,7 @@ def date_range(start=None, end=None, periods=None, freq=None):
start_date=datetime(2012, 4, 27))
#----------------------------------------------------------------------
-# Time zone stuff
+# Time zone
setup = common_setup + """
rng = date_range(start='1/1/2000', end='3/1/2000', tz='US/Eastern')
| closes #8260
closes #10763
ToDos:
- [x] doc updates
- [x] test with `Series.dt.*`
- [x] test with csv/HDF5
- [x] nat setting borked ATM
- [x] HDF5 example from 0.16.2
~~\- [ ] get_values/values - make consistent~~
~~\- [ ] maybe move `DatetimeTZBlock.shift` mostly to `DatetimeIndex.shift`~~
Also
- This cleans up the internal blocks calling conventions a bit
- Fixes a bug in `DatetimeIndex` and localizing when NaT's are present
```
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
timestamp_tz_ops_diff1 | 0.1804 | 159.4527 | 0.0011 | # note these are 10k element
timestamp_tz_ops_diff2 | 2.5350 | 156.4047 | 0.0162 | # note these are 10k elements
timeseries_timestamp_downsample_mean | 3.1467 | 3.3040 | 0.9524 |
timestamp_series_compare | 9.0797 | 9.1290 | 0.9946 |
timestamp_ops_diff2 | 19.7570 | 19.6819 | 1.0038 | # this is 1M elements
series_timestamp_compare | 9.3430 | 9.0226 | 1.0355 |
timestamp_ops_diff1 | 9.7457 | 9.0450 | 1.0775 | # this is 1M elements
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [8502474] : API: add Block.make_block
API: add DatetimeBlockWithTZ #8260
Base [16a44ad] : Merge pull request #10199 from jreback/gil
PERF: releasing the GIL, #8882
```
Demo
```
In [1]: df = DataFrame({'A' : date_range('20130101',periods=3),
...: 'B' : date_range('20130101',periods=3,tz='US/Eastern'),
...: 'C' : date_range('20130101',periods=3,tz='CET')})
In [2]: df
Out[2]:
A B C
0 2013-01-01 2013-01-01 05:00:00 2012-12-31 23:00:00
1 2013-01-02 2013-01-02 05:00:00 2013-01-01 23:00:00
2 2013-01-03 2013-01-03 05:00:00 2013-01-02 23:00:00
In [3]: df.dtypes
Out[3]:
A datetime64[ns]
B datetime64[ns, US/Eastern]
C datetime64[ns, CET]
dtype: object
In [4]: df.B
Out[4]:
0 2013-01-01 00:00:00-05:00
1 2013-01-02 00:00:00-05:00
2 2013-01-03 00:00:00-05:00
Name: B, dtype: datetime64[ns, US/Eastern]
In [5]: df.B.dt.tz_localize(None)
Out[5]:
0 2013-01-01
1 2013-01-02
2 2013-01-03
dtype: datetime64[ns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10477 | 2015-06-30T15:44:09Z | 2015-09-05T16:17:23Z | 2015-09-05T16:17:23Z | 2015-09-05T16:17:23Z |
BUG: Enable complex values to be written to HDF | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 28ec828b81c34..a3ec13439fe76 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -27,6 +27,7 @@ New features
~~~~~~~~~~~~
- SQL io functions now accept a SQLAlchemy connectable. (:issue:`7877`)
+- Enable writing complex values to HDF stores when using table format (:issue:`10447`)
.. _whatsnew_0170.enhancements.other:
@@ -147,3 +148,4 @@ Bug Fixes
- Bug in `groupby.var` which caused variance to be inaccurate for small float values (:issue:`10448`)
- Bug in ``Series.plot(kind='hist')`` Y Label not informative (:issue:`10485`)
+
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index eb800c37db98f..9e1a272ec5621 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1773,6 +1773,8 @@ def set_kind(self):
self.kind = 'string'
elif dtype.startswith(u('float')):
self.kind = 'float'
+ elif dtype.startswith(u('complex')):
+ self.kind = 'complex'
elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
self.kind = 'integer'
elif dtype.startswith(u('date')):
@@ -1802,6 +1804,8 @@ def set_atom(self, block, block_items, existing_col, min_itemsize,
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
+ elif block.is_complex:
+ return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values)
@@ -1936,6 +1940,12 @@ def get_atom_coltype(self, kind=None):
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)(shape=block.shape[0])
+ def set_atom_complex(self, block):
+ self.kind = block.dtype.name
+ itemsize = int(self.kind.split('complex')[-1]) // 8
+ self.typ = _tables().ComplexCol(itemsize=itemsize, shape=block.shape[0])
+ self.set_data(block.values.astype(self.typ.type, copy=False))
+
def set_atom_data(self, block):
self.kind = block.dtype.name
self.typ = self.get_atom_data(block)
@@ -3147,8 +3157,8 @@ def f(i, c):
def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
- note: cannot index Time64Col() currently; PyTables must be >= 2.3
-
+ note: cannot index Time64Col() or ComplexCol currently;
+ PyTables must be >= 3.0
Paramaters
----------
@@ -3203,6 +3213,12 @@ def create_index(self, columns=None, optlevel=None, kind=None):
# create the index
if not v.is_indexed:
+ if v.type.startswith('complex'):
+ raise TypeError('Columns containing complex values can be stored but cannot'
+ ' be indexed when using table format. Either use fixed '
+ 'format, set index=False, or do not include the columns '
+ 'containing complex values to data_columns when '
+ 'initializing the table.')
v.create_index(**kw)
def read_axes(self, where, **kwargs):
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 4ae2c331f5a65..1b932fb3759e5 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -131,18 +131,18 @@ def compat_assert_produces_warning(w,f):
f()
-class TestHDFStore(tm.TestCase):
+class Base(tm.TestCase):
@classmethod
def setUpClass(cls):
- super(TestHDFStore, cls).setUpClass()
+ super(Base, cls).setUpClass()
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def tearDownClass(cls):
- super(TestHDFStore, cls).tearDownClass()
+ super(Base, cls).tearDownClass()
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
@@ -155,6 +155,9 @@ def setUp(self):
def tearDown(self):
pass
+
+class TestHDFStore(Base):
+
def test_factory_fun(self):
path = create_tempfile(self.path)
try:
@@ -4743,6 +4746,146 @@ def test_read_nokey(self):
df.to_hdf(path, 'df2', mode='a')
self.assertRaises(ValueError, read_hdf, path)
+
+class TestHDFComplexValues(Base):
+ # GH10447
+ def test_complex_fixed(self):
+ df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df')
+ reread = read_hdf(path, 'df')
+ assert_frame_equal(df, reread)
+
+ df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df')
+ reread = read_hdf(path, 'df')
+ assert_frame_equal(df, reread)
+
+ def test_complex_table(self):
+ df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df', format='table')
+ reread = read_hdf(path, 'df')
+ assert_frame_equal(df, reread)
+
+ df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df', format='table', mode='w')
+ reread = read_hdf(path, 'df')
+ assert_frame_equal(df, reread)
+
+ def test_complex_mixed_fixed(self):
+ complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
+ dtype=np.complex128)
+ df = DataFrame({'A': [1, 2, 3, 4],
+ 'B': ['a', 'b', 'c', 'd'],
+ 'C': complex64,
+ 'D': complex128,
+ 'E': [1.0, 2.0, 3.0, 4.0]},
+ index=list('abcd'))
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df')
+ reread = read_hdf(path, 'df')
+ assert_frame_equal(df, reread)
+
+ def test_complex_mixed_table(self):
+ complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
+ dtype=np.complex128)
+ df = DataFrame({'A': [1, 2, 3, 4],
+ 'B': ['a', 'b', 'c', 'd'],
+ 'C': complex64,
+ 'D': complex128,
+ 'E': [1.0, 2.0, 3.0, 4.0]},
+ index=list('abcd'))
+
+ with ensure_clean_store(self.path) as store:
+ store.append('df', df, data_columns=['A', 'B'])
+ result = store.select('df', where=Term('A>2'))
+ assert_frame_equal(df.loc[df.A > 2], result)
+
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df', format='table')
+ reread = read_hdf(path, 'df')
+ assert_frame_equal(df, reread)
+
+ def test_complex_across_dimensions_fixed(self):
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list('abcd'))
+ df = DataFrame({'A': s, 'B': s})
+ p = Panel({'One': df, 'Two': df})
+
+ objs = [s, df, p]
+ comps = [tm.assert_series_equal, tm.assert_frame_equal,
+ tm.assert_panel_equal]
+ for obj, comp in zip(objs, comps):
+ with ensure_clean_path(self.path) as path:
+ obj.to_hdf(path, 'obj', format='fixed')
+ reread = read_hdf(path, 'obj')
+ comp(obj, reread)
+
+ def test_complex_across_dimensions(self):
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list('abcd'))
+ df = DataFrame({'A': s, 'B': s})
+ p = Panel({'One': df, 'Two': df})
+ p4d = pd.Panel4D({'i': p, 'ii': p})
+
+ objs = [df, p, p4d]
+ comps = [tm.assert_frame_equal, tm.assert_panel_equal,
+ tm.assert_panel4d_equal]
+ for obj, comp in zip(objs, comps):
+ with ensure_clean_path(self.path) as path:
+ obj.to_hdf(path, 'obj', format='table')
+ reread = read_hdf(path, 'obj')
+ comp(obj, reread)
+
+ def test_complex_indexing_error(self):
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
+ dtype=np.complex128)
+ df = DataFrame({'A': [1, 2, 3, 4],
+ 'B': ['a', 'b', 'c', 'd'],
+ 'C': complex128},
+ index=list('abcd'))
+ with ensure_clean_store(self.path) as store:
+ self.assertRaises(TypeError, store.append, 'df', df, data_columns=['C'])
+
+ def test_complex_series_error(self):
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list('abcd'))
+
+ with ensure_clean_path(self.path) as path:
+ self.assertRaises(TypeError, s.to_hdf, path, 'obj', format='t')
+
+ with ensure_clean_path(self.path) as path:
+ s.to_hdf(path, 'obj', format='t', index=False)
+ reread = read_hdf(path, 'obj')
+ tm.assert_series_equal(s, reread)
+
+ def test_complex_append(self):
+ df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
+ 'b': np.random.randn(100)})
+
+ with ensure_clean_store(self.path) as store:
+ store.append('df', df, data_columns=['b'])
+ store.append('df', df)
+ result = store.select('df')
+ assert_frame_equal(pd.concat([df, df], 0), result)
+
+
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
| Enable table format to be used to store complex values
in DataFrames, Panels and Panel4Ds.
Add tests for both fixed and panel.
Add exception when attempting to write Series with complex values.
closes #10447
| https://api.github.com/repos/pandas-dev/pandas/pulls/10473 | 2015-06-29T19:27:56Z | 2015-07-13T12:42:41Z | 2015-07-13T12:42:41Z | 2015-07-13T13:22:09Z |
ENH: Make group_var_ use Welford's algorithm. | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 2dbed08aa02f3..08222ef06d21f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -135,8 +135,11 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
+
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
- Bug in `pandas.read_csv` with ``index_col=False`` or with ``index_col=['a', 'b']`` (:issue:`10413`, :issue:`10467`)
- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)
+
+- Bug in `groupby.var` which caused variance to be inaccurate for small float values (:issue:`10448`)
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 4cffb663748a4..53fcdb61bd1ae 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -1147,58 +1147,43 @@ def group_prod_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
group_var_template = """@cython.wraparound(False)
@cython.boundscheck(False)
+@cython.cdivision(True)
def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
ndarray[int64_t] counts,
ndarray[%(dest_type2)s, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- %(dest_type2)s val, ct
- ndarray[%(dest_type2)s, ndim=2] nobs, sumx, sumxx
+ %(dest_type2)s val, ct, oldmean
+ ndarray[%(dest_type2)s, ndim=2] nobs, mean
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
+ mean = np.zeros_like(out)
N, K = (<object> values).shape
- with nogil:
- if K > 1:
- for i in range(N):
-
- lab = labels[i]
- if lab < 0:
- continue
+ out[:, :] = 0.0
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- sumxx[lab, j] += val * val
- else:
- for i in range(N):
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- lab = labels[i]
- if lab < 0:
- continue
+ counts[lab] += 1
- counts[lab] += 1
- val = values[i, 0]
+ for j in range(K):
+ val = values[i, j]
# not nan
if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
- sumxx[lab, 0] += val * val
-
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
for i in range(ncounts):
for j in range(K):
@@ -1206,8 +1191,8 @@ def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
if ct < 2:
out[i, j] = NAN
else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
+ out[i, j] /= (ct - 1)
+
"""
group_var_bin_template = """@cython.wraparound(False)
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index c76838c4a49c9..db0e96d158f0c 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -7232,58 +7232,43 @@ def group_prod_bin_float32(ndarray[float32_t, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
+@cython.cdivision(True)
def group_var_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float64_t, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, ct
- ndarray[float64_t, ndim=2] nobs, sumx, sumxx
+ float64_t val, ct, oldmean
+ ndarray[float64_t, ndim=2] nobs, mean
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
+ mean = np.zeros_like(out)
N, K = (<object> values).shape
- with nogil:
- if K > 1:
- for i in range(N):
-
- lab = labels[i]
- if lab < 0:
- continue
+ out[:, :] = 0.0
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- sumxx[lab, j] += val * val
- else:
- for i in range(N):
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- lab = labels[i]
- if lab < 0:
- continue
+ counts[lab] += 1
- counts[lab] += 1
- val = values[i, 0]
+ for j in range(K):
+ val = values[i, j]
# not nan
if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
- sumxx[lab, 0] += val * val
-
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
for i in range(ncounts):
for j in range(K):
@@ -7291,63 +7276,48 @@ def group_var_float64(ndarray[float64_t, ndim=2] out,
if ct < 2:
out[i, j] = NAN
else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
+ out[i, j] /= (ct - 1)
+
@cython.wraparound(False)
@cython.boundscheck(False)
+@cython.cdivision(True)
def group_var_float32(ndarray[float32_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float32_t, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, ct
- ndarray[float32_t, ndim=2] nobs, sumx, sumxx
+ float32_t val, ct, oldmean
+ ndarray[float32_t, ndim=2] nobs, mean
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
+ mean = np.zeros_like(out)
N, K = (<object> values).shape
- with nogil:
- if K > 1:
- for i in range(N):
-
- lab = labels[i]
- if lab < 0:
- continue
+ out[:, :] = 0.0
- counts[lab] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- sumx[lab, j] += val
- sumxx[lab, j] += val * val
- else:
- for i in range(N):
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
- lab = labels[i]
- if lab < 0:
- continue
+ counts[lab] += 1
- counts[lab] += 1
- val = values[i, 0]
+ for j in range(K):
+ val = values[i, j]
# not nan
if val == val:
- nobs[lab, 0] += 1
- sumx[lab, 0] += val
- sumxx[lab, 0] += val * val
-
+ nobs[lab, j] += 1
+ oldmean = mean[lab, j]
+ mean[lab, j] += (val - oldmean) / nobs[lab, j]
+ out[lab, j] += (val - mean[lab, j]) * (val - oldmean)
for i in range(ncounts):
for j in range(K):
@@ -7355,8 +7325,8 @@ def group_var_float32(ndarray[float32_t, ndim=2] out,
if ct < 2:
out[i, j] = NAN
else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
+ out[i, j] /= (ct - 1)
+
@cython.wraparound(False)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 8192f6e99116b..138ef92831b2a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -2,6 +2,7 @@
from pandas.compat import range
import numpy as np
+from numpy.random import RandomState
from pandas.core.api import Series, Categorical
import pandas as pd
@@ -10,6 +11,7 @@
import pandas.util.testing as tm
import pandas.hashtable as hashtable
+
class TestMatch(tm.TestCase):
_multiprocess_can_split_ = True
@@ -285,6 +287,125 @@ def test_dropna(self):
pd.Series([10.3, 5., 5., None]).value_counts(dropna=False),
pd.Series([2, 1, 1], index=[5., 10.3, np.nan]))
+
+class GroupVarTestMixin(object):
+
+ def test_group_var_generic_1d(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((5, 1))).astype(self.dtype)
+ counts = np.zeros(5, dtype=int)
+ values = 10 * prng.rand(15, 1).astype(self.dtype)
+ labels = np.tile(np.arange(5), (3, ))
+
+ expected_out = (np.squeeze(values)
+ .reshape((5, 3), order='F')
+ .std(axis=1, ddof=1) ** 2)[:, np.newaxis]
+ expected_counts = counts + 3
+
+ self.algo(out, counts, values, labels)
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_generic_1d_flat_labels(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((1, 1))).astype(self.dtype)
+ counts = np.zeros(1, dtype=int)
+ values = 10 * prng.rand(5, 1).astype(self.dtype)
+ labels = np.zeros(5, dtype=int)
+
+ expected_out = np.array([[values.std(ddof=1) ** 2]])
+ expected_counts = counts + 5
+
+ self.algo(out, counts, values, labels)
+
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_generic_2d_all_finite(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
+ counts = np.zeros(5, dtype=int)
+ values = 10 * prng.rand(10, 2).astype(self.dtype)
+ labels = np.tile(np.arange(5), (2, ))
+
+ expected_out = np.std(
+ values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
+ expected_counts = counts + 2
+
+ self.algo(out, counts, values, labels)
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_generic_2d_some_nan(self):
+ prng = RandomState(1234)
+
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
+ counts = np.zeros(5, dtype=int)
+ values = 10 * prng.rand(10, 2).astype(self.dtype)
+ values[:, 1] = np.nan
+ labels = np.tile(np.arange(5), (2, ))
+
+ expected_out = np.vstack([
+ values[:, 0].reshape(5, 2, order='F').std(ddof=1, axis=1) ** 2,
+ np.nan * np.ones(5)
+ ]).T
+ expected_counts = counts + 2
+
+ self.algo(out, counts, values, labels)
+ np.testing.assert_allclose(out, expected_out, self.rtol)
+ tm.assert_array_equal(counts, expected_counts)
+
+ def test_group_var_constant(self):
+ # Regression test from GH 10448.
+
+ out = np.array([[np.nan]], dtype=self.dtype)
+ counts = np.array([0])
+ values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
+ labels = np.zeros(3, dtype=np.int)
+
+ self.algo(out, counts, values, labels)
+
+ self.assertEqual(counts[0], 3)
+ self.assertTrue(out[0, 0] >= 0) # Python 2.6 has no assertGreaterEqual
+ tm.assert_almost_equal(out[0, 0], 0.0)
+
+
+class TestGroupVarFloat64(tm.TestCase, GroupVarTestMixin):
+ __test__ = True
+ _multiprocess_can_split_ = True
+
+ algo = algos.algos.group_var_float64
+ dtype = np.float64
+ rtol = 1e-5
+
+ def test_group_var_large_inputs(self):
+
+ prng = RandomState(1234)
+
+ out = np.array([[np.nan]], dtype=self.dtype)
+ counts = np.array([0])
+ values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
+ values.shape = (10 ** 6, 1)
+ labels = np.zeros(10 ** 6, dtype=np.int)
+
+ self.algo(out, counts, values, labels)
+
+ self.assertEqual(counts[0], 10 ** 6)
+ tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
+
+
+class TestGroupVarFloat32(tm.TestCase, GroupVarTestMixin):
+ __test__ = True
+ _multiprocess_can_split_ = True
+
+ algo = algos.algos.group_var_float32
+ dtype = np.float32
+ rtol = 1e-2
+
+
def test_quantile():
s = Series(np.random.randn(100))
| closes #10448
This PR reimplements the Cython functions `group_var_float64` and `group_var_float32` to use Welford's algorithm, rather than the sum-of-squares method, which is numerically unstable. This came up in #10448; see also #10242 for more discussion.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10472 | 2015-06-29T17:23:27Z | 2015-07-08T14:19:41Z | 2015-07-08T14:19:41Z | 2015-07-08T21:19:54Z |
BUG: Fix csv_read bugs when using empty input. GH10467 & GH10413 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index da16734dc873b..6b29854144456 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -134,3 +134,5 @@ Bug Fixes
- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)
- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)
+
+- Bug in `pandas.read_csv` with ``index_col=False`` or with ``index_col=['a', 'b']`` (:issue:`10413`, :issue:`10467`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index dd972150de0fe..7d4c9df64c0bb 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2223,13 +2223,14 @@ def _clean_index_names(columns, index_col):
def _get_empty_meta(columns, index_col, index_names):
columns = list(columns)
- if index_col is not None:
- index = MultiIndex.from_arrays([[]] * len(index_col),
- names=index_names)
- for n in index_col:
- columns.pop(n)
- else:
+ if index_col is None or index_col is False:
index = Index([])
+ else:
+ index_col = list(index_col)
+ index = MultiIndex.from_arrays([[]] * len(index_col), names=index_names)
+ index_col.sort()
+ for i, n in enumerate(index_col):
+ columns.pop(n-i)
return index, columns, {}
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 52430bb6e0999..3dae9f383db8f 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2301,6 +2301,81 @@ def test_empty_with_index(self):
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
+ def test_emtpy_with_multiindex(self):
+ # GH 10467
+ data = 'x,y,z'
+ result = self.read_csv(StringIO(data), index_col=['x', 'y'])
+ expected = DataFrame([], columns=['z'],
+ index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_with_reversed_multiindex(self):
+ data = 'x,y,z'
+ result = self.read_csv(StringIO(data), index_col=[1, 0])
+ expected = DataFrame([], columns=['z'],
+ index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
+ tm.assert_frame_equal(result, expected)
+
+ def test_empty_index_col_scenarios(self):
+ data = 'x,y,z'
+
+ # None, no index
+ index_col, expected = None, DataFrame([], columns=list('xyz')),
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # False, no index
+ index_col, expected = False, DataFrame([], columns=list('xyz')),
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # int, first column
+ index_col, expected = 0, DataFrame([], columns=['y', 'z'], index=Index([], name='x'))
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # int, not first column
+ index_col, expected = 1, DataFrame([], columns=['x', 'z'], index=Index([], name='y'))
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # str, first column
+ index_col, expected = 'x', DataFrame([], columns=['y', 'z'], index=Index([], name='x'))
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # str, not the first column
+ index_col, expected = 'y', DataFrame([], columns=['x', 'z'], index=Index([], name='y'))
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # list of int
+ index_col, expected = [0, 1], DataFrame([], columns=['z'],
+ index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # list of str
+ index_col, expected = (
+ ['x', 'y'],
+ DataFrame([], columns=['z'], index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
+ )
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # list of int, reversed sequence
+ index_col, expected = (
+ [1, 0],
+ DataFrame([], columns=['z'], index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
+ )
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ # list of str, reversed sequence
+ index_col, expected = (
+ ['y', 'x'],
+ DataFrame([], columns=['z'], index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
+ )
+ tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected)
+
+ def test_empty_with_index_col_false(self):
+ # GH 10413
+ data = 'x,y'
+ result = self.read_csv(StringIO(data), index_col=False)
+ expected = DataFrame([], columns=['x', 'y'])
+ tm.assert_frame_equal(result, expected)
+
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
| closes #10413
closes #10467
| https://api.github.com/repos/pandas-dev/pandas/pulls/10469 | 2015-06-29T13:45:00Z | 2015-07-07T09:30:45Z | 2015-07-07T09:30:45Z | 2015-07-07T09:30:45Z |
ENH: Add pipe method to GroupBy (fixes #10353) | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index b5a382ce24342..42f49b85bb481 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -1002,6 +1002,67 @@ See the :ref:`visualization documentation<visualization.box>` for more.
to ``df.boxplot(by="g")``. See :ref:`here<visualization.box.return>` for
an explanation.
+
+.. _groupby.pipe:
+
+Piping function calls
+~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 0.17.0
+
+Similar to the funcionality provided by ``DataFrames`` and ``Series``, functions
+that take ``GroupBy`` objects can be chained together using a ``pipe`` method to
+allow for a cleaner, more readable syntax.
+
+Imagine that one had functions f, g, and h that each takes a ``DataFrameGroupBy``
+as well as a single argument and returns a ``DataFrameGroupBy``, and one wanted
+to apply these functions in succession to a grouped DataFrame. Instead of having
+to deeply compose these functions and their arguments, such as:
+
+.. code-block:: python
+
+ >>> h(g(f(df.groupby('group'), arg1), arg2), arg4)
+
+one can write the following:
+
+.. code-block:: python
+
+ >>> (df
+ .groupby('group')
+ .pipe(f, arg1)
+ .pipe(g, arg2)
+ .pipe(h, arg3))
+
+For a more concrete example, imagine one wanted to group a DataFrame by column
+'A' and the user wanted to take the square of the difference between the maximum
+value of 'B' in each group and the overal minimum value of 'C' (across all
+groups). One could write this as a pipeline of functions applied to the original
+dataframe:
+
+.. code-block:: python
+
+ def f(dfgb):
+ """
+ Take a DataFrameGroupBy and return a Series
+ where each value corresponds to the maximum
+ value of column 'B' in each group minus the
+ global minimum of column 'C'.
+ """
+ return dfgb.B.max() - dfgb.C.min().min()
+
+ def square(srs):
+ """
+ Take a Series and transform it by
+ squaring each value.
+ """
+ return srs ** 2
+
+ res = df.groupby('A').pipe(f).pipe(square)
+
+
+For more details on pipeline functionality, see :ref:`here<basics.pipe>`.
+
+
Examples
--------
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 3b3bf8cffe41b..9fc50219d0bc9 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -468,6 +468,9 @@ Other enhancements
- ``pd.read_csv`` can now read bz2-compressed files incrementally, and the C parser can read bz2-compressed files from AWS S3 (:issue:`11070`, :issue:`11072`).
+- ``GroupBy`` objects now have a ``pipe`` method, similar to the one on ``DataFrame`` and ``Series`` that allow for functions that take a ``GroupBy`` to be composed in a clean, readable syntax. See the :ref:`documentation <groupby.pipe>` for more.
+
+
.. _whatsnew_0170.api:
.. _whatsnew_0170.api_breaking:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6aec297c31d2b..7c33342ef21a2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -26,6 +26,7 @@
AbstractMethodError)
import pandas.core.nanops as nanops
from pandas.util.decorators import Appender, Substitution, deprecate_kwarg
+from pandas.tools.util import _pipe
from pandas.core import config
@@ -2169,7 +2170,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
-----
Use ``.pipe`` when chaining together functions that expect
- on Series or DataFrames. Instead of writing
+ on Series, DataFrames, or GroupBys. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
@@ -2191,6 +2192,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
See Also
--------
+ pandas.GroupBy.pipe
pandas.DataFrame.apply
pandas.DataFrame.applymap
pandas.Series.map
@@ -2198,15 +2200,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
)
@Appender(_shared_docs['pipe'] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
- if isinstance(func, tuple):
- func, target = func
- if target in kwargs:
- msg = '%s is both the pipe target and a keyword argument' % target
- raise ValueError(msg)
- kwargs[target] = self
- return func(*args, **kwargs)
- else:
- return func(self, *args, **kwargs)
+ return _pipe(self, func, *args, **kwargs)
#----------------------------------------------------------------------
# Attribute access
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index f34fd6e3d2575..21c70d80a9bc5 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -14,13 +14,14 @@
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
-from pandas.core.generic import NDFrame
+from pandas.core.generic import NDFrame, _pipe
from pandas.core.index import Index, MultiIndex, CategoricalIndex, _ensure_index
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import (cache_readonly, Appender, make_signature,
deprecate_kwarg)
+from pandas.tools.util import _pipe
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
@@ -1076,6 +1077,59 @@ def tail(self, n=5):
tail = obj[in_tail]
return tail
+ def pipe(self, func, *args, **kwargs):
+ """ Apply a function with arguments to this GroupBy object
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ func : callable or tuple of (callable, string)
+ Function to apply to this GroupBy or, alternatively, a
+ ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
+ string indicating the keyword of `callable`` that expects the
+ %(klass)s.
+ args : iterable, optional
+ positional arguments passed into ``func``.
+ kwargs : any, dictionary
+ a dictionary of keyword arguments passed into ``func``.
+
+ Returns
+ -------
+ object : the return type of ``func``.
+
+ Notes
+ -----
+
+ Use ``.pipe`` when chaining together functions that expect
+ a GroupBy, or when alternating between functions that take
+ a DataFrame and a GroupBy.
+
+ Assuming that one has a function f that takes and returns
+ a DataFrameGroupBy, a function g that takes a DataFrameGroupBy
+ and returns a DataFrame, and a function h that takes a DataFrame,
+ instead of having to write:
+
+ >>> f(g(h(df.groupby('group')), arg1=a), arg2=b, arg3=c)
+
+ You can write
+
+ >>> (df
+ ... .groupby('group')
+ ... .pipe(f, arg1)
+ ... .pipe(g, arg2)
+ ... .pipe(h, arg3))
+
+
+ See Also
+ --------
+ pandas.Series.pipe
+ pandas.DataFrame.pipe
+ pandas.GroupBy.apply
+ """
+ return _pipe(self, func, *args, **kwargs)
+
+
def _cumcount_array(self, arr=None, ascending=True):
"""
arr is where cumcount gets its values from
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 97b57690ccc49..752e0ed515cd3 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -5159,7 +5159,7 @@ def test_tab_completion(self):
'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',
'all', 'shift', 'skew', 'bfill', 'ffill',
'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
- 'cov', 'dtypes', 'diff', 'idxmax', 'idxmin'
+ 'cov', 'dtypes', 'diff', 'idxmax', 'idxmin', 'pipe'
])
self.assertEqual(results, expected)
@@ -5467,6 +5467,7 @@ def test_func(x):
expected = DataFrame()
tm.assert_frame_equal(result, expected)
+
def test_first_last_max_min_on_time_data(self):
# GH 10295
# Verify that NaT is not in the result of max, min, first and last on
@@ -5512,6 +5513,66 @@ def test_sort(x):
g.apply(test_sort)
+ def test_pipe(self):
+ # Test the pipe method of DataFrameGroupBy.
+ # Issue #10353
+
+ random_state = np.random.RandomState(1234567890)
+
+ df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B': random_state.randn(8),
+ 'C': random_state.randn(8)})
+
+ def f(dfgb):
+ return dfgb.B.max() - dfgb.C.min().min()
+
+ def square(srs):
+ return srs ** 2
+
+ # Note that the transformations are
+ # GroupBy -> Series
+ # Series -> Series
+ # This then chains the GroupBy.pipe and the
+ # NDFrame.pipe methods
+ res = df.groupby('A').pipe(f).pipe(square)
+
+ index = Index([u'bar', u'foo'], dtype='object', name=u'A')
+ expected = pd.Series([8.99110003361, 8.17516964785], name='B', index=index)
+
+ assert_series_equal(expected, res)
+
+
+ def test_pipe_args(self):
+ # Test passing args to the pipe method of DataFrameGroupBy.
+ # Issue #10353
+
+ df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'],
+ 'x': [1.0, 2.0, 3.0, 2.0, 5.0],
+ 'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]})
+
+ def f(dfgb, arg1):
+ return dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False).groupby(dfgb.grouper)
+
+ def g(dfgb, arg2):
+ return dfgb.sum() / dfgb.sum().sum() + arg2
+
+ def h(df, arg3):
+ return df.x + df.y - arg3
+
+ res = (df
+ .groupby('group')
+ .pipe(f, 0)
+ .pipe(g, 10)
+ .pipe(h, 100))
+
+ # Assert the results here
+ index = pd.Index(['A', 'B', 'C'], name='group')
+ expected = pd.Series([-79.5160891089, -78.4839108911, None], index=index)
+
+ assert_series_equal(expected, res)
+
+
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index 0bb6b4b7f7892..54ddfd13edf70 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -48,3 +48,25 @@ def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
+
+
+def _pipe(obj, func, *args, **kwargs):
+ """
+ Apply a function to a obj either by
+ passing the obj as the first argument
+ to the function or, in the case that
+ the func is a tuple, interpret the first
+ element of the tuple as a function and
+ pass the obj to that function as a keyword
+ arguemnt whose key is the value of the
+ second element of the tuple
+ """
+ if isinstance(func, tuple):
+ func, target = func
+ if target in kwargs:
+ msg = '%s is both the pipe target and a keyword argument' % target
+ raise ValueError(msg)
+ kwargs[target] = obj
+ return func(*args, **kwargs)
+ else:
+ return func(obj, *args, **kwargs)
| closes #10353, extends the "pipe" method to a GroupBy object to allow one to chain it with NDFrame.pipe calls
- Moves the functionality of "pipe" from NDFrame into generics._pipe to avoid code duplication
- Leverages this in GroupBy object
- Adds unit test
| https://api.github.com/repos/pandas-dev/pandas/pulls/10466 | 2015-06-28T19:51:19Z | 2015-12-06T19:09:55Z | null | 2022-10-13T00:16:40Z |
BUG: MultiIndex.get_level_values created from Categorical raises AttributeError | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 14e185b5b2a26..389a81c22489e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -94,7 +94,7 @@ Bug Fixes
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
-
+- Bug in ``MultiIndex.get_level_values`` including ``Categorical`` raises ``AttributeError`` (:issue:`10460`)
- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 0f3a4adb0b33a..f5383463cc578 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -4238,7 +4238,7 @@ def get_level_values(self, level):
num = self._get_level_number(level)
unique = self.levels[num] # .values
labels = self.labels[num]
- filled = com.take_1d(unique.values, labels, fill_value=unique._na_value)
+ filled = com.take_1d(unique.get_values(), labels, fill_value=unique._na_value)
values = unique._simple_new(filled, self.names[num],
freq=getattr(unique, 'freq', None),
tz=getattr(unique, 'tz', None))
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 576c5c6be890d..3ea52a4abfe01 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3476,6 +3476,13 @@ def test_groupby_categorical(self):
expected.index.names = ['myfactor', None]
assert_frame_equal(desc_result, expected)
+ # GH 10460
+ exp = CategoricalIndex(['foo'] * 8 + ['bar'] * 8 + ['baz'] * 8 + ['qux'] * 8,
+ name='myfactor')
+ self.assert_index_equal(desc_result.index.get_level_values(0), exp)
+ exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)
+ self.assert_index_equal(desc_result.index.get_level_values(1), exp)
+
def test_groupby_datetime_categorical(self):
# GH9049: ensure backward compatibility
levels = pd.date_range('2014-01-01', periods=4)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 5cbe49a1decbf..74fc6d666e447 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -3567,6 +3567,16 @@ def test_get_level_values_na(self):
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
+ # GH 10460
+ index = MultiIndex(levels=[CategoricalIndex(['A', 'B']),
+ CategoricalIndex([1, 2, 3])],
+ labels=[np.array([0, 0, 0, 1, 1, 1]),
+ np.array([0, 1, 2, 0, 1, 2])])
+ exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
+ self.assert_index_equal(index.get_level_values(0), exp)
+ exp = CategoricalIndex([1, 2 ,3, 1, 2, 3])
+ self.assert_index_equal(index.get_level_values(1), exp)
+
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
| Closes #10460.
xref: #10464.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10465 | 2015-06-28T11:22:03Z | 2015-06-28T13:18:30Z | null | 2015-06-28T13:22:06Z |
BUG: Series.map using categorical Series raises AttributeError | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 2e03cbbea2f70..ec575d06e82f2 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -124,8 +124,8 @@ Bug Fixes
- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
-
-
+- Bug in ``Series.map`` using categorical ``Series`` raises ``AttributeError`` (:issue:`10324`)
+- Bug in ``MultiIndex.get_level_values`` including ``Categorical`` raises ``AttributeError`` (:issue:`10460`)
- Bug that caused segfault when resampling an empty Series (:issue:`10228`)
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 62721587e0828..773ecea8f2712 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -782,6 +782,11 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
"""
+
+ if is_categorical(arr):
+ return arr.take_nd(indexer, fill_value=fill_value,
+ allow_fill=allow_fill)
+
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index c6eb99985dc60..a327233e09003 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3476,6 +3476,13 @@ def test_groupby_categorical(self):
expected.index.names = ['myfactor', None]
assert_frame_equal(desc_result, expected)
+ # GH 10460
+ expc = Categorical.from_codes(np.arange(4).repeat(8), levels, name='myfactor', ordered=True)
+ exp = CategoricalIndex(expc, name='myfactor')
+ self.assert_index_equal(desc_result.index.get_level_values(0), exp)
+ exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)
+ self.assert_index_equal(desc_result.index.get_level_values(1), exp)
+
def test_groupby_datetime_categorical(self):
# GH9049: ensure backward compatibility
levels = pd.date_range('2014-01-01', periods=4)
@@ -3488,7 +3495,8 @@ def test_groupby_datetime_categorical(self):
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
- expected.index = CategoricalIndex(expected.index,categories=expected.index,name='myfactor',ordered=True)
+ expected.index = CategoricalIndex(expected.index, categories=expected.index,
+ name='myfactor', ordered=True)
assert_frame_equal(result, expected)
self.assertEqual(result.index.name, cats.name)
@@ -3503,6 +3511,14 @@ def test_groupby_datetime_categorical(self):
expected.index.names = ['myfactor', None]
assert_frame_equal(desc_result, expected)
+ # GH 10460
+ expc = Categorical.from_codes(np.arange(4).repeat(8), levels, name='myfactor', ordered=True)
+ exp = CategoricalIndex(expc, name='myfactor')
+ self.assert_index_equal(desc_result.index.get_level_values(0), exp)
+ exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)
+ self.assert_index_equal(desc_result.index.get_level_values(1), exp)
+
+
def test_groupby_categorical_index(self):
levels = ['foo', 'bar', 'baz', 'qux']
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index d84c813b2b898..a69db34bdd2df 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -3534,6 +3534,16 @@ def test_get_level_values(self):
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
+ # GH 10460
+ index = MultiIndex(levels=[CategoricalIndex(['A', 'B']),
+ CategoricalIndex([1, 2, 3])],
+ labels=[np.array([0, 0, 0, 1, 1, 1]),
+ np.array([0, 1, 2, 0, 1, 2])])
+ exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
+ self.assert_index_equal(index.get_level_values(0), exp)
+ exp = CategoricalIndex([1, 2 ,3, 1, 2, 3])
+ self.assert_index_equal(index.get_level_values(1), exp)
+
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 1e6fa68f1c85b..f3626488301b9 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5753,6 +5753,35 @@ def test_map(self):
result = self.ts.map(lambda x: x * 2)
self.assert_numpy_array_equal(result, self.ts * 2)
+ # GH 10324
+ a = Series([1, 2, 3, 4])
+ b = Series(["even", "odd", "even", "odd"], dtype="category")
+ c = Series(["even", "odd", "even", "odd"])
+
+ exp = Series(["odd", "even", "odd", np.nan], dtype="category")
+ self.assert_series_equal(a.map(b), exp)
+ exp = Series(["odd", "even", "odd", np.nan])
+ self.assert_series_equal(a.map(c), exp)
+
+ a = Series(['a', 'b', 'c', 'd'])
+ b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
+ c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
+
+ exp = Series([np.nan, 1, 2, 3])
+ self.assert_series_equal(a.map(b), exp)
+ exp = Series([np.nan, 1, 2, 3])
+ self.assert_series_equal(a.map(c), exp)
+
+ a = Series(['a', 'b', 'c', 'd'])
+ b = Series(['B', 'C', 'D', 'E'], dtype='category',
+ index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
+ c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
+
+ exp = Series([np.nan, 'B', 'C', 'D'], dtype='category')
+ self.assert_series_equal(a.map(b), exp)
+ exp = Series([np.nan, 'B', 'C', 'D'])
+ self.assert_series_equal(a.map(c), exp)
+
def test_map_compat(self):
# related GH 8024
s = Series([True,True,False],index=[1,2,3])
| Closes #10324. Closes #10460.
Based on #9848, using `.get_values` should be avoided?
| https://api.github.com/repos/pandas-dev/pandas/pulls/10464 | 2015-06-28T11:03:24Z | 2015-07-01T13:46:10Z | 2015-07-01T13:46:10Z | 2015-07-01T13:47:52Z |
BUG: #10445 cannot add DataFrame to empty Panel | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index b2a1e10469a0f..7285e88b25ded 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -351,6 +351,7 @@ Bug Fixes
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
+- Bug in adding a `DataFrame` to an empty `Panel` (:issue:`10445`)
- Bug in ``io.sql.get_schema`` when specifying multiple columns as primary
key (:issue:`10385`).
- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index bc342d5919bb8..888c12e25da20 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -565,7 +565,9 @@ def _box_item_values(self, key, values):
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
+ self._ensure_valid_major_minor_axes(value)
shape = tuple(self.shape)
+
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
@@ -586,6 +588,25 @@ def __setitem__(self, key, value):
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
+ def _ensure_valid_major_minor_axes(self, value):
+ """Ensure that we add a minor or major axis if it's missing. We can
+ create these from the passed value. This may happen if the user
+ initializes an empty panel before adding DataFrames to it.
+ (GH #10445)
+ """
+ if len(self.major_axis) == 0 or len(self.minor_axis) == 0:
+ try:
+ value = DataFrame(value)
+ except PandasError:
+ raise ValueError("Cannot set a panel when either the major "
+ "axis or minor axis is missing and given a "
+ "value that can't be converted to a DataFrame")
+
+ if len(self.major_axis) == 0:
+ self._data = self._data.reindex_axis(value.index.copy(), axis=1)
+ if len(self.minor_axis) == 0:
+ self._data = self._data.reindex_axis(value.columns.copy(), axis=2)
+
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 9cdc769dd7d74..ff13051e38d25 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -907,6 +907,17 @@ def test_constructor(self):
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
+ # test creating empty panel and then adding DataFrame (GH # 10445)
+ panel = Panel()
+ df = DataFrame(np.array([[1, 2], [3, 4]]))
+ panel['item1'] = df
+ tm.assert_frame_equal(panel['item1'], df)
+
+ panel = Panel()
+ invalid_input = 'blablabla'
+ with self.assertRaises(ValueError):
+ panel['item1'] = invalid_input
+
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
| closes #10445
I based this bugfix off how DataFrame handles the analogous situation ‒ being intialized to empty and then having a series added (cf. [DataFrame._ensure_valid_index](https://github.com/pydata/pandas/blob/master/pandas/core/frame.py#L2167))
| https://api.github.com/repos/pandas-dev/pandas/pulls/10462 | 2015-06-28T06:11:29Z | 2015-10-11T16:00:58Z | null | 2022-10-13T00:16:40Z |
TST/CLN: remove assert_isinstance | diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index e898d699ff2fd..26f4d65978fa0 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -424,27 +424,27 @@ def test_reader_converters(self):
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1', converters=converters)
tm.assert_frame_equal(actual, expected)
-
+
def test_reading_all_sheets(self):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
-
+
_skip_if_no_xlrd()
-
+
dfs = read_excel(self.multisheet,sheetname=None)
expected_keys = ['Alpha','Beta','Charlie']
tm.assert_contains_all(expected_keys,dfs.keys())
def test_reading_multiple_specific_sheets(self):
- # Test reading specific sheetnames by specifying a mixed list
+ # Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
-
+
# Ensure a dict is returned
# See PR #9450
_skip_if_no_xlrd()
-
+
#Explicitly request duplicates. Only the set should be returned.
expected_keys = [2,'Charlie','Charlie']
dfs = read_excel(self.multisheet,sheetname=expected_keys)
@@ -456,19 +456,19 @@ def test_creating_and_reading_multiple_sheets(self):
# Test reading multiple sheets, from a runtime created excel file
# with multiple sheets.
# See PR #9450
-
+
_skip_if_no_xlrd()
_skip_if_no_xlwt()
-
+
def tdf(sheetname):
d, i = [11,22,33], [1,2,3]
return DataFrame(d,i,columns=[sheetname])
-
+
sheets = ['AAA','BBB','CCC']
-
+
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets,dfs))
-
+
with ensure_clean('.xlsx') as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in iteritems(dfs):
@@ -476,7 +476,7 @@ def tdf(sheetname):
dfs_returned = pd.read_excel(pth,sheetname=sheets)
for s in sheets:
tm.assert_frame_equal(dfs[s],dfs_returned[s])
-
+
def test_reader_seconds(self):
# Test reading times with and without milliseconds. GH5945.
_skip_if_no_xlrd()
@@ -1575,12 +1575,12 @@ def test_ExcelWriter_dispatch(self):
with ensure_clean('.xlsx') as path:
writer = ExcelWriter(path)
- tm.assert_isinstance(writer, writer_klass)
+ tm.assertIsInstance(writer, writer_klass)
_skip_if_no_xlwt()
with ensure_clean('.xls') as path:
writer = ExcelWriter(path)
- tm.assert_isinstance(writer, _XlwtWriter)
+ tm.assertIsInstance(writer, _XlwtWriter)
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
@@ -1608,7 +1608,7 @@ def check_called(func):
register_writer(DummyClass)
writer = ExcelWriter('something.test')
- tm.assert_isinstance(writer, DummyClass)
+ tm.assertIsInstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
panel = tm.makePanel()
func = lambda: df.to_excel('something.test')
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index c162bd7c50f5a..fca9e1c4e47ca 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -159,12 +159,12 @@ def test_spam_with_types(self):
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
- tm.assert_isinstance(df, DataFrame)
+ tm.assertIsInstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
for df in dfs:
- tm.assert_isinstance(df, DataFrame)
+ tm.assertIsInstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, '.*Water.*', header=1)[0]
@@ -307,9 +307,9 @@ def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(url), 'First',
attrs={'id': 'table'})
- tm.assert_isinstance(dfs, list)
+ tm.assertIsInstance(dfs, list)
for df in dfs:
- tm.assert_isinstance(df, DataFrame)
+ tm.assertIsInstance(df, DataFrame)
@slow
def test_invalid_table_attrs(self):
@@ -325,34 +325,34 @@ def _bank_data(self, *args, **kwargs):
@slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
- tm.assert_isinstance(df.columns, MultiIndex)
+ tm.assertIsInstance(df.columns, MultiIndex)
@slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
- tm.assert_isinstance(df.index, MultiIndex)
+ tm.assertIsInstance(df.index, MultiIndex)
@slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
- tm.assert_isinstance(df.columns, MultiIndex)
- tm.assert_isinstance(df.index, MultiIndex)
+ tm.assertIsInstance(df.columns, MultiIndex)
+ tm.assertIsInstance(df.index, MultiIndex)
@slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1, tupleize_cols=True)[0]
- tm.assert_isinstance(df.columns, Index)
+ tm.assertIsInstance(df.columns, Index)
@slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
- tm.assert_isinstance(df.columns, MultiIndex)
+ tm.assertIsInstance(df.columns, MultiIndex)
@slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
- tm.assert_isinstance(df.index, MultiIndex)
- tm.assert_isinstance(df.columns, MultiIndex)
+ tm.assertIsInstance(df.index, MultiIndex)
+ tm.assertIsInstance(df.columns, MultiIndex)
@slow
def test_regex_idempotency(self):
@@ -360,9 +360,9 @@ def test_regex_idempotency(self):
dfs = self.read_html(file_path_to_url(url),
match=re.compile(re.compile('Florida')),
attrs={'id': 'table'})
- tm.assert_isinstance(dfs, list)
+ tm.assertIsInstance(dfs, list)
for df in dfs:
- tm.assert_isinstance(df, DataFrame)
+ tm.assertIsInstance(df, DataFrame)
def test_negative_skiprows(self):
with tm.assertRaisesRegexp(ValueError,
@@ -426,10 +426,10 @@ def test_empty_tables(self):
res1 = self.read_html(StringIO(data1))
res2 = self.read_html(StringIO(data2))
assert_framelist_equal(res1, res2)
-
+
def test_tfoot_read(self):
"""
- Make sure that read_html reads tfoot, containing td or th.
+ Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = '''<table>
@@ -452,10 +452,10 @@ def test_tfoot_read(self):
data1 = data_template.format(footer = "")
data2 = data_template.format(footer ="<tr><td>footA</td><th>footB</th></tr>")
-
+
d1 = {'A': ['bodyA'], 'B': ['bodyB']}
d2 = {'A': ['bodyA', 'footA'], 'B': ['bodyB', 'footB']}
-
+
tm.assert_frame_equal(self.read_html(data1)[0], DataFrame(d1))
tm.assert_frame_equal(self.read_html(data2)[0], DataFrame(d2))
@@ -721,8 +721,8 @@ def test_data_fail(self):
def test_works_on_valid_markup(self):
filename = os.path.join(DATA_PATH, 'valid_markup.html')
dfs = self.read_html(filename, index_col=0)
- tm.assert_isinstance(dfs, list)
- tm.assert_isinstance(dfs[0], DataFrame)
+ tm.assertIsInstance(dfs, list)
+ tm.assertIsInstance(dfs[0], DataFrame)
@slow
def test_fallback_success(self):
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 4c040252ee3cb..52430bb6e0999 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -276,7 +276,7 @@ def test_squeeze(self):
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
@@ -1016,7 +1016,7 @@ def test_parse_dates_column_list(self):
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
- tm.assert_isinstance(expected['aux_date'][0], datetime)
+ tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
@@ -1117,7 +1117,7 @@ def test_read_csv_infer_compression(self):
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
- tm.assert_isinstance(df1[0].values[0], compat.text_type)
+ tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
@@ -1300,7 +1300,7 @@ def test_iterator(self):
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
- tm.assert_isinstance(treader, TextFileReader)
+ tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
@@ -1601,7 +1601,7 @@ def test_converters(self):
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
- tm.assert_isinstance(result['D'][0], (datetime, Timestamp))
+ tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
@@ -2727,7 +2727,7 @@ def test_iterator(self):
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
- tm.assert_isinstance(treader, TextFileReader)
+ tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py
index 5227bb23ad616..4ffc0b98ebc71 100644
--- a/pandas/sparse/tests/test_array.py
+++ b/pandas/sparse/tests/test_array.py
@@ -129,19 +129,19 @@ def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.values, second.values),
fill_value=first.fill_value)
- tm.assert_isinstance(res, SparseArray)
+ tm.assertIsInstance(res, SparseArray)
assert_almost_equal(res.values, exp.values)
res2 = op(first, second.values)
- tm.assert_isinstance(res2, SparseArray)
+ tm.assertIsInstance(res2, SparseArray)
assert_sp_array_equal(res, res2)
res3 = op(first.values, second)
- tm.assert_isinstance(res3, SparseArray)
+ tm.assertIsInstance(res3, SparseArray)
assert_sp_array_equal(res, res3)
res4 = op(first, 4)
- tm.assert_isinstance(res4, SparseArray)
+ tm.assertIsInstance(res4, SparseArray)
# ignore this if the actual op raises (e.g. pow)
try:
diff --git a/pandas/sparse/tests/test_libsparse.py b/pandas/sparse/tests/test_libsparse.py
index cd68d264e6bf9..440f4ffb46cb5 100644
--- a/pandas/sparse/tests/test_libsparse.py
+++ b/pandas/sparse/tests/test_libsparse.py
@@ -287,7 +287,7 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
# see if survive the round trip
xbindex = xindex.to_int_index().to_block_index()
ybindex = yindex.to_int_index().to_block_index()
- tm.assert_isinstance(xbindex, BlockIndex)
+ tm.assertIsInstance(xbindex, BlockIndex)
self.assertTrue(xbindex.equals(xindex))
self.assertTrue(ybindex.equals(yindex))
check_cases(_check_case)
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index b506758355228..788e4bd7ef80a 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -230,9 +230,9 @@ def test_to_dense_preserve_name(self):
def test_constructor(self):
# test setup guys
self.assertTrue(np.isnan(self.bseries.fill_value))
- tm.assert_isinstance(self.bseries.sp_index, BlockIndex)
+ tm.assertIsInstance(self.bseries.sp_index, BlockIndex)
self.assertTrue(np.isnan(self.iseries.fill_value))
- tm.assert_isinstance(self.iseries.sp_index, IntIndex)
+ tm.assertIsInstance(self.iseries.sp_index, IntIndex)
self.assertEqual(self.zbseries.fill_value, 0)
assert_equal(self.zbseries.values.values,
@@ -258,7 +258,7 @@ def _check_const(sparse, name):
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
- tm.assert_isinstance(s5, SparseTimeSeries)
+ tm.assertIsInstance(s5, SparseTimeSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
@@ -404,13 +404,13 @@ def test_set_value(self):
def test_getitem_slice(self):
idx = self.bseries.index
res = self.bseries[::2]
- tm.assert_isinstance(res, SparseSeries)
+ tm.assertIsInstance(res, SparseSeries)
expected = self.bseries.reindex(idx[::2])
assert_sp_series_equal(res, expected)
res = self.bseries[:5]
- tm.assert_isinstance(res, SparseSeries)
+ tm.assertIsInstance(res, SparseSeries)
assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
res = self.bseries[5:]
@@ -756,13 +756,13 @@ def test_shift(self):
def test_cumsum(self):
result = self.bseries.cumsum()
expected = self.bseries.to_dense().cumsum()
- tm.assert_isinstance(result, SparseSeries)
+ tm.assertIsInstance(result, SparseSeries)
self.assertEqual(result.name, self.bseries.name)
assert_series_equal(result.to_dense(), expected)
result = self.zbseries.cumsum()
expected = self.zbseries.to_dense().cumsum()
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
assert_series_equal(result, expected)
def test_combine_first(self):
@@ -957,7 +957,7 @@ def test_as_matrix(self):
def test_copy(self):
cp = self.frame.copy()
- tm.assert_isinstance(cp, SparseDataFrame)
+ tm.assertIsInstance(cp, SparseDataFrame)
assert_sp_frame_equal(cp, self.frame)
# as of v0.15.0
@@ -966,9 +966,9 @@ def test_copy(self):
def test_constructor(self):
for col, series in compat.iteritems(self.frame):
- tm.assert_isinstance(series, SparseSeries)
+ tm.assertIsInstance(series, SparseSeries)
- tm.assert_isinstance(self.iframe['A'].sp_index, IntIndex)
+ tm.assertIsInstance(self.iframe['A'].sp_index, IntIndex)
# constructed zframe from matrix above
self.assertEqual(self.zframe['A'].fill_value, 0)
@@ -978,7 +978,7 @@ def test_constructor(self):
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in compat.iteritems(sdf):
- tm.assert_isinstance(series, SparseSeries)
+ tm.assertIsInstance(series, SparseSeries)
# construct from nested dict
data = {}
@@ -1047,9 +1047,9 @@ def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name='a')
x = x.to_sparse(fill_value=0)
- tm.assert_isinstance(x, SparseSeries)
+ tm.assertIsInstance(x, SparseSeries)
df = SparseDataFrame(x)
- tm.assert_isinstance(df, SparseDataFrame)
+ tm.assertIsInstance(df, SparseDataFrame)
x = Series(np.random.randn(10000), name='a')
y = Series(np.random.randn(10000), name='b')
@@ -1098,13 +1098,13 @@ def test_dense_to_sparse(self):
df = DataFrame({'A': [nan, nan, nan, 1, 2],
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
- tm.assert_isinstance(sdf, SparseDataFrame)
+ tm.assertIsInstance(sdf, SparseDataFrame)
self.assertTrue(np.isnan(sdf.default_fill_value))
- tm.assert_isinstance(sdf['A'].sp_index, BlockIndex)
+ tm.assertIsInstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind='integer')
- tm.assert_isinstance(sdf['A'].sp_index, IntIndex)
+ tm.assertIsInstance(sdf['A'].sp_index, IntIndex)
df = DataFrame({'A': [0, 0, 0, 1, 2],
'B': [1, 2, 0, 0, 0]}, dtype=float)
@@ -1172,7 +1172,7 @@ def _compare_to_dense(a, b, da, db, op):
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
- tm.assert_isinstance(mixed_result, SparseDataFrame)
+ tm.assertIsInstance(mixed_result, SparseDataFrame)
assert_sp_frame_equal(mixed_result, sparse_result,
exact_indices=False)
@@ -1220,7 +1220,7 @@ def test_op_corners(self):
self.assertTrue(empty.empty)
foo = self.frame + self.empty
- tm.assert_isinstance(foo.index, DatetimeIndex)
+ tm.assertIsInstance(foo.index, DatetimeIndex)
assert_frame_equal(foo, self.frame * np.nan)
foo = self.empty + self.frame
@@ -1304,7 +1304,7 @@ def _check_frame(frame):
# insert SparseSeries
frame['E'] = frame['A']
- tm.assert_isinstance(frame['E'], SparseSeries)
+ tm.assertIsInstance(frame['E'], SparseSeries)
assert_sp_series_equal(frame['E'], frame['A'], check_names=False)
# insert SparseSeries differently-indexed
@@ -1318,7 +1318,7 @@ def _check_frame(frame):
# insert Series
frame['F'] = frame['A'].to_dense()
- tm.assert_isinstance(frame['F'], SparseSeries)
+ tm.assertIsInstance(frame['F'], SparseSeries)
assert_sp_series_equal(frame['F'], frame['A'], check_names=False)
# insert Series differently-indexed
@@ -1331,7 +1331,7 @@ def _check_frame(frame):
# insert ndarray
frame['H'] = np.random.randn(N)
- tm.assert_isinstance(frame['H'], SparseSeries)
+ tm.assertIsInstance(frame['H'], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2:] = frame.default_fill_value
@@ -1407,7 +1407,7 @@ def test_append(self):
def test_apply(self):
applied = self.frame.apply(np.sqrt)
- tm.assert_isinstance(applied, SparseDataFrame)
+ tm.assertIsInstance(applied, SparseDataFrame)
assert_almost_equal(applied.values, np.sqrt(self.frame.values))
applied = self.fill_frame.apply(np.sqrt)
@@ -1415,7 +1415,7 @@ def test_apply(self):
# agg / broadcast
broadcasted = self.frame.apply(np.sum, broadcast=True)
- tm.assert_isinstance(broadcasted, SparseDataFrame)
+ tm.assertIsInstance(broadcasted, SparseDataFrame)
assert_frame_equal(broadcasted.to_dense(),
self.frame.to_dense().apply(np.sum, broadcast=True))
@@ -1443,7 +1443,7 @@ def test_apply_nonuq(self):
def test_applymap(self):
# just test that it works
result = self.frame.applymap(lambda x: x * 2)
- tm.assert_isinstance(result, SparseDataFrame)
+ tm.assertIsInstance(result, SparseDataFrame)
def test_astype(self):
self.assertRaises(Exception, self.frame.astype, np.int64)
@@ -1635,7 +1635,7 @@ def test_count(self):
def test_cumsum(self):
result = self.frame.cumsum()
expected = self.frame.to_dense().cumsum()
- tm.assert_isinstance(result, SparseDataFrame)
+ tm.assertIsInstance(result, SparseDataFrame)
assert_frame_equal(result.to_dense(), expected)
def _check_all(self, check_func):
@@ -1794,9 +1794,9 @@ def test_from_dict(self):
def test_pickle(self):
def _test_roundtrip(panel):
result = self.round_trip_pickle(panel)
- tm.assert_isinstance(result.items, Index)
- tm.assert_isinstance(result.major_axis, Index)
- tm.assert_isinstance(result.minor_axis, Index)
+ tm.assertIsInstance(result.items, Index)
+ tm.assertIsInstance(result.major_axis, Index)
+ tm.assertIsInstance(result.minor_axis, Index)
assert_sp_panel_equal(panel, result)
_test_roundtrip(self.panel)
@@ -1804,7 +1804,7 @@ def _test_roundtrip(panel):
def test_dense_to_sparse(self):
wp = Panel.from_dict(self.data_dict)
dwp = wp.to_sparse()
- tm.assert_isinstance(dwp['ItemA']['A'], SparseSeries)
+ tm.assertIsInstance(dwp['ItemA']['A'], SparseSeries)
def test_to_dense(self):
dwp = self.panel.to_dense()
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 445530bc5b00c..a0e4d5663fde9 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -574,7 +574,7 @@ def _check_structures(self, func, static_comp,
fill_value=None):
series_result = func(self.series, 50)
- tm.assert_isinstance(series_result, Series)
+ tm.assertIsInstance(series_result, Series)
frame_result = func(self.frame, 50)
self.assertEqual(type(frame_result), DataFrame)
@@ -782,7 +782,7 @@ def _check_ew_ndarray(self, func, preserve_nan=False):
def _check_ew_structures(self, func):
series_result = func(self.series, com=10)
- tm.assert_isinstance(series_result, Series)
+ tm.assertIsInstance(series_result, Series)
frame_result = func(self.frame, com=10)
self.assertEqual(type(frame_result), DataFrame)
@@ -1844,7 +1844,7 @@ def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
def _check_expanding_structures(self, func):
series_result = func(self.series)
- tm.assert_isinstance(series_result, Series)
+ tm.assertIsInstance(series_result, Series)
frame_result = func(self.frame)
self.assertEqual(type(frame_result), DataFrame)
diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py
index 5c8d47ec2a82a..60e976f09365b 100644
--- a/pandas/stats/tests/test_ols.py
+++ b/pandas/stats/tests/test_ols.py
@@ -41,7 +41,7 @@ def _check_repr(obj):
def _compare_ols_results(model1, model2):
- tm.assert_isinstance(model1, type(model2))
+ tm.assertIsInstance(model1, type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
@@ -370,7 +370,7 @@ def test_longpanel_series_combo(self):
y = lp.pop('ItemA')
model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assertTrue(notnull(model.beta.values).all())
- tm.assert_isinstance(model, PanelOLS)
+ tm.assertIsInstance(model, PanelOLS)
model.summary
def test_series_rhs(self):
@@ -394,7 +394,7 @@ def test_various_attributes(self):
for attr in series_attrs:
value = getattr(model, attr)
- tm.assert_isinstance(value, Series)
+ tm.assertIsInstance(value, Series)
# works
model._results
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index c80cea3ab7a7d..8192f6e99116b 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -174,13 +174,13 @@ def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
- tm.assert_isinstance(result, np.ndarray)
+ tm.assertIsInstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
- tm.assert_isinstance(result, np.ndarray)
+ tm.assertIsInstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
@@ -211,7 +211,7 @@ def test_value_counts(self):
arr = np.random.randn(4)
factor = cut(arr, 4)
- tm.assert_isinstance(factor, Categorical)
+ tm.assertIsInstance(factor, Categorical)
result = algos.value_counts(factor)
expected = algos.value_counts(np.asarray(factor))
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index cd60bafdd30cf..e17910a2e14be 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -8,7 +8,7 @@
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, PandasDelegate
from pandas.tseries.base import DatetimeIndexOpsMixin
-from pandas.util.testing import assertRaisesRegexp, assert_isinstance
+from pandas.util.testing import assertRaisesRegexp, assertIsInstance
from pandas.tseries.common import is_datetimelike
from pandas import Series, Index, Int64Index, DatetimeIndex, TimedeltaIndex, PeriodIndex, Timedelta
import pandas.tslib as tslib
@@ -68,7 +68,7 @@ def test_slicing_maintains_type(self):
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
- assert_isinstance(result, klass)
+ assertIsInstance(result, klass)
self.assertEqual(result, expected)
@@ -109,12 +109,12 @@ def setUp(self):
def test_shallow_copying(self):
original = self.container.copy()
- assert_isinstance(self.container.view(), FrozenNDArray)
+ assertIsInstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
- assert_isinstance(self.container._shallow_copy(), FrozenNDArray)
+ assertIsInstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container): container[0] = 16
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 63b913f59f18a..e9e6ec965cbf7 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -2068,7 +2068,7 @@ def test_slicing_and_getting_ops(self):
# row
res_row = df.iloc[2,:]
tm.assert_series_equal(res_row, exp_row)
- tm.assert_isinstance(res_row["cats"], compat.string_types)
+ tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:,0]
@@ -2088,7 +2088,7 @@ def test_slicing_and_getting_ops(self):
# row
res_row = df.loc["j",:]
tm.assert_series_equal(res_row, exp_row)
- tm.assert_isinstance(res_row["cats"], compat.string_types)
+ tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:,"cats"]
@@ -2109,7 +2109,7 @@ def test_slicing_and_getting_ops(self):
# row
res_row = df.ix["j",:]
tm.assert_series_equal(res_row, exp_row)
- tm.assert_isinstance(res_row["cats"], compat.string_types)
+ tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:,"cats"]
@@ -2143,7 +2143,7 @@ def test_slicing_and_getting_ops(self):
# i : int, slice, or sequence of integers
res_row = df.irow(2)
tm.assert_series_equal(res_row, exp_row)
- tm.assert_isinstance(res_row["cats"], compat.string_types)
+ tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.irow(slice(2,4))
tm.assert_frame_equal(res_df, exp_df)
diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py
index 242b54c84d0ee..13596bd35bb62 100644
--- a/pandas/tests/test_compat.py
+++ b/pandas/tests/test_compat.py
@@ -16,7 +16,7 @@ class TestBuiltinIterators(tm.TestCase):
def check_result(self, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected, lengths):
self.assertNotIsInstance(iter_res, list)
- tm.assert_isinstance(list_res, list)
+ tm.assertIsInstance(list_res, list)
iter_res = list(iter_res)
self.assertEqual(len(list_res), length)
self.assertEqual(len(iter_res), length)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 1bb1cd5ec7a80..b38dba5008905 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -395,7 +395,7 @@ def test_to_string_unicode_columns(self):
buf.getvalue()
result = self.frame.to_string()
- tm.assert_isinstance(result, compat.text_type)
+ tm.assertIsInstance(result, compat.text_type)
def test_to_string_utf8_columns(self):
n = u("\u05d0").encode('utf-8')
@@ -1470,7 +1470,7 @@ def test_to_string(self):
self.assertIsNone(retval)
self.assertEqual(buf.getvalue(), s)
- tm.assert_isinstance(s, compat.string_types)
+ tm.assertIsInstance(s, compat.string_types)
# print in right order
result = biggie.to_string(columns=['B', 'A'], col_space=17,
@@ -1719,7 +1719,7 @@ def test_to_html(self):
self.assertIsNone(retval)
self.assertEqual(buf.getvalue(), s)
- tm.assert_isinstance(s, compat.string_types)
+ tm.assertIsInstance(s, compat.string_types)
biggie.to_html(columns=['B', 'A'], col_space=17)
biggie.to_html(columns=['B', 'A'],
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index cfa0ed1a11772..8c9233c1d687b 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1841,13 +1841,13 @@ def test_irow_icol_duplicates(self):
result = df.irow(0)
result2 = df.ix[0]
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
result = df.T.icol(0)
result2 = df.T.ix[:, 0]
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
@@ -2439,7 +2439,7 @@ def test_set_index_cast_datetimeindex(self):
'B': np.random.randn(1000)})
idf = df.set_index('A')
- tm.assert_isinstance(idf.index, DatetimeIndex)
+ tm.assertIsInstance(idf.index, DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
@@ -2602,7 +2602,7 @@ def test_constructor_list_frames(self):
self.assertEqual(result.shape, (1,0))
result = DataFrame([DataFrame(dict(A = lrange(5)))])
- tm.assert_isinstance(result.iloc[0,0], DataFrame)
+ tm.assertIsInstance(result.iloc[0,0], DataFrame)
def test_constructor_mixed_dtypes(self):
@@ -2922,10 +2922,10 @@ def test_constructor_dict_cast(self):
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
- tm.assert_isinstance(df['Col1']['Row2'], float)
+ tm.assertIsInstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
- tm.assert_isinstance(dm[1][1], int)
+ tm.assertIsInstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
@@ -3575,7 +3575,7 @@ def test_constructor_from_items(self):
columns=self.mixed_frame.columns,
orient='index')
assert_frame_equal(recons, self.mixed_frame)
- tm.assert_isinstance(recons['foo'][0], tuple)
+ tm.assertIsInstance(recons['foo'][0], tuple)
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index', columns=['one', 'two', 'three'])
@@ -4368,7 +4368,7 @@ def test_astype_str(self):
def test_array_interface(self):
result = np.sqrt(self.frame)
- tm.assert_isinstance(result, type(self.frame))
+ tm.assertIsInstance(result, type(self.frame))
self.assertIs(result.index, self.frame.index)
self.assertIs(result.columns, self.frame.columns)
@@ -5134,7 +5134,7 @@ def test_itertuples(self):
'ints': lrange(5)}, columns=['floats', 'ints'])
for tup in df.itertuples(index=False):
- tm.assert_isinstance(tup[1], np.integer)
+ tm.assertIsInstance(tup[1], np.integer)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[['a', 'a']]
@@ -7332,10 +7332,10 @@ def test_asfreq_datetimeindex(self):
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
- tm.assert_isinstance(df.index, DatetimeIndex)
+ tm.assertIsInstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
- tm.assert_isinstance(ts.index, DatetimeIndex)
+ tm.assertIsInstance(ts.index, DatetimeIndex)
def test_at_time_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq='30min')
@@ -10554,10 +10554,10 @@ def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
- tm.assert_isinstance(res, Series)
+ tm.assertIsInstance(res, Series)
self.assertIs(res.index, agg_axis)
else:
- tm.assert_isinstance(res, DataFrame)
+ tm.assertIsInstance(res, DataFrame)
_checkit()
_checkit(axis=1)
@@ -10570,7 +10570,7 @@ def _checkit(axis=0, raw=False):
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), broadcast=True)
- tm.assert_isinstance(result, DataFrame)
+ tm.assertIsInstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
@@ -10729,7 +10729,7 @@ def test_apply_multi_index(self):
s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])
s.columns = ['col1','col2']
res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
- tm.assert_isinstance(res.index, MultiIndex)
+ tm.assertIsInstance(res.index, MultiIndex)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
@@ -10738,7 +10738,7 @@ def test_applymap(self):
# GH #465, function returning tuples
result = self.frame.applymap(lambda x: (x, x))
- tm.assert_isinstance(result['A'][0], tuple)
+ tm.assertIsInstance(result['A'][0], tuple)
# GH 2909, object conversion to float in constructor?
df = DataFrame(data=[1,'a'])
@@ -11721,10 +11721,10 @@ def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
- tm.assert_isinstance(ct1, Series)
+ tm.assertIsInstance(ct1, Series)
ct2 = frame.count(0)
- tm.assert_isinstance(ct2, Series)
+ tm.assertIsInstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
@@ -12083,8 +12083,8 @@ def test_mode(self):
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
- tm.assert_isinstance(axis0, Series)
- tm.assert_isinstance(axis1, Series)
+ tm.assertIsInstance(axis0, Series)
+ tm.assertIsInstance(axis1, Series)
self.assertEqual(len(axis0), 0)
self.assertEqual(len(axis1), 0)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 576c5c6be890d..c6eb99985dc60 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -549,7 +549,7 @@ def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df1.groupby("val1", squeeze=True).apply(func)
- tm.assert_isinstance(result,Series)
+ tm.assertIsInstance(result,Series)
df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
{"val1":1, "val2": 27}, {"val1":1, "val2": 12}])
@@ -557,12 +557,12 @@ def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df2.groupby("val1", squeeze=True).apply(func)
- tm.assert_isinstance(result,Series)
+ tm.assertIsInstance(result,Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1,1],[1,1]],columns=['X','Y'])
result = df.groupby('X',squeeze=False).count()
- tm.assert_isinstance(result,DataFrame)
+ tm.assertIsInstance(result,DataFrame)
# GH5592
# inconcistent return type
@@ -670,7 +670,7 @@ def test_agg_period_index(self):
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
- tm.assert_isinstance(rs.index, PeriodIndex)
+ tm.assertIsInstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
@@ -912,7 +912,7 @@ def test_aggregate_item_by_item(self):
def aggfun(ser):
return ser.size
result = DataFrame().groupby(self.df.A).agg(aggfun)
- tm.assert_isinstance(result, DataFrame)
+ tm.assertIsInstance(result, DataFrame)
self.assertEqual(len(result), 0)
def test_agg_item_by_item_raise_typeerror(self):
@@ -1642,22 +1642,22 @@ def test_as_index_series_return_frame(self):
result = grouped['C'].agg(np.sum)
expected = grouped.agg(np.sum).ix[:, ['A', 'C']]
- tm.assert_isinstance(result, DataFrame)
+ tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].agg(np.sum)
expected2 = grouped2.agg(np.sum).ix[:, ['A', 'B', 'C']]
- tm.assert_isinstance(result2, DataFrame)
+ tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
result = grouped['C'].sum()
expected = grouped.sum().ix[:, ['A', 'C']]
- tm.assert_isinstance(result, DataFrame)
+ tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].sum()
expected2 = grouped2.sum().ix[:, ['A', 'B', 'C']]
- tm.assert_isinstance(result2, DataFrame)
+ tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
# corner case
@@ -2023,7 +2023,7 @@ def test_wrap_aggregated_output_multindex(self):
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
- tm.assert_isinstance(agged.columns, MultiIndex)
+ tm.assertIsInstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ('foo', 'one'):
@@ -2181,7 +2181,7 @@ def f(piece):
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
- tm.assert_isinstance(result, DataFrame)
+ tm.assertIsInstance(result, DataFrame)
self.assertTrue(result.index.equals(ts.index))
def test_apply_series_yield_constant(self):
@@ -2779,11 +2779,11 @@ def convert_force_pure(x):
result = grouped.agg(convert_fast)
self.assertEqual(result.dtype, np.object_)
- tm.assert_isinstance(result[0], Decimal)
+ tm.assertIsInstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
self.assertEqual(result.dtype, np.object_)
- tm.assert_isinstance(result[0], Decimal)
+ tm.assertIsInstance(result[0], Decimal)
def test_fast_apply(self):
# make sure that fast apply is correctly called
@@ -3225,7 +3225,7 @@ def g(group):
result = self.df.groupby('A')['C'].apply(f)
expected = self.df.groupby('A')['C'].apply(g)
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
assert_series_equal(result, expected)
def test_getitem_list_of_columns(self):
@@ -3534,7 +3534,7 @@ def test_groupby_groups_datetimeindex(self):
# it works!
groups = grouped.groups
- tm.assert_isinstance(list(groups.keys())[0], datetime)
+ tm.assertIsInstance(list(groups.keys())[0], datetime)
def test_groupby_groups_datetimeindex_tz(self):
# GH 3950
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 5cbe49a1decbf..d84c813b2b898 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -425,7 +425,7 @@ def create_index(self):
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
- tm.assert_isinstance(new_index, np.ndarray)
+ tm.assertIsInstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
super(TestIndex, self).test_copy_and_deepcopy()
@@ -447,7 +447,7 @@ def test_constructor(self):
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
- tm.assert_isinstance(index, Index)
+ tm.assertIsInstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
@@ -507,7 +507,7 @@ def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
- tm.assert_isinstance(rs, PeriodIndex)
+ tm.assertIsInstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
@@ -664,7 +664,7 @@ def test_asof(self):
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
- tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
+ tm.assertIsInstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
@@ -2272,11 +2272,11 @@ def test_view(self):
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
- tm.assert_isinstance(arr, Int64Index)
+ tm.assertIsInstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
- tm.assert_isinstance(arr, Index)
+ tm.assertIsInstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
@@ -2374,7 +2374,7 @@ def test_join_outer(self):
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
- tm.assert_isinstance(res, Int64Index)
+ tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
@@ -2387,7 +2387,7 @@ def test_join_outer(self):
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
- tm.assert_isinstance(res, Int64Index)
+ tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
@@ -2410,7 +2410,7 @@ def test_join_inner(self):
elidx = np.array([1, 6])
eridx = np.array([4, 1])
- tm.assert_isinstance(res, Int64Index)
+ tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
@@ -2423,7 +2423,7 @@ def test_join_inner(self):
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
- tm.assert_isinstance(res, Int64Index)
+ tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
@@ -2439,7 +2439,7 @@ def test_join_left(self):
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
- tm.assert_isinstance(res, Int64Index)
+ tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
@@ -2449,7 +2449,7 @@ def test_join_left(self):
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
- tm.assert_isinstance(res, Int64Index)
+ tm.assertIsInstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
@@ -2478,7 +2478,7 @@ def test_join_right(self):
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
- tm.assert_isinstance(other, Int64Index)
+ tm.assertIsInstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
@@ -2489,7 +2489,7 @@ def test_join_right(self):
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
- tm.assert_isinstance(other, Int64Index)
+ tm.assertIsInstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
@@ -3339,7 +3339,7 @@ def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
- tm.assert_isinstance(single_level, Index)
+ tm.assertIsInstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
@@ -4149,7 +4149,7 @@ def test_difference(self):
sortorder=0,
names=self.index.names)
- tm.assert_isinstance(result, MultiIndex)
+ tm.assertIsInstance(result, MultiIndex)
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
@@ -4459,7 +4459,7 @@ def _check_all(other):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
- tm.assert_isinstance(result, MultiIndex)
+ tm.assertIsInstance(result, MultiIndex)
assertRaisesRegexp(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
@@ -4473,11 +4473,11 @@ def test_join_self(self):
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
- tm.assert_isinstance(result, MultiIndex)
+ tm.assertIsInstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
- tm.assert_isinstance(result, MultiIndex)
+ tm.assertIsInstance(result, MultiIndex)
self.assertIsNone(indexer)
self.check_level_names(result, self.index.names)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index d88a02390fd0c..b666fba274b70 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -814,7 +814,7 @@ def test_chained_getitem_with_lists(self):
# Regression in chained getitem indexing with embedded list-like from 0.12
def check(result, expected):
self.assert_numpy_array_equal(result,expected)
- tm.assert_isinstance(result, np.ndarray)
+ tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5*[np.zeros(3)], 'B':5*[np.ones(3)]})
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 9460c6373d0d2..4198bf87a4bae 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -117,26 +117,26 @@ def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
- tm.assert_isinstance(multi.index, MultiIndex)
+ tm.assertIsInstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
- tm.assert_isinstance(multi.columns, MultiIndex)
+ tm.assertIsInstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
- tm.assert_isinstance(multi.index, MultiIndex)
+ tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
- tm.assert_isinstance(multi.index, MultiIndex)
+ tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
- tm.assert_isinstance(multi.index, MultiIndex)
+ tm.assertIsInstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
@@ -702,7 +702,7 @@ def test_setitem_change_dtype(self):
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
- # tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
+ # tm.assertIsInstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
@@ -798,12 +798,12 @@ def test_reset_index_with_drop(self):
self.assertEqual(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
- tm.assert_isinstance(deleveled, DataFrame)
+ tm.assertIsInstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
- tm.assert_isinstance(deleveled, Series)
+ tm.assertIsInstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
@@ -1325,7 +1325,7 @@ def test_reorder_levels(self):
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
- tm.assert_isinstance(df.columns, MultiIndex)
+ tm.assertIsInstance(df.columns, MultiIndex)
self.assertTrue((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
@@ -1994,7 +1994,7 @@ def test_indexing_ambiguity_bug_1678(self):
result = frame.ix[:, 1]
exp = frame.icol(1)
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 529d3ed68e24d..bc0aaee1b10b6 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -821,7 +821,7 @@ def test_set_value(self):
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
- tm.assert_isinstance(res, Panel)
+ tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 7a72200077225..289f7f134aa27 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -584,7 +584,7 @@ def test_set_value(self):
# resize
res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)
- tm.assert_isinstance(res, Panel4D)
+ tm.assertIsInstance(res, Panel4D)
self.assertIsNot(res, self.panel4d)
self.assertEqual(res.get_value('l4', 'ItemE', 'foo', 'bar'), 1.5)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 007d742895be2..1e6fa68f1c85b 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -781,7 +781,7 @@ def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
- tm.assert_isinstance(s, Series)
+ tm.assertIsInstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
@@ -1069,7 +1069,7 @@ def test_setindex(self):
# works
series = self.series.copy()
series.index = np.arange(len(series))
- tm.assert_isinstance(series.index, Index)
+ tm.assertIsInstance(series.index, Index)
def test_array_finalize(self):
pass
@@ -1326,7 +1326,7 @@ def test_getitem_setitem_integers(self):
def test_getitem_box_float64(self):
value = self.ts[5]
- tm.assert_isinstance(value, np.float64)
+ tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
@@ -3132,7 +3132,7 @@ def test_operators_timedelta64(self):
# scalar Timestamp on rhs
maxa = df['A'].max()
- tm.assert_isinstance(maxa, Timestamp)
+ tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
@@ -5102,7 +5102,7 @@ def test_clip(self):
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
- tm.assert_isinstance(expected, Series)
+ tm.assertIsInstance(expected, Series)
def test_clip_types_and_nulls(self):
@@ -5798,7 +5798,7 @@ def test_map_decimal(self):
result = self.series.map(lambda x: Decimal(str(x)))
self.assertEqual(result.dtype, np.object_)
- tm.assert_isinstance(result[0], Decimal)
+ tm.assertIsInstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
@@ -5980,7 +5980,7 @@ def test_apply_args(self):
result = s.apply(str.split, args=(',',))
self.assertEqual(result[0], ['foo', 'bar'])
- tm.assert_isinstance(result[0], list)
+ tm.assertIsInstance(result[0], list)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
@@ -7081,7 +7081,7 @@ def test_reset_index(self):
rs = s.reset_index(level=[0, 2], drop=True)
self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 90da68eed5cc4..facbd57512257 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -49,7 +49,7 @@ def test_iter(self):
for s in ds.str:
# iter must yield a Series
- tm.assert_isinstance(s, Series)
+ tm.assertIsInstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
@@ -135,7 +135,7 @@ def test_count(self):
tm.assert_almost_equal(result, exp)
result = Series(values).str.count('f[o]+')
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
tm.assert_almost_equal(result, exp)
# mixed
@@ -145,7 +145,7 @@ def test_count(self):
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.count('a')
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -156,7 +156,7 @@ def test_count(self):
tm.assert_almost_equal(result, exp)
result = Series(values).str.count('f[o]+')
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_contains(self):
@@ -195,7 +195,7 @@ def test_contains(self):
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.contains('o')
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -235,7 +235,7 @@ def test_startswith(self):
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -263,7 +263,7 @@ def test_endswith(self):
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -315,7 +315,7 @@ def test_lower_upper(self):
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = ['a', NA, 'b', NA, NA, 'foo', NA, NA, NA]
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -392,7 +392,7 @@ def test_replace(self):
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = ['a', NA, 'b', NA, NA, 'foo', NA, NA, NA]
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -429,7 +429,7 @@ def test_repeat(self):
rs = Series(mixed).str.repeat(3)
xp = ['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA]
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -462,7 +462,7 @@ def test_deprecated_match(self):
with tm.assert_produces_warning():
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = [('BAD_', 'BAD'), NA, ('BAD_', 'BAD'), NA, NA, [], NA, NA, NA]
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -495,7 +495,7 @@ def test_match(self):
with tm.assert_produces_warning():
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
xp = [True, NA, True, NA, NA, False, NA, NA, NA]
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -796,7 +796,7 @@ def test_join(self):
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -819,7 +819,7 @@ def test_len(self):
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -844,7 +844,7 @@ def test_findall(self):
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -982,7 +982,7 @@ def test_pad(self):
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(),
@@ -991,7 +991,7 @@ def test_pad(self):
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(),
@@ -1000,7 +1000,7 @@ def test_pad(self):
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -1097,19 +1097,19 @@ def test_center_ljust_rjust(self):
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA,
NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA,
NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA,
NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
@@ -1214,11 +1214,11 @@ def test_split(self):
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA,
NA, NA, NA])
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
@@ -1261,11 +1261,11 @@ def test_rsplit(self):
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA,
NA, NA, NA])
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
- tm.assert_isinstance(result, Series)
+ tm.assertIsInstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
@@ -1612,7 +1612,7 @@ def test_slice(self):
xp = Series(['foo', NA, 'bar', NA, NA,
NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
@@ -1690,21 +1690,21 @@ def test_strip_lstrip_rstrip_mixed(self):
xp = Series(['aa', NA, 'bb', NA, NA,
NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA,
NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA,
NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
@@ -1797,7 +1797,7 @@ def test_get(self):
xp = Series(['b', NA, 'd', NA, NA,
NA, NA, NA])
- tm.assert_isinstance(rs, Series)
+ tm.assertIsInstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index cc0a0ea5662db..38296e3a5ff5a 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -7,15 +7,16 @@
import numpy as np
import sys
from pandas import Series, DataFrame
+import pandas.util.testing as tm
from pandas.util.testing import (
- assert_almost_equal, assertRaisesRegexp, raise_with_traceback,
- assert_series_equal, assert_frame_equal,
+ assert_almost_equal, assertRaisesRegexp, raise_with_traceback,
+ assert_series_equal, assert_frame_equal, assert_isinstance,
RNGContext
)
# let's get meta.
-class TestAssertAlmostEqual(unittest.TestCase):
+class TestAssertAlmostEqual(tm.TestCase):
_multiprocess_can_split_ = True
def _assert_almost_equal_both(self, a, b, **kwargs):
@@ -112,7 +113,8 @@ def test_assert_almost_equal_inf(self):
self._assert_not_almost_equal_both(np.inf, 0)
-class TestUtilTesting(unittest.TestCase):
+
+class TestUtilTesting(tm.TestCase):
_multiprocess_can_split_ = True
def test_raise_with_traceback(self):
@@ -130,7 +132,8 @@ def test_raise_with_traceback(self):
_, _, traceback = sys.exc_info()
raise_with_traceback(e, traceback)
-class TestAssertSeriesEqual(unittest.TestCase):
+
+class TestAssertSeriesEqual(tm.TestCase):
_multiprocess_can_split_ = True
def _assert_equal(self, x, y, **kwargs):
@@ -190,7 +193,7 @@ def test_multiindex_dtype(self):
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
-class TestAssertFrameEqual(unittest.TestCase):
+class TestAssertFrameEqual(tm.TestCase):
_multiprocess_can_split_ = True
def _assert_equal(self, x, y, **kwargs):
@@ -221,7 +224,7 @@ def test_empty_dtypes(self):
df2=pd.DataFrame(columns=["col1","col2"])
self._assert_equal(df1, df2, check_dtype=False)
self._assert_not_equal(df1, df2, check_dtype=True)
-
+
class TestRNGContext(unittest.TestCase):
@@ -233,3 +236,43 @@ def test_RNGContext(self):
with RNGContext(1):
self.assertEqual(np.random.randn(), expected1)
self.assertEqual(np.random.randn(), expected0)
+
+
+
+class TestDeprecatedTests(tm.TestCase):
+
+ def test_warning(self):
+
+ with tm.assert_produces_warning(FutureWarning):
+ self.assertEquals(1, 1)
+
+ with tm.assert_produces_warning(FutureWarning):
+ self.assertNotEquals(1, 2)
+
+ with tm.assert_produces_warning(FutureWarning):
+ self.assert_(True)
+
+ with tm.assert_produces_warning(FutureWarning):
+ self.assertAlmostEquals(1.0, 1.0000000001)
+
+ with tm.assert_produces_warning(FutureWarning):
+ self.assertNotAlmostEquals(1, 2)
+
+ with tm.assert_produces_warning(FutureWarning):
+ assert_isinstance(Series([1, 2]), Series, msg='xxx')
+
+
+class TestLocale(tm.TestCase):
+
+ def test_locale(self):
+ if sys.platform == 'win32':
+ raise nose.SkipTest("skipping on win platforms as locale not available")
+
+ #GH9744
+ locales = tm.get_locales()
+ self.assertTrue(len(locales) >= 1)
+
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
\ No newline at end of file
diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py
index 38f058358b37f..fb334cf9912f3 100644
--- a/pandas/tests/test_util.py
+++ b/pandas/tests/test_util.py
@@ -61,33 +61,6 @@ def f4(new=None):
pass
-class TestTesting(tm.TestCase):
-
- def test_warning(self):
-
- with tm.assert_produces_warning(FutureWarning):
- self.assertEquals(1, 1)
-
- with tm.assert_produces_warning(FutureWarning):
- self.assertNotEquals(1, 2)
-
- with tm.assert_produces_warning(FutureWarning):
- self.assert_(True)
-
- with tm.assert_produces_warning(FutureWarning):
- self.assertAlmostEquals(1.0, 1.0000000001)
-
- with tm.assert_produces_warning(FutureWarning):
- self.assertNotAlmostEquals(1, 2)
-
- def test_locale(self):
- if sys.platform == 'win32':
- raise nose.SkipTest("skipping on win platforms as locale not available")
-
- #GH9744
- locales = pandas.util.testing.get_locales()
- self.assertTrue(len(locales) >= 1)
-
def test_rands():
r = tm.rands(10)
assert(len(r) == 10)
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 7b322b0d311de..d357182a60b1f 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -790,7 +790,7 @@ def _constructor(self):
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on='key1')
- tm.assert_isinstance(result, NotADataFrame)
+ tm.assertIsInstance(result, NotADataFrame)
def test_append_dtype_coerce(self):
@@ -2535,7 +2535,7 @@ def _constructor(self):
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on='key')
- tm.assert_isinstance(result, NotADataFrame)
+ tm.assertIsInstance(result, NotADataFrame)
if __name__ == '__main__':
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index bb95234657ec2..34789a3c52cb7 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -268,7 +268,7 @@ def _check_output(res, col, index=['A', 'B'], columns=['C']):
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
- tm.assert_isinstance(rtable, Series)
+ tm.assertIsInstance(rtable, Series)
for item in ['DD', 'EE', 'FF']:
gmarg = table[item]['All', '']
self.assertEqual(gmarg, self.data[item].mean())
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index dc0bc14ce1ea6..1b38f51ed4f71 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -4,7 +4,7 @@
import numpy as np
import pandas as pd
from pandas.tseries.base import DatetimeIndexOpsMixin
-from pandas.util.testing import assertRaisesRegexp, assert_isinstance
+from pandas.util.testing import assertRaisesRegexp, assertIsInstance
from pandas.tseries.common import is_datetimelike
from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 69b1d84670d45..d5a63adba00d4 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -138,7 +138,7 @@ def test_getitem(self):
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
self.assertEqual(len(fancy_indexed), 5)
- tm.assert_isinstance(fancy_indexed, DatetimeIndex)
+ tm.assertIsInstance(fancy_indexed, DatetimeIndex)
self.assertIsNone(fancy_indexed.freq)
# 32-bit vs. 64-bit platforms
@@ -176,21 +176,21 @@ def test_union(self):
right = self.rng[5:10]
the_union = left.union(right)
- tm.assert_isinstance(the_union, DatetimeIndex)
+ tm.assertIsInstance(the_union, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_union = left.union(right)
- tm.assert_isinstance(the_union, Index)
+ tm.assertIsInstance(the_union, Index)
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_union = left.union(right)
- tm.assert_isinstance(the_union, DatetimeIndex)
+ tm.assertIsInstance(the_union, DatetimeIndex)
# order does not matter
self.assert_numpy_array_equal(right.union(left), the_union)
@@ -199,7 +199,7 @@ def test_union(self):
rng = date_range(START, END, freq=datetools.bmonthEnd)
the_union = self.rng.union(rng)
- tm.assert_isinstance(the_union, DatetimeIndex)
+ tm.assertIsInstance(the_union, DatetimeIndex)
def test_outer_join(self):
# should just behave as union
@@ -209,14 +209,14 @@ def test_outer_join(self):
right = self.rng[5:10]
the_join = left.join(right, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_join = left.join(right, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
self.assertIsNone(the_join.freq)
# non-overlapping, no gap
@@ -224,13 +224,13 @@ def test_outer_join(self):
right = self.rng[5:10]
the_join = left.join(right, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
# overlapping, but different offset
rng = date_range(START, END, freq=datetools.bmonthEnd)
the_join = self.rng.join(rng, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
self.assertIsNone(the_join.freq)
def test_union_not_cacheable(self):
@@ -253,7 +253,7 @@ def test_intersection(self):
the_int = rng1.intersection(rng2)
expected = rng[10:25]
self.assertTrue(the_int.equals(expected))
- tm.assert_isinstance(the_int, DatetimeIndex)
+ tm.assertIsInstance(the_int, DatetimeIndex)
self.assertEqual(the_int.offset, rng.offset)
the_int = rng1.intersection(rng2.view(DatetimeIndex))
@@ -333,7 +333,7 @@ def test_daterange_bug_456(self):
rng2.offset = datetools.BDay()
result = rng1.union(rng2)
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
def test_error_with_zero_monthends(self):
self.assertRaises(ValueError, date_range, '1/1/2000', '1/1/2001',
@@ -535,7 +535,7 @@ def test_getitem(self):
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
self.assertEqual(len(fancy_indexed), 5)
- tm.assert_isinstance(fancy_indexed, DatetimeIndex)
+ tm.assertIsInstance(fancy_indexed, DatetimeIndex)
self.assertIsNone(fancy_indexed.freq)
# 32-bit vs. 64-bit platforms
@@ -573,21 +573,21 @@ def test_union(self):
right = self.rng[5:10]
the_union = left.union(right)
- tm.assert_isinstance(the_union, DatetimeIndex)
+ tm.assertIsInstance(the_union, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_union = left.union(right)
- tm.assert_isinstance(the_union, Index)
+ tm.assertIsInstance(the_union, Index)
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_union = left.union(right)
- tm.assert_isinstance(the_union, DatetimeIndex)
+ tm.assertIsInstance(the_union, DatetimeIndex)
# order does not matter
self.assert_numpy_array_equal(right.union(left), the_union)
@@ -596,7 +596,7 @@ def test_union(self):
rng = date_range(START, END, freq=datetools.bmonthEnd)
the_union = self.rng.union(rng)
- tm.assert_isinstance(the_union, DatetimeIndex)
+ tm.assertIsInstance(the_union, DatetimeIndex)
def test_outer_join(self):
# should just behave as union
@@ -606,14 +606,14 @@ def test_outer_join(self):
right = self.rng[5:10]
the_join = left.join(right, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_join = left.join(right, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
self.assertIsNone(the_join.freq)
# non-overlapping, no gap
@@ -621,13 +621,13 @@ def test_outer_join(self):
right = self.rng[5:10]
the_join = left.join(right, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
# overlapping, but different offset
rng = date_range(START, END, freq=datetools.bmonthEnd)
the_join = self.rng.join(rng, how='outer')
- tm.assert_isinstance(the_join, DatetimeIndex)
+ tm.assertIsInstance(the_join, DatetimeIndex)
self.assertIsNone(the_join.freq)
def test_intersection_bug(self):
@@ -682,7 +682,7 @@ def test_daterange_bug_456(self):
rng2.offset = datetools.CDay()
result = rng1.union(rng2)
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
def test_cdaterange(self):
rng = cdate_range('2013-05-01', periods=3)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 275fcd4d987ed..680456df104e4 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -79,7 +79,7 @@ def test_normalize_date():
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
- tm.assert_isinstance(valu, np.datetime64)
+ tm.assertIsInstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 0218af63ca7d6..a597087316f77 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -282,7 +282,7 @@ def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
self.assertEqual(res, '2000-01-01 12:34:12')
- tm.assert_isinstance(res, compat.text_type) # GH3363
+ tm.assertIsInstance(res, compat.text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
@@ -1192,7 +1192,7 @@ def test_hash_error(self):
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
- tm.assert_isinstance(series, TimeSeries)
+ tm.assertIsInstance(series, TimeSeries)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
@@ -1350,7 +1350,7 @@ def test_getitem_ndim2(self):
result = idx[:, None]
# MPL kludge
- tm.assert_isinstance(result, PeriodIndex)
+ tm.assertIsInstance(result, PeriodIndex)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
@@ -1442,7 +1442,7 @@ def test_periods_number_check(self):
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
- [tm.assert_isinstance(x, Period) for x in rs]
+ [tm.assertIsInstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
self.assertTrue(index.equals(recon))
@@ -1562,7 +1562,7 @@ def test_frame_setitem(self):
self.assertTrue(rs.equals(rng))
rs = df.reset_index().set_index('index')
- tm.assert_isinstance(rs.index, PeriodIndex)
+ tm.assertIsInstance(rs.index, PeriodIndex)
self.assertTrue(rs.index.equals(rng))
def test_period_set_index_reindex(self):
@@ -2212,7 +2212,7 @@ def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
- tm.assert_isinstance(result[0], Period)
+ tm.assertIsInstance(result[0], Period)
self.assertEqual(result[0].freq, index.freq)
def test_take(self):
@@ -2226,7 +2226,7 @@ def test_take(self):
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
- tm.assert_isinstance(taken, PeriodIndex)
+ tm.assertIsInstance(taken, PeriodIndex)
self.assertEqual(taken.freq, index.freq)
self.assertEqual(taken.name, expected.name)
@@ -2236,7 +2236,7 @@ def test_joins(self):
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
- tm.assert_isinstance(joined, PeriodIndex)
+ tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
@@ -2409,7 +2409,7 @@ def test_map_with_string_constructor(self):
res = index.map(t)
# should return an array
- tm.assert_isinstance(res, np.ndarray)
+ tm.assertIsInstance(res, np.ndarray)
# preserve element types
self.assertTrue(all(isinstance(resi, t) for resi in res))
@@ -2425,7 +2425,7 @@ def test_convert_array_of_periods(self):
periods = list(rng)
result = pd.Index(periods)
- tm.assert_isinstance(result, PeriodIndex)
+ tm.assertIsInstance(result, PeriodIndex)
def test_with_multi_index(self):
# #1705
@@ -2434,9 +2434,9 @@ def test_with_multi_index(self):
s = Series([0, 1, 2, 3], index_as_arrays)
- tm.assert_isinstance(s.index.levels[0], PeriodIndex)
+ tm.assertIsInstance(s.index.levels[0], PeriodIndex)
- tm.assert_isinstance(s.index.values[0][0], Period)
+ tm.assertIsInstance(s.index.values[0][0], Period)
def test_to_datetime_1703(self):
index = period_range('1/1/2012', periods=4, freq='D')
@@ -2467,7 +2467,7 @@ def test_append_concat(self):
# drops index
result = pd.concat([s1, s2])
- tm.assert_isinstance(result.index, PeriodIndex)
+ tm.assertIsInstance(result.index, PeriodIndex)
self.assertEqual(result.index[0], s1.index[0])
def test_pickle_freq(self):
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index c5ed8a1ac3e31..2ba65c07aa114 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -472,7 +472,7 @@ def test_gaps(self):
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
- tm.assert_isinstance(data, np.ma.core.MaskedArray)
+ tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
@@ -486,7 +486,7 @@ def test_gaps(self):
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
- tm.assert_isinstance(data, np.ma.core.MaskedArray)
+ tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
@@ -500,7 +500,7 @@ def test_gaps(self):
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
- tm.assert_isinstance(data, np.ma.core.MaskedArray)
+ tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@@ -518,7 +518,7 @@ def test_gap_upsample(self):
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
- tm.assert_isinstance(data, np.ma.core.MaskedArray)
+ tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 3927caef58d2b..95e41e43efd52 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -463,7 +463,7 @@ def test_resample_reresample(self):
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEqual(len(result), 22)
- tm.assert_isinstance(result.index.freq, offsets.DateOffset)
+ tm.assertIsInstance(result.index.freq, offsets.DateOffset)
self.assertEqual(result.index.freq, offsets.Hour(8))
def test_resample_timestamp_to_period(self):
@@ -789,7 +789,7 @@ def test_upsample_apply_functions(self):
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min', how=['mean', 'sum'])
- tm.assert_isinstance(result, DataFrame)
+ tm.assertIsInstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 7105141da365f..dd820394d40a0 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -877,7 +877,7 @@ def test_append_join_nondatetimeindex(self):
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
- tm.assert_isinstance(result[0], Timedelta)
+ tm.assertIsInstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
@@ -1103,7 +1103,7 @@ def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
- tm.assert_isinstance(list(result.values())[0][0], Timedelta)
+ tm.assertIsInstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d','1d','2d'])
self.assertTrue(idx.equals(list(idx)))
@@ -1309,7 +1309,7 @@ def test_take(self):
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
- tm.assert_isinstance(taken, TimedeltaIndex)
+ tm.assertIsInstance(taken, TimedeltaIndex)
self.assertIsNone(taken.freq)
self.assertEqual(taken.name, expected.name)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 9ad814410741c..a078aba2269bb 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -68,8 +68,8 @@ def setUp(self):
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
- tm.assert_isinstance(self.dups, TimeSeries)
- tm.assert_isinstance(self.dups.index, DatetimeIndex)
+ tm.assertIsInstance(self.dups, TimeSeries)
+ tm.assertIsInstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
@@ -369,13 +369,13 @@ def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
- tm.assert_isinstance(s[5], Timestamp)
+ tm.assertIsInstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
- tm.assert_isinstance(s[5], Timestamp)
+ tm.assertIsInstance(s[5], Timestamp)
- tm.assert_isinstance(s.iget_value(5), Timestamp)
+ tm.assertIsInstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
@@ -433,9 +433,9 @@ def test_index_convert_to_datetime_array(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
- tm.assert_isinstance(converted, np.ndarray)
+ tm.assertIsInstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
- tm.assert_isinstance(x, datetime)
+ tm.assertIsInstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
@@ -453,9 +453,9 @@ def test_index_convert_to_datetime_array_explicit_pytz(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
- tm.assert_isinstance(converted, np.ndarray)
+ tm.assertIsInstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
- tm.assert_isinstance(x, datetime)
+ tm.assertIsInstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
@@ -473,9 +473,9 @@ def test_index_convert_to_datetime_array_dateutil(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
- tm.assert_isinstance(converted, np.ndarray)
+ tm.assertIsInstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
- tm.assert_isinstance(x, datetime)
+ tm.assertIsInstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
@@ -856,7 +856,7 @@ def test_string_na_nat_conversion(self):
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
- tm.assert_isinstance(result2, DatetimeIndex)
+ tm.assertIsInstance(result2, DatetimeIndex)
self.assert_numpy_array_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
@@ -2092,7 +2092,7 @@ def test_append_join_nondatetimeindex(self):
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
- tm.assert_isinstance(result[0], Timestamp)
+ tm.assertIsInstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
@@ -2357,7 +2357,7 @@ def test_iteration_preserves_tz(self):
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
- tm.assert_isinstance(list(result.values())[0][0], Timestamp)
+ tm.assertIsInstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
@@ -2643,7 +2643,7 @@ def test_take(self):
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
- tm.assert_isinstance(taken, DatetimeIndex)
+ tm.assertIsInstance(taken, DatetimeIndex)
self.assertIsNone(taken.freq)
self.assertEqual(taken.tz, expected.tz)
self.assertEqual(taken.name, expected.name)
@@ -2662,7 +2662,7 @@ def test_groupby_function_tuple_1677(self):
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
- tm.assert_isinstance(result.index[0], tuple)
+ tm.assertIsInstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
@@ -3137,11 +3137,11 @@ def test_datetimeindex_union_join_empty(self):
empty = Index([])
result = dti.union(empty)
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
@@ -4003,8 +4003,8 @@ def test_min_max(self):
the_min = rng2.min()
the_max = rng2.max()
- tm.assert_isinstance(the_min, Timestamp)
- tm.assert_isinstance(the_max, Timestamp)
+ tm.assertIsInstance(the_min, Timestamp)
+ tm.assertIsInstance(the_max, Timestamp)
self.assertEqual(the_min, rng[0])
self.assertEqual(the_max, rng[-1])
diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py
index 1f811af0e24ba..6889f8e2afbb2 100644
--- a/pandas/tseries/tests/test_timeseries_legacy.py
+++ b/pandas/tseries/tests/test_timeseries_legacy.py
@@ -90,7 +90,7 @@ def test_unpickle_legacy_len0_daterange(self):
ex_index = DatetimeIndex([], freq='B')
self.assertTrue(result.index.equals(ex_index))
- tm.assert_isinstance(result.index.freq, offsets.BDay)
+ tm.assertIsInstance(result.index.freq, offsets.BDay)
self.assertEqual(len(result), 0)
def test_arithmetic_interaction(self):
@@ -102,12 +102,12 @@ def test_arithmetic_interaction(self):
result = dseries + oseries
expected = dseries * 2
- tm.assert_isinstance(result.index, DatetimeIndex)
+ tm.assertIsInstance(result.index, DatetimeIndex)
assert_series_equal(result, expected)
result = dseries + oseries[:5]
expected = dseries + dseries[:5]
- tm.assert_isinstance(result.index, DatetimeIndex)
+ tm.assertIsInstance(result.index, DatetimeIndex)
assert_series_equal(result, expected)
def test_join_interaction(self):
@@ -119,7 +119,7 @@ def _check_join(left, right, how='inner'):
ea, eb, ec = left.join(DatetimeIndex(right), how=how,
return_indexers=True)
- tm.assert_isinstance(ra, DatetimeIndex)
+ tm.assertIsInstance(ra, DatetimeIndex)
self.assertTrue(ra.equals(ea))
assert_almost_equal(rb, eb)
@@ -143,8 +143,8 @@ def test_unpickle_daterange(self):
filepath = os.path.join(pth, 'data', 'daterange_073.pickle')
rng = read_pickle(filepath)
- tm.assert_isinstance(rng[0], datetime)
- tm.assert_isinstance(rng.offset, offsets.BDay)
+ tm.assertIsInstance(rng[0], datetime)
+ tm.assertIsInstance(rng.offset, offsets.BDay)
self.assertEqual(rng.values.dtype, object)
def test_setops(self):
@@ -153,17 +153,17 @@ def test_setops(self):
result = index[:5].union(obj_index[5:])
expected = index
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
self.assertTrue(result.equals(expected))
result = index[:10].intersection(obj_index[5:])
expected = index[5:10]
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
self.assertTrue(result.equals(expected))
result = index[:10] - obj_index[5:]
expected = index[:5]
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
self.assertTrue(result.equals(expected))
def test_index_conversion(self):
@@ -179,7 +179,7 @@ def test_tolist(self):
rng = date_range('1/1/2000', periods=10)
result = rng.tolist()
- tm.assert_isinstance(result[0], Timestamp)
+ tm.assertIsInstance(result[0], Timestamp)
def test_object_convert_fail(self):
idx = DatetimeIndex([NaT])
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index c7909acca96bb..c5107046a3f1c 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -238,7 +238,7 @@ def test_astimezone(self):
expected = utc.tz_convert(self.tzstr('US/Eastern'))
result = utc.astimezone(self.tzstr('US/Eastern'))
self.assertEqual(expected, result)
- tm.assert_isinstance(result, Timestamp)
+ tm.assertIsInstance(result, Timestamp)
def test_create_with_tz(self):
stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))
@@ -1045,11 +1045,11 @@ def test_join_utc_convert(self):
for how in ['inner', 'outer', 'left', 'right']:
result = left.join(left[:-5], how=how)
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
self.assertEqual(result.tz, left.tz)
result = left.join(right[:-5], how=how)
- tm.assert_isinstance(result, DatetimeIndex)
+ tm.assertIsInstance(result, DatetimeIndex)
self.assertEqual(result.tz.zone, 'UTC')
def test_join_aware(self):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 04e868a4a0819..a88e3ff282e91 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -511,13 +511,6 @@ def equalContents(arr1, arr2):
return frozenset(arr1) == frozenset(arr2)
-def assert_isinstance(obj, class_type_or_tuple, msg=''):
- """asserts that obj is an instance of class_type_or_tuple"""
- assert isinstance(obj, class_type_or_tuple), (
- "%sExpected object to be of type %r, found %r instead" % (
- msg, class_type_or_tuple, type(obj)))
-
-
def assert_equal(a, b, msg=""):
"""asserts that a equals b, like nose's assert_equal, but allows custom message to start.
Passes a and b to format string as well. So you can use '{0}' and '{1}' to display a and b.
@@ -534,8 +527,8 @@ def assert_equal(a, b, msg=""):
def assert_index_equal(left, right, exact=False, check_names=True):
- assert_isinstance(left, Index, '[index] ')
- assert_isinstance(right, Index, '[index] ')
+ assertIsInstance(left, Index, '[index] ')
+ assertIsInstance(right, Index, '[index] ')
if not left.equals(right) or (exact and type(left) != type(right)):
raise AssertionError("[index] left [{0} {1}], right [{2} {3}]".format(left.dtype,
left,
@@ -601,6 +594,8 @@ def assertIsInstance(obj, cls, msg=''):
"%sExpected object to be of type %r, found %r instead" % (
msg, cls, type(obj)))
+def assert_isinstance(obj, class_type_or_tuple, msg=''):
+ return deprecate('assert_isinstance', assertIsInstance)(obj, class_type_or_tuple, msg=msg)
def assertNotIsInstance(obj, cls, msg=''):
"""Test that obj is not an instance of cls
@@ -670,7 +665,7 @@ def assert_series_equal(left, right, check_dtype=True,
check_exact=False,
check_names=True):
if check_series_type:
- assert_isinstance(left, type(right))
+ assertIsInstance(left, type(right))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
@@ -688,7 +683,7 @@ def assert_series_equal(left, right, check_dtype=True,
for level in range(left.index.nlevels):
lindex = left.index.get_level_values(level)
rindex = right.index.get_level_values(level)
- assert_isinstance(lindex, type(rindex))
+ assertIsInstance(lindex, type(rindex))
assert_attr_equal('dtype', lindex, rindex)
assert_attr_equal('inferred_type', lindex, rindex)
if check_names:
@@ -711,9 +706,9 @@ def assert_frame_equal(left, right, check_dtype=True,
by_blocks=False,
check_exact=False):
if check_frame_type:
- assert_isinstance(left, type(right))
- assert_isinstance(left, DataFrame)
- assert_isinstance(right, DataFrame)
+ assertIsInstance(left, type(right))
+ assertIsInstance(left, DataFrame)
+ assertIsInstance(right, DataFrame)
if check_less_precise:
if not by_blocks:
@@ -749,11 +744,11 @@ def assert_frame_equal(left, right, check_dtype=True,
for level in range(left.index.nlevels):
lindex = left.index.get_level_values(level)
rindex = right.index.get_level_values(level)
- assert_isinstance(lindex, type(rindex))
+ assertIsInstance(lindex, type(rindex))
assert_attr_equal('dtype', lindex, rindex)
assert_attr_equal('inferred_type', lindex, rindex)
if check_column_type:
- assert_isinstance(left.columns, type(right.columns))
+ assertIsInstance(left.columns, type(right.columns))
assert_attr_equal('dtype', left.columns, right.columns)
assert_attr_equal('inferred_type', left.columns, right.columns)
if check_names:
@@ -767,7 +762,7 @@ def assert_panelnd_equal(left, right,
assert_func=assert_frame_equal,
check_names=False):
if check_panel_type:
- assert_isinstance(left, type(right))
+ assertIsInstance(left, type(right))
for axis in ['items', 'major_axis', 'minor_axis']:
left_ind = getattr(left, axis)
| `testing.py` has `assert_isinstance` and `assertIsInstance` duplicately, and each test case uses either of them by choice.
Changed to use `assertIsInstance` in all cases, and make `assert_isinstance` show warning.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10458 | 2015-06-28T00:04:07Z | 2015-06-30T10:46:13Z | 2015-06-30T10:46:13Z | 2015-06-30T13:03:04Z |
DOC: Add warning for newbs not to edit auto-generated file | diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 5d4b18b36050f..0a488a778cc6b 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -1,12 +1,23 @@
+"""This file generates `generated.pyx` which is then included in `../algos.pyx`
+during building. To regenerate `generated.pyx`, just run:
+
+ `python generate_code.py`.
+
+"""
+
from __future__ import print_function
-# we only need to be able to run this file on 2.7
-# don't introduce a pandas/pandas.compat import
-# or we get a bootstrapping problem
-from StringIO import StringIO
+import os
+from pandas.compat import StringIO
import numpy as np
_int64_max = np.iinfo(np.int64).max
+warning_to_new_contributors = """
+# DO NOT EDIT THIS FILE: This file was autogenerated from generate_code.py, so
+# please edit that file and then run `python2 generate_code.py` to re-generate
+# this file.
+"""
+
header = """
cimport numpy as np
cimport cython
@@ -2526,8 +2537,14 @@ def generate_from_template(template, exclude=None):
take_2d_multi_template]
-def generate_take_cython_file(path='generated.pyx'):
+def generate_take_cython_file():
+ # Put `generated.pyx` in the same directory as this file
+ directory = os.path.dirname(os.path.realpath(__file__))
+ filename = 'generated.pyx'
+ path = os.path.join(directory, filename)
+
with open(path, 'w') as f:
+ print(warning_to_new_contributors, file=f)
print(header, file=f)
print(generate_ensure_dtypes(), file=f)
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index 83dfacba45211..b67d99146e664 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -1,4 +1,9 @@
+# DO NOT EDIT THIS FILE: This file was autogenerated from generate_code.py, so
+# please edit that file and then run `python2 generate_code.py` to re-generate
+# this file.
+
+
cimport numpy as np
cimport cython
| @jreback, here is the proposed changed we talked about in [this PR](https://github.com/pydata/pandas/pull/10337#issuecomment-111822584) for other fellow newbs that make the same mistake I did.
You said "and more importantly prob a note in `internals.rst` about how/what to change", but I didn't understand that. `internals.rst` is not auto-generated, right? So, why would I add a note there? Perhaps you wanted a note to appear in `internals.html`? That seems unnecessary since if the user has that file on their machine, they must have intentionally generated it...
| https://api.github.com/repos/pandas-dev/pandas/pulls/10456 | 2015-06-27T22:17:06Z | 2015-07-07T16:36:14Z | null | 2015-07-07T17:40:57Z |
Add odo to ecosystem docs | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index c70b6deade36e..26ff9ec536c45 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -132,19 +132,19 @@ Pandas DataFrames with timeseries indexes.
`pydatastream <https://github.com/vfilimonov/pydatastream>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-PyDatastream is a Python interface to the
+PyDatastream is a Python interface to the
`Thomson Dataworks Enterprise (DWE/Datastream) <http://dataworks.thomson.com/Dataworks/Enterprise/1.0/>`__
-SOAP API to return indexed Pandas DataFrames or Panels with financial data.
+SOAP API to return indexed Pandas DataFrames or Panels with financial data.
This package requires valid credentials for this API (non free).
`pandaSDMX <http://pandasdmx.readthedocs.org>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-pandaSDMX is an extensible library to retrieve and acquire statistical data
-and metadata disseminated in
-`SDMX <http://www.sdmx.org>`_ 2.1. This standard is currently supported by
+pandaSDMX is an extensible library to retrieve and acquire statistical data
+and metadata disseminated in
+`SDMX <http://www.sdmx.org>`_ 2.1. This standard is currently supported by
the European statistics office (Eurostat)
-and the European Central Bank (ECB). Datasets may be returned as pandas Series
-or multi-indexed DataFrames.
+and the European Central Bank (ECB). Datasets may be returned as pandas Series
+or multi-indexed DataFrames.
`fredapi <https://github.com/mortada/fredapi>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -183,8 +183,16 @@ Out-of-core
-------------
`Blaze <http://blaze.pydata.org/>`__
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Blaze provides a standard API for doing computations with various
in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB, PyTables,
PySpark.
+
+`Odo <http://odo.pydata.org>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Odo provides a uniform API for moving data between different formats. It uses
+pandas own ``read_csv`` for CSV IO and leverages many existing packages such as
+PyTables, h5py, and pymongo to move data between non pandas formats. Its graph
+based approach is also extensible by end users for custom formats that may be
+too specific for the core of odo.
| closes ContinuumIO/odo#244
| https://api.github.com/repos/pandas-dev/pandas/pulls/10455 | 2015-06-27T15:53:09Z | 2015-06-27T19:04:42Z | 2015-06-27T19:04:41Z | 2015-06-27T19:04:43Z |
Attempt to fix issue #10366 encoding and categoricals hdf serialization. | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 6b4bde588469e..f128cec97c1fe 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -765,3 +765,4 @@ Bug Fixes
- Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`)
- Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`)
- Bug in ``TimedeltaIndex`` formatter causing error while trying to save ``DataFrame`` with ``TimedeltaIndex`` using ``to_csv`` (:issue:`10833`)
+- Bug in ``Categorical`` hdf serialiation in presence of alternate encodings. (:issue:`10366`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 8ef6363f836ae..db5856ac3fa60 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3049,7 +3049,8 @@ def write_metadata(self, key, values):
"""
values = Series(values)
- self.parent.put(self._get_metadata_path(key), values, format='table')
+ self.parent.put(self._get_metadata_path(key), values, format='table',
+ encoding=self.encoding, nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
@@ -4428,6 +4429,9 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None):
dtype = "U{0}".format(itemsize)
else:
dtype = "S{0}".format(itemsize)
+ # fix? issue #10366
+ data = _convert_string_array(data, _ensure_encoding(encoding),
+ itemsize=itemsize)
data = data.astype(dtype, copy=False).astype(object, copy=False)
except (Exception) as e:
f = np.vectorize(lambda x: x.decode(encoding), otypes=[np.object])
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 210852d83094f..022546192f297 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -930,6 +930,51 @@ def test_encoding(self):
result = store.select('df',Term('columns=A',encoding='ascii'))
tm.assert_frame_equal(result,expected)
+ def test_latin_encoding(self):
+
+ if compat.PY2:
+ self.assertRaisesRegexp(TypeError, '\[unicode\] is not implemented as a table column')
+ return
+
+ values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
+ [b'E\xc9, 17', b'a', b'b', b'c'],
+ [b'EE, 17', b'', b'a', b'b', b'c'],
+ [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
+ [b'', b'a', b'b', b'c'],
+ [b'\xf8\xfc', b'a', b'b', b'c'],
+ [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
+ [np.nan, b'', b'b', b'c'],
+ [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
+
+ def _try_decode(x, encoding='latin-1'):
+ try:
+ return x.decode(encoding)
+ except AttributeError:
+ return x
+ # not sure how to remove latin-1 from code in python 2 and 3
+ values = [[_try_decode(x) for x in y] for y in values]
+
+ examples = []
+ for dtype in ['category', object]:
+ for val in values:
+ examples.append(pandas.Series(val, dtype=dtype))
+
+ def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
+ with ensure_clean_path(self.path) as store:
+ s.to_hdf(store, key, format='table', encoding=encoding,
+ nan_rep=nan_rep)
+ retr = read_hdf(store, key)
+ s_nan = s.replace(nan_rep, np.nan)
+ assert_series_equal(s_nan, retr)
+
+ for s in examples:
+ roundtrip(s)
+
+ # fails:
+ # for x in examples:
+ # roundtrip(s, nan_rep=b'\xf8\xfc')
+
+
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
| closes #10366 .
Probably not quite the right approach but want to run travis. Tests are a bit weak at this point (just testing for non-exceptions). Encoding issues might be relevant to other types besides categoricals (but categoricals raise exceptions when strings to mangled to non-uniqueness).
| https://api.github.com/repos/pandas-dev/pandas/pulls/10454 | 2015-06-27T14:47:44Z | 2015-08-22T20:21:50Z | null | 2015-08-22T20:21:50Z |
DOC: fixed docstrings for StringMethods ljust and rjust | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index db14e2b487415..e73fa207152c1 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -1270,11 +1270,11 @@ def pad(self, width, side='left', fillchar=' '):
def center(self, width, fillchar=' '):
return self.pad(width, side='both', fillchar=fillchar)
- @Appender(_shared_docs['str_pad'] % dict(side='right', method='right'))
+ @Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
def ljust(self, width, fillchar=' '):
return self.pad(width, side='right', fillchar=fillchar)
- @Appender(_shared_docs['str_pad'] % dict(side='left', method='left'))
+ @Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
def rjust(self, width, fillchar=' '):
return self.pad(width, side='left', fillchar=fillchar)
| Very minor fix but `str.ljust()` and `str.rjust()` have broken links in the docstrings.
You can see in http://pandas.pydata.org/pandas-docs/version/0.16.2/generated/pandas.Series.str.ljust.html#pandas.Series.str.ljust that it should be linking to `str.ljust()` instead of `str.right()` (which doesn't exist)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10453 | 2015-06-27T14:36:26Z | 2015-06-27T14:39:07Z | 2015-06-27T14:39:07Z | 2015-06-27T14:40:08Z |
DOC: GH10414 Missing example in NA values in GroupBy | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 8ea28c6b686f5..5a350b4d9a1e7 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -174,7 +174,12 @@ NA values in GroupBy
~~~~~~~~~~~~~~~~~~~~
NA groups in GroupBy are automatically excluded. This behavior is consistent
-with R, for example.
+with R, for example:
+
+.. ipython:: python
+
+ df
+ df.groupby('one').mean()
See the groupby section :ref:`here <groupby.missing>` for more information.
| closes #10414
| https://api.github.com/repos/pandas-dev/pandas/pulls/10450 | 2015-06-26T20:04:35Z | 2015-07-01T10:58:05Z | 2015-07-01T10:58:05Z | 2015-07-01T10:58:05Z |
ENH: Simplify using read_hdf for HDF files with one dataset | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 939a5b9dd1d42..28ec828b81c34 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -32,6 +32,7 @@ New features
Other enhancements
^^^^^^^^^^^^^^^^^^
+- Enable `read_hdf` to be used without specifying a key when the HDF file contains a single dataset (:issue:`10443`)
- ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 92208c37f787b..eb800c37db98f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -271,7 +271,7 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
f(path_or_buf)
-def read_hdf(path_or_buf, key, **kwargs):
+def read_hdf(path_or_buf, key=None, **kwargs):
""" read from the store, close it if we opened it
Retrieve pandas object stored in file, optionally based on where
@@ -280,7 +280,8 @@ def read_hdf(path_or_buf, key, **kwargs):
Parameters
----------
path_or_buf : path (string), or buffer to read from
- key : group identifier in the store
+ key : group identifier in the store. Can be omitted a HDF file contains
+ a single pandas object.
where : list of Term (or convertable) objects, optional
start : optional, integer (defaults to None), row number to start
selection
@@ -329,6 +330,12 @@ def read_hdf(path_or_buf, key, **kwargs):
'implemented.')
try:
+ if key is None:
+ keys = store.keys()
+ if len(keys) != 1:
+ raise ValueError('key must be provided when HDF file contains '
+ 'multiple datasets.')
+ key = keys[0]
return store.select(key, auto_close=auto_close, **kwargs)
except:
# if there is an error, close the store
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index ace3e4c5e18dd..4ae2c331f5a65 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -4731,6 +4731,17 @@ def test_invalid_complib(self):
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
self.assertRaises(ValueError, df.to_hdf, path, 'df', complib='blosc:zlib')
+ # GH10443
+ def test_read_nokey(self):
+ df = DataFrame(np.random.rand(4, 5),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df', mode='a')
+ reread = read_hdf(path)
+ assert_frame_equal(df, reread)
+ df.to_hdf(path, 'df2', mode='a')
+ self.assertRaises(ValueError, read_hdf, path)
def _test_sort(obj):
if isinstance(obj, DataFrame):
| Allow read_hdf to be used without a key when a single pandas object
is stored in a HDF file. Raises if multiple pandas objects found.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10443 | 2015-06-25T19:53:00Z | 2015-07-13T10:56:10Z | 2015-07-13T10:56:10Z | 2015-07-13T13:22:12Z |
BUG: Bug fix implement __reduce__/__setstate__ for Period pickle support | diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py
index e7cc89fcc0b61..686efde8402d1 100644
--- a/pandas/io/tests/generate_legacy_storage_files.py
+++ b/pandas/io/tests/generate_legacy_storage_files.py
@@ -78,7 +78,8 @@ def create_data():
index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
names=['one', 'two'])),
dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']),
- cat=Series(Categorical(['foo', 'bar', 'baz'])))
+ cat=Series(Categorical(['foo', 'bar', 'baz'])),
+ per=Series([Period('2000Q1')] * 5))
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list("ABCDA")
diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index b4a4930e09d68..619d1a87a71e0 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -969,6 +969,14 @@ cdef class Period(object):
value = ("%s" % formatted)
return value
+ def __setstate__(self, state):
+ self.freq=state[1]
+ self.ordinal=state[2]
+
+ def __reduce__(self):
+ object_state = None, self.freq, self.ordinal
+ return (Period, object_state)
+
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index b9757c9e1b5d7..b78b5d5ad71d7 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -6,6 +6,9 @@
"""
+import pickle
+import os
+
from datetime import datetime, date, timedelta
from numpy.ma.testutils import assert_equal
@@ -2536,6 +2539,14 @@ def test_searchsorted(self):
ValueError, 'Different period frequency: H',
lambda: pidx.searchsorted(pd.Period('2014-01-01', freq='H')))
+ def test_round_trip(self):
+
+ import pickle
+ p = Period('2000Q1')
+
+ new_p = self.round_trip_pickle(p)
+ self.assertEqual(new_p, p)
+
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
| Fix Period pickle issue https://github.com/pydata/pandas/issues/10439
| https://api.github.com/repos/pandas-dev/pandas/pulls/10441 | 2015-06-25T17:56:15Z | 2015-08-20T16:24:10Z | null | 2015-08-20T16:27:25Z |
BUG GH10425 test_categorical big-endian fix | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 191e777903368..704e13ba56b8c 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -78,3 +78,4 @@ Bug Fixes
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
+- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 3a2f388bdd65b..beff41fd9d109 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -2883,7 +2883,7 @@ def test_to_records(self):
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
- dtype=[('index', '<i8'), ('0', 'O')])
+ dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
| BUG: Changing test_categorical to compare with native byte ordering instead of comparing with little-endian one
closes #10425
| https://api.github.com/repos/pandas-dev/pandas/pulls/10438 | 2015-06-25T09:07:31Z | 2015-06-26T23:11:50Z | null | 2015-06-26T23:11:50Z |
BUG: GH10365 in interpolate_1d when method is piecewise_polynomial | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 191e777903368..e58a6363bf38d 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -78,3 +78,4 @@ Bug Fixes
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
+- Bug in ``interpolate_1d`` with ``method='piecewise_polynomial`` (:issue:`10365`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 35b79cecfa996..102decc9d042b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1682,8 +1682,6 @@ def _interp_limit(invalid, limit):
'piecewise_polynomial', 'pchip']
if method in sp_methods:
new_x = new_x[firstIndex:]
- xvalues = xvalues[firstIndex:]
-
result[firstIndex:][invalid] = _interpolate_scipy_wrapper(
valid_x, valid_y, new_x, method=method, fill_value=fill_value,
bounds_error=bounds_error, order=order)
@@ -1745,6 +1743,8 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
+ if method == "piecewise_polynomial":
+ y = y.reshape((-1, 1))
method = alt_methods[method]
new_y = method(x, y, new_x)
return new_y
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 9a8ec00188d9c..0a9f8dab77372 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -872,6 +872,47 @@ def test_interp_limit_no_nans(self):
expected = s
assert_series_equal(result, expected)
+ def test_interp_piecewise_polynomial(self):
+ # GH 10365
+ tm._skip_if_no_scipy()
+ s1 = Series([1, 2, 3, 4, nan, 6, nan])
+ s2 = Series([nan, nan, 3, 4, nan, 6, 7])
+ result1 = s1.interpolate(method='piecewise_polynomial')
+ result2 = s2.interpolate(method='piecewise_polynomial')
+ expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
+ expected2 = Series([nan, nan, 3., 4., 5., 6., 7.])
+
+ assert_series_equal(expected1, result1)
+ assert_series_equal(expected2, result2)
+
+ def test_interp_krogh(self):
+ tm._skip_if_no_scipy()
+ s1 = Series([0, -2, 0, np.nan], index=[0, 0, 1, 5])
+ s2 = Series([nan, 0, -2, 0, np.nan], index=[-1, 0, 0, 1, 5])
+ s3 = Series([nan, 0, 0, 0, np.nan], index=[-1, 0, 0, 0, 5])
+ result1 = s1.interpolate(method='krogh')
+ result2 = s2.interpolate(method='krogh')
+ result3 = s3.interpolate(method='krogh')
+ expected1 = Series([0., -2., 0., 40.], index=[0, 0, 1, 5])
+ expected2 = Series([nan, 0., -2., 0., 40.], index=[-1, 0, 0, 1, 5])
+ expected3 = Series([nan, 0., 0., 0., 0.], index=[-1, 0, 0, 0, 5])
+
+ assert_series_equal(expected1, result1)
+ assert_series_equal(expected2, result2)
+ assert_series_equal(expected3, result3)
+
+ def test_interp_barycentric(self):
+ tm._skip_if_no_scipy()
+ s1 = Series([nan, 0, 0, 0, 0, nan])
+ s2 = Series([nan, 0, 2, 1, nan])
+ result1 = s1.interpolate(method='barycentric')
+ result2 = s2.interpolate(method='barycentric')
+ expected1 = Series([nan, 0., 0., 0., 0., 0.])
+ expected2 = Series([nan, 0., 2., 1., -3.])
+
+ assert_series_equal(expected1, result1)
+ assert_series_equal(expected2, result2)
+
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
| Starting point for closing #10365.
I only have the trivial case. I think this method (and perhaps other methods as well) needs #10383 for it to be useful.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10435 | 2015-06-25T06:14:43Z | 2015-08-21T01:37:30Z | null | 2022-10-13T00:16:38Z |
BUG: closes bug in stack when index is not unique | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index de2261a79da47..13d61957eea00 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -561,6 +561,7 @@ Bug Fixes
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
- Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`)
- Bug in ``pd.rolling_*`` where ``Series.name`` would be lost in the output (:issue:`10565`)
+- Bug in ``stack`` when index or columns are not unique. (:issue:`10417`)
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index f782aa38bc965..fecfe5cd82c6d 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -461,6 +461,12 @@ def stack(frame, level=-1, dropna=True):
-------
stacked : Series
"""
+ def factorize(index):
+ if index.is_unique:
+ return index, np.arange(len(index))
+ cat = Categorical(index, ordered=True)
+ return cat.categories, cat.codes
+
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
@@ -475,20 +481,22 @@ def stack(frame, level=-1, dropna=True):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
- new_levels.append(frame.columns)
-
new_labels = [lab.repeat(K) for lab in frame.index.labels]
- new_labels.append(np.tile(np.arange(K), N).ravel())
+
+ clev, clab = factorize(frame.columns)
+ new_levels.append(clev)
+ new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
- ilabels = np.arange(N).repeat(K)
- clabels = np.tile(np.arange(K), N).ravel()
- new_index = MultiIndex(levels=[frame.index, frame.columns],
- labels=[ilabels, clabels],
+ levels, (ilab, clab) = \
+ zip(*map(factorize, (frame.index, frame.columns)))
+ labels = ilab.repeat(K), np.tile(clab, N).ravel()
+ new_index = MultiIndex(levels=levels,
+ labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index a7ef49c41a011..65ba5fd036a35 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -964,6 +964,44 @@ def test_stack(self):
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
+ # GH10417
+ def check(left, right):
+ assert_series_equal(left, right)
+ self.assertFalse(left.index.is_unique)
+ li, ri = left.index, right.index
+ for i in range(ri.nlevels):
+ tm.assert_numpy_array_equal(li.levels[i], ri.levels[i])
+ tm.assert_numpy_array_equal(li.labels[i], ri.labels[i])
+
+ df = DataFrame(np.arange(12).reshape(4, 3),
+ index=list('abab'),
+ columns=['1st', '2nd', '3rd'])
+
+ mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
+ labels=[np.tile(np.arange(2).repeat(3), 2),
+ np.tile(np.arange(3), 4)])
+
+ left, right = df.stack(), Series(np.arange(12), index=mi)
+ check(left, right)
+
+ df.columns = ['1st', '2nd', '1st']
+ mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']],
+ labels=[np.tile(np.arange(2).repeat(3), 2),
+ np.tile([0, 1, 0], 4)])
+
+ left, right = df.stack(), Series(np.arange(12), index=mi)
+ check(left, right)
+
+ tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
+ df.index = MultiIndex.from_tuples(tpls)
+ mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
+ labels=[np.tile(np.arange(2).repeat(3), 2),
+ np.repeat([1, 0, 1], [3, 6, 3]),
+ np.tile([0, 1, 0], 4)])
+
+ left, right = df.stack(), Series(np.arange(12), index=mi)
+ check(left, right)
+
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
| closes https://github.com/pydata/pandas/issues/10417
| https://api.github.com/repos/pandas-dev/pandas/pulls/10433 | 2015-06-25T02:08:32Z | 2015-08-08T12:18:12Z | 2015-08-08T12:18:12Z | 2015-08-30T02:30:09Z |
BUG: provide categorical concat always on axis 0, #10430 | diff --git a/.gitignore b/.gitignore
index e8b557d68ac39..c0f576178ecc0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,6 +41,8 @@ doc/_build
dist
# Egg metadata
*.egg-info
+.eggs
+
# tox testing tool
.tox
# rope
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index a9e5d1f3f0ebd..edd4a532cf8f5 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1722,6 +1722,7 @@ def _concat_compat(to_concat, axis=0):
----------
to_concat : array of arrays
axis : axis to provide concatenation
+ in the current impl this is always 0, e.g. we only have 1-d categoricals
Returns
-------
@@ -1744,7 +1745,7 @@ def convert_categorical(x):
# convert to object type and perform a regular concat
from pandas.core.common import _concat_compat
- return _concat_compat([ np.array(x,copy=False).astype('object') for x in to_concat ],axis=axis)
+ return _concat_compat([ np.array(x,copy=False).astype('object') for x in to_concat ],axis=0)
# we could have object blocks and categorical's here
# if we only have a single cateogoricals then combine everything
@@ -1761,4 +1762,4 @@ def convert_categorical(x):
raise ValueError("incompatible categories in categorical concat")
# concat them
- return Categorical(np.concatenate([ convert_categorical(x) for x in to_concat ],axis=axis), categories=categories)
+ return Categorical(np.concatenate([ convert_categorical(x) for x in to_concat ],axis=0), categories=categories)
| numpy 1.10 makes this an error for 1-d on axis != 0
closes #10430
| https://api.github.com/repos/pandas-dev/pandas/pulls/10431 | 2015-06-25T00:10:49Z | 2015-06-25T01:10:34Z | 2015-06-25T01:10:34Z | 2015-06-25T01:10:34Z |
BUG: Timedeltas with no specified units (and frac) should raise, #10426 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 168fd803c5f8a..6e5e9cbb34605 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -61,7 +61,7 @@ Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`)
-- 4x improvement in ``timedelta`` string parsing (:issue:`6755`)
+- 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`)
- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`)
.. _whatsnew_0170.bug_fixes:
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 4e0b30569afe5..7105141da365f 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -112,6 +112,20 @@ def test_construction(self):
# only leading neg signs are allowed
self.assertRaises(ValueError, lambda : Timedelta('10 days -1 h 1.5m 1s 3us'))
+ # no units specified
+ self.assertRaises(ValueError, lambda : Timedelta('3.1415'))
+
+ # invalid construction
+ tm.assertRaisesRegexp(ValueError,
+ "cannot construct a TimeDelta",
+ lambda : Timedelta())
+ tm.assertRaisesRegexp(ValueError,
+ "unit abbreviation w/o a number",
+ lambda : Timedelta('foo'))
+ tm.assertRaisesRegexp(ValueError,
+ "cannot construct a TimeDelta from the passed arguments, allowed keywords are ",
+ lambda : Timedelta(day=10))
+
# roundtripping both for string and value
for v in ['1s',
'-1s',
@@ -149,17 +163,6 @@ def test_construction(self):
self.assertEqual(Timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),Timedelta('0 days, 00:00:02'))
- # invalid
- tm.assertRaisesRegexp(ValueError,
- "cannot construct a TimeDelta",
- lambda : Timedelta())
- tm.assertRaisesRegexp(ValueError,
- "unit abbreviation w/o a number",
- lambda : Timedelta('foo'))
- tm.assertRaisesRegexp(ValueError,
- "cannot construct a TimeDelta from the passed arguments, allowed keywords are ",
- lambda : Timedelta(day=10))
-
def test_repr(self):
self.assertEqual(repr(Timedelta(10,unit='d')),"Timedelta('10 days 00:00:00')")
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index d32952a160194..fc60ff3b7b6a5 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -2400,7 +2400,6 @@ cdef inline parse_timedelta_string(object ts, coerce=False):
have_value = 1
have_dot = 0
-
# we had a dot, but we have a fractional
# value since we have an unit
if have_dot and len(unit):
@@ -2415,6 +2414,10 @@ cdef inline parse_timedelta_string(object ts, coerce=False):
# we have a dot as part of a regular format
# e.g. hh:mm:ss.fffffff
elif have_dot:
+
+ if (len(number) or len(frac)) and not len(unit) and current_unit is None:
+ raise ValueError("no units specified")
+
if len(frac) > 0 and len(frac) <= 3:
m = 10**(3-len(frac)) * 1000L * 1000L
elif len(frac) > 3 and len(frac) <= 6:
| closes #10426
| https://api.github.com/repos/pandas-dev/pandas/pulls/10429 | 2015-06-24T13:14:21Z | 2015-06-24T20:46:26Z | 2015-06-24T20:46:26Z | 2015-06-24T20:46:26Z |
BUG: using .loc[:,column] fails type coercion when the object is a multi-index | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 990eec08d0bd6..d85e3f079e09d 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2605,9 +2605,14 @@ def is_list_like(arg):
not isinstance(arg, compat.string_and_binary_types))
def is_null_slice(obj):
+ """ we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
+def is_full_slice(obj, l):
+ """ we have a full length slice """
+ return (isinstance(obj, slice) and obj.start == 0 and
+ obj.stop == l and obj.step is None)
def is_hashable(arg):
"""Return True if hash(arg) will succeed, False otherwise.
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 02309e6e4e3b5..a9d277088f178 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -6,7 +6,7 @@
import pandas.core.common as com
from pandas.core.common import (is_bool_indexer, is_integer_dtype,
_asarray_tuplesafe, is_list_like, isnull,
- is_null_slice,
+ is_null_slice, is_full_slice,
ABCSeries, ABCDataFrame, ABCPanel, is_float,
_values_from_object, _infer_fill_value, is_integer)
import numpy as np
@@ -399,10 +399,10 @@ def setter(item, v):
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
- # as we have a null slice which means essentially reassign to the columns
- # of a multi-dim object
- # GH6149
- if isinstance(pi, tuple) and all(is_null_slice(idx) for idx in pi):
+ # as we have a null slice or a slice with full bounds
+ # which means essentially reassign to the columns of a multi-dim object
+ # GH6149 (null slice), GH10408 (full bounds)
+ if isinstance(pi, tuple) and all(is_null_slice(idx) or is_full_slice(idx, len(self.obj)) for idx in pi):
s = v
else:
# set the item, possibly having a dtype change
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 710367bf04605..9ed8554e90b80 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1072,6 +1072,25 @@ def test_loc_setitem_consistency(self):
df['x'] = 1
assert_frame_equal(df,expected)
+ # .loc[:,column] setting with slice == len of the column
+ # GH10408
+ data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
+Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
+Region,Site,RespondentID,,,,,
+Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
+Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
+Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
+Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
+
+ df = pd.read_csv(StringIO(data),header=[0,1], index_col=[0,1,2])
+ df.loc[:,('Respondent','StartDate')] = pd.to_datetime(df.loc[:,('Respondent','StartDate')])
+ df.loc[:,('Respondent','EndDate')] = pd.to_datetime(df.loc[:,('Respondent','EndDate')])
+ df.loc[:,('Respondent','Duration')] = df.loc[:,('Respondent','EndDate')] - df.loc[:,('Respondent','StartDate')]
+
+ df.loc[:,('Respondent','Duration')] = df.loc[:,('Respondent','Duration')].astype('timedelta64[s]')
+ expected = Series([1380,720,840,2160.],index=df.index,name=('Respondent','Duration'))
+ assert_series_equal(df[('Respondent','Duration')],expected)
+
def test_loc_setitem_frame(self):
df = self.frame_labels
@@ -2331,14 +2350,14 @@ def test_setitem_dtype_upcast(self):
assert_frame_equal(df,expected)
# GH10280
- df = DataFrame(np.arange(6,dtype='int64').reshape(2, 3),
+ df = DataFrame(np.arange(6,dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
- right = DataFrame([[0, val, 2], [3, 4, 5]],
+ right = DataFrame([[0, val, 2], [3, 4, 5]],
index=list('ab'),
columns=['foo', 'bar', 'baz'])
@@ -2346,12 +2365,12 @@ def test_setitem_dtype_upcast(self):
self.assertTrue(com.is_integer_dtype(left['foo']))
self.assertTrue(com.is_integer_dtype(left['baz']))
- left = DataFrame(np.arange(6,dtype='int64').reshape(2, 3) / 10.0,
+ left = DataFrame(np.arange(6,dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
- right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]],
+ right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]],
index=list('ab'),
columns=['foo', 'bar', 'baz'])
| from [SO](http://stackoverflow.com/questions/31024821/pandas-dataframe-casting-to-timedelta-fails-with-loc)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10428 | 2015-06-24T12:52:36Z | 2015-06-24T20:45:55Z | 2015-06-24T20:45:55Z | 2015-06-24T20:45:55Z |
Removed scikit-timeseries migration docs from FAQ | diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 0ac33db8495c8..7714d937e15d6 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -81,149 +81,6 @@ representation; i.e., 1KB = 1024 bytes).
See also :ref:`Categorical Memory Usage <categorical.memory>`.
-
-.. _ref-scikits-migration:
-
-Migrating from scikits.timeseries to pandas >= 0.8.0
-----------------------------------------------------
-
-Starting with pandas 0.8.0, users of scikits.timeseries should have all of the
-features that they need to migrate their code to use pandas. Portions of the
-scikits.timeseries codebase for implementing calendar logic and timespan
-frequency conversions (but **not** resampling, that has all been implemented
-from scratch from the ground up) have been ported to the pandas codebase.
-
-The scikits.timeseries notions of ``Date`` and ``DateArray`` are responsible
-for implementing calendar logic:
-
-::
-
- In [16]: dt = ts.Date('Q', '1984Q3')
-
- # sic
- In [17]: dt
- Out[17]: <Q-DEC : 1984Q1>
-
- In [18]: dt.asfreq('D', 'start')
- Out[18]: <D : 01-Jan-1984>
-
- In [19]: dt.asfreq('D', 'end')
- Out[19]: <D : 31-Mar-1984>
-
- In [20]: dt + 3
- Out[20]: <Q-DEC : 1984Q4>
-
-``Date`` and ``DateArray`` from scikits.timeseries have been reincarnated in
-pandas ``Period`` and ``PeriodIndex``:
-
-.. ipython:: python
-
- pd.pnow('D') # scikits.timeseries.now()
- pd.Period(year=2007, month=3, day=15, freq='D')
- p = pd.Period('1984Q3')
- p
- p.asfreq('D', 'start')
- p.asfreq('D', 'end')
- (p + 3).asfreq('T') + 6 * 60 + 30
- rng = pd.period_range('1990', '2010', freq='A')
- rng
- rng.asfreq('B', 'end') - 3
-
-.. csv-table::
- :header: "scikits.timeseries", "pandas", "Notes"
- :widths: 20, 20, 60
-
- Date, Period, "A span of time, from yearly through to secondly"
- DateArray, PeriodIndex, "An array of timespans"
- convert, resample, "Frequency conversion in scikits.timeseries"
- convert_to_annual, pivot_annual, "currently supports up to daily frequency, see :issue:`736`"
-
-
-PeriodIndex / DateArray properties and functions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The scikits.timeseries ``DateArray`` had a number of information
-properties. Here are the pandas equivalents:
-
-.. csv-table::
- :header: "scikits.timeseries", "pandas", "Notes"
- :widths: 20, 60, 20
-
- get_steps, ``np.diff(idx.values)``,
- has_missing_dates, ``not idx.is_full``,
- is_full, ``idx.is_full``,
- is_valid, ``idx.is_monotonic and idx.is_unique``,
- is_chronological, ``is_monotonic``,
- ``arr.sort_chronologically()``, ``idx.order()``,
-
-Frequency conversion
-~~~~~~~~~~~~~~~~~~~~
-
-Frequency conversion is implemented using the ``resample`` method on Series
-and DataFrame objects with a DatetimeIndex or PeriodIndex. ``resample`` also
-works on panels (3D). Here is some code that resamples daily data to montly:
-
-.. ipython:: python
-
- rng = pd.period_range('Jan-2000', periods=50, freq='M')
- data = pd.Series(np.random.randn(50), index=rng)
- data
- data.resample('A', how=np.mean)
-
-Plotting
-~~~~~~~~
-
-Much of the plotting functionality of scikits.timeseries has been ported and
-adopted to pandas's data structures. For example:
-
-.. ipython:: python
-
- rng = pd.period_range('1987Q2', periods=10, freq='Q-DEC')
- data = pd.Series(np.random.randn(10), index=rng)
-
- @savefig skts_ts_plot.png
- data.plot()
-
-Converting to and from period format
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Use the ``to_timestamp`` and ``to_period`` instance methods.
-
-Treatment of missing data
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Unlike scikits.timeseries, pandas data structures are not based on NumPy's
-``MaskedArray`` object. Missing data is represented as ``NaN`` in numerical
-arrays and either as ``None`` or ``NaN`` in non-numerical arrays. Implementing
-a version of pandas's data structures that use MaskedArray is possible but
-would require the involvement of a dedicated maintainer. Active pandas
-developers are not interested in this.
-
-Resampling with timestamps and periods
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-``resample`` has a ``kind`` argument which allows you to resample time series
-with a DatetimeIndex to PeriodIndex:
-
-.. ipython:: python
-
- rng = pd.date_range('1/1/2000', periods=200, freq='D')
- data = pd.Series(np.random.randn(200), index=rng)
- data[:10]
- data.index
- data.resample('M', kind='period')
-
-Similarly, resampling from periods to timestamps is possible with an optional
-interval (``'start'`` or ``'end'``) convention:
-
-.. ipython:: python
-
- rng = pd.period_range('Jan-2000', periods=50, freq='M')
- data = pd.Series(np.random.randn(50), index=rng)
- resampled = data.resample('A', kind='timestamp', convention='end')
- resampled.index
-
-
Byte-Ordering Issues
--------------------
Occasionally you may have to deal with data that were created on a machine with
| Issue #10281
| https://api.github.com/repos/pandas-dev/pandas/pulls/10423 | 2015-06-24T06:35:24Z | 2015-06-24T10:53:33Z | 2015-06-24T10:53:33Z | 2015-06-24T10:54:52Z |
BUG: GH9907 generate_range when start/end has higher resolution than offset | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 690dd1ab196b0..f4dce6316ce5c 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -76,3 +76,4 @@ Bug Fixes
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
+- Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 3bebd0daa6d29..3a69a13739e5d 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -2450,12 +2450,12 @@ def generate_range(start=None, end=None, periods=None,
if start and not offset.onOffset(start):
start = offset.rollforward(start)
- if end and not offset.onOffset(end):
+ elif end and not offset.onOffset(end):
end = offset.rollback(end)
- if periods is None and end < start:
- end = None
- periods = 0
+ if periods is None and end < start:
+ end = None
+ periods = 0
if end is None:
end = start + (periods - 1) * offset
@@ -2465,7 +2465,6 @@ def generate_range(start=None, end=None, periods=None,
cur = start
- next_date = cur
while cur <= end:
yield cur
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 69b1d84670d45..54a15ca66e6e9 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -57,6 +57,26 @@ def test_3(self):
end=datetime(2008, 1, 6)),
[])
+ def test_precision_finer_than_offset(self):
+ # GH 9907
+ result1 = DatetimeIndex(start='2015-04-15 00:00:03',
+ end='2016-04-22 00:00:00', freq='Q')
+ result2 = DatetimeIndex(start='2015-04-15 00:00:03',
+ end='2015-06-22 00:00:04', freq='W')
+ expected1_list = ['2015-06-30 00:00:03', '2015-09-30 00:00:03',
+ '2015-12-31 00:00:03', '2016-03-31 00:00:03']
+ expected2_list = ['2015-04-19 00:00:03', '2015-04-26 00:00:03',
+ '2015-05-03 00:00:03', '2015-05-10 00:00:03',
+ '2015-05-17 00:00:03', '2015-05-24 00:00:03',
+ '2015-05-31 00:00:03', '2015-06-07 00:00:03',
+ '2015-06-14 00:00:03', '2015-06-21 00:00:03']
+ expected1 = DatetimeIndex(expected1_list, dtype='datetime64[ns]',
+ freq='Q-DEC', tz=None)
+ expected2 = DatetimeIndex(expected2_list, dtype='datetime64[ns]',
+ freq='W-SUN', tz=None)
+ self.assertTrue(result1.equals(expected1))
+ self.assertTrue(result2.equals(expected2))
+
class TestDateRange(tm.TestCase):
| To close #9907
| https://api.github.com/repos/pandas-dev/pandas/pulls/10422 | 2015-06-24T05:25:20Z | 2015-06-30T10:53:45Z | null | 2015-06-30T10:53:45Z |
DOC: DataFrame Properties | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a4e4cf612ca85..d9101c2fadafe 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -413,10 +413,17 @@ def _get_axes(N, K, index=index, columns=columns):
@property
def axes(self):
+ """
+ Return a list with the row axis labels and column axis labels as the
+ only members. They are returned in that order.
+ """
return [self.index, self.columns]
@property
def shape(self):
+ """
+ Return a tuple representing the dimensionality of the DataFrame.
+ """
return (len(self.index), len(self.columns))
def _repr_fits_vertical_(self):
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b9e007a1e4d58..3530995eac799 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -373,12 +373,12 @@ def _stat_axis(self):
@property
def shape(self):
- "tuple of axis dimensions"
+ "Return a tuple of axis dimensions"
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
- "index(es) of the NDFrame"
+ "Return index label(s) of the internal NDFrame"
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
diff --git a/pandas/core/series.py b/pandas/core/series.py
index dfbc5dbf84572..4b6a6e029fa20 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -473,6 +473,9 @@ def _unpickle_series_compat(self, state):
# indexers
@property
def axes(self):
+ """
+ Return a list of the row axis labels
+ """
return [self.index]
def _ixs(self, i, axis=0):
| Added explanations for `axes` and `shape`
| https://api.github.com/repos/pandas-dev/pandas/pulls/10421 | 2015-06-23T23:15:39Z | 2015-08-20T15:26:07Z | null | 2015-08-20T15:26:07Z |
BUG: Fix value_counts name handling | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 742077d39fb18..02ef2bbed19b6 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -73,6 +73,7 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
@@ -100,3 +101,6 @@ Bug Fixes
- Bug that caused segfault when resampling an empty Series (:issue:`10228`)
+- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)
+
+
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c97d459fb96df..c958a70b43089 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -202,6 +202,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
from pandas.tools.tile import cut
from pandas.tseries.period import PeriodIndex
+ name = getattr(values, 'name', None)
values = Series(values).values
if bins is not None:
@@ -222,7 +223,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
if com.is_datetime_or_timedelta_dtype(dtype) or is_period:
if is_period:
- values = PeriodIndex(values)
+ values = PeriodIndex(values, name=name)
values = values.view(np.int64)
keys, counts = htable.value_count_int64(values)
@@ -247,7 +248,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
- result = Series(counts, index=com._values_from_object(keys))
+ result = Series(counts, index=com._values_from_object(keys), name=name)
if bins is not None:
# TODO: This next line should be more efficient
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 540b900844a9e..c3004aec60cc5 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -431,10 +431,10 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
if isinstance(self, PeriodIndex):
# preserve freq
- result.index = self._simple_new(result.index.values, self.name,
+ result.index = self._simple_new(result.index.values,
freq=self.freq)
elif isinstance(self, DatetimeIndex):
- result.index = self._simple_new(result.index.values, self.name,
+ result.index = self._simple_new(result.index.values,
tz=getattr(self, 'tz', None))
return result
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index e9526f9fad1ac..cd60bafdd30cf 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -181,23 +181,24 @@ def f():
class Ops(tm.TestCase):
def setUp(self):
- self.bool_index = tm.makeBoolIndex(10)
- self.int_index = tm.makeIntIndex(10)
- self.float_index = tm.makeFloatIndex(10)
- self.dt_index = tm.makeDateIndex(10)
- self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern')
- self.period_index = tm.makePeriodIndex(10)
- self.string_index = tm.makeStringIndex(10)
+ self.bool_index = tm.makeBoolIndex(10, name='a')
+ self.int_index = tm.makeIntIndex(10, name='a')
+ self.float_index = tm.makeFloatIndex(10, name='a')
+ self.dt_index = tm.makeDateIndex(10, name='a')
+ self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(tz='US/Eastern')
+ self.period_index = tm.makePeriodIndex(10, name='a')
+ self.string_index = tm.makeStringIndex(10, name='a')
+ self.unicode_index = tm.makeUnicodeIndex(10, name='a')
arr = np.random.randn(10)
- self.int_series = Series(arr, index=self.int_index)
- self.float_series = Series(arr, index=self.float_index)
- self.dt_series = Series(arr, index=self.dt_index)
+ self.int_series = Series(arr, index=self.int_index, name='a')
+ self.float_series = Series(arr, index=self.float_index, name='a')
+ self.dt_series = Series(arr, index=self.dt_index, name='a')
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
- self.period_series = Series(arr, index=self.period_index)
- self.string_series = Series(arr, index=self.string_index)
+ self.period_series = Series(arr, index=self.period_index, name='a')
+ self.string_series = Series(arr, index=self.string_index, name='a')
- types = ['bool','int','float','dt', 'dt_tz', 'period','string']
+ types = ['bool','int','float','dt', 'dt_tz', 'period','string', 'unicode']
fmts = [ "{0}_{1}".format(t,f) for t in types for f in ['index','series'] ]
self.objs = [ getattr(self,f) for f in fmts if getattr(self,f,None) is not None ]
@@ -213,9 +214,9 @@ def check_ops_properties(self, props, filter=None, ignore_failures=False):
try:
if isinstance(o, Series):
- expected = Series(getattr(o.index,op),index=o.index)
+ expected = Series(getattr(o.index,op), index=o.index, name='a')
else:
- expected = getattr(o,op)
+ expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
@@ -361,21 +362,28 @@ def test_value_counts_unique_nunique(self):
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
- expected_index = o[::-1]
- o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
+
+ # resets name from Index
+ expected_index = pd.Index(o[::-1], name=None)
+
+ # attach name to klass
+ o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq, name='a')
# don't test boolean
elif isinstance(o,Index) and o.is_boolean():
continue
elif isinstance(o, Index):
- expected_index = values[::-1]
- o = klass(np.repeat(values, range(1, len(o) + 1)))
+ expected_index = pd.Index(values[::-1], name=None)
+ o = klass(np.repeat(values, range(1, len(o) + 1)), name='a')
else:
- expected_index = values[::-1]
+ expected_index = pd.Index(values[::-1], name=None)
idx = np.repeat(o.index.values, range(1, len(o) + 1))
- o = klass(np.repeat(values, range(1, len(o) + 1)), index=idx)
+ o = klass(np.repeat(values, range(1, len(o) + 1)), index=idx, name='a')
- expected_s = Series(range(10, 0, -1), index=expected_index, dtype='int64')
- tm.assert_series_equal(o.value_counts(), expected_s)
+ expected_s = Series(range(10, 0, -1), index=expected_index, dtype='int64', name='a')
+ result = o.value_counts()
+ tm.assert_series_equal(result, expected_s)
+ self.assertTrue(result.index.name is None)
+ self.assertEqual(result.name, 'a')
result = o.unique()
if isinstance(o, (DatetimeIndex, PeriodIndex)):
@@ -410,21 +418,34 @@ def test_value_counts_unique_nunique(self):
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
- expected_index = o
- o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
+
+ # resets name from Index
+ expected_index = pd.Index(o, name=None)
+ # attach name to klass
+ o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq, name='a')
elif isinstance(o, Index):
- expected_index = values
- o = klass(np.repeat(values, range(1, len(o) + 1)))
+ expected_index = pd.Index(values, name=None)
+ o = klass(np.repeat(values, range(1, len(o) + 1)), name='a')
else:
- expected_index = values
+ expected_index = pd.Index(values, name=None)
idx = np.repeat(o.index.values, range(1, len(o) + 1))
- o = klass(np.repeat(values, range(1, len(o) + 1)), index=idx)
-
- expected_s_na = Series(list(range(10, 2, -1)) +[3], index=expected_index[9:0:-1], dtype='int64')
- expected_s = Series(list(range(10, 2, -1)), index=expected_index[9:1:-1], dtype='int64')
-
- tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
+ o = klass(np.repeat(values, range(1, len(o) + 1)), index=idx, name='a')
+
+ expected_s_na = Series(list(range(10, 2, -1)) +[3],
+ index=expected_index[9:0:-1],
+ dtype='int64', name='a')
+ expected_s = Series(list(range(10, 2, -1)),
+ index=expected_index[9:1:-1],
+ dtype='int64', name='a')
+
+ result_s_na = o.value_counts(dropna=False)
+ tm.assert_series_equal(result_s_na, expected_s_na)
+ self.assertTrue(result_s_na.index.name is None)
+ self.assertEqual(result_s_na.name, 'a')
+ result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
+ self.assertTrue(result_s.index.name is None)
+ self.assertEqual(result_s.name, 'a')
# numpy_array_equal cannot compare arrays includes nan
result = o.unique()
@@ -508,14 +529,15 @@ def test_value_counts_inferred(self):
df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"],
parse_dates=["dt"])
- s = klass(df['dt'].copy())
+ s = klass(df['dt'].copy(), name='dt')
- idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00X'])
- expected_s = Series([3, 2, 1], index=idx)
+ idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z',
+ '2009-01-01 00:00:00X'])
+ expected_s = Series([3, 2, 1], index=idx, name='dt')
tm.assert_series_equal(s.value_counts(), expected_s)
- expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'],
- dtype='datetime64[ns]')
+ expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z',
+ '2008-09-09 00:00:00Z'], dtype='datetime64[ns]')
if isinstance(s, DatetimeIndex):
expected = DatetimeIndex(expected)
self.assertTrue(s.unique().equals(expected))
@@ -526,7 +548,7 @@ def test_value_counts_inferred(self):
# with NaT
s = df['dt'].copy()
- s = klass([v for v in s.values] + [pd.NaT])
+ s = klass([v for v in s.values] + [pd.NaT], name='dt')
result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
@@ -547,10 +569,10 @@ def test_value_counts_inferred(self):
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
- td = klass(td)
+ td = klass(td, name='dt')
result = td.value_counts()
- expected_s = Series([6], index=[Timedelta('1day')])
+ expected_s = Series([6], index=[Timedelta('1day')], name='dt')
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(['1 days'])
@@ -560,9 +582,8 @@ def test_value_counts_inferred(self):
self.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
- td2 = klass(td2)
+ td2 = klass(td2, name='dt')
result2 = td2.value_counts()
-
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
@@ -629,7 +650,7 @@ def test_duplicated_drop_duplicates(self):
# special case
if original.is_boolean():
result = original.drop_duplicates()
- expected = Index([False,True])
+ expected = Index([False,True], name='a')
tm.assert_index_equal(result, expected)
continue
@@ -668,7 +689,8 @@ def test_duplicated_drop_duplicates(self):
idx.drop_duplicates(inplace=True)
else:
- expected = Series([False] * len(original), index=original.index)
+ expected = Series([False] * len(original),
+ index=original.index, name='a')
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
@@ -676,17 +698,17 @@ def test_duplicated_drop_duplicates(self):
idx = original.index[list(range(len(original))) + [5, 3]]
values = original.values[list(range(len(original))) + [5, 3]]
- s = Series(values, index=idx)
+ s = Series(values, index=idx, name='a')
- expected = Series([False] * len(original) + [True, True], index=idx)
+ expected = Series([False] * len(original) + [True, True],
+ index=idx, name='a')
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
last_base = [False] * len(idx)
last_base[3] = True
last_base[5] = True
- expected = Series(last_base, index=idx)
- expected
+ expected = Series(last_base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(take_last=True), expected)
tm.assert_series_equal(s.drop_duplicates(take_last=True),
s[~np.array(last_base)])
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 83d6b97788e91..04e868a4a0819 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -817,43 +817,43 @@ def getArangeMat():
# make index
-def makeStringIndex(k=10):
- return Index(rands_array(nchars=10, size=k))
+def makeStringIndex(k=10, name=None):
+ return Index(rands_array(nchars=10, size=k), name=name)
-def makeUnicodeIndex(k=10):
+def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k))
-def makeCategoricalIndex(k=10, n=3):
+def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
- return CategoricalIndex(np.random.choice(x,k))
+ return CategoricalIndex(np.random.choice(x,k), name=name)
-def makeBoolIndex(k=10):
+def makeBoolIndex(k=10, name=None):
if k == 1:
- return Index([True])
+ return Index([True], name=name)
elif k == 2:
- return Index([False,True])
- return Index([False,True] + [False]*(k-2))
+ return Index([False,True], name=name)
+ return Index([False,True] + [False]*(k-2), name=name)
-def makeIntIndex(k=10):
- return Index(lrange(k))
+def makeIntIndex(k=10, name=None):
+ return Index(lrange(k), name=name)
-def makeFloatIndex(k=10):
+def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
- return Index(values * (10 ** np.random.randint(0, 9)))
+ return Index(values * (10 ** np.random.randint(0, 9)), name=name)
-def makeDateIndex(k=10, freq='B'):
+def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
- dr = bdate_range(dt, periods=k, freq=freq)
- return DatetimeIndex(dr)
+ dr = bdate_range(dt, periods=k, freq=freq, name=name)
+ return DatetimeIndex(dr, name=name)
-def makeTimedeltaIndex(k=10, freq='D'):
- return TimedeltaIndex(start='1 day',periods=k,freq=freq)
+def makeTimedeltaIndex(k=10, freq='D', name=None):
+ return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
-def makePeriodIndex(k=10):
+def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
- dr = PeriodIndex(start=dt, periods=k, freq='B')
+ dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
@@ -885,21 +885,21 @@ def all_timeseries_index_generator(k=10):
# make series
-def makeFloatSeries():
+def makeFloatSeries(name=None):
index = makeStringIndex(N)
- return Series(randn(N), index=index)
+ return Series(randn(N), index=index, name=name)
-def makeStringSeries():
+def makeStringSeries(name=None):
index = makeStringIndex(N)
- return Series(randn(N), index=index)
+ return Series(randn(N), index=index, name=name)
-def makeObjectSeries():
+def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
- return Series(dateIndex, index=index)
+ return Series(dateIndex, index=index, name=name)
def getSeriesData():
@@ -907,16 +907,16 @@ def getSeriesData():
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
-def makeTimeSeries(nper=None, freq='B'):
+def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
- return Series(randn(nper), index=makeDateIndex(nper, freq=freq))
+ return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
-def makePeriodSeries(nper=None):
+def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
- return Series(randn(nper), index=makePeriodIndex(nper))
+ return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
| Closes #10150. Also, made `test_base` have name attributes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10419 | 2015-06-23T21:27:25Z | 2015-06-27T03:51:56Z | 2015-06-27T03:51:56Z | 2015-07-04T12:52:08Z |
improve documentation for pandas.Series.interpolate | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b9e007a1e4d58..13c38789d03db 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2872,18 +2872,21 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
'polynomial', 'spline' 'piecewise_polynomial', 'pchip'}
* 'linear': ignore the index and treat the values as equally
- spaced. default
+ spaced. This is the only method supported on MultiIndexes.
+ default
* 'time': interpolation works on daily and higher resolution
data to interpolate given length of interval
* 'index', 'values': use the actual numerical values of the index
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'polynomial' is passed to
- `scipy.interpolate.interp1d` with the order given both
+ `scipy.interpolate.interp1d` with the order given. Both
'polynomial' and 'spline' requre that you also specify and order
- (int) e.g. df.interpolate(method='polynomial', order=4)
+ (int) e.g. df.interpolate(method='polynomial', order=4). These
+ use the actual numerical values of the index
* 'krogh', 'piecewise_polynomial', 'spline', and 'pchip' are all
wrappers around the scipy interpolation methods of similar
- names. See the scipy documentation for more on their behavior:
+ names. These use the actual numerical values of the index. See
+ the scipy documentation for more on their behavior:
http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation
http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
| Close #10362 . Specify information on the use of index in pandas.Series.interpolate documentation.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10418 | 2015-06-23T20:33:59Z | 2015-08-07T10:54:07Z | 2015-08-07T10:54:07Z | 2015-08-07T10:54:08Z |
ENH: tolerance argument for limiting pad, backfill and nearest neighbor reindexing | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 624e10b431de5..71d16a40f0215 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1100,6 +1100,30 @@ Note that the same result could have been achieved using
increasing or descreasing. :meth:`~Series.fillna` and :meth:`~Series.interpolate`
will not make any checks on the order of the index.
+.. _basics.limits_on_reindex_fill:
+
+Limits on filling while reindexing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``limit`` and ``tolerance`` arguments provide additional control over
+filling while reindexing. Limit specifies the maximum count of consecutive
+matches:
+
+.. ipython:: python
+
+ ts2.reindex(ts.index, method='ffill', limit=1)
+
+In contrast, tolerance specifies the maximum distance between the index and
+indexer values:
+
+.. ipython:: python
+
+ ts2.reindex(ts.index, method='ffill', tolerance='1 day')
+
+Notice that when used on a ``DatetimeIndex``, ``TimedeltaIndex`` or
+``PeriodIndex``, ``tolerance`` will coerced into a ``Timedelta`` if possible.
+This allows you to specify tolerance with appropriate strings.
+
.. _basics.drop:
Dropping labels from an axis
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 7e69a8044a305..5eb808e450a8a 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -180,6 +180,22 @@ Other enhancements
s.drop_duplicates(keep=False)
+- Reindex now has a ``tolerance`` argument that allows for finer control of :ref:`basics.limits_on_reindex_fill`:
+
+ .. ipython:: python
+
+ df = pd.DataFrame({'x': range(5), 't': pd.date_range('2000-01-01', periods=5)})
+ df.reindex([0.1, 1.9, 3.5], method='nearest', tolerance=0.2)
+
+ When used on a ``DatetimeIndex``, ``TimedeltaIndex`` or ``PeriodIndex``, ``tolerance`` will coerced into a ``Timedelta`` if possible. This allows you to specify tolerance with a string:
+
+ .. ipython:: python
+
+ df = df.set_index('t')
+ df.reindex(pd.to_datetime(['1999-12-31']), method='nearest', tolerance='1 day')
+
+ ``tolerance`` is also exposed by the lower level ``Index.get_indexer`` and ``Index.get_loc`` methods.
+
.. _whatsnew_0170.api:
.. _whatsnew_0170.api_breaking:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 062cbe579785c..8f7aee0cb6f15 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2518,33 +2518,36 @@ def lookup(self, row_labels, col_labels):
#----------------------------------------------------------------------
# Reindexing and alignment
- def _reindex_axes(self, axes, level, limit, method, fill_value, copy):
+ def _reindex_axes(self, axes, level, limit, tolerance, method,
+ fill_value, copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, copy, level, fill_value,
- limit)
+ limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
- fill_value, limit)
+ fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
- limit=None):
+ limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method, level,
- limit=limit)
+ limit=limit,
+ tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, copy, level, fill_value=NA,
- limit=None):
+ limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, level=level,
- limit=limit)
+ limit=limit,
+ tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2fc288de438b3..27cb2641034dc 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -922,7 +922,7 @@ def to_hdf(self, path_or_buf, key, **kwargs):
in the store wherever possible
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
- dropna : boolean, default False.
+ dropna : boolean, default False.
If true, ALL nan rows will not be written to store.
"""
@@ -1551,7 +1551,8 @@ def select(self, crit, axis=0):
return self.reindex(**{axis_name: new_axis})
- def reindex_like(self, other, method=None, copy=True, limit=None):
+ def reindex_like(self, other, method=None, copy=True, limit=None,
+ tolerance=None):
""" return an object with matching indicies to myself
Parameters
@@ -1560,7 +1561,12 @@ def reindex_like(self, other, method=None, copy=True, limit=None):
method : string or None
copy : boolean, default True
limit : int, default None
- Maximum size gap to forward or backward fill
+ Maximum number of consecutive labels to fill for inexact matches.
+ tolerance : optional
+ Maximum distance between labels of the other object and this
+ object for inexact matches.
+
+ .. versionadded:: 0.17.0
Notes
-----
@@ -1572,7 +1578,8 @@ def reindex_like(self, other, method=None, copy=True, limit=None):
reindexed : same as input
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS,
- method=method, copy=copy, limit=limit)
+ method=method, copy=copy, limit=limit,
+ tolerance=tolerance)
return self.reindex(**d)
@@ -1736,7 +1743,13 @@ def sort_index(self, axis=0, ascending=True):
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
limit : int, default None
- Maximum size gap to forward or backward fill
+ Maximum number of consecutive elements to forward or backward fill
+ tolerance : optional
+ Maximum distance between original and new labels for inexact
+ matches. The values of the index at the matching locations most
+ satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
+
+ .. versionadded:: 0.17.0
Examples
--------
@@ -1758,6 +1771,7 @@ def reindex(self, *args, **kwargs):
level = kwargs.pop('level', None)
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
+ tolerance = kwargs.pop('tolerance', None)
fill_value = kwargs.pop('fill_value', np.nan)
if kwargs:
@@ -1782,10 +1796,11 @@ def reindex(self, *args, **kwargs):
pass
# perform the reindex on the axes
- return self._reindex_axes(axes, level, limit,
+ return self._reindex_axes(axes, level, limit, tolerance,
method, fill_value, copy).__finalize__(self)
- def _reindex_axes(self, axes, level, limit, method, fill_value, copy):
+ def _reindex_axes(self, axes, level, limit, tolerance, method,
+ fill_value, copy):
""" perform the reinxed for all the axes """
obj = self
for a in self._AXIS_ORDERS:
@@ -1795,7 +1810,8 @@ def _reindex_axes(self, axes, level, limit, method, fill_value, copy):
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
- labels, level=level, limit=limit, method=method)
+ labels, level=level, limit=limit, tolerance=tolerance,
+ method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
@@ -1836,7 +1852,13 @@ def _reindex_multi(self, axes, copy, fill_value):
Broadcast across a level, matching Index values on the
passed MultiIndex level
limit : int, default None
- Maximum size gap to forward or backward fill
+ Maximum number of consecutive elements to forward or backward fill
+ tolerance : optional
+ Maximum distance between original and new labels for inexact
+ matches. The values of the index at the matching locations most
+ satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
+
+ .. versionadded:: 0.17.0
Examples
--------
@@ -2910,7 +2932,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
use the actual numerical values of the index
* 'krogh', 'piecewise_polynomial', 'spline', and 'pchip' are all
wrappers around the scipy interpolation methods of similar
- names. These use the actual numerical values of the index. See
+ names. These use the actual numerical values of the index. See
the scipy documentation for more on their behavior:
http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation
http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 12ad8a590c304..ed89d163bf608 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1602,28 +1602,38 @@ def sym_diff(self, other, result_name=None):
attribs['freq'] = None
return self._shallow_copy(the_diff, infer=True, **attribs)
- def get_loc(self, key, method=None):
+ def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Parameters
----------
key : label
- method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}
+ method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
+ tolerance : optional
+ Maximum distance from index value for inexact matches. The value of
+ the index at the matching location most satisfy the equation
+ ``abs(index[loc] - key) <= tolerance``.
+
+ .. versionadded:: 0.17.0
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
if method is None:
+ if tolerance is not None:
+ raise ValueError('tolerance argument only valid if using pad, '
+ 'backfill or nearest lookups')
return self._engine.get_loc(_values_from_object(key))
- indexer = self.get_indexer([key], method=method)
+ indexer = self.get_indexer([key], method=method,
+ tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
@@ -1692,7 +1702,7 @@ def get_level_values(self, level):
self._validate_index_level(level)
return self
- def get_indexer(self, target, method=None, limit=None):
+ def get_indexer(self, target, method=None, limit=None, tolerance=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
@@ -1701,15 +1711,21 @@ def get_indexer(self, target, method=None, limit=None):
Parameters
----------
target : Index
- method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}
+ method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
- limit : int
- Maximum number of consecuctive labels in ``target`` to match for
+ limit : int, optional
+ Maximum number of consecutive labels in ``target`` to match for
inexact matches.
+ tolerance : optional
+ Maximum distance between original and new labels for inexact
+ matches. The values of the index at the matching locations most
+ satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
+
+ .. versionadded:: 0.17.0
Examples
--------
@@ -1725,36 +1741,54 @@ def get_indexer(self, target, method=None, limit=None):
"""
method = com._clean_reindex_fill_method(method)
target = _ensure_index(target)
+ if tolerance is not None:
+ tolerance = self._convert_tolerance(tolerance)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
- return pself.get_indexer(ptarget, method=method, limit=limit)
+ return pself.get_indexer(ptarget, method=method, limit=limit,
+ tolerance=tolerance)
- if not is_dtype_equal(self.dtype,target.dtype):
+ if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
- return this.get_indexer(target, method=method, limit=limit)
+ return this.get_indexer(target, method=method, limit=limit,
+ tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
- indexer = self._get_fill_indexer(target, method, limit)
+ indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
- indexer = self._get_nearest_indexer(target, limit)
+ indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
+ if tolerance is not None:
+ raise ValueError('tolerance argument only valid if doing pad, '
+ 'backfill or nearest reindexing')
+ if limit is not None:
+ raise ValueError('limit argument only valid if doing pad, '
+ 'backfill or nearest reindexing')
+
indexer = self._engine.get_indexer(target.values)
return com._ensure_platform_int(indexer)
- def _get_fill_indexer(self, target, method, limit=None):
+ def _convert_tolerance(self, tolerance):
+ # override this method on subclasses
+ return tolerance
+
+ def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad'
else self._engine.get_backfill_indexer)
indexer = method(target.values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method, limit)
+ if tolerance is not None:
+ indexer = self._filter_indexer_tolerance(
+ target.values, indexer, tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
@@ -1787,7 +1821,7 @@ def _get_fill_indexer_searchsorted(self, target, method, limit=None):
indexer[indexer == len(self)] = -1
return indexer
- def _get_nearest_indexer(self, target, limit):
+ def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
@@ -1804,6 +1838,14 @@ def _get_nearest_indexer(self, target, limit):
indexer = np.where(op(left_distances, right_distances)
| (right_indexer == -1),
left_indexer, right_indexer)
+ if tolerance is not None:
+ indexer = self._filter_indexer_tolerance(
+ target, indexer, tolerance)
+ return indexer
+
+ def _filter_indexer_tolerance(self, target, indexer, tolerance):
+ distance = abs(self.values[indexer] - target)
+ indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
def get_indexer_non_unique(self, target):
@@ -1911,7 +1953,8 @@ def _can_reindex(self, indexer):
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
- def reindex(self, target, method=None, level=None, limit=None):
+ def reindex(self, target, method=None, level=None, limit=None,
+ tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
@@ -1951,7 +1994,8 @@ def reindex(self, target, method=None, level=None, limit=None):
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
- limit=limit)
+ limit=limit,
+ tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
@@ -3098,7 +3142,8 @@ def _can_reindex(self, indexer):
""" always allow reindexing """
pass
- def reindex(self, target, method=None, level=None, limit=None):
+ def reindex(self, target, method=None, level=None, limit=None,
+ tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
@@ -3167,7 +3212,7 @@ def _reindex_non_unique(self, target):
return new_target, indexer, new_indexer
- def get_indexer(self, target, method=None, limit=None):
+ def get_indexer(self, target, method=None, limit=None, tolerance=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
@@ -3416,6 +3461,14 @@ def _maybe_cast_slice_bound(self, label, side, kind):
return label
+ def _convert_tolerance(self, tolerance):
+ try:
+ return float(tolerance)
+ except ValueError:
+ raise ValueError('tolerance argument for %s must be numeric: %r'
+ % (type(self).__name__, tolerance))
+
+
class Int64Index(NumericIndex):
"""
@@ -3672,7 +3725,7 @@ def __contains__(self, other):
except:
return False
- def get_loc(self, key, method=None):
+ def get_loc(self, key, method=None, tolerance=None):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
@@ -3684,7 +3737,8 @@ def get_loc(self, key, method=None):
return nan_idxs
except (TypeError, NotImplementedError):
pass
- return super(Float64Index, self).get_loc(key, method=method)
+ return super(Float64Index, self).get_loc(key, method=method,
+ tolerance=tolerance)
@property
def is_all_dates(self):
@@ -4906,7 +4960,7 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
return new_index, indexer
- def get_indexer(self, target, method=None, limit=None):
+ def get_indexer(self, target, method=None, limit=None, tolerance=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
@@ -4952,6 +5006,9 @@ def get_indexer(self, target, method=None, limit=None):
self_index = self._tuple_index
if method == 'pad' or method == 'backfill':
+ if tolerance is not None:
+ raise NotImplementedError("tolerance not implemented yet "
+ 'for MultiIndex')
indexer = self_index._get_fill_indexer(target, method, limit)
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
@@ -4961,7 +5018,8 @@ def get_indexer(self, target, method=None, limit=None):
return com._ensure_platform_int(indexer)
- def reindex(self, target, method=None, level=None, limit=None):
+ def reindex(self, target, method=None, level=None, limit=None,
+ tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
@@ -5000,7 +5058,8 @@ def reindex(self, target, method=None, level=None, limit=None):
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
- limit=limit)
+ limit=limit,
+ tolerance=tolerance)
else:
raise Exception(
"cannot handle a non-unique multi-index!")
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8c836ae564e28..1a7bfd2d9c88b 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1929,6 +1929,12 @@ def test_reindex_methods(self):
actual = df.reindex(target, method=method)
assert_frame_equal(expected, actual)
+ actual = df.reindex_like(df, method=method, tolerance=0)
+ assert_frame_equal(df, actual)
+
+ actual = df.reindex(target, method=method, tolerance=1)
+ assert_frame_equal(expected, actual)
+
e2 = expected[::-1]
actual = df.reindex(target[::-1], method=method)
assert_frame_equal(e2, actual)
@@ -1944,6 +1950,10 @@ def test_reindex_methods(self):
actual = df[::-1].reindex(target, method=switched_method)
assert_frame_equal(expected, actual)
+ expected = pd.DataFrame({'x': [0, 1, 1, np.nan]}, index=target)
+ actual = df.reindex(target, method='nearest', tolerance=0.2)
+ assert_frame_equal(expected, actual)
+
def test_non_monotonic_reindex_methods(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6,1)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 9a3576a8fd846..688091d39d7c1 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1220,6 +1220,16 @@ def test_get_indexer(self):
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
+ def test_get_indexer_invalid(self):
+ # GH10411
+ idx = Index(np.arange(10))
+
+ with tm.assertRaisesRegexp(ValueError, 'tolerance argument'):
+ idx.get_indexer([1, 0], tolerance=1)
+
+ with tm.assertRaisesRegexp(ValueError, 'limit argument'):
+ idx.get_indexer([1, 0], limit=1)
+
def test_get_indexer_nearest(self):
idx = Index(np.arange(10))
@@ -1228,10 +1238,20 @@ def test_get_indexer_nearest(self):
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, [0, 5, 9])
+ actual = idx.get_indexer([0, 5, 9], method=method, tolerance=0)
+ tm.assert_numpy_array_equal(actual, [0, 5, 9])
+
for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9], [0, 2, 9]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, expected)
+ actual = idx.get_indexer([0.2, 1.8, 8.5], method=method, tolerance=1)
+ tm.assert_numpy_array_equal(actual, expected)
+
+ for method, expected in zip(all_methods, [[0, -1, -1], [-1, 2, -1], [0, 2, -1]]):
+ actual = idx.get_indexer([0.2, 1.8, 8.5], method=method, tolerance=0.2)
+ tm.assert_numpy_array_equal(actual, expected)
+
with tm.assertRaisesRegexp(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
@@ -1261,20 +1281,39 @@ def test_get_indexer_strings(self):
with tm.assertRaises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
+ with tm.assertRaises(TypeError):
+ idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
+
def test_get_loc(self):
idx = pd.Index([0, 1, 2])
all_methods = [None, 'pad', 'backfill', 'nearest']
for method in all_methods:
self.assertEqual(idx.get_loc(1, method=method), 1)
+ if method is not None:
+ self.assertEqual(idx.get_loc(1, method=method, tolerance=0), 1)
with tm.assertRaises(TypeError):
idx.get_loc([1, 2], method=method)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc(1.1, method), loc)
+ for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
+ self.assertEqual(idx.get_loc(1.1, method, tolerance=1), loc)
+
+ for method in ['pad', 'backfill', 'nearest']:
+ with tm.assertRaises(KeyError):
+ idx.get_loc(1.1, method, tolerance=0.05)
+
+ with tm.assertRaisesRegexp(ValueError, 'must be numeric'):
+ idx.get_loc(1.1, 'nearest', tolerance='invalid')
+ with tm.assertRaisesRegexp(ValueError, 'tolerance .* valid if'):
+ idx.get_loc(1.1, tolerance=1)
+
idx = pd.Index(['a', 'c'])
with tm.assertRaises(TypeError):
idx.get_loc('a', method='nearest')
+ with tm.assertRaises(TypeError):
+ idx.get_loc('a', method='pad', tolerance='invalid')
def test_slice_locs(self):
for dtype in [int, float]:
@@ -2266,12 +2305,20 @@ def test_get_loc(self):
idx = Float64Index([0.0, 1.0, 2.0])
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(1, method), 1)
+ if method is not None:
+ self.assertEqual(idx.get_loc(1, method, tolerance=0), 1)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc(1.1, method), loc)
+ self.assertEqual(idx.get_loc(1.1, method, tolerance=0.9), loc)
self.assertRaises(KeyError, idx.get_loc, 'foo')
self.assertRaises(KeyError, idx.get_loc, 1.5)
+ self.assertRaises(KeyError, idx.get_loc, 1.5,
+ method='pad', tolerance=0.1)
+
+ with tm.assertRaisesRegexp(ValueError, 'must be numeric'):
+ idx.get_loc(1.4, method='nearest', tolerance='foo')
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
@@ -2838,10 +2885,28 @@ def test_get_loc(self):
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
+ if method is not None:
+ self.assertEqual(idx.get_loc(idx[1], method,
+ tolerance=pd.Timedelta('0 days')),
+ 1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
+ self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
+ tolerance='1 day'), 1)
+ self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
+ tolerance=pd.Timedelta('1D')), 1)
+ self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
+ tolerance=np.timedelta64(1, 'D')), 1)
+ self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
+ tolerance=timedelta(1)), 1)
+ with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
+ idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
+ with tm.assertRaises(KeyError):
+ idx.get_loc('2000-01-01T03', method='nearest',
+ tolerance='2 hours')
+
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
@@ -2878,6 +2943,11 @@ def test_get_indexer(self):
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ tm.assert_numpy_array_equal(
+ idx.get_indexer(target, 'nearest', tolerance=pd.Timedelta('1 hour')),
+ [0, -1, 1])
+ with tm.assertRaises(ValueError):
+ idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
@@ -2988,6 +3058,22 @@ def test_get_loc(self):
self.assertEqual(idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
+ idx = pd.period_range('2000-01-01', periods=5)[::2]
+ self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
+ tolerance='1 day'), 1)
+ self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
+ tolerance=pd.Timedelta('1D')), 1)
+ self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
+ tolerance=np.timedelta64(1, 'D')), 1)
+ self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
+ tolerance=timedelta(1)), 1)
+ with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
+ idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
+ with tm.assertRaisesRegexp(ValueError, 'different freq'):
+ idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
+ with tm.assertRaises(KeyError):
+ idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
+
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx), [0, 1, 2])
@@ -2997,9 +3083,15 @@ def test_get_indexer(self):
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ tm.assert_numpy_array_equal(
+ idx.get_indexer(target, 'nearest', tolerance='1 hour'),
+ [0, -1, 1])
with self.assertRaisesRegexp(ValueError, 'different freq'):
- idx.asfreq('D').get_indexer(idx)
+ idx.get_indexer(target, 'nearest', tolerance='1 minute')
+
+ tm.assert_numpy_array_equal(
+ idx.get_indexer(target, 'nearest', tolerance='1 day'), [0, 1, 1])
def test_repeat(self):
# GH10183
@@ -3029,6 +3121,13 @@ def test_get_loc(self):
self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
+ self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)
+ self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)
+ self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)
+
+ with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
+ idx.get_loc(idx[1], method='nearest', tolerance='foo')
+
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)
@@ -3040,6 +3139,10 @@ def test_get_indexer(self):
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1])
+ tm.assert_numpy_array_equal(
+ idx.get_indexer(target, 'nearest',
+ tolerance=pd.Timedelta('1 hour')),
+ [0, -1, 1])
def test_numeric_compat(self):
@@ -4059,6 +4162,8 @@ def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with tm.assertRaises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
+ with tm.assertRaises(NotImplementedError):
+ midx.get_indexer(['a'], method='pad', tolerance=2)
def test_format(self):
self.index.format()
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 36a8600e51725..4fa8aaf34846f 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -6465,6 +6465,13 @@ def test_reindex_nearest(self):
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
+ actual = s.reindex_like(actual, method='nearest', tolerance=1)
+ assert_series_equal(expected, actual)
+
+ actual = s.reindex(target, method='nearest', tolerance=0.2)
+ expected = Series([0, 1, np.nan, 2], target)
+ assert_series_equal(expected, actual)
+
def test_reindex_backfill(self):
pass
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 727852ced25b0..c353e66bc2dbb 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -203,6 +203,14 @@ def asobject(self):
from pandas.core.index import Index
return Index(self._box_values(self.asi8), name=self.name, dtype=object)
+ def _convert_tolerance(self, tolerance):
+ try:
+ return tslib.Timedelta(tolerance).to_timedelta64()
+ except ValueError:
+ raise ValueError('tolerance argument for %s must be convertible '
+ 'to Timedelta: %r'
+ % (type(self).__name__, tolerance))
+
def _maybe_mask_results(self, result, fill_value=None, convert=None):
"""
Parameters
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 19ff9a4b19a3e..0525a29ef3fd0 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1270,7 +1270,7 @@ def get_value_maybe_box(self, series, key):
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
- def get_loc(self, key, method=None):
+ def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
@@ -1278,10 +1278,15 @@ def get_loc(self, key, method=None):
-------
loc : int
"""
+ if tolerance is not None:
+ # try converting tolerance now, so errors don't get swallowed by
+ # the try/except clauses below
+ tolerance = self._convert_tolerance(tolerance)
+
if isinstance(key, datetime):
# needed to localize naive datetimes
key = Timestamp(key, tz=self.tz)
- return Index.get_loc(self, key, method=method)
+ return Index.get_loc(self, key, method, tolerance)
if isinstance(key, time):
if method is not None:
@@ -1290,7 +1295,7 @@ def get_loc(self, key, method=None):
return self.indexer_at_time(key)
try:
- return Index.get_loc(self, key, method=method)
+ return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
@@ -1299,7 +1304,7 @@ def get_loc(self, key, method=None):
try:
stamp = Timestamp(key, tz=self.tz)
- return Index.get_loc(self, stamp, method=method)
+ return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index e7b229e91cbc8..56d7d45120fdc 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -502,22 +502,26 @@ def to_timestamp(self, freq=None, how='start'):
new_data = period.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
- def _add_delta(self, other):
+ def _maybe_convert_timedelta(self, other):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
- return self.shift(nanos // offset_nanos)
+ return nanos // offset_nanos
elif isinstance(other, offsets.DateOffset):
freqstr = frequencies.get_standard_freq(other)
base = frequencies.get_base_alias(freqstr)
if base == self.freq:
- return self.shift(other.n)
+ return other.n
raise ValueError("Input has different freq from PeriodIndex(freq={0})".format(self.freq))
+ def _add_delta(self, other):
+ ordinal_delta = self._maybe_convert_timedelta(other)
+ return self.shift(ordinal_delta)
+
def shift(self, n):
"""
Specialized shift which produces an PeriodIndex
@@ -586,13 +590,13 @@ def get_value(self, series, key):
key = Period(key, self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
- def get_indexer(self, target, method=None, limit=None):
+ def get_indexer(self, target, method=None, limit=None, tolerance=None):
if hasattr(target, 'freq') and target.freq != self.freq:
raise ValueError('target and index have different freq: '
'(%s, %s)' % (target.freq, self.freq))
- return Index.get_indexer(self, target, method, limit)
+ return Index.get_indexer(self, target, method, limit, tolerance)
- def get_loc(self, key, method=None):
+ def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
@@ -614,7 +618,7 @@ def get_loc(self, key, method=None):
key = Period(key, self.freq)
try:
- return Index.get_loc(self, key.ordinal, method=method)
+ return Index.get_loc(self, key.ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
@@ -694,6 +698,10 @@ def _get_string_slice(self, key):
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
+ def _convert_tolerance(self, tolerance):
+ tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance)
+ return self._maybe_convert_timedelta(tolerance)
+
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index d7172dd304b6b..b0c9d8852f8c9 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -645,7 +645,7 @@ def get_value_maybe_box(self, series, key):
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
- def get_loc(self, key, method=None):
+ def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
@@ -653,12 +653,17 @@ def get_loc(self, key, method=None):
-------
loc : int
"""
+ if tolerance is not None:
+ # try converting tolerance now, so errors don't get swallowed by
+ # the try/except clauses below
+ tolerance = self._convert_tolerance(tolerance)
+
if _is_convertible_to_td(key):
key = Timedelta(key)
- return Index.get_loc(self, key, method=method)
+ return Index.get_loc(self, key, method, tolerance)
try:
- return Index.get_loc(self, key, method=method)
+ return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
@@ -667,7 +672,7 @@ def get_loc(self, key, method=None):
try:
stamp = Timedelta(key)
- return Index.get_loc(self, stamp, method=method)
+ return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
| xref #9817
This does not entirely solve the floating point precision issues, but gets us part of the way there -- we can explicitly lookup data with a fixed tolerance for nearest neighbor matches.
It is also is useful in its own right, mostly as a simple sanity check to verify that labels are not entirely misaligned.
Example usage:
```
In [2]: df = pd.DataFrame({'x': range(5)})
In [3]: df.reindex([0.1, 1.9, 3.5], method='nearest', max_distance=0.2)
Out[3]:
x
0.1 0
1.9 2
3.5 NaN
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10411 | 2015-06-23T08:21:54Z | 2015-08-18T23:05:06Z | 2015-08-18T23:05:06Z | 2015-08-20T00:47:38Z |
TST: Use unicode literals in string test | diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index a66410320e816..90da68eed5cc4 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -747,20 +747,18 @@ def test_isnumeric(self):
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
- values = ['A', '3', unichr(0x00bc), unichr(0x2605),
- unichr(0x1378), unichr(0xFF13), 'four']
+ values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
- unicodes = [u('A'), u('3'), unichr(0x00bc), unichr(0x2605),
- unichr(0x1378), unichr(0xFF13), u('four')]
+
+ unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
self.assertEqual(s.str.isnumeric().tolist(), [v.isnumeric() for v in unicodes])
self.assertEqual(s.str.isdecimal().tolist(), [v.isdecimal() for v in unicodes])
- values = ['A', np.nan, unichr(0x00bc), unichr(0x2605),
- np.nan, unichr(0xFF13), 'four']
+ values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
@@ -1950,33 +1948,16 @@ def test_encode_decode_errors(self):
tm.assert_series_equal(result, exp)
def test_normalize(self):
- def unistr(codes):
- # build unicode string from unichr
- # we cannot use six.u() here because it escapes unicode
- return ''.join([unichr(c) for c in codes])
-
- values = ['ABC', # ASCII
- unistr([0xFF21, 0xFF22, 0xFF23]), # ABC
- unistr([0xFF11, 0xFF12, 0xFF13]), # 123
- np.nan,
- unistr([0xFF71, 0xFF72, 0xFF74])] # アイエ
+ values = ['ABC', u'ABC', u'123', np.nan, u'アイエ']
s = Series(values, index=['a', 'b', 'c', 'd', 'e'])
- normed = [compat.u_safe('ABC'),
- compat.u_safe('ABC'),
- compat.u_safe('123'),
- np.nan,
- unistr([0x30A2, 0x30A4, 0x30A8])] # アイエ
+ normed = [u'ABC', u'ABC', u'123', np.nan, u'アイエ']
expected = Series(normed, index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFKC')
tm.assert_series_equal(result, expected)
- expected = Series([compat.u_safe('ABC'),
- unistr([0xFF21, 0xFF22, 0xFF23]), # ABC
- unistr([0xFF11, 0xFF12, 0xFF13]), # 123
- np.nan,
- unistr([0xFF71, 0xFF72, 0xFF74])], # アイエ
+ expected = Series([u'ABC', u'ABC', u'123', np.nan, u'アイエ'],
index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFC')
@@ -1985,12 +1966,8 @@ def unistr(codes):
with tm.assertRaisesRegexp(ValueError, "invalid normalization form"):
s.str.normalize('xxx')
- s = Index([unistr([0xFF21, 0xFF22, 0xFF23]), # ABC
- unistr([0xFF11, 0xFF12, 0xFF13]), # 123
- unistr([0xFF71, 0xFF72, 0xFF74])]) # アイエ
- expected = Index([compat.u_safe('ABC'),
- compat.u_safe('123'),
- unistr([0x30A2, 0x30A4, 0x30A8])])
+ s = Index([u'ABC', u'123', u'アイエ'])
+ expected = Index([u'ABC', u'123', u'アイエ'])
result = s.str.normalize('NFKC')
tm.assert_index_equal(result, expected)
| Follow-up of #10397. Fix some ugly unicode tests. Shall I fix followings also?
- Remove `compat.u()` completely, because the escaped literal is different from normal unicode literals internally.
- Remove `compat.callable` brought back in v3.2 ([some code](https://github.com/pydata/pandas/blob/master/pandas/core/frame.py#L2307) doesn't use `compat.callable` already)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10405 | 2015-06-22T13:21:28Z | 2015-06-23T14:12:07Z | 2015-06-23T14:12:07Z | 2015-06-23T14:12:14Z |
BUG: GH10392 bug where Table.select_column does not preserve column name | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 168fd803c5f8a..9293c6b36879b 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -74,3 +74,4 @@ Bug Fixes
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
+- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 31f649c498c14..92208c37f787b 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3608,7 +3608,7 @@ def read_column(self, column, where=None, start=None, stop=None, **kwargs):
nan_rep=self.nan_rep,
encoding=self.encoding
).take_data(),
- a.tz, True))
+ a.tz, True), name=column)
raise KeyError("column [%s] not found in the table" % column)
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 6aaeb6652f2b6..e9c39127d1032 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -3766,7 +3766,7 @@ def f():
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
- self.assertIsInstance(result,Series)
+ self.assertIsInstance(result, Series)
# not a data indexable column
self.assertRaises(
@@ -3806,6 +3806,14 @@ def f():
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
+ # GH 10392 - make sure column name is preserved
+ df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
+ store.append('df4', df4, data_columns=True)
+ expected = df4['B']
+ result = store.select_column('df4', 'B')
+ tm.assert_series_equal(result, expected)
+
+
def test_coordinates(self):
df = tm.makeTimeDataFrame()
| To close #10392
| https://api.github.com/repos/pandas-dev/pandas/pulls/10401 | 2015-06-21T15:43:36Z | 2015-06-23T11:00:14Z | 2015-06-23T11:00:14Z | 2015-06-23T11:00:19Z |
BUG: GH10395 bug in DataFrame.interpolate with axis=1 and inplace=True | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9293c6b36879b..e021eb9d02abe 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -75,3 +75,4 @@ Bug Fixes
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
+- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 35db4051c60c8..b9e007a1e4d58 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2925,47 +2925,50 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
if axis == 0:
ax = self._info_axis_name
+ _maybe_transposed_self = self
elif axis == 1:
- self = self.T
+ _maybe_transposed_self = self.T
ax = 1
- ax = self._get_axis_number(ax)
+ else:
+ _maybe_transposed_self = self
+ ax = _maybe_transposed_self._get_axis_number(ax)
- if self.ndim == 2:
+ if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
- if isinstance(self.index, MultiIndex) and method != 'linear':
+ if isinstance(_maybe_transposed_self.index, MultiIndex) and method != 'linear':
raise ValueError("Only `method=linear` interpolation is supported "
"on MultiIndexes.")
- if self._data.get_dtype_counts().get('object') == len(self.T):
+ if _maybe_transposed_self._data.get_dtype_counts().get('object') == len(_maybe_transposed_self.T):
raise TypeError("Cannot interpolate with all NaNs.")
# create/use the index
if method == 'linear':
- index = np.arange(len(self._get_axis(alt_ax))) # prior default
+ index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax))) # prior default
else:
- index = self._get_axis(alt_ax)
+ index = _maybe_transposed_self._get_axis(alt_ax)
if pd.isnull(index).any():
raise NotImplementedError("Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating.")
- new_data = self._data.interpolate(method=method,
- axis=ax,
- index=index,
- values=self,
- limit=limit,
- inplace=inplace,
- downcast=downcast,
- **kwargs)
+ new_data = _maybe_transposed_self._data.interpolate(
+ method=method,
+ axis=ax,
+ index=index,
+ values=_maybe_transposed_self,
+ limit=limit,
+ inplace=inplace,
+ downcast=downcast,
+ **kwargs
+ )
if inplace:
if axis == 1:
- self._update_inplace(new_data)
- self = self.T
- else:
- self._update_inplace(new_data)
+ new_data = self._constructor(new_data).T._data
+ self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 44f7791b7f8ba..9a8ec00188d9c 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -1117,6 +1117,14 @@ def test_interp_inplace(self):
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
+ def test_interp_inplace_row(self):
+ # GH 10395
+ result = DataFrame({'a': [1.,2.,3.,4.], 'b': [np.nan, 2., 3., 4.],
+ 'c': [3, 2, 2, 2]})
+ expected = result.interpolate(method='linear', axis=1, inplace=False)
+ result.interpolate(method='linear', axis=1, inplace=True)
+ assert_frame_equal(result, expected)
+
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
| To close #10395.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10400 | 2015-06-21T06:24:00Z | 2015-06-23T14:00:40Z | 2015-06-23T14:00:39Z | 2015-06-23T14:00:48Z |
BLD: remove support for 3.2, #9118 | diff --git a/.travis.yml b/.travis.yml
index 246154310a50f..b867601ba0b96 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -86,13 +86,6 @@ matrix:
- CLIPBOARD=xsel
- BUILD_TYPE=conda
- JOB_NAME: "34_slow"
- - python: 3.2
- env:
- - NOSE_ARGS="not slow and not network and not disabled"
- - FULL_DEPS=true
- - CLIPBOARD_GUI=qt4
- - BUILD_TYPE=pydata
- - JOB_NAME: "32_nslow"
- python: 2.7
env:
- EXPERIMENTAL=true
@@ -103,13 +96,6 @@ matrix:
- BUILD_TYPE=pydata
- PANDAS_TESTING_MODE="deprecate"
allow_failures:
- - python: 3.2
- env:
- - NOSE_ARGS="not slow and not network and not disabled"
- - FULL_DEPS=true
- - CLIPBOARD_GUI=qt4
- - BUILD_TYPE=pydata
- - JOB_NAME: "32_nslow"
- python: 2.7
env:
- NOSE_ARGS="slow and not network and not disabled"
diff --git a/ci/requirements-3.2.txt b/ci/requirements-3.2.txt
deleted file mode 100644
index 8c2f675b65603..0000000000000
--- a/ci/requirements-3.2.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-python-dateutil==2.1
-pytz==2013b
-numpy==1.7.1
-cython==0.19.1
diff --git a/doc/source/install.rst b/doc/source/install.rst
index b3f86db5e3e59..5a17f25ab1654 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 2.6, 2.7, 3.2, 3.3, and 3.4.
+Officially Python 2.6, 2.7, 3.3, and 3.4.
Installing pandas
-----------------
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 6f7e9bce0a3a6..8e3074c2de3f2 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -7,6 +7,10 @@ This is a major release from 0.16.2 and includes a small number of API changes,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to this version.
+.. warning::
+
+ pandas >= 0.17.0 will no longer support compatibility with Python version 3.2 (:issue:`9118`)
+
Highlights include:
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 2a273629544cb..20d71de28d2e2 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -37,7 +37,6 @@
import types
PY3 = (sys.version_info[0] >= 3)
-PY3_2 = sys.version_info[:2] == (3, 2)
PY2 = sys.version_info[0] == 2
diff --git a/pandas/core/common.py b/pandas/core/common.py
index c2203bd506d7e..990eec08d0bd6 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2813,11 +2813,7 @@ def _get_handle(path, mode, encoding=None, compression=None):
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
- if compat.PY3_2:
- # gzip and bz2 don't work with TextIOWrapper in 3.2
- encoding = encoding or get_option('display.encoding')
- f = StringIO(f.read().decode(encoding))
- elif compat.PY3:
+ if compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
return f
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 508cf1103cee5..d1e6e5677da0b 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1327,10 +1327,7 @@ def _wrap_compressed(f, compression, encoding=None):
import gzip
f = gzip.GzipFile(fileobj=f)
- if compat.PY3_2:
- # 3.2's gzip doesn't support read1
- f = StringIO(f.read().decode(encoding))
- elif compat.PY3:
+ if compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index c83ba897125bf..3a2f388bdd65b 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -395,8 +395,8 @@ def f():
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
- # works only on numpy >= 1.7.1 and not on PY3.2
- if LooseVersion(np.__version__) > "1.7.1" and not compat.PY3_2:
+ # works only on numpy >= 1.7.1
+ if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index f422c3b49b691..0700d5a050516 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -2690,14 +2690,6 @@ def create_index(self):
def test_pickle_compat_construction(self):
pass
- def test_numeric_compat(self):
- super(TestDatetimeIndex, self).test_numeric_compat()
-
- if not compat.PY3_2:
- for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
- lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
- self.assertRaises(TypeError, f)
-
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 948a0be91b276..5456bd7602e63 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -11,7 +11,7 @@
from pandas import (Index, Series, DataFrame, Timestamp, Timedelta, TimedeltaIndex, isnull, notnull,
bdate_range, date_range, timedelta_range, Int64Index)
import pandas.core.common as com
-from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long, PY3_2
+from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
@@ -1040,8 +1040,6 @@ def test_comparisons_coverage(self):
self.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
- if PY3_2:
- raise nose.SkipTest('nat comparisons on 3.2 broken')
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 09b2bb24714af..9ad814410741c 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -28,7 +28,7 @@
import pandas.index as _index
-from pandas.compat import range, long, StringIO, lrange, lmap, zip, product, PY3_2
+from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
@@ -2225,8 +2225,6 @@ def test_comparisons_coverage(self):
self.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
- if PY3_2:
- raise nose.SkipTest('nat comparisons on 3.2 broken')
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 59eb432844ee3..6a858b16fed4a 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -2543,9 +2543,6 @@ cdef inline _get_datetime64_nanos(object val):
npy_datetime ival
unit = get_datetime64_unit(val)
- if unit == 3:
- raise ValueError('NumPy 1.6.1 business freq not supported')
-
ival = get_datetime64_value(val)
if unit != PANDAS_FR_ns:
@@ -2613,9 +2610,6 @@ def cast_to_nanoseconds(ndarray arr):
return result
unit = get_datetime64_unit(arr.flat[0])
- if unit == 3:
- raise ValueError('NumPy 1.6.1 business freq not supported')
-
for i in range(n):
pandas_datetime_to_datetimestruct(ivalues[i], unit, &dts)
iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
diff --git a/setup.py b/setup.py
index 6ff03eb4cf11f..01364892cd0f3 100755
--- a/setup.py
+++ b/setup.py
@@ -185,7 +185,6 @@ def build_extensions(self):
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Cython',
| closes #9118
| https://api.github.com/repos/pandas-dev/pandas/pulls/10397 | 2015-06-20T14:58:44Z | 2015-06-22T08:46:38Z | 2015-06-22T08:46:37Z | 2015-06-22T09:03:08Z |
PERF: parse and timedelta ops improvements, #6755 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 6f7e9bce0a3a6..b09ca81d2572f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -54,6 +54,9 @@ Removal of prior version deprecations/changes
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- 4x improvement in ``timedelta`` string parsing (:issue:`6755`)
+- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`)
+
.. _whatsnew_0170.bug_fixes:
Bug Fixes
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 948a0be91b276..565760b545961 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -109,6 +109,9 @@ def test_construction(self):
# currently invalid as it has a - on the hhmmdd part (only allowed on the days)
self.assertRaises(ValueError, lambda : Timedelta('-10 days -1 h 1.5m 1s 3us'))
+ # only leading neg signs are allowed
+ self.assertRaises(ValueError, lambda : Timedelta('10 days -1 h 1.5m 1s 3us'))
+
# roundtripping both for string and value
for v in ['1s',
'-1s',
@@ -151,7 +154,7 @@ def test_construction(self):
"cannot construct a TimeDelta",
lambda : Timedelta())
tm.assertRaisesRegexp(ValueError,
- "cannot create timedelta string convert",
+ "unit abbreviation w/o a number",
lambda : Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a TimeDelta from the passed arguments, allowed keywords are ",
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index 624981c5536f5..60005ef6f2d6f 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -34,22 +34,13 @@ def _convert_listlike(arg, box, unit):
if isinstance(arg, (list,tuple)) or ((hasattr(arg,'__iter__') and not hasattr(arg,'dtype'))):
arg = np.array(list(arg), dtype='O')
+ # these are shortcutable
if is_timedelta64_dtype(arg):
value = arg.astype('timedelta64[ns]')
elif is_integer_dtype(arg):
-
- # these are shortcutable
- value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]')
+ value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]', copy=False)
else:
- try:
- value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit, coerce=coerce)
- except:
-
- # try to process strings fast; may need to fallback
- try:
- value = np.array([ _get_string_converter(r, unit=unit)() for r in arg ],dtype='m8[ns]')
- except:
- value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit, coerce=coerce) for r in arg ])
+ value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit, coerce=coerce)
value = value.astype('timedelta64[ns]', copy=False)
if box:
@@ -95,15 +86,6 @@ def _convert_listlike(arg, box, unit):
'NS' : 'ns',
'ns' : 'ns',
}
-_unit_scale = {
- 'd' : 86400*1e9,
- 'h' : 3600*1e9,
- 'm' : 60*1e9,
- 's' : 1e9,
- 'ms' : 1e6,
- 'us' : 1e3,
- 'ns' : 1,
- }
def _validate_timedelta_unit(arg):
""" provide validation / translation for timedelta short units """
@@ -114,150 +96,11 @@ def _validate_timedelta_unit(arg):
return 'ns'
raise ValueError("invalid timedelta unit {0} provided".format(arg))
-_short_search = re.compile(
- "^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$",re.IGNORECASE)
-_full_search = re.compile(
- "^\s*(?P<neg>-?)\s*(?P<days>\d*?\.?\d*?)?\s*(days|d|day)?,?\s*\+?(?P<time>\d{1,2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$",re.IGNORECASE)
-_nat_search = re.compile(
- "^\s*(nat|nan)\s*$",re.IGNORECASE)
-_whitespace = re.compile('^\s*$')
-_number_split = re.compile("^(\d+\.?\d*)")
-
-# construct the full2_search
-abbrevs = [('d' ,'days|d|day'),
- ('h' ,'hours|h|hour'),
- ('m' ,'minutes|min|minute|m'),
- ('s' ,'seconds|sec|second|s'),
- ('ms','milliseconds|milli|millis|millisecond|ms'),
- ('us','microseconds|micro|micros|microsecond|us'),
- ('ns','nanoseconds|nano|nanos|nanosecond|ns')]
-
-_full_search2 = re.compile(''.join(
- ["^\s*(?P<neg>-?)\s*"] + [ "(?P<" + p + ">\\d+\.?\d*\s*(" + ss + "))?\\s*" for p, ss in abbrevs ] + ['$']))
-
def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, coerce=False):
""" convert strings to timedelta; coerce to Timedelta (if box), else np.timedelta64"""
- if isinstance(r, compat.string_types):
-
- # we are already converting to nanoseconds
- converter = _get_string_converter(r, unit=unit)
- r = converter()
- unit='ns'
-
result = tslib.convert_to_timedelta(r,unit,coerce)
if box:
result = tslib.Timedelta(result)
return result
-
-def _get_string_converter(r, unit='ns'):
- """ return a string converter for r to process the timedelta format """
-
- # treat as a nan
- if isnull(r):
- def convert(r=None, unit=None):
- return tslib.iNaT
- return convert
-
- if _whitespace.search(r):
- def convert(r=None, unit=None):
- return tslib.iNaT
- return convert
-
- m = _short_search.search(r)
- if m:
- def convert(r=None, unit=unit, m=m):
- if r is not None:
- m = _short_search.search(r)
-
- gd = m.groupdict()
-
- r = float(gd['value'])
- u = gd.get('unit')
- if u is not None:
- unit = u.lower()
- result = tslib.cast_from_unit(r, unit)
- if gd['neg']:
- result *= -1
- return result
- return convert
-
- m = _full_search.search(r)
- if m:
- def convert(r=None, unit=None, m=m):
- if r is not None:
- m = _full_search.search(r)
-
- gd = m.groupdict()
-
- # handle time
- value = 0
- time = gd['time']
- if time:
- (hh,mm,ss) = time.split(':')
- value += int((float(hh)*3600 + float(mm)*60 + float(ss))*1e9)
-
- # handle frac
- frac = gd['frac']
- if frac:
- value += round(float(frac)*1e9)
-
- # handle days (possibly negative)
- is_neg = gd['neg']
- if gd['days']:
- days = int((float(gd['days'] or 0) * 86400)*1e9)
- if is_neg:
- days *= -1
- value += days
- else:
- if is_neg:
- value *= -1
- return tslib.cast_from_unit(value, 'ns')
- return convert
-
- # look for combo strings
- m = _full_search2.search(r)
- if m:
- def convert(r=None, unit=None, m=m):
- if r is not None:
- m = _full_search2.search(r)
-
- gd = m.groupdict()
-
- # the parser
- def parse(k, v):
- if v is None:
- return 0
- v = float(_number_split.search(v).group())
- return int(v*_unit_scale[k])
-
- # handle non-days
- days = gd.pop('days',None)
- neg = gd.pop('neg',None)
- value = 0
- for k, v in gd.items():
- value += parse(k,v)
-
- # parse days / neg
- if days:
- days = parse('days',days)
- if neg:
- days *= -1
- value += days
- else:
- if neg:
- value *= -1
-
- return tslib.cast_from_unit(value, 'ns')
- return convert
-
- m = _nat_search.search(r)
- if m:
- def convert(r=None, unit=None, m=m):
- return tslib.iNaT
- return convert
-
- # no converter
- raise ValueError("cannot create timedelta string converter for [{0}]".format(r))
-
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index ef37e003ab67f..65fe3420f670c 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -263,6 +263,7 @@ def _convert_listlike(arg, box, format):
if isinstance(arg, (list,tuple)):
arg = np.array(arg, dtype='O')
+ # these are shortcutable
if com.is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
@@ -271,6 +272,12 @@ def _convert_listlike(arg, box, format):
pass
return arg
+ elif format is None and com.is_integer_dtype(arg) and unit=='ns':
+ result = arg.astype('datetime64[ns]')
+ if box:
+ return DatetimeIndex(result, tz='utc' if utc else None)
+
+ return result
arg = com._ensure_object(arg)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 59eb432844ee3..14bc32c35d8a4 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -5,6 +5,11 @@ from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray,
NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA)
import numpy as np
+# GH3363
+from sys import version_info
+cdef bint PY2 = version_info[0] == 2
+cdef bint PY3 = not PY2
+
from cpython cimport (
PyTypeObject,
PyFloat_Check,
@@ -48,15 +53,11 @@ else:
from dateutil.tz import gettz as _dateutil_gettz
from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
-from pandas.compat import parse_date, string_types, PY3, iteritems
+from pandas.compat import parse_date, string_types, iteritems
-from sys import version_info
import operator
import collections
-# GH3363
-cdef bint PY2 = version_info[0] == 2
-
# initialize numpy
import_array()
#import_ufunc()
@@ -1844,8 +1845,7 @@ class Timedelta(_Timedelta):
if isinstance(value, Timedelta):
value = value.value
elif util.is_string_object(value):
- from pandas import to_timedelta
- value = to_timedelta(value,unit=unit,box=False)
+ value = np.timedelta64(parse_timedelta_string(value, False))
elif isinstance(value, timedelta):
value = convert_to_timedelta64(value,'ns',False)
elif isinstance(value, np.timedelta64):
@@ -2201,13 +2201,272 @@ def array_to_timedelta64(ndarray[object] values, unit='ns', coerce=False):
result = np.empty(n, dtype='m8[ns]')
iresult = result.view('i8')
- for i in range(n):
- result[i] = convert_to_timedelta64(values[i], unit, coerce)
+ # usually we have all strings
+ # if so then we hit the fast path
+ try:
+ for i in range(n):
+ result[i] = parse_timedelta_string(values[i], coerce)
+ except:
+ for i in range(n):
+ result[i] = convert_to_timedelta64(values[i], unit, coerce)
return iresult
+
def convert_to_timedelta(object ts, object unit='ns', coerce=False):
return convert_to_timedelta64(ts, unit, coerce)
+cdef dict timedelta_abbrevs = { 'd' : 'd',
+ 'days' : 'd',
+ 'day' : 'd',
+ 'hours' : 'h',
+ 'hour' : 'h',
+ 'h' : 'h',
+ 'm' : 'm',
+ 'minute' : 'm',
+ 'min' : 'm',
+ 'minutes' : 'm',
+ 's' : 's',
+ 'seconds' : 's',
+ 'sec' : 's',
+ 'second' : 's',
+ 'ms' : 'ms',
+ 'milliseconds' : 'ms',
+ 'millisecond' : 'ms',
+ 'milli' : 'ms',
+ 'millis' : 'ms',
+ 'us' : 'us',
+ 'microseconds' : 'us',
+ 'microsecond' : 'us',
+ 'micro' : 'us',
+ 'micros' : 'us',
+ 'ns' : 'ns',
+ 'nanoseconds' : 'ns',
+ 'nano' : 'ns',
+ 'nanos' : 'ns',
+ 'nanosecond' : 'ns',
+ }
+
+cdef inline int64_t timedelta_as_neg(int64_t value, bint neg):
+ """
+
+ Parameters
+ ----------
+ value : int64_t of the timedelta value
+ neg : boolean if the a negative value
+ """
+ if neg:
+ return -value
+ return value
+
+cdef inline timedelta_from_spec(object number, object frac, object unit):
+ """
+
+ Parameters
+ ----------
+ number : a list of number digits
+ frac : a list of frac digits
+ unit : a list of unit characters
+ """
+ cdef object n
+
+ try:
+ unit = ''.join(unit)
+ unit = timedelta_abbrevs[unit.lower()]
+ except KeyError:
+ raise ValueError("invalid abbreviation: {0}".format(unit))
+
+ n = ''.join(number) + '.' + ''.join(frac)
+ return cast_from_unit(float(n), unit)
+
+cdef inline parse_timedelta_string(object ts, coerce=False):
+ """
+ Parse an regular format timedelta string
+
+ Return an int64_t or raise a ValueError on an invalid parse
+
+ if coerce, set a non-valid value to NaT
+
+ Return a ns based int64
+ """
+
+ cdef:
+ str c
+ bint neg=0, have_dot=0, have_value=0, have_hhmmss=0
+ object current_unit=None
+ int64_t result=0, m=0, r
+ list number=[], frac=[], unit=[]
+
+ # neg : tracks if we have a leading negative for the value
+ # have_dot : tracks if we are processing a dot (either post hhmmss or inside an expression)
+ # have_value : track if we have at least 1 leading unit
+ # have_hhmmss : tracks if we have a regular format hh:mm:ss
+
+ if ts in _nat_strings or not len(ts):
+ return iNaT
+
+ for c in ts:
+
+ # skip whitespace / commas
+ if c == ' ' or c == ',':
+ pass
+
+ # positive signs are ignored
+ elif c == '+':
+ pass
+
+ # neg
+ elif c == '-':
+
+ if neg or have_value or have_hhmmss:
+ raise ValueError("only leading negative signs are allowed")
+
+ neg = 1
+
+ # number (ascii codes)
+ elif ord(c) >= 48 and ord(c) <= 57:
+
+ if have_dot:
+
+ # we found a dot, but now its just a fraction
+ if len(unit):
+ number.append(c)
+ have_dot = 0
+ else:
+ frac.append(c)
+
+ elif not len(unit):
+ number.append(c)
+
+ else:
+
+ try:
+ r = timedelta_from_spec(number, frac, unit)
+ except ValueError:
+ if coerce:
+ return iNaT
+ raise
+ unit, number, frac = [], [c], []
+
+ result += timedelta_as_neg(r, neg)
+
+ # hh:mm:ss.
+ elif c == ':':
+
+ # we flip this off if we have a leading value
+ if have_value:
+ neg = 0
+
+ # we are in the pattern hh:mm:ss pattern
+ if len(number):
+ if current_unit is None:
+ current_unit = 'h'
+ m = 1000000000L * 3600
+ elif current_unit == 'h':
+ current_unit = 'm'
+ m = 1000000000L * 60
+ elif current_unit == 'm':
+ current_unit = 's'
+ m = 1000000000L
+ r = <int64_t> int(''.join(number)) * m
+ result += timedelta_as_neg(r, neg)
+ have_hhmmss = 1
+ else:
+ if coerce:
+ return iNaT
+ raise ValueError("expecting hh:mm:ss format, received: {0}".format(ts))
+ unit, number = [], []
+
+ # after the decimal point
+ elif c == '.':
+
+ if len(number) and current_unit is not None:
+
+ # by definition we had something like
+ # so we need to evaluate the final field from a
+ # hh:mm:ss (so current_unit is 'm')
+ if current_unit != 'm':
+ raise ValueError("expected hh:mm:ss format before .")
+ m = 1000000000L
+ r = <int64_t> int(''.join(number)) * m
+ result += timedelta_as_neg(r, neg)
+ have_value = 1
+ unit, number, frac = [], [], []
+
+ have_dot = 1
+
+ # unit
+ else:
+ unit.append(c)
+ have_value = 1
+ have_dot = 0
+
+
+ # we had a dot, but we have a fractional
+ # value since we have an unit
+ if have_dot and len(unit):
+ try:
+ r = timedelta_from_spec(number, frac, unit)
+ result += timedelta_as_neg(r, neg)
+ except ValueError:
+ if coerce:
+ return iNaT
+ raise
+
+ # we have a dot as part of a regular format
+ # e.g. hh:mm:ss.fffffff
+ elif have_dot:
+ if len(frac) > 0 and len(frac) <= 3:
+ m = 10**(3-len(frac)) * 1000L * 1000L
+ elif len(frac) > 3 and len(frac) <= 6:
+ m = 10**(6-len(frac)) * 1000L
+ else:
+ m = 10**(9-len(frac))
+
+ r = <int64_t> int(''.join(frac)) * m
+ result += timedelta_as_neg(r, neg)
+
+ # we have a regular format
+ # we must have seconds at this point (hence the unit is still 'm')
+ elif current_unit is not None:
+ if current_unit != 'm':
+ raise ValueError("expected hh:mm:ss format")
+ m = 1000000000L
+ r = <int64_t> int(''.join(number)) * m
+ result += timedelta_as_neg(r, neg)
+
+ # we have a last abbreviation
+ elif len(unit):
+
+ if len(number):
+ try:
+ r = timedelta_from_spec(number, frac, unit)
+ result += timedelta_as_neg(r, neg)
+ except ValueError:
+ if coerce:
+ return iNaT
+ raise
+ else:
+ if coerce:
+ return iNaT
+ raise ValueError("unit abbreviation w/o a number")
+
+ # treat as nanoseconds
+ # but only if we don't have anything else
+ else:
+
+ if have_value:
+ raise ValueError("have leftover units")
+ if len(number):
+ try:
+ r = timedelta_from_spec(number, frac, 'ns')
+ result += timedelta_as_neg(r, neg)
+ except ValueError:
+ if coerce:
+ return iNaT
+ raise
+
+ return result
+
cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
"""
Convert an incoming object to a timedelta64 if possible
@@ -2257,10 +2516,7 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
ts = cast_from_unit(ts, unit)
ts = np.timedelta64(ts)
elif util.is_string_object(ts):
- if ts in _nat_strings or coerce:
- return np.timedelta64(iNaT)
- else:
- raise ValueError("Invalid type for timedelta scalar: %s" % type(ts))
+ ts = np.timedelta64(parse_timedelta_string(ts, coerce))
elif hasattr(ts,'delta'):
ts = np.timedelta64(_delta_to_nanoseconds(ts),'ns')
@@ -2558,6 +2814,9 @@ cdef inline _get_datetime64_nanos(object val):
cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
""" return a casting of the unit represented to nanoseconds
round the fractional part of a float to our precision, p """
+ cdef:
+ int64_t m
+ int p
if unit == 'D' or unit == 'd':
m = 1000000000L * 86400
| closes #6755
```
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
dtype_infer_timedelta64_1 | 7.0527 | 119.7317 | 0.0589 |
timedelta_convert_string | 13.3094 | 108.9200 | 0.1222 |
timedelta_convert_string_seconds | 18.2626 | 85.0617 | 0.2147 |
dtype_infer_timedelta64_2 | 9.4047 | 9.4063 | 0.9998 |
timedelta_convert_int | 0.1300 | 0.1260 | 1.0315 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [da92dc0] : PERF: timedelta and datetime64 ops improvements
Base [d8a2f30] : Check for size=0 before setting item
Fixes #10193
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10396 | 2015-06-20T09:27:26Z | 2015-06-22T08:34:19Z | 2015-06-22T08:34:19Z | 2015-06-22T09:16:22Z |
Add nlargest/nsmallest for DataFrame | diff --git a/doc/source/api.rst b/doc/source/api.rst
index a1284a3ff7bc9..1cbe55ddbacb6 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -904,6 +904,8 @@ Reshaping, sorting, transposing
DataFrame.sort
DataFrame.sort_index
DataFrame.sortlevel
+ DataFrame.nlargest
+ DataFrame.nsmallest
DataFrame.swaplevel
DataFrame.stack
DataFrame.unstack
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 524f57953d5b8..58374fabaec32 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1497,6 +1497,20 @@ faster than sorting the entire Series and calling ``head(n)`` on the result.
s.nsmallest(3)
s.nlargest(3)
+.. versionadded:: 0.17.0
+
+``DataFrame`` also has the ``nlargest`` and ``nsmallest`` methods.
+
+.. ipython:: python
+
+ df = DataFrame({'a': [-2, -1, 1, 10, 8, 11, -1],
+ 'b': list('abdceff'),
+ 'c': [1.0, 2.0, 4.0, 3.2, np.nan, 3.0, 4.0]})
+ df.nlargest(3, 'a')
+ df.nlargest(5, ['a', 'c'])
+ df.nsmallest(3, 'a')
+ df.nsmallest(5, ['a', 'c'])
+
.. _basics.multi-index_sorting:
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 16c6c639a489e..50a7c3b0c22e9 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -32,6 +32,7 @@ Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsne
New features
~~~~~~~~~~~~
+- ``DataFrame`` has the ``nlargest`` and ``nsmallest`` methods (:issue:`10393`)
- SQL io functions now accept a SQLAlchemy connectable. (:issue:`7877`)
- Enable writing complex values to HDF stores when using table format (:issue:`10447`)
- Enable reading gzip compressed files via URL, either by explicitly setting the compression parameter or by inferring from the presence of the HTTP Content-Encoding header in the response (:issue:`8685`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index deda8294d139a..d8948bc82fe61 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3127,6 +3127,79 @@ def sortlevel(self, level=0, axis=0, ascending=True,
else:
return self._constructor(new_data).__finalize__(self)
+ def _nsorted(self, columns, n, method, take_last):
+ if not com.is_list_like(columns):
+ columns = [columns]
+ columns = list(columns)
+ ser = getattr(self[columns[0]], method)(n, take_last=take_last)
+ ascending = dict(nlargest=False, nsmallest=True)[method]
+ return self.loc[ser.index].sort(columns, ascending=ascending,
+ kind='mergesort')
+
+ def nlargest(self, n, columns, take_last=False):
+ """Get the rows of a DataFrame sorted by the `n` largest
+ values of `columns`.
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ n : int
+ Number of items to retrieve
+ columns : list or str
+ Column name or names to order by
+ take_last : bool, optional
+ Where there are duplicate values, take the last duplicate
+
+ Returns
+ -------
+ DataFrame
+
+ Examples
+ --------
+ >>> df = DataFrame({'a': [1, 10, 8, 11, -1],
+ ... 'b': list('abdce'),
+ ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
+ >>> df.nlargest(3, 'a')
+ a b c
+ 3 11 c 3
+ 1 10 b 2
+ 2 8 d NaN
+ """
+ return self._nsorted(columns, n, 'nlargest', take_last)
+
+ def nsmallest(self, n, columns, take_last=False):
+ """Get the rows of a DataFrame sorted by the `n` smallest
+ values of `columns`.
+
+ .. versionadded:: 0.17.0
+
+ Parameters
+ ----------
+ n : int
+ Number of items to retrieve
+ columns : list or str
+ Column name or names to order by
+ take_last : bool, optional
+ Where there are duplicate values, take the last duplicate
+
+ Returns
+ -------
+ DataFrame
+
+ Examples
+ --------
+ >>> df = DataFrame({'a': [1, 10, 8, 11, -1],
+ ... 'b': list('abdce'),
+ ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
+ >>> df.nsmallest(3, 'a')
+ a b c
+ 4 -1 e 4
+ 0 1 a 1
+ 2 8 d NaN
+ """
+ return self._nsorted(columns, n, 'nsmallest', take_last)
+
def swaplevel(self, i, j, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3b93465c1efe9..77ef5fecf22c9 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -14609,6 +14609,41 @@ def test_dataframe_metadata(self):
self.assertEqual(df._metadata, unpickled._metadata)
self.assertEqual(df.testattr, unpickled.testattr)
+ def test_nlargest(self):
+ # GH10393
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10])})
+ result = df.nlargest(5, 'a')
+ expected = df.sort('a', ascending=False).head(5)
+ tm.assert_frame_equal(result, expected)
+
+ def test_nlargest_multiple_columns(self):
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10]),
+ 'c': np.random.permutation(10).astype('float64')})
+ result = df.nlargest(5, ['a', 'b'])
+ expected = df.sort(['a', 'b'], ascending=False).head(5)
+ tm.assert_frame_equal(result, expected)
+
+ def test_nsmallest(self):
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10])})
+ result = df.nsmallest(5, 'a')
+ expected = df.sort('a').head(5)
+ tm.assert_frame_equal(result, expected)
+
+ def test_nsmallest_multiple_columns(self):
+ from string import ascii_lowercase
+ df = pd.DataFrame({'a': np.random.permutation(10),
+ 'b': list(ascii_lowercase[:10]),
+ 'c': np.random.permutation(10).astype('float64')})
+ result = df.nsmallest(5, ['a', 'c'])
+ expected = df.sort(['a', 'c']).head(5)
+ tm.assert_frame_equal(result, expected)
+
def test_to_panel_expanddim(self):
# GH 9762
| Closes #7359
| https://api.github.com/repos/pandas-dev/pandas/pulls/10393 | 2015-06-19T19:11:50Z | 2015-08-04T22:30:32Z | 2015-08-04T22:30:32Z | 2015-08-04T22:30:40Z |
BUG: closes bug in reset_index when index contains NaT | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index d4becdf6b524b..6f7e9bce0a3a6 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -60,3 +60,4 @@ Bug Fixes
~~~~~~~~~
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
+- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 84ef421128cd0..c2203bd506d7e 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1194,7 +1194,7 @@ def _maybe_upcast_putmask(result, mask, other):
if result.dtype in _DATELIKE_DTYPES:
if lib.isscalar(other):
if isnull(other):
- other = tslib.iNaT
+ other = result.dtype.type('nat')
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
elif is_integer_dtype(other):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a4abe481cfe81..337bc7d2dbeee 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9494,6 +9494,18 @@ def test_reindex_nan(self):
df.index = df.index.astype('object')
tm.assert_frame_equal(df.reindex(i), df.iloc[j])
+ # GH10388
+ df = pd.DataFrame({'other':['a', 'b', np.nan, 'c'],
+ 'date':['2015-03-22', np.nan, '2012-01-08', np.nan],
+ 'amount':[2, 3, 4, 5]})
+
+ df['date'] = pd.to_datetime(df.date)
+ df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
+
+ left = df.set_index(['delta', 'other', 'date']).reset_index()
+ right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
+ assert_frame_equal(left, right)
+
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
| closes https://github.com/pydata/pandas/issues/10388
| https://api.github.com/repos/pandas-dev/pandas/pulls/10389 | 2015-06-19T00:29:40Z | 2015-06-20T11:09:45Z | 2015-06-20T11:09:44Z | 2015-06-20T12:17:35Z |
BUG: DataFrame.plot raises ValueError when color name is specified by multiple characters | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 742077d39fb18..9e8ecb6c57de5 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -81,7 +81,7 @@ Bug Fixes
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
-
+- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 2c8123244c53c..2fbb4dfc6fd91 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -1146,6 +1146,53 @@ def test_series_grid_settings(self):
self._check_grid_settings(Series([1,2,3]),
plotting._series_kinds + plotting._common_kinds)
+ @slow
+ def test_standard_colors(self):
+ for c in ['r', 'red', 'green', '#FF0000']:
+ result = plotting._get_standard_colors(1, color=c)
+ self.assertEqual(result, [c])
+
+ result = plotting._get_standard_colors(1, color=[c])
+ self.assertEqual(result, [c])
+
+ result = plotting._get_standard_colors(3, color=c)
+ self.assertEqual(result, [c] * 3)
+
+ result = plotting._get_standard_colors(3, color=[c])
+ self.assertEqual(result, [c] * 3)
+
+ @slow
+ def test_standard_colors_all(self):
+ import matplotlib.colors as colors
+
+ # multiple colors like mediumaquamarine
+ for c in colors.cnames:
+ result = plotting._get_standard_colors(num_colors=1, color=c)
+ self.assertEqual(result, [c])
+
+ result = plotting._get_standard_colors(num_colors=1, color=[c])
+ self.assertEqual(result, [c])
+
+ result = plotting._get_standard_colors(num_colors=3, color=c)
+ self.assertEqual(result, [c] * 3)
+
+ result = plotting._get_standard_colors(num_colors=3, color=[c])
+ self.assertEqual(result, [c] * 3)
+
+ # single letter colors like k
+ for c in colors.ColorConverter.colors:
+ result = plotting._get_standard_colors(num_colors=1, color=c)
+ self.assertEqual(result, [c])
+
+ result = plotting._get_standard_colors(num_colors=1, color=[c])
+ self.assertEqual(result, [c])
+
+ result = plotting._get_standard_colors(num_colors=3, color=c)
+ self.assertEqual(result, [c] * 3)
+
+ result = plotting._get_standard_colors(num_colors=3, color=[c])
+ self.assertEqual(result, [c] * 3)
+
@tm.mplskip
class TestDataFramePlots(TestPlotBase):
@@ -1736,7 +1783,6 @@ def test_bar_colors(self):
default_colors = plt.rcParams.get('axes.color_cycle')
-
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
@@ -1762,6 +1808,11 @@ def test_bar_colors(self):
ax = df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
+ tm.close()
+
+ ax = df.plot(kind='bar', color='green')
+ self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
+ tm.close()
@slow
def test_bar_linewidth(self):
@@ -2897,6 +2948,10 @@ def test_line_colors(self):
ax = df.ix[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
+ ax = df.plot(color='red')
+ self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
+ tm.close()
+
@slow
def test_area_colors(self):
from matplotlib import cm
@@ -2972,6 +3027,10 @@ def test_hist_colors(self):
ax = df.ix[:, [0]].plot(kind='hist', color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
+ ax = df.plot(kind='hist', color='green')
+ self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
+ tm.close()
+
@slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 35893b9de8e75..3265889e4b268 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -134,6 +134,32 @@ def random_color(column):
else:
raise ValueError("color_type must be either 'default' or 'random'")
+ if isinstance(colors, compat.string_types):
+ import matplotlib.colors
+ conv = matplotlib.colors.ColorConverter()
+ def _maybe_valid_colors(colors):
+ try:
+ [conv.to_rgba(c) for c in colors]
+ return True
+ except ValueError:
+ return False
+
+ # check whether the string can be convertable to single color
+ maybe_single_color = _maybe_valid_colors([colors])
+ # check whether each character can be convertable to colors
+ maybe_color_cycle = _maybe_valid_colors(list(colors))
+ if maybe_single_color and maybe_color_cycle and len(colors) > 1:
+ msg = ("'{0}' can be parsed as both single color and "
+ "color cycle. Specify each color using a list "
+ "like ['{0}'] or {1}")
+ raise ValueError(msg.format(colors, list(colors)))
+ elif maybe_single_color:
+ colors = [colors]
+ else:
+ # ``colors`` is regarded as color cycle.
+ # mpl will raise error any of them is invalid
+ pass
+
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
| Derived from #9894. Passing color name with multiple characters results in `ValueError`. Below is the bahavior of current master.
```
# OK
df = pd.DataFrame(np.random.randn(3, 3))
df[0].plot(color='green')
# single green line
# OK
df.plot(color=['green'])
# triple green lines
# NG
df.plot(color='green')
# ValueError: to_rgba: Invalid rgba arg "e"
# -> This should be triple green lines
```
If passed str can be parsed as both single color and color cycle, following error will be raised.
- _"'green' can be parsed as both single color and color cycle. Specify each color using a list like ['green'] or ['g', 'r', 'e', 'e', 'n']"_
Currently, there is no color name which can meet above condition (thus cannot tested).
- http://matplotlib.org/examples/color/named_colors.html
| https://api.github.com/repos/pandas-dev/pandas/pulls/10387 | 2015-06-18T13:42:29Z | 2015-07-01T13:21:04Z | 2015-07-01T13:21:04Z | 2015-07-01T13:47:12Z |
BUG: fix multiple columns as primary key in io.sql.get_schema (GH10385) | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 09a39a6d9b2f5..cd41c4fc82146 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -120,7 +120,8 @@ Bug Fixes
- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`)
-
+- Bug in ``io.sql.get_schema`` when specifying multiple columns as primary
+ key (:issue:`10385`).
- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b4e8c7de2b4e1..8d8768c08fe02 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -834,7 +834,11 @@ def _create_table_setup(self):
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
- pkc = PrimaryKeyConstraint(self.keys, name=self.name + '_pk')
+ if not com.is_list_like(self.keys):
+ keys = [self.keys]
+ else:
+ keys = self.keys
+ pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
@@ -899,8 +903,8 @@ def _harmonize_columns(self, parse_dates=None):
def _get_notnull_col_dtype(self, col):
"""
- Infer datatype of the Series col. In case the dtype of col is 'object'
- and it contains NA values, this infers the datatype of the not-NA
+ Infer datatype of the Series col. In case the dtype of col is 'object'
+ and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
@@ -1272,7 +1276,7 @@ def _get_unicode_name(name):
return uname
def _get_valid_mysql_name(name):
- # Filter for unquoted identifiers
+ # Filter for unquoted identifiers
# See http://dev.mysql.com/doc/refman/5.0/en/identifiers.html
uname = _get_unicode_name(name)
if not len(uname):
@@ -1293,7 +1297,7 @@ def _get_valid_sqlite_name(name):
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
-
+
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
@@ -1377,7 +1381,11 @@ def _create_table_setup(self):
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
- cnames_br = ",".join([escape(c) for c in self.keys])
+ if not com.is_list_like(self.keys):
+ keys = [self.keys]
+ else:
+ keys = self.keys
+ cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
@@ -1391,7 +1399,7 @@ def _create_table_setup(self):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
- "CREATE INDEX " + escape("ix_"+self.name+"_"+cnames) +
+ "CREATE INDEX " + escape("ix_"+self.name+"_"+cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
@@ -1416,7 +1424,7 @@ def _sql_type_name(self, col):
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
-
+
if col_type not in _SQL_TYPES:
col_type = "string"
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 33ea63ba41f1f..d8bc3c61f68f0 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -703,6 +703,19 @@ def test_get_schema_dtypes(self):
self.assertTrue('CREATE' in create_sql)
self.assertTrue('INTEGER' in create_sql)
+ def test_get_schema_keys(self):
+ frame = DataFrame({'Col1':[1.1,1.2], 'Col2':[2.1,2.2]})
+ create_sql = sql.get_schema(frame, 'test', 'sqlite',
+ con=self.conn, keys='Col1')
+ constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
+ self.assertTrue(constraint_sentence in create_sql)
+
+ # multiple columns as key (GH10385)
+ create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
+ con=self.conn, keys=['A', 'B'])
+ constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
+ self.assertTrue(constraint_sentence in create_sql)
+
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
@@ -1851,7 +1864,7 @@ def test_illegal_names(self):
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', ok_name])
c_tbl = 'test_ok_col_name%d'%ndx
df2.to_sql(c_tbl, self.conn, flavor=self.flavor, index=False,
- if_exists='replace')
+ if_exists='replace')
self.conn.cursor().execute("DROP TABLE `%s`" % c_tbl)
self.conn.commit()
@@ -1962,7 +1975,7 @@ def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],)
lines = create_sql.splitlines()
- self.assertTrue('PRIMARY KEY ("A","B")' in create_sql)
+ self.assertTrue('PRIMARY KEY ("A", "B")' in create_sql)
cur = self.db.cursor()
cur.execute(create_sql)
@@ -2277,7 +2290,7 @@ def test_schema(self):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],)
lines = create_sql.splitlines()
- self.assertTrue('PRIMARY KEY (`A`,`B`)' in create_sql)
+ self.assertTrue('PRIMARY KEY (`A`, `B`)' in create_sql)
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
| Closes #10385
| https://api.github.com/repos/pandas-dev/pandas/pulls/10386 | 2015-06-18T12:00:03Z | 2015-07-03T13:33:49Z | 2015-07-03T13:33:49Z | 2015-07-03T13:33:49Z |
ENH: column label filtering via regexes to work for numeric names | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 4a513f3122390..5585dfde69ac5 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -26,7 +26,8 @@ New features
Other enhancements
^^^^^^^^^^^^^^^^^^
-
+- ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`).
+
.. _whatsnew_0170.api:
Backwards incompatible API changes
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 35db4051c60c8..e7ce4c2891114 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1928,7 +1928,7 @@ def filter(self, items=None, like=None, regex=None, axis=None):
return self.select(matchf, axis=axis_name)
elif regex:
matcher = re.compile(regex)
- return self.select(lambda x: matcher.search(x) is not None,
+ return self.select(lambda x: matcher.search(str(x)) is not None,
axis=axis_name)
else:
raise TypeError('Must pass either `items`, `like`, or `regex`')
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a4abe481cfe81..1072aa3ba8871 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10741,7 +10741,7 @@ def test_filter(self):
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
- assert_frame_equal(filtered,expected)
+ assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
@@ -10755,6 +10755,17 @@ def test_filter(self):
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
+
+ # regex with ints in column names
+ # from PR #10384
+ df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
+ expected = DataFrame(0., index=[0, 1, 2], columns=[1, 2])
+ filtered = df.filter(regex='^[0-9]+$')
+ self.assert_frame_equal(filtered, expected)
+
+ expected = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '0', '1'])
+ filtered = expected.filter(regex='^[0-9]+$') # shouldn't remove anything
+ self.assert_frame_equal(filtered, expected)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
| Simple fix to allow regex filtering to work for numeric column labels, e.g. df.filter(regex="[12][34]")
closes #10506
| https://api.github.com/repos/pandas-dev/pandas/pulls/10384 | 2015-06-18T06:59:19Z | 2015-07-06T12:01:38Z | null | 2015-07-07T04:17:04Z |
Allow passing other arguments to interpolation functions | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 690dd1ab196b0..9f9813cefd305 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -44,6 +44,7 @@ Other API Changes
^^^^^^^^^^^^^^^^^
- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`)
- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
+- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
.. _whatsnew_0170.deprecations:
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 990eec08d0bd6..03a4162d401a7 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1588,7 +1588,8 @@ def backfill_2d(values, limit=None, mask=None, dtype=None):
return values
-def _clean_interp_method(method, order=None):
+def _clean_interp_method(method, **kwargs):
+ order = kwargs.get('order')
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial',
'krogh', 'piecewise_polynomial',
@@ -1603,7 +1604,7 @@ def _clean_interp_method(method, order=None):
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
- fill_value=None, bounds_error=False, order=None):
+ fill_value=None, bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
@@ -1682,18 +1683,17 @@ def _interp_limit(invalid, limit):
'piecewise_polynomial', 'pchip']
if method in sp_methods:
new_x = new_x[firstIndex:]
- xvalues = xvalues[firstIndex:]
result[firstIndex:][invalid] = _interpolate_scipy_wrapper(
valid_x, valid_y, new_x, method=method, fill_value=fill_value,
- bounds_error=bounds_error, order=order)
+ bounds_error=bounds_error, order=order, **kwargs)
if limit:
result[violate_limit] = np.nan
return result
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
- bounds_error=False, order=None):
+ bounds_error=False, order=None, **kwargs):
"""
passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
@@ -1734,7 +1734,7 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
- terp = interpolate.UnivariateSpline(x, y, k=order)
+ terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
@@ -1746,7 +1746,7 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
- new_y = method(x, y, new_x)
+ new_y = method(x, y, new_x, **kwargs)
return new_y
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b9e007a1e4d58..7d7145b88b22a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2896,6 +2896,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
Update the NDFrame in place if possible.
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
+ kwargs : keyword arguments to pass on to the interpolating function.
Returns
-------
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 9a8ec00188d9c..f434992e9fcd8 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -1373,6 +1373,23 @@ def test_spline(self):
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
+ def test_spline_extrapolate(self):
+ tm.skip_if_no_package('scipy', '0.15', 'setting ext on scipy.interpolate.UnivariateSpline')
+ s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
+ result3 = s.interpolate(method='spline', order=1, ext=3)
+ expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
+ assert_series_equal(result3, expected3)
+
+ result1 = s.interpolate(method='spline', order=1, ext=0)
+ expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
+ assert_series_equal(result1, expected1)
+
+ def test_spline_smooth(self):
+ tm._skip_if_no_scipy()
+ s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
+ self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
+ s.interpolate(method='spline', order=3)[5])
+
def test_metadata_propagation_indiv(self):
# groupby
| Closes #10378
| https://api.github.com/repos/pandas-dev/pandas/pulls/10383 | 2015-06-18T05:45:53Z | 2015-06-26T18:08:21Z | 2015-06-26T18:08:21Z | 2015-06-26T18:08:31Z |
Check for size=0 before setting item | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 4a513f3122390..d4becdf6b524b 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -59,3 +59,4 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
+- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index ff0d5739588f2..007d742895be2 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1480,6 +1480,18 @@ def test_setitem(self):
expected = self.series.append(app)
assert_series_equal(s, expected)
+ # Test for issue #10193
+ key = pd.Timestamp('2012-01-01')
+ series = pd.Series()
+ series[key] = 47
+ expected = pd.Series(47, [key])
+ assert_series_equal(series, expected)
+
+ series = pd.Series([], pd.DatetimeIndex([], freq='D'))
+ series[key] = 47
+ expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
+ assert_series_equal(series, expected)
+
def test_setitem_dtypes(self):
# change dtypes
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 1a0d87c0d26d3..a549c44d119c7 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1497,7 +1497,7 @@ def insert(self, loc, item):
if zone != izone:
raise ValueError('Passed item and index have different timezone')
# check freq can be preserved on edge cases
- if self.freq is not None:
+ if self.size and self.freq is not None:
if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
| This is a second try at fixing #10193; the first try is in #10194. There is some useful discussion in that PR, so I didn't want to clobber that- not sure of the etiquette of multiple PRs for the same bug??
The discussion around setting values in views on #10194 is separate from the bug itself- I find that setting an item on a newly-constructed empty series with a frequency (thus, no views are involved) raises the same error.
I couldn't cherry pick from my old branch easily as some commits contained more code than is necessary for this set of changes, so I have done a new one.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10379 | 2015-06-17T17:38:44Z | 2015-06-18T15:36:21Z | 2015-06-18T15:36:21Z | 2015-06-18T15:43:28Z |
ENH: Enable ExcelWriter to construct in-memory sheets | diff --git a/ci/requirements-3.4.txt b/ci/requirements-3.4.txt
index 24af93fb16194..fd0a5bc53dd7e 100644
--- a/ci/requirements-3.4.txt
+++ b/ci/requirements-3.4.txt
@@ -3,6 +3,7 @@ pytz
openpyxl
xlsxwriter
xlrd
+xlwt
html5lib
patsy
beautiful-soup
diff --git a/ci/requirements-3.4_SLOW.txt b/ci/requirements-3.4_SLOW.txt
index 6372d9b4f6068..ecc31dad78d07 100644
--- a/ci/requirements-3.4_SLOW.txt
+++ b/ci/requirements-3.4_SLOW.txt
@@ -3,6 +3,7 @@ pytz
openpyxl
xlsxwriter
xlrd
+xlwt
html5lib
patsy
beautiful-soup
diff --git a/doc/source/install.rst b/doc/source/install.rst
index b3f86db5e3e59..1ba82bf60f128 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -249,10 +249,9 @@ Optional Dependencies
* `statsmodels <http://statsmodels.sourceforge.net/>`__
* Needed for parts of :mod:`pandas.stats`
* `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__
- * openpyxl version 1.6.1 or higher, but lower than 2.0.0
* Needed for Excel I/O
* `XlsxWriter <https://pypi.python.org/pypi/XlsxWriter>`__
- * Alternative Excel writer.
+ * Alternative Excel writer
* `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3
access.
* `blosc <https://pypi.python.org/pypi/blosc>`__: for msgpack compression using ``blosc``
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 73a2f2f1d3531..9852822c556dc 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2130,7 +2130,9 @@ one can pass an :class:`~pandas.io.excel.ExcelWriter`.
df1.to_excel(writer, sheet_name='Sheet1')
df2.to_excel(writer, sheet_name='Sheet2')
-.. note:: Wringing a little more performance out of ``read_excel``
+.. note::
+
+ Wringing a little more performance out of ``read_excel``
Internally, Excel stores all numeric data as floats. Because this can
produce unexpected behavior when reading in data, pandas defaults to trying
to convert integers to floats if it doesn't lose information (``1.0 -->
@@ -2182,6 +2184,45 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are:
df.to_excel('path_to_file.xlsx', sheet_name='Sheet1')
+Writing Excel Files to Memory
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 0.17
+
+.. _io.excel_writing_buffer
+
+Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or
+``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`.
+
+.. code-block:: python
+
+ # Safe import for either Python 2.x or 3.x
+ try:
+ from io import BytesIO
+ except ImportError:
+ from cStringIO import StringIO as BytesIO
+
+ bio = BytesIO()
+
+ # By setting the 'engine' in the ExcelWriter constructor.
+ writer = ExcelWriter(bio, engine='xlsxwriter')
+ df.to_excel(writer, sheet_name='Sheet1')
+
+ # Save the workbook
+ writer.save()
+
+ # Seek to the beginning and read to copy the workbook to a variable in memory
+ bio.seek(0)
+ workbook = bio.read()
+
+.. note::
+
+ ``engine`` is optional but recommended. Setting the engine determines
+ the version of workbook produced. Setting ``engine='xlrd'`` will produce an
+ Excel 2003-format workbook (xls). Using either ``'openpyxl'`` or
+ ``'xlsxwriter'`` will produce an Excel 2007-format workbook (xlsx). If
+ omitted, an Excel 2007-formatted workbook is produced.
+
.. _io.clipboard:
Clipboard
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 6f7e9bce0a3a6..fc2e6b1cb936f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -38,6 +38,8 @@ Backwards incompatible API changes
Other API Changes
^^^^^^^^^^^^^^^^^
+- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`)
+- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`)
.. _whatsnew_0170.deprecations:
@@ -53,11 +55,15 @@ Removal of prior version deprecations/changes
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`)
.. _whatsnew_0170.bug_fixes:
Bug Fixes
~~~~~~~~~
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
+
+
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
+- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cf7f1fa033f6e..a4e4cf612ca85 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1246,6 +1246,9 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
+
+ For compatibility with to_csv, to_excel serializes lists and dicts to
+ strings before writing.
"""
from pandas.io.excel import ExcelWriter
if self.columns.nlevels > 1:
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index cab342dc339f4..d58d6590b96c0 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -9,11 +9,13 @@
import abc
import numpy as np
+from pandas.core.frame import DataFrame
from pandas.io.parsers import TextParser
from pandas.io.common import _is_url, _urlopen
from pandas.tseries.period import Period
from pandas import json
-from pandas.compat import map, zip, reduce, range, lrange, u, add_metaclass
+from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass,
+ BytesIO, string_types)
from pandas.core import config
from pandas.core.common import pprint_thing
import pandas.compat as compat
@@ -417,10 +419,13 @@ def _parse_cell(cell_contents,cell_typ):
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value,typ))
data.append(row)
-
+
+ if sheet.nrows == 0:
+ return DataFrame()
+
if header is not None:
data[header] = _trim_excel_header(data[header])
-
+
parser = TextParser(data, header=header, index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
@@ -474,6 +479,8 @@ def _conv_value(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
+ elif com.is_list_like(val):
+ val = str(val)
return val
@@ -497,6 +504,11 @@ class ExcelWriter(object):
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
+
+ Notes
+ -----
+ For compatibility with CSV writers, ExcelWriter serializes lists
+ and dicts to strings before writing.
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
@@ -521,9 +533,13 @@ class ExcelWriter(object):
# ExcelWriter.
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
- if cls == ExcelWriter:
+ if issubclass(cls, ExcelWriter):
if engine is None:
- ext = os.path.splitext(path)[-1][1:]
+ if isinstance(path, string_types):
+ ext = os.path.splitext(path)[-1][1:]
+ else:
+ ext = 'xlsx'
+
try:
engine = config.get_option('io.excel.%s.writer' % ext)
except KeyError:
@@ -574,7 +590,11 @@ def save(self):
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# validate that this engine can handle the extension
- ext = os.path.splitext(path)[-1]
+ if isinstance(path, string_types):
+ ext = os.path.splitext(path)[-1]
+ else:
+ ext = 'xls' if engine == 'xlwt' else 'xlsx'
+
self.check_extension(ext)
self.path = path
@@ -1159,7 +1179,7 @@ class _XlwtWriter(ExcelWriter):
def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
-
+ engine_kwargs['engine'] = engine
super(_XlwtWriter, self).__init__(path, **engine_kwargs)
if encoding is None:
@@ -1311,6 +1331,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
style_dict = {}
for cell in cells:
+ val = _conv_value(cell.val)
+
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
@@ -1336,7 +1358,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
else:
wks.write(startrow + cell.row,
startcol + cell.col,
- cell.val, style)
+ val, style)
def _convert_to_style(self, style_dict, num_format_str=None):
"""
diff --git a/pandas/io/tests/data/blank.xls b/pandas/io/tests/data/blank.xls
new file mode 100755
index 0000000000000..952c76f045e8a
Binary files /dev/null and b/pandas/io/tests/data/blank.xls differ
diff --git a/pandas/io/tests/data/blank.xlsx b/pandas/io/tests/data/blank.xlsx
new file mode 100755
index 0000000000000..73f6ba6d29af3
Binary files /dev/null and b/pandas/io/tests/data/blank.xlsx differ
diff --git a/pandas/io/tests/data/blank_with_header.xls b/pandas/io/tests/data/blank_with_header.xls
new file mode 100755
index 0000000000000..a2e6350808147
Binary files /dev/null and b/pandas/io/tests/data/blank_with_header.xls differ
diff --git a/pandas/io/tests/data/blank_with_header.xlsx b/pandas/io/tests/data/blank_with_header.xlsx
new file mode 100755
index 0000000000000..7b30edb13be2c
Binary files /dev/null and b/pandas/io/tests/data/blank_with_header.xlsx differ
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 768aa40696cbc..e898d699ff2fd 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -1,6 +1,6 @@
# pylint: disable=E1101
-from pandas.compat import u, range, map, openpyxl_compat
+from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems
from datetime import datetime, date, time
import sys
import os
@@ -455,7 +455,7 @@ def test_reading_multiple_specific_sheets(self):
def test_creating_and_reading_multiple_sheets(self):
# Test reading multiple sheets, from a runtime created excel file
# with multiple sheets.
- # See PR #9450
+ # See PR #9450
_skip_if_no_xlrd()
_skip_if_no_xlwt()
@@ -471,7 +471,7 @@ def tdf(sheetname):
with ensure_clean('.xlsx') as pth:
with ExcelWriter(pth) as ew:
- for sheetname, df in dfs.iteritems():
+ for sheetname, df in iteritems(dfs):
df.to_excel(ew,sheetname)
dfs_returned = pd.read_excel(pth,sheetname=sheets)
for s in sheets:
@@ -520,6 +520,29 @@ def test_reader_seconds(self):
actual = read_excel(epoch_1904, 'Sheet1')
tm.assert_frame_equal(actual, expected)
+ # GH6403
+ def test_read_excel_blank(self):
+ _skip_if_no_xlrd()
+
+ blank = os.path.join(self.dirpath, 'blank.xls')
+ actual = read_excel(blank, 'Sheet1')
+ tm.assert_frame_equal(actual, DataFrame())
+
+ blank = os.path.join(self.dirpath, 'blank.xlsx')
+ actual = read_excel(blank, 'Sheet1')
+ tm.assert_frame_equal(actual, DataFrame())
+
+ def test_read_excel_blank_with_header(self):
+ _skip_if_no_xlrd()
+
+ expected = DataFrame(columns=['col_1', 'col_2'])
+ blank = os.path.join(self.dirpath, 'blank_with_header.xls')
+ actual = read_excel(blank, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
+
+ blank = os.path.join(self.dirpath, 'blank_with_header.xlsx')
+ actual = read_excel(blank, 'Sheet1')
+ tm.assert_frame_equal(actual, expected)
class ExcelWriterBase(SharedItems):
# Base class for test cases to run with different Excel writers.
@@ -1218,6 +1241,30 @@ def test_datetimes(self):
tm.assert_series_equal(write_frame['A'], read_frame['A'])
+ # GH7074
+ def test_bytes_io(self):
+ bio = BytesIO()
+ df = DataFrame(np.random.randn(10, 2))
+ writer = ExcelWriter(bio)
+ df.to_excel(writer)
+ writer.save()
+ bio.seek(0)
+ reread_df = pd.read_excel(bio)
+ tm.assert_frame_equal(df, reread_df)
+
+ # GH8188
+ def test_write_lists_dict(self):
+ df = pd.DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}],
+ 'numeric': [1, 2, 3.0],
+ 'str': ['apple', 'banana', 'cherry']})
+ expected = df.copy()
+ expected.mixed = expected.mixed.apply(str)
+ expected.numeric = expected.numeric.astype('int64')
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, 'Sheet1')
+ read = read_excel(path, 'Sheet1', header=0)
+ tm.assert_frame_equal(read, expected)
+
def raise_wrapper(major_ver):
def versioned_raise_wrapper(orig_method):
@functools.wraps(orig_method)
@@ -1512,6 +1559,7 @@ class XlsxWriterTests_NoMerge(ExcelWriterBase, tm.TestCase):
class ExcelWriterEngineTests(tm.TestCase):
+
def test_ExcelWriter_dispatch(self):
with tm.assertRaisesRegexp(ValueError, 'No engine'):
ExcelWriter('nothing')
diff --git a/vb_suite/packers.py b/vb_suite/packers.py
index 6c7005cb03c4f..62e0e8fc33b58 100644
--- a/vb_suite/packers.py
+++ b/vb_suite/packers.py
@@ -7,6 +7,7 @@
import os
import pandas as pd
from pandas.core import common as com
+from pandas.compat import BytesIO
from random import randrange
f = '__test__.msg'
@@ -206,3 +207,46 @@ def remove(f):
packers_read_stata_with_validation = Benchmark("pd.read_stata(f)", setup, start_date=start_date)
packers_write_stata_with_validation = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date)
+
+#----------------------------------------------------------------------
+# Excel - alternative writers
+setup = common_setup + """
+bio = BytesIO()
+"""
+
+excel_writer_bench = """
+bio.seek(0)
+writer = pd.io.excel.ExcelWriter(bio, engine='{engine}')
+df[:2000].to_excel(writer)
+writer.save()
+"""
+
+benchmark_xlsxwriter = excel_writer_bench.format(engine='xlsxwriter')
+
+packers_write_excel_xlsxwriter = Benchmark(benchmark_xlsxwriter, setup)
+
+benchmark_openpyxl = excel_writer_bench.format(engine='openpyxl')
+
+packers_write_excel_openpyxl = Benchmark(benchmark_openpyxl, setup)
+
+benchmark_xlwt = excel_writer_bench.format(engine='xlwt')
+
+packers_write_excel_xlwt = Benchmark(benchmark_xlwt, setup)
+
+
+#----------------------------------------------------------------------
+# Excel - reader
+
+setup = common_setup + """
+bio = BytesIO()
+writer = pd.io.excel.ExcelWriter(bio, engine='xlsxwriter')
+df[:2000].to_excel(writer)
+writer.save()
+"""
+
+benchmark_read_excel="""
+bio.seek(0)
+pd.read_excel(bio)
+"""
+
+packers_read_excel = Benchmark(benchmark_read_excel, setup)
| Add support for StringIO/BytesIO to ExcelWriter
Add vbench support for writing excel files
Add support for serializing lists/dicts to strings
Fix bug when reading blank excel sheets
closes #8188
closes #7074
closes #6403
closes #7171
| https://api.github.com/repos/pandas-dev/pandas/pulls/10376 | 2015-06-17T14:37:38Z | 2015-06-20T16:20:11Z | 2015-06-20T16:20:11Z | 2015-06-20T16:20:16Z |
ERR: GH9513 NaT methods now raise ValueError | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e77532b2fe432..069425984b1b2 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -232,6 +232,16 @@ Other API Changes
- Serialize metadata properties of subclasses of pandas objects (:issue:`10553`).
+- ``NaT``'s methods now either raise ``ValueError``, or return ``np.nan`` or ``NaT`` (:issue:`9513`)
+=========================== ==============================================================
+Behavior Methods
+=========================== ==============================================================
+``return np.nan`` ``weekday``, ``isoweekday``
+``return NaT`` ``date``, ``now``, ``replace``, ``to_datetime``, ``today``
+``return np.datetime64('NaT')`` ``to_datetime64`` (unchanged)
+``raise ValueError`` All other public methods (names not beginning with underscores)
+=========================== ===============================================================
+
.. _whatsnew_0170.deprecations:
Deprecations
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 3d901837f5123..41c814f8943b2 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -2879,6 +2879,9 @@ def test_union(self):
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
+ def test_nat(self):
+ self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
+
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 7326d7a9d811d..826eddb63a5d5 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -210,7 +210,6 @@ def test_valid_dt_with_missing_values(self):
# GH 8689
s = Series(date_range('20130101',periods=5,freq='D'))
- s_orig = s.copy()
s.iloc[2] = pd.NaT
for attr in ['microsecond','nanosecond','second','minute','hour','day']:
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index a8b6fb4389459..6cf972d4d7a8a 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1392,7 +1392,8 @@ def time(self):
"""
# can't call self.map() which tries to treat func as ufunc
# and causes recursion warnings on python 2.6
- return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values, lambda x: x.time()))
+ return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values,
+ lambda x: np.nan if x is tslib.NaT else x.time()))
@property
def date(self):
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index c8b96076b26bd..a6c5dc23250b8 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -941,12 +941,34 @@ def test_nat_vector_field_access(self):
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
- 'week', 'dayofyear', 'days_in_month']
+ 'week', 'dayofyear', 'days_in_month', 'daysinmonth',
+ 'dayofweek']
for field in fields:
result = getattr(NaT, field)
self.assertTrue(np.isnan(result))
- self.assertTrue(np.isnan(NaT.weekday()))
+ def test_NaT_methods(self):
+ # GH 9513
+ raise_methods = ['astimezone', 'combine', 'ctime', 'dst', 'fromordinal',
+ 'fromtimestamp', 'isocalendar', 'isoformat',
+ 'strftime', 'strptime',
+ 'time', 'timestamp', 'timetuple', 'timetz',
+ 'toordinal', 'tzname', 'utcfromtimestamp',
+ 'utcnow', 'utcoffset', 'utctimetuple']
+ nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today']
+ nan_methods = ['weekday', 'isoweekday']
+
+ for method in raise_methods:
+ if hasattr(NaT, method):
+ self.assertRaises(ValueError, getattr(NaT, method))
+
+ for method in nan_methods:
+ if hasattr(NaT, method):
+ self.assertTrue(np.isnan(getattr(NaT, method)()))
+
+ for method in nat_methods:
+ if hasattr(NaT, method):
+ self.assertIs(getattr(NaT, method)(), NaT)
def test_to_datetime_types(self):
@@ -3520,6 +3542,9 @@ def check(val,unit=None,h=1,s=1,us=0):
result = Timestamp(NaT)
self.assertIs(result, NaT)
+ result = Timestamp('NaT')
+ self.assertIs(result, NaT)
+
def test_roundtrip(self):
# test value to string and back conversions
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 168bb754250e3..a2fc9b07b16a1 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -58,7 +58,7 @@ from dateutil.relativedelta import relativedelta
from dateutil.parser import DEFAULTPARSER
from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
-from pandas.compat import parse_date, string_types, iteritems, StringIO
+from pandas.compat import parse_date, string_types, iteritems, StringIO, callable
import operator
import collections
@@ -640,15 +640,10 @@ class NaTType(_NaT):
def __long__(self):
return NPY_NAT
- def weekday(self):
- return np.nan
-
- def toordinal(self):
- return -1
-
def __reduce__(self):
return (__nat_unpickle, (None, ))
+
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'millisecond', 'microsecond', 'nanosecond',
'week', 'dayofyear', 'days_in_month', 'daysinmonth', 'dayofweek']
@@ -656,6 +651,50 @@ for field in fields:
prop = property(fget=lambda self: np.nan)
setattr(NaTType, field, prop)
+# GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or return NaT
+# create functions that raise, for binding to NaTType
+def _make_error_func(func_name):
+ def f(*args, **kwargs):
+ raise ValueError("NaTType does not support " + func_name)
+ f.__name__ = func_name
+ return f
+
+def _make_nat_func(func_name):
+ def f(*args, **kwargs):
+ return NaT
+ f.__name__ = func_name
+ return f
+
+def _make_nan_func(func_name):
+ def f(*args, **kwargs):
+ return np.nan
+ f.__name__ = func_name
+ return f
+
+_nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today']
+
+_nan_methods = ['weekday', 'isoweekday']
+
+_implemented_methods = ['to_datetime64']
+_implemented_methods.extend(_nat_methods)
+_implemented_methods.extend(_nan_methods)
+
+for _method_name in _nat_methods:
+ # not all methods exist in all versions of Python
+ if hasattr(NaTType, _method_name):
+ setattr(NaTType, _method_name, _make_nat_func(_method_name))
+
+for _method_name in _nan_methods:
+ if hasattr(NaTType, _method_name):
+ setattr(NaTType, _method_name, _make_nan_func(_method_name))
+
+for _maybe_method_name in dir(NaTType):
+ _maybe_method = getattr(NaTType, _maybe_method_name)
+ if (callable(_maybe_method)
+ and not _maybe_method_name.startswith("_")
+ and _maybe_method_name not in _implemented_methods):
+ setattr(NaTType, _maybe_method_name, _make_error_func(_maybe_method_name))
+
def __nat_unpickle(*args):
# return constant defined in the module
return NaT
| This is to close #9513. `NaT` methods now raise `ValueError` or return `np.nan`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10372 | 2015-06-17T06:16:37Z | 2015-07-17T16:40:41Z | null | 2015-07-17T16:40:41Z |
CI: use versioneer to have PEP440 versions | diff --git a/.binstar.yml b/.binstar.yml
index 6f7c2c5ba4c7a..c70add11c55b0 100644
--- a/.binstar.yml
+++ b/.binstar.yml
@@ -1,22 +1,21 @@
package: pandas
user: jreback
-platform:
- #- osx-64
- #- linux-32
- - linux-64
- - win-64
- #- win-32
-
-engine:
- #- python=2.6
- - python=2.7
- #- python=3.3
- #- python=3.4
+install:
+ - conda config --add channels pandas
before_script:
- python -V
+platform:
+ - linux-64
+ #- linux-32
+ - osx-64
+ #- win-32
+ - win-64
+engine:
+ - python=2.7
+ #- python=3.4
script:
- conda build conda.recipe --quiet
@@ -27,12 +26,3 @@ build_targets: conda
notifications:
email:
recipients: ['jeff@reback.net']
-
----
-platform: win-32
-engine: python=2.6
-exclude: true
----
-platform: win-64
-engine: python=2.6
-exclude: true
diff --git a/.gitattributes b/.gitattributes
index 0ef16e42a0660..736fa09d070fe 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -13,3 +13,4 @@
*.dta binary
*.xls binary
*.xlsx binary
+pandas/_version.py export-subst
diff --git a/.gitignore b/.gitignore
index c0f576178ecc0..6b00558fb3b19 100644
--- a/.gitignore
+++ b/.gitignore
@@ -78,10 +78,6 @@ scikits
*.c
*.cpp
-# Things specific to this project #
-###################################
-pandas/version.py
-
# Documentation generated files #
#################################
doc/source/generated
diff --git a/MANIFEST.in b/MANIFEST.in
index 69174f7f05b98..2d26fbfd6adaf 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -24,3 +24,5 @@ global-exclude *.png
# recursive-include doc/source *
# recursive-include doc/sphinxext *
# recursive-include LICENSES *
+include versioneer.py
+include pandas/_version.py
diff --git a/conda.recipe/bld.bat b/conda.recipe/bld.bat
index cc977c65dcbe1..6c5499f6ccb7e 100644
--- a/conda.recipe/bld.bat
+++ b/conda.recipe/bld.bat
@@ -1,2 +1,5 @@
@echo off
-%PYTHON% setup.py install --quiet
+
+conda remove jinja2 --quiet
+conda install jinja2 --quiet
+%PYTHON% setup.py install
diff --git a/conda.recipe/build.sh b/conda.recipe/build.sh
index bce23bf0c6549..8bf1cb09364a6 100644
--- a/conda.recipe/build.sh
+++ b/conda.recipe/build.sh
@@ -1,2 +1,3 @@
-#!/bin/bash
-$PYTHON setup.py install --quiet
+#!/bin/sh
+
+$PYTHON setup.py install
diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml
index 6817fbc9b43e0..6f0fd4fda47a3 100644
--- a/conda.recipe/meta.yaml
+++ b/conda.recipe/meta.yaml
@@ -1,6 +1,6 @@
package:
- name: pandas
- version: {{ environ.get('GIT_DESCRIBE_TAG', '') }}
+ name: pandas
+ version: {{ environ.get('GIT_DESCRIBE_TAG', '').replace('.dev', 'dev') }}
build:
number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }}
@@ -28,10 +28,9 @@ requirements:
test:
requires:
- nose
- - coverage
commands:
- - python -c "import pandas"
+ - nosetests --exe -A "not slow and not network and not disabled" pandas
about:
home: http://pandas.pydata.org
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 71341a02b0237..e4619fe26d25a 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -14,6 +14,7 @@ users upgrade to this version.
Highlights include:
- Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>`
+ - Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating.
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 0e7bc628fdb6a..dbc697410da80 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -29,7 +29,6 @@
_np_version_under1p9 = LooseVersion(_np_version) < '1.9'
-from pandas.version import version as __version__
from pandas.info import __doc__
@@ -57,3 +56,8 @@
from pandas.util.print_versions import show_versions
import pandas.util.testing
+# use the closest tagged version if possible
+from ._version import get_versions
+v = get_versions()
+__version__ = v.get('closest-tag',v['version'])
+del get_versions, v
diff --git a/pandas/_version.py b/pandas/_version.py
new file mode 100644
index 0000000000000..61e9f3ff187ea
--- /dev/null
+++ b/pandas/_version.py
@@ -0,0 +1,460 @@
+
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by github's download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.15 (https://github.com/warner/python-versioneer)
+
+import errno
+import os
+import re
+import subprocess
+import sys
+
+
+def get_keywords():
+ # these strings will be replaced by git during git-archive.
+ # setup.py/versioneer.py will grep for the variable names, so they must
+ # each be defined on a line of their own. _version.py will just call
+ # get_keywords().
+ git_refnames = "$Format:%d$"
+ git_full = "$Format:%H$"
+ keywords = {"refnames": git_refnames, "full": git_full}
+ return keywords
+
+
+class VersioneerConfig:
+ pass
+
+
+def get_config():
+ # these strings are filled in when 'setup.py versioneer' creates
+ # _version.py
+ cfg = VersioneerConfig()
+ cfg.VCS = "git"
+ cfg.style = "pep440"
+ cfg.tag_prefix = "v"
+ cfg.parentdir_prefix = "pandas-"
+ cfg.versionfile_source = "pandas/_version.py"
+ cfg.verbose = False
+ return cfg
+
+
+class NotThisMethod(Exception):
+ pass
+
+
+LONG_VERSION_PY = {}
+HANDLERS = {}
+
+
+def register_vcs_handler(vcs, method): # decorator
+ def decorate(f):
+ if vcs not in HANDLERS:
+ HANDLERS[vcs] = {}
+ HANDLERS[vcs][method] = f
+ return f
+ return decorate
+
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+ assert isinstance(commands, list)
+ p = None
+ for c in commands:
+ try:
+ dispcmd = str([c] + args)
+ # remember shell=False, so use git.cmd on windows, not just git
+ p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr
+ else None))
+ break
+ except EnvironmentError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.ENOENT:
+ continue
+ if verbose:
+ print("unable to run %s" % dispcmd)
+ print(e)
+ return None
+ else:
+ if verbose:
+ print("unable to find command, tried %s" % (commands,))
+ return None
+ stdout = p.communicate()[0].strip()
+ if sys.version_info[0] >= 3:
+ stdout = stdout.decode()
+ if p.returncode != 0:
+ if verbose:
+ print("unable to run %s (error)" % dispcmd)
+ return None
+ return stdout
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+ # Source tarballs conventionally unpack into a directory that includes
+ # both the project name and a version string.
+ dirname = os.path.basename(root)
+ if not dirname.startswith(parentdir_prefix):
+ if verbose:
+ print("guessing rootdir is '%s', but '%s' doesn't start with "
+ "prefix '%s'" % (root, dirname, parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None}
+
+
+@register_vcs_handler("git", "get_keywords")
+def git_get_keywords(versionfile_abs):
+ # the code embedded in _version.py can just fetch the value of these
+ # keywords. When used from setup.py, we don't want to import _version.py,
+ # so we do it with a regexp instead. This function is not used from
+ # _version.py.
+ keywords = {}
+ try:
+ f = open(versionfile_abs, "r")
+ for line in f.readlines():
+ if line.strip().startswith("git_refnames ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["refnames"] = mo.group(1)
+ if line.strip().startswith("git_full ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["full"] = mo.group(1)
+ f.close()
+ except EnvironmentError:
+ pass
+ return keywords
+
+
+@register_vcs_handler("git", "keywords")
+def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ if not keywords:
+ raise NotThisMethod("no keywords at all, weird")
+ refnames = keywords["refnames"].strip()
+ if refnames.startswith("$Format"):
+ if verbose:
+ print("keywords are unexpanded, not using")
+ raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
+ refs = set([r.strip() for r in refnames.strip("()").split(",")])
+ # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+ # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+ TAG = "tag: "
+ tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+ if not tags:
+ # Either we're using git < 1.8.3, or there really are no tags. We use
+ # a heuristic: assume all version tags have a digit. The old git %d
+ # expansion behaves like git log --decorate=short and strips out the
+ # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+ # between branches and tags. By ignoring refnames without digits, we
+ # filter out many common branch names like "release" and
+ # "stabilization", as well as "HEAD" and "master".
+ tags = set([r for r in refs if re.search(r'\d', r)])
+ if verbose:
+ print("discarding '%s', no digits" % ",".join(refs-tags))
+ if verbose:
+ print("likely tags: %s" % ",".join(sorted(tags)))
+ for ref in sorted(tags):
+ # sorting will prefer e.g. "2.0" over "2.0rc1"
+ if ref.startswith(tag_prefix):
+ r = ref[len(tag_prefix):]
+ if verbose:
+ print("picking %s" % r)
+ return {"version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": None
+ }
+ # no suitable tags, so version is "0+unknown", but full hex is still there
+ if verbose:
+ print("no suitable tags, using unknown + full revision id")
+ return {"version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": "no suitable tags"}
+
+
+@register_vcs_handler("git", "pieces_from_vcs")
+def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
+ # this runs 'git' from the root of the source tree. This only gets called
+ # if the git-archive 'subst' keywords were *not* expanded, and
+ # _version.py hasn't already been rewritten with a short version string,
+ # meaning we're inside a checked out source tree.
+
+ if not os.path.exists(os.path.join(root, ".git")):
+ if verbose:
+ print("no .git in %s" % root)
+ raise NotThisMethod("no .git directory")
+
+ GITS = ["git"]
+ if sys.platform == "win32":
+ GITS = ["git.cmd", "git.exe"]
+ # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
+ # if there are no tags, this yields HEX[-dirty] (no NUM)
+ describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long"],
+ cwd=root)
+ # --long was added in git-1.5.5
+ if describe_out is None:
+ raise NotThisMethod("'git describe' failed")
+ describe_out = describe_out.strip()
+ full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ if full_out is None:
+ raise NotThisMethod("'git rev-parse' failed")
+ full_out = full_out.strip()
+
+ pieces = {}
+ pieces["long"] = full_out
+ pieces["short"] = full_out[:7] # maybe improved later
+ pieces["error"] = None
+
+ # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
+ # TAG might have hyphens.
+ git_describe = describe_out
+
+ # look for -dirty suffix
+ dirty = git_describe.endswith("-dirty")
+ pieces["dirty"] = dirty
+ if dirty:
+ git_describe = git_describe[:git_describe.rindex("-dirty")]
+
+ # now we have TAG-NUM-gHEX or HEX
+
+ if "-" in git_describe:
+ # TAG-NUM-gHEX
+ mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ if not mo:
+ # unparseable. Maybe git-describe is misbehaving?
+ pieces["error"] = ("unable to parse git-describe output: '%s'"
+ % describe_out)
+ return pieces
+
+ # tag
+ full_tag = mo.group(1)
+ if not full_tag.startswith(tag_prefix):
+ if verbose:
+ fmt = "tag '%s' doesn't start with prefix '%s'"
+ print(fmt % (full_tag, tag_prefix))
+ pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
+ % (full_tag, tag_prefix))
+ return pieces
+ pieces["closest-tag"] = full_tag[len(tag_prefix):]
+
+ # distance: number of commits since tag
+ pieces["distance"] = int(mo.group(2))
+
+ # commit: short hex revision ID
+ pieces["short"] = mo.group(3)
+
+ else:
+ # HEX: no tags
+ pieces["closest-tag"] = None
+ count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
+ pieces["distance"] = int(count_out) # total number of commits
+
+ return pieces
+
+
+def plus_or_dot(pieces):
+ if "+" in pieces.get("closest-tag", ""):
+ return "."
+ return "+"
+
+
+def render_pep440(pieces):
+ # now build up version string, with post-release "local version
+ # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+
+ # exceptions:
+ # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += plus_or_dot(pieces)
+ rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ else:
+ # exception #1
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"],
+ pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ return rendered
+
+
+def render_pep440_pre(pieces):
+ # TAG[.post.devDISTANCE] . No -dirty
+
+ # exceptions:
+ # 1: no tags. 0.post.devDISTANCE
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += ".post.dev%d" % pieces["distance"]
+ else:
+ # exception #1
+ rendered = "0.post.dev%d" % pieces["distance"]
+ return rendered
+
+
+def render_pep440_post(pieces):
+ # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
+ # .dev0 sorts backwards (a dirty tree will appear "older" than the
+ # corresponding clean one), but you shouldn't be releasing software with
+ # -dirty anyways.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += plus_or_dot(pieces)
+ rendered += "g%s" % pieces["short"]
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += "+g%s" % pieces["short"]
+ return rendered
+
+
+def render_pep440_old(pieces):
+ # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ return rendered
+
+
+def render_git_describe(pieces):
+ # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
+ # --always'
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render_git_describe_long(pieces):
+ # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
+ # --always -long'. The distance/hash is unconditional.
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render(pieces, style):
+ if pieces["error"]:
+ return {"version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"]}
+
+ if not style or style == "default":
+ style = "pep440" # the default
+
+ if style == "pep440":
+ rendered = render_pep440(pieces)
+ elif style == "pep440-pre":
+ rendered = render_pep440_pre(pieces)
+ elif style == "pep440-post":
+ rendered = render_pep440_post(pieces)
+ elif style == "pep440-old":
+ rendered = render_pep440_old(pieces)
+ elif style == "git-describe":
+ rendered = render_git_describe(pieces)
+ elif style == "git-describe-long":
+ rendered = render_git_describe_long(pieces)
+ else:
+ raise ValueError("unknown style '%s'" % style)
+
+ return {"version": rendered, "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"], "error": None}
+
+
+def get_versions():
+ # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
+ # __file__, we can work backwards from there to the root. Some
+ # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
+ # case we can only use expanded keywords.
+
+ cfg = get_config()
+ verbose = cfg.verbose
+
+ try:
+ return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
+ verbose)
+ except NotThisMethod:
+ pass
+
+ try:
+ root = os.path.realpath(__file__)
+ # versionfile_source is the relative path from the top of the source
+ # tree (where the .git directory might live) to this file. Invert
+ # this to find the root from __file__.
+ for i in cfg.versionfile_source.split('/'):
+ root = os.path.dirname(root)
+ except NameError:
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to find root of source tree"}
+
+ try:
+ pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
+ return render(pieces, cfg.style)
+ except NotThisMethod:
+ pass
+
+ try:
+ if cfg.parentdir_prefix:
+ return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
+ except NotThisMethod:
+ pass
+
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version"}
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000..8798e2ce6a5a5
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,12 @@
+
+# See the docstring in versioneer.py for instructions. Note that you must
+# re-run 'versioneer.py setup' after changing this section, and commit the
+# resulting files.
+
+[versioneer]
+VCS = git
+style = pep440
+versionfile_source = pandas/_version.py
+versionfile_build = pandas/_version.py
+tag_prefix = v
+parentdir_prefix = pandas-
diff --git a/setup.py b/setup.py
index 01364892cd0f3..f20b0ac0a5fb5 100755
--- a/setup.py
+++ b/setup.py
@@ -13,6 +13,10 @@
import re
from distutils.version import LooseVersion
+# versioning
+import versioneer
+cmdclass = versioneer.get_cmdclass()
+
# may need to work around setuptools bug by providing a fake Pyrex
min_cython_ver = '0.19.1'
try:
@@ -74,7 +78,6 @@
from distutils.extension import Extension
from distutils.command.build import build
-from distutils.command.sdist import sdist
from distutils.command.build_ext import build_ext as _build_ext
try:
@@ -191,76 +194,6 @@ def build_extensions(self):
'Topic :: Scientific/Engineering',
]
-MAJOR = 0
-MINOR = 16
-MICRO = 2
-ISRELEASED = False
-VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
-QUALIFIER = ''
-
-FULLVERSION = VERSION
-write_version = True
-
-if not ISRELEASED:
- import subprocess
- FULLVERSION += '.dev'
-
- pipe = None
- for cmd in ['git','git.cmd']:
- try:
- pipe = subprocess.Popen([cmd, "describe", "--always", "--match", "v[0-9]*"],
- stdout=subprocess.PIPE)
- (so,serr) = pipe.communicate()
- if pipe.returncode == 0:
- break
- except:
- pass
-
- if pipe is None or pipe.returncode != 0:
- # no git, or not in git dir
- if os.path.exists('pandas/version.py'):
- warnings.warn("WARNING: Couldn't get git revision, using existing pandas/version.py")
- write_version = False
- else:
- warnings.warn("WARNING: Couldn't get git revision, using generic version string")
- else:
- # have git, in git dir, but may have used a shallow clone (travis does this)
- rev = so.strip()
- # makes distutils blow up on Python 2.7
- if sys.version_info[0] >= 3:
- rev = rev.decode('ascii')
-
- if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}",rev):
- # partial clone, manually construct version string
- # this is the format before we started using git-describe
- # to get an ordering on dev version strings.
- rev ="v%s.dev-%s" % (VERSION, rev)
-
- # Strip leading v from tags format "vx.y.z" to get th version string
- FULLVERSION = rev.lstrip('v')
-
-else:
- FULLVERSION += QUALIFIER
-
-
-def write_version_py(filename=None):
- cnt = """\
-version = '%s'
-short_version = '%s'
-"""
- if not filename:
- filename = os.path.join(
- os.path.dirname(__file__), 'pandas', 'version.py')
-
- a = open(filename, 'w')
- try:
- a.write(cnt % (FULLVERSION, VERSION))
- finally:
- a.close()
-
-if write_version:
- write_version_py()
-
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
@@ -323,7 +256,11 @@ def run(self):
pass
-class CheckSDist(sdist):
+# we need to inherit from the versioneer
+# class as it encodes the version info
+sdist_class = cmdclass['sdist']
+
+class CheckSDist(sdist_class):
"""Custom sdist that ensures Cython has compiled all pyx files to c."""
_pyxfiles = ['pandas/lib.pyx',
@@ -336,7 +273,7 @@ class CheckSDist(sdist):
'pandas/src/testing.pyx']
def initialize_options(self):
- sdist.initialize_options(self)
+ sdist_class.initialize_options(self)
'''
self._pyxfiles = []
@@ -355,7 +292,7 @@ def run(self):
msg = "C-source file '%s' not found." % (cfile) +\
" Run 'setup.py cython' before sdist."
assert os.path.isfile(cfile), msg
- sdist.run(self)
+ sdist_class.run(self)
class CheckingBuildExt(build_ext):
@@ -397,9 +334,8 @@ def finalize_options(self):
def run(self):
pass
-cmdclass = {'clean': CleanCommand,
- 'build': build,
- 'sdist': CheckSDist}
+cmdclass.update({'clean': CleanCommand,
+ 'build': build})
try:
from wheel.bdist_wheel import bdist_wheel
@@ -575,8 +511,8 @@ def pxd(name):
# if you change something, be careful.
setup(name=DISTNAME,
- version=FULLVERSION,
maintainer=AUTHOR,
+ version=versioneer.get_version(),
packages=['pandas',
'pandas.compat',
'pandas.computation',
diff --git a/versioneer.py b/versioneer.py
new file mode 100644
index 0000000000000..c010f63e3ead8
--- /dev/null
+++ b/versioneer.py
@@ -0,0 +1,1699 @@
+
+# Version: 0.15
+
+"""
+The Versioneer
+==============
+
+* like a rocketeer, but for versions!
+* https://github.com/warner/python-versioneer
+* Brian Warner
+* License: Public Domain
+* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
+* [![Latest Version]
+(https://pypip.in/version/versioneer/badge.svg?style=flat)
+](https://pypi.python.org/pypi/versioneer/)
+* [![Build Status]
+(https://travis-ci.org/warner/python-versioneer.png?branch=master)
+](https://travis-ci.org/warner/python-versioneer)
+
+This is a tool for managing a recorded version number in distutils-based
+python projects. The goal is to remove the tedious and error-prone "update
+the embedded version string" step from your release process. Making a new
+release should be as easy as recording a new tag in your version-control
+system, and maybe making new tarballs.
+
+
+## Quick Install
+
+* `pip install versioneer` to somewhere to your $PATH
+* add a `[versioneer]` section to your setup.cfg (see below)
+* run `versioneer install` in your source tree, commit the results
+
+## Version Identifiers
+
+Source trees come from a variety of places:
+
+* a version-control system checkout (mostly used by developers)
+* a nightly tarball, produced by build automation
+* a snapshot tarball, produced by a web-based VCS browser, like github's
+ "tarball from tag" feature
+* a release tarball, produced by "setup.py sdist", distributed through PyPI
+
+Within each source tree, the version identifier (either a string or a number,
+this tool is format-agnostic) can come from a variety of places:
+
+* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
+ about recent "tags" and an absolute revision-id
+* the name of the directory into which the tarball was unpacked
+* an expanded VCS keyword ($Id$, etc)
+* a `_version.py` created by some earlier build step
+
+For released software, the version identifier is closely related to a VCS
+tag. Some projects use tag names that include more than just the version
+string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
+needs to strip the tag prefix to extract the version identifier. For
+unreleased software (between tags), the version identifier should provide
+enough information to help developers recreate the same tree, while also
+giving them an idea of roughly how old the tree is (after version 1.2, before
+version 1.3). Many VCS systems can report a description that captures this,
+for example `git describe --tags --dirty --always` reports things like
+"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
+0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
+uncommitted changes.
+
+The version identifier is used for multiple purposes:
+
+* to allow the module to self-identify its version: `myproject.__version__`
+* to choose a name and prefix for a 'setup.py sdist' tarball
+
+## Theory of Operation
+
+Versioneer works by adding a special `_version.py` file into your source
+tree, where your `__init__.py` can import it. This `_version.py` knows how to
+dynamically ask the VCS tool for version information at import time.
+
+`_version.py` also contains `$Revision$` markers, and the installation
+process marks `_version.py` to have this marker rewritten with a tag name
+during the `git archive` command. As a result, generated tarballs will
+contain enough information to get the proper version.
+
+To allow `setup.py` to compute a version too, a `versioneer.py` is added to
+the top level of your source tree, next to `setup.py` and the `setup.cfg`
+that configures it. This overrides several distutils/setuptools commands to
+compute the version when invoked, and changes `setup.py build` and `setup.py
+sdist` to replace `_version.py` with a small static file that contains just
+the generated version data.
+
+## Installation
+
+First, decide on values for the following configuration variables:
+
+* `VCS`: the version control system you use. Currently accepts "git".
+
+* `style`: the style of version string to be produced. See "Styles" below for
+ details. Defaults to "pep440", which looks like
+ `TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
+
+* `versionfile_source`:
+
+ A project-relative pathname into which the generated version strings should
+ be written. This is usually a `_version.py` next to your project's main
+ `__init__.py` file, so it can be imported at runtime. If your project uses
+ `src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
+ This file should be checked in to your VCS as usual: the copy created below
+ by `setup.py setup_versioneer` will include code that parses expanded VCS
+ keywords in generated tarballs. The 'build' and 'sdist' commands will
+ replace it with a copy that has just the calculated version string.
+
+ This must be set even if your project does not have any modules (and will
+ therefore never import `_version.py`), since "setup.py sdist" -based trees
+ still need somewhere to record the pre-calculated version strings. Anywhere
+ in the source tree should do. If there is a `__init__.py` next to your
+ `_version.py`, the `setup.py setup_versioneer` command (described below)
+ will append some `__version__`-setting assignments, if they aren't already
+ present.
+
+* `versionfile_build`:
+
+ Like `versionfile_source`, but relative to the build directory instead of
+ the source directory. These will differ when your setup.py uses
+ 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
+ then you will probably have `versionfile_build='myproject/_version.py'` and
+ `versionfile_source='src/myproject/_version.py'`.
+
+ If this is set to None, then `setup.py build` will not attempt to rewrite
+ any `_version.py` in the built tree. If your project does not have any
+ libraries (e.g. if it only builds a script), then you should use
+ `versionfile_build = None` and override `distutils.command.build_scripts`
+ to explicitly insert a copy of `versioneer.get_version()` into your
+ generated script.
+
+* `tag_prefix`:
+
+ a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
+ If your tags look like 'myproject-1.2.0', then you should use
+ tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
+ should be an empty string.
+
+* `parentdir_prefix`:
+
+  an optional string, frequently the same as tag_prefix, which appears at the
+ start of all unpacked tarball filenames. If your tarball unpacks into
+ 'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
+ just omit the field from your `setup.cfg`.
+
+This tool provides one script, named `versioneer`. That script has one mode,
+"install", which writes a copy of `versioneer.py` into the current directory
+and runs `versioneer.py setup` to finish the installation.
+
+To versioneer-enable your project:
+
+* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
+ populating it with the configuration values you decided earlier (note that
+ the option names are not case-sensitive):
+
+ ````
+ [versioneer]
+ VCS = git
+ style = pep440
+ versionfile_source = src/myproject/_version.py
+ versionfile_build = myproject/_version.py
+ tag_prefix = ""
+ parentdir_prefix = myproject-
+ ````
+
+* 2: Run `versioneer install`. This will do the following:
+
+ * copy `versioneer.py` into the top of your source tree
+ * create `_version.py` in the right place (`versionfile_source`)
+ * modify your `__init__.py` (if one exists next to `_version.py`) to define
+ `__version__` (by calling a function from `_version.py`)
+ * modify your `MANIFEST.in` to include both `versioneer.py` and the
+ generated `_version.py` in sdist tarballs
+
+ `versioneer install` will complain about any problems it finds with your
+ `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
+ the problems.
+
+* 3: add a `import versioneer` to your setup.py, and add the following
+ arguments to the setup() call:
+
+ version=versioneer.get_version(),
+ cmdclass=versioneer.get_cmdclass(),
+
+* 4: commit these changes to your VCS. To make sure you won't forget,
+ `versioneer install` will mark everything it touched for addition using
+ `git add`. Don't forget to add `setup.py` and `setup.cfg` too.
+
+## Post-Installation Usage
+
+Once established, all uses of your tree from a VCS checkout should get the
+current version string. All generated tarballs should include an embedded
+version string (so users who unpack them will not need a VCS tool installed).
+
+If you distribute your project through PyPI, then the release process should
+boil down to two steps:
+
+* 1: git tag 1.0
+* 2: python setup.py register sdist upload
+
+If you distribute it through github (i.e. users use github to generate
+tarballs with `git archive`), the process is:
+
+* 1: git tag 1.0
+* 2: git push; git push --tags
+
+Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
+least one tag in its history.
+
+## Version-String Flavors
+
+Code which uses Versioneer can learn about its version string at runtime by
+importing `_version` from your main `__init__.py` file and running the
+`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
+import the top-level `versioneer.py` and run `get_versions()`.
+
+Both functions return a dictionary with different flavors of version
+information:
+
+* `['version']`: A condensed version string, rendered using the selected
+ style. This is the most commonly used value for the project's version
+ string. The default "pep440" style yields strings like `0.11`,
+ `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
+ below for alternative styles.
+
+* `['full-revisionid']`: detailed revision identifier. For Git, this is the
+ full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
+
+* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
+ this is only accurate if run in a VCS checkout, otherwise it is likely to
+ be False or None
+
+* `['error']`: if the version string could not be computed, this will be set
+ to a string describing the problem, otherwise it will be None. It may be
+ useful to throw an exception in setup.py if this is set, to avoid e.g.
+ creating tarballs with a version string of "unknown".
+
+Some variants are more useful than others. Including `full-revisionid` in a
+bug report should allow developers to reconstruct the exact code being tested
+(or indicate the presence of local changes that should be shared with the
+developers). `version` is suitable for display in an "about" box or a CLI
+`--version` output: it can be easily compared against release notes and lists
+of bugs fixed in various releases.
+
+The installer adds the following text to your `__init__.py` to place a basic
+version in `YOURPROJECT.__version__`:
+
+ from ._version import get_versions
+ __version__ = get_versions()['version']
+ del get_versions
+
+## Styles
+
+The setup.cfg `style=` configuration controls how the VCS information is
+rendered into a version string.
+
+The default style, "pep440", produces a PEP440-compliant string, equal to the
+un-prefixed tag name for actual releases, and containing an additional "local
+version" section with more detail for in-between builds. For Git, this is
+TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
+--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
+tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
+that this commit is two revisions ("+2") beyond the "0.11" tag. For released
+software (exactly equal to a known tag), the identifier will only contain the
+stripped tag, e.g. "0.11".
+
+Other styles are available. See details.md in the Versioneer source tree for
+descriptions.
+
+## Debugging
+
+Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
+to return a version of "0+unknown". To investigate the problem, run `setup.py
+version`, which will run the version-lookup code in a verbose mode, and will
+display the full contents of `get_versions()` (including the `error` string,
+which may help identify what went wrong).
+
+## Updating Versioneer
+
+To upgrade your project to a new release of Versioneer, do the following:
+
+* install the new Versioneer (`pip install -U versioneer` or equivalent)
+* edit `setup.cfg`, if necessary, to include any new configuration settings
+ indicated by the release notes
+* re-run `versioneer install` in your source tree, to replace
+ `SRC/_version.py`
+* commit any changed files
+
+### Upgrading to 0.15
+
+Starting with this version, Versioneer is configured with a `[versioneer]`
+section in your `setup.cfg` file. Earlier versions required the `setup.py` to
+set attributes on the `versioneer` module immediately after import. The new
+version will refuse to run (raising an exception during import) until you
+have provided the necessary `setup.cfg` section.
+
+In addition, the Versioneer package provides an executable named
+`versioneer`, and the installation process is driven by running `versioneer
+install`. In 0.14 and earlier, the executable was named
+`versioneer-installer` and was run without an argument.
+
+### Upgrading to 0.14
+
+0.14 changes the format of the version string. 0.13 and earlier used
+hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
+plus-separated "local version" section strings, with dot-separated
+components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
+format, but should be ok with the new one.
+
+### Upgrading from 0.11 to 0.12
+
+Nothing special.
+
+### Upgrading from 0.10 to 0.11
+
+You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
+`setup.py setup_versioneer`. This will enable the use of additional
+version-control systems (SVN, etc) in the future.
+
+## Future Directions
+
+This tool is designed to make it easily extended to other version-control
+systems: all VCS-specific components are in separate directories like
+src/git/ . The top-level `versioneer.py` script is assembled from these
+components by running make-versioneer.py . In the future, make-versioneer.py
+will take a VCS name as an argument, and will construct a version of
+`versioneer.py` that is specific to the given VCS. It might also take the
+configuration arguments that are currently provided manually during
+installation by editing setup.py . Alternatively, it might go the other
+direction and include code from all supported VCS systems, reducing the
+number of intermediate scripts.
+
+
+## License
+
+To make Versioneer easier to embed, all its code is hereby released into the
+public domain. The `_version.py` that it creates is also in the public
+domain.
+
+"""
+
+from __future__ import print_function
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+import errno
+import json
+import os
+import re
+import subprocess
+import sys
+
+
+class VersioneerConfig:
+ pass
+
+
+def get_root():
+ # we require that all commands are run from the project root, i.e. the
+ # directory that contains setup.py, setup.cfg, and versioneer.py .
+ root = os.path.realpath(os.path.abspath(os.getcwd()))
+ setup_py = os.path.join(root, "setup.py")
+ versioneer_py = os.path.join(root, "versioneer.py")
+ if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
+ # allow 'python path/to/setup.py COMMAND'
+ root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
+ setup_py = os.path.join(root, "setup.py")
+ versioneer_py = os.path.join(root, "versioneer.py")
+ if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
+ err = ("Versioneer was unable to run the project root directory. "
+ "Versioneer requires setup.py to be executed from "
+ "its immediate directory (like 'python setup.py COMMAND'), "
+ "or in a way that lets it use sys.argv[0] to find the root "
+ "(like 'python path/to/setup.py COMMAND').")
+ raise VersioneerBadRootError(err)
+ try:
+ # Certain runtime workflows (setup.py install/develop in a setuptools
+ # tree) execute all dependencies in a single python process, so
+ # "versioneer" may be imported multiple times, and python's shared
+ # module-import table will cache the first one. So we can't use
+ # os.path.dirname(__file__), as that will find whichever
+ # versioneer.py was first imported, even in later projects.
+ me = os.path.realpath(os.path.abspath(__file__))
+ if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
+ print("Warning: build in %s is using versioneer.py from %s"
+ % (os.path.dirname(me), versioneer_py))
+ except NameError:
+ pass
+ return root
+
+
+def get_config_from_root(root):
+ # This might raise EnvironmentError (if setup.cfg is missing), or
+ # configparser.NoSectionError (if it lacks a [versioneer] section), or
+ # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
+ # the top of versioneer.py for instructions on writing your setup.cfg .
+ setup_cfg = os.path.join(root, "setup.cfg")
+ parser = configparser.SafeConfigParser()
+ with open(setup_cfg, "r") as f:
+ parser.readfp(f)
+ VCS = parser.get("versioneer", "VCS") # mandatory
+
+ def get(parser, name):
+ if parser.has_option("versioneer", name):
+ return parser.get("versioneer", name)
+ return None
+ cfg = VersioneerConfig()
+ cfg.VCS = VCS
+ cfg.style = get(parser, "style") or ""
+ cfg.versionfile_source = get(parser, "versionfile_source")
+ cfg.versionfile_build = get(parser, "versionfile_build")
+ cfg.tag_prefix = get(parser, "tag_prefix")
+ cfg.parentdir_prefix = get(parser, "parentdir_prefix")
+ cfg.verbose = get(parser, "verbose")
+ return cfg
+
+
+class NotThisMethod(Exception):
+ pass
+
+# these dictionaries contain VCS-specific tools
+LONG_VERSION_PY = {}
+HANDLERS = {}
+
+
+def register_vcs_handler(vcs, method): # decorator
+ def decorate(f):
+ if vcs not in HANDLERS:
+ HANDLERS[vcs] = {}
+ HANDLERS[vcs][method] = f
+ return f
+ return decorate
+
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+ assert isinstance(commands, list)
+ p = None
+ for c in commands:
+ try:
+ dispcmd = str([c] + args)
+ # remember shell=False, so use git.cmd on windows, not just git
+ p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr
+ else None))
+ break
+ except EnvironmentError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.ENOENT:
+ continue
+ if verbose:
+ print("unable to run %s" % dispcmd)
+ print(e)
+ return None
+ else:
+ if verbose:
+ print("unable to find command, tried %s" % (commands,))
+ return None
+ stdout = p.communicate()[0].strip()
+ if sys.version_info[0] >= 3:
+ stdout = stdout.decode()
+ if p.returncode != 0:
+ if verbose:
+ print("unable to run %s (error)" % dispcmd)
+ return None
+ return stdout
+LONG_VERSION_PY['git'] = '''
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by githubs download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.15 (https://github.com/warner/python-versioneer)
+
+import errno
+import os
+import re
+import subprocess
+import sys
+
+
+def get_keywords():
+ # these strings will be replaced by git during git-archive.
+ # setup.py/versioneer.py will grep for the variable names, so they must
+ # each be defined on a line of their own. _version.py will just call
+ # get_keywords().
+ git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
+ git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
+ keywords = {"refnames": git_refnames, "full": git_full}
+ return keywords
+
+
+class VersioneerConfig:
+ pass
+
+
+def get_config():
+ # these strings are filled in when 'setup.py versioneer' creates
+ # _version.py
+ cfg = VersioneerConfig()
+ cfg.VCS = "git"
+ cfg.style = "%(STYLE)s"
+ cfg.tag_prefix = "%(TAG_PREFIX)s"
+ cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
+ cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
+ cfg.verbose = False
+ return cfg
+
+
+class NotThisMethod(Exception):
+ pass
+
+
+LONG_VERSION_PY = {}
+HANDLERS = {}
+
+
+def register_vcs_handler(vcs, method): # decorator
+ def decorate(f):
+ if vcs not in HANDLERS:
+ HANDLERS[vcs] = {}
+ HANDLERS[vcs][method] = f
+ return f
+ return decorate
+
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+ assert isinstance(commands, list)
+ p = None
+ for c in commands:
+ try:
+ dispcmd = str([c] + args)
+ # remember shell=False, so use git.cmd on windows, not just git
+ p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr
+ else None))
+ break
+ except EnvironmentError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.ENOENT:
+ continue
+ if verbose:
+ print("unable to run %%s" %% dispcmd)
+ print(e)
+ return None
+ else:
+ if verbose:
+ print("unable to find command, tried %%s" %% (commands,))
+ return None
+ stdout = p.communicate()[0].strip()
+ if sys.version_info[0] >= 3:
+ stdout = stdout.decode()
+ if p.returncode != 0:
+ if verbose:
+ print("unable to run %%s (error)" %% dispcmd)
+ return None
+ return stdout
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+ # Source tarballs conventionally unpack into a directory that includes
+ # both the project name and a version string.
+ dirname = os.path.basename(root)
+ if not dirname.startswith(parentdir_prefix):
+ if verbose:
+ print("guessing rootdir is '%%s', but '%%s' doesn't start with "
+ "prefix '%%s'" %% (root, dirname, parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None}
+
+
+@register_vcs_handler("git", "get_keywords")
+def git_get_keywords(versionfile_abs):
+ # the code embedded in _version.py can just fetch the value of these
+ # keywords. When used from setup.py, we don't want to import _version.py,
+ # so we do it with a regexp instead. This function is not used from
+ # _version.py.
+ keywords = {}
+ try:
+ f = open(versionfile_abs, "r")
+ for line in f.readlines():
+ if line.strip().startswith("git_refnames ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["refnames"] = mo.group(1)
+ if line.strip().startswith("git_full ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["full"] = mo.group(1)
+ f.close()
+ except EnvironmentError:
+ pass
+ return keywords
+
+
+@register_vcs_handler("git", "keywords")
+def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ if not keywords:
+ raise NotThisMethod("no keywords at all, weird")
+ refnames = keywords["refnames"].strip()
+ if refnames.startswith("$Format"):
+ if verbose:
+ print("keywords are unexpanded, not using")
+ raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
+ refs = set([r.strip() for r in refnames.strip("()").split(",")])
+ # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+ # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+ TAG = "tag: "
+ tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+ if not tags:
+ # Either we're using git < 1.8.3, or there really are no tags. We use
+ # a heuristic: assume all version tags have a digit. The old git %%d
+ # expansion behaves like git log --decorate=short and strips out the
+ # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+ # between branches and tags. By ignoring refnames without digits, we
+ # filter out many common branch names like "release" and
+ # "stabilization", as well as "HEAD" and "master".
+ tags = set([r for r in refs if re.search(r'\d', r)])
+ if verbose:
+ print("discarding '%%s', no digits" %% ",".join(refs-tags))
+ if verbose:
+ print("likely tags: %%s" %% ",".join(sorted(tags)))
+ for ref in sorted(tags):
+ # sorting will prefer e.g. "2.0" over "2.0rc1"
+ if ref.startswith(tag_prefix):
+ r = ref[len(tag_prefix):]
+ if verbose:
+ print("picking %%s" %% r)
+ return {"version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": None
+ }
+ # no suitable tags, so version is "0+unknown", but full hex is still there
+ if verbose:
+ print("no suitable tags, using unknown + full revision id")
+ return {"version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": "no suitable tags"}
+
+
+@register_vcs_handler("git", "pieces_from_vcs")
+def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
+ # this runs 'git' from the root of the source tree. This only gets called
+ # if the git-archive 'subst' keywords were *not* expanded, and
+ # _version.py hasn't already been rewritten with a short version string,
+ # meaning we're inside a checked out source tree.
+
+ if not os.path.exists(os.path.join(root, ".git")):
+ if verbose:
+ print("no .git in %%s" %% root)
+ raise NotThisMethod("no .git directory")
+
+ GITS = ["git"]
+ if sys.platform == "win32":
+ GITS = ["git.cmd", "git.exe"]
+ # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
+ # if there are no tags, this yields HEX[-dirty] (no NUM)
+ describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long"],
+ cwd=root)
+ # --long was added in git-1.5.5
+ if describe_out is None:
+ raise NotThisMethod("'git describe' failed")
+ describe_out = describe_out.strip()
+ full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ if full_out is None:
+ raise NotThisMethod("'git rev-parse' failed")
+ full_out = full_out.strip()
+
+ pieces = {}
+ pieces["long"] = full_out
+ pieces["short"] = full_out[:7] # maybe improved later
+ pieces["error"] = None
+
+ # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
+ # TAG might have hyphens.
+ git_describe = describe_out
+
+ # look for -dirty suffix
+ dirty = git_describe.endswith("-dirty")
+ pieces["dirty"] = dirty
+ if dirty:
+ git_describe = git_describe[:git_describe.rindex("-dirty")]
+
+ # now we have TAG-NUM-gHEX or HEX
+
+ if "-" in git_describe:
+ # TAG-NUM-gHEX
+ mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ if not mo:
+ # unparseable. Maybe git-describe is misbehaving?
+ pieces["error"] = ("unable to parse git-describe output: '%%s'"
+ %% describe_out)
+ return pieces
+
+ # tag
+ full_tag = mo.group(1)
+ if not full_tag.startswith(tag_prefix):
+ if verbose:
+ fmt = "tag '%%s' doesn't start with prefix '%%s'"
+ print(fmt %% (full_tag, tag_prefix))
+ pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
+ %% (full_tag, tag_prefix))
+ return pieces
+ pieces["closest-tag"] = full_tag[len(tag_prefix):]
+
+ # distance: number of commits since tag
+ pieces["distance"] = int(mo.group(2))
+
+ # commit: short hex revision ID
+ pieces["short"] = mo.group(3)
+
+ else:
+ # HEX: no tags
+ pieces["closest-tag"] = None
+ count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
+ pieces["distance"] = int(count_out) # total number of commits
+
+ return pieces
+
+
+def plus_or_dot(pieces):
+ if "+" in pieces.get("closest-tag", ""):
+ return "."
+ return "+"
+
+
+def render_pep440(pieces):
+ # now build up version string, with post-release "local version
+ # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+
+ # exceptions:
+ # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += plus_or_dot(pieces)
+ rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ else:
+ # exception #1
+ rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
+ pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ return rendered
+
+
+def render_pep440_pre(pieces):
+ # TAG[.post.devDISTANCE] . No -dirty
+
+ # exceptions:
+ # 1: no tags. 0.post.devDISTANCE
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += ".post.dev%%d" %% pieces["distance"]
+ else:
+ # exception #1
+ rendered = "0.post.dev%%d" %% pieces["distance"]
+ return rendered
+
+
+def render_pep440_post(pieces):
+ # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
+ # .dev0 sorts backwards (a dirty tree will appear "older" than the
+ # corresponding clean one), but you shouldn't be releasing software with
+ # -dirty anyways.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += plus_or_dot(pieces)
+ rendered += "g%%s" %% pieces["short"]
+ else:
+ # exception #1
+ rendered = "0.post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += "+g%%s" %% pieces["short"]
+ return rendered
+
+
+def render_pep440_old(pieces):
+ # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ else:
+ # exception #1
+ rendered = "0.post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ return rendered
+
+
+def render_git_describe(pieces):
+ # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
+ # --always'
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render_git_describe_long(pieces):
+ # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
+ # --always -long'. The distance/hash is unconditional.
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render(pieces, style):
+ if pieces["error"]:
+ return {"version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"]}
+
+ if not style or style == "default":
+ style = "pep440" # the default
+
+ if style == "pep440":
+ rendered = render_pep440(pieces)
+ elif style == "pep440-pre":
+ rendered = render_pep440_pre(pieces)
+ elif style == "pep440-post":
+ rendered = render_pep440_post(pieces)
+ elif style == "pep440-old":
+ rendered = render_pep440_old(pieces)
+ elif style == "git-describe":
+ rendered = render_git_describe(pieces)
+ elif style == "git-describe-long":
+ rendered = render_git_describe_long(pieces)
+ else:
+ raise ValueError("unknown style '%%s'" %% style)
+
+ return {"version": rendered, "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"], "error": None}
+
+
+def get_versions():
+ # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
+ # __file__, we can work backwards from there to the root. Some
+ # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
+ # case we can only use expanded keywords.
+
+ cfg = get_config()
+ verbose = cfg.verbose
+
+ try:
+ return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
+ verbose)
+ except NotThisMethod:
+ pass
+
+ try:
+ root = os.path.realpath(__file__)
+ # versionfile_source is the relative path from the top of the source
+ # tree (where the .git directory might live) to this file. Invert
+ # this to find the root from __file__.
+ for i in cfg.versionfile_source.split('/'):
+ root = os.path.dirname(root)
+ except NameError:
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to find root of source tree"}
+
+ try:
+ pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
+ return render(pieces, cfg.style)
+ except NotThisMethod:
+ pass
+
+ try:
+ if cfg.parentdir_prefix:
+ return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
+ except NotThisMethod:
+ pass
+
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version"}
+'''
+
+
+@register_vcs_handler("git", "get_keywords")
+def git_get_keywords(versionfile_abs):
+ # the code embedded in _version.py can just fetch the value of these
+ # keywords. When used from setup.py, we don't want to import _version.py,
+ # so we do it with a regexp instead. This function is not used from
+ # _version.py.
+ keywords = {}
+ try:
+ f = open(versionfile_abs, "r")
+ for line in f.readlines():
+ if line.strip().startswith("git_refnames ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["refnames"] = mo.group(1)
+ if line.strip().startswith("git_full ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["full"] = mo.group(1)
+ f.close()
+ except EnvironmentError:
+ pass
+ return keywords
+
+
+@register_vcs_handler("git", "keywords")
+def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ if not keywords:
+ raise NotThisMethod("no keywords at all, weird")
+ refnames = keywords["refnames"].strip()
+ if refnames.startswith("$Format"):
+ if verbose:
+ print("keywords are unexpanded, not using")
+ raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
+ refs = set([r.strip() for r in refnames.strip("()").split(",")])
+ # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+ # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+ TAG = "tag: "
+ tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+ if not tags:
+ # Either we're using git < 1.8.3, or there really are no tags. We use
+ # a heuristic: assume all version tags have a digit. The old git %d
+ # expansion behaves like git log --decorate=short and strips out the
+ # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+ # between branches and tags. By ignoring refnames without digits, we
+ # filter out many common branch names like "release" and
+ # "stabilization", as well as "HEAD" and "master".
+ tags = set([r for r in refs if re.search(r'\d', r)])
+ if verbose:
+ print("discarding '%s', no digits" % ",".join(refs-tags))
+ if verbose:
+ print("likely tags: %s" % ",".join(sorted(tags)))
+ for ref in sorted(tags):
+ # sorting will prefer e.g. "2.0" over "2.0rc1"
+ if ref.startswith(tag_prefix):
+ r = ref[len(tag_prefix):]
+ if verbose:
+ print("picking %s" % r)
+ return {"version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": None
+ }
+ # no suitable tags, so version is "0+unknown", but full hex is still there
+ if verbose:
+ print("no suitable tags, using unknown + full revision id")
+ return {"version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": "no suitable tags"}
+
+
+@register_vcs_handler("git", "pieces_from_vcs")
+def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
+ # this runs 'git' from the root of the source tree. This only gets called
+ # if the git-archive 'subst' keywords were *not* expanded, and
+ # _version.py hasn't already been rewritten with a short version string,
+ # meaning we're inside a checked out source tree.
+
+ if not os.path.exists(os.path.join(root, ".git")):
+ if verbose:
+ print("no .git in %s" % root)
+ raise NotThisMethod("no .git directory")
+
+ GITS = ["git"]
+ if sys.platform == "win32":
+ GITS = ["git.cmd", "git.exe"]
+ # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
+ # if there are no tags, this yields HEX[-dirty] (no NUM)
+ describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long"],
+ cwd=root)
+ # --long was added in git-1.5.5
+ if describe_out is None:
+ raise NotThisMethod("'git describe' failed")
+ describe_out = describe_out.strip()
+ full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ if full_out is None:
+ raise NotThisMethod("'git rev-parse' failed")
+ full_out = full_out.strip()
+
+ pieces = {}
+ pieces["long"] = full_out
+ pieces["short"] = full_out[:7] # maybe improved later
+ pieces["error"] = None
+
+ # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
+ # TAG might have hyphens.
+ git_describe = describe_out
+
+ # look for -dirty suffix
+ dirty = git_describe.endswith("-dirty")
+ pieces["dirty"] = dirty
+ if dirty:
+ git_describe = git_describe[:git_describe.rindex("-dirty")]
+
+ # now we have TAG-NUM-gHEX or HEX
+
+ if "-" in git_describe:
+ # TAG-NUM-gHEX
+ mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ if not mo:
+ # unparseable. Maybe git-describe is misbehaving?
+ pieces["error"] = ("unable to parse git-describe output: '%s'"
+ % describe_out)
+ return pieces
+
+ # tag
+ full_tag = mo.group(1)
+ if not full_tag.startswith(tag_prefix):
+ if verbose:
+ fmt = "tag '%s' doesn't start with prefix '%s'"
+ print(fmt % (full_tag, tag_prefix))
+ pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
+ % (full_tag, tag_prefix))
+ return pieces
+ pieces["closest-tag"] = full_tag[len(tag_prefix):]
+
+ # distance: number of commits since tag
+ pieces["distance"] = int(mo.group(2))
+
+ # commit: short hex revision ID
+ pieces["short"] = mo.group(3)
+
+ else:
+ # HEX: no tags
+ pieces["closest-tag"] = None
+ count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
+ pieces["distance"] = int(count_out) # total number of commits
+
+ return pieces
+
+
+def do_vcs_install(manifest_in, versionfile_source, ipy):
+ GITS = ["git"]
+ if sys.platform == "win32":
+ GITS = ["git.cmd", "git.exe"]
+ files = [manifest_in, versionfile_source]
+ if ipy:
+ files.append(ipy)
+ try:
+ me = __file__
+ if me.endswith(".pyc") or me.endswith(".pyo"):
+ me = os.path.splitext(me)[0] + ".py"
+ versioneer_file = os.path.relpath(me)
+ except NameError:
+ versioneer_file = "versioneer.py"
+ files.append(versioneer_file)
+ present = False
+ try:
+ f = open(".gitattributes", "r")
+ for line in f.readlines():
+ if line.strip().startswith(versionfile_source):
+ if "export-subst" in line.strip().split()[1:]:
+ present = True
+ f.close()
+ except EnvironmentError:
+ pass
+ if not present:
+ f = open(".gitattributes", "a+")
+ f.write("%s export-subst\n" % versionfile_source)
+ f.close()
+ files.append(".gitattributes")
+ run_command(GITS, ["add", "--"] + files)
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+ # Source tarballs conventionally unpack into a directory that includes
+ # both the project name and a version string.
+ dirname = os.path.basename(root)
+ if not dirname.startswith(parentdir_prefix):
+ if verbose:
+ print("guessing rootdir is '%s', but '%s' doesn't start with "
+ "prefix '%s'" % (root, dirname, parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None}
+
+SHORT_VERSION_PY = """
+# This file was generated by 'versioneer.py' (0.15) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
+
+import json
+import sys
+
+version_json = '''
+%s
+''' # END VERSION_JSON
+
+
+def get_versions():
+ return json.loads(version_json)
+"""
+
+
+def versions_from_file(filename):
+ try:
+ with open(filename) as f:
+ contents = f.read()
+ except EnvironmentError:
+ raise NotThisMethod("unable to read _version.py")
+ mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
+ contents, re.M | re.S)
+ if not mo:
+ raise NotThisMethod("no version_json in _version.py")
+ return json.loads(mo.group(1))
+
+
+def write_to_version_file(filename, versions):
+ os.unlink(filename)
+ contents = json.dumps(versions, sort_keys=True,
+ indent=1, separators=(",", ": "))
+ with open(filename, "w") as f:
+ f.write(SHORT_VERSION_PY % contents)
+
+ print("set %s to '%s'" % (filename, versions["version"]))
+
+
+def plus_or_dot(pieces):
+ if "+" in pieces.get("closest-tag", ""):
+ return "."
+ return "+"
+
+
+def render_pep440(pieces):
+ # now build up version string, with post-release "local version
+ # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+
+ # exceptions:
+ # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += plus_or_dot(pieces)
+ rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ else:
+ # exception #1
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"],
+ pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ return rendered
+
+
+def render_pep440_pre(pieces):
+ # TAG[.post.devDISTANCE] . No -dirty
+
+ # exceptions:
+ # 1: no tags. 0.post.devDISTANCE
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += ".post.dev%d" % pieces["distance"]
+ else:
+ # exception #1
+ rendered = "0.post.dev%d" % pieces["distance"]
+ return rendered
+
+
+def render_pep440_post(pieces):
+ # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
+ # .dev0 sorts backwards (a dirty tree will appear "older" than the
+ # corresponding clean one), but you shouldn't be releasing software with
+ # -dirty anyways.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += plus_or_dot(pieces)
+ rendered += "g%s" % pieces["short"]
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += "+g%s" % pieces["short"]
+ return rendered
+
+
+def render_pep440_old(pieces):
+ # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ return rendered
+
+
+def render_git_describe(pieces):
+ # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
+ # --always'
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render_git_describe_long(pieces):
+ # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
+ # --always -long'. The distance/hash is unconditional.
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render(pieces, style):
+ if pieces["error"]:
+ return {"version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"]}
+
+ if not style or style == "default":
+ style = "pep440" # the default
+
+ if style == "pep440":
+ rendered = render_pep440(pieces)
+ elif style == "pep440-pre":
+ rendered = render_pep440_pre(pieces)
+ elif style == "pep440-post":
+ rendered = render_pep440_post(pieces)
+ elif style == "pep440-old":
+ rendered = render_pep440_old(pieces)
+ elif style == "git-describe":
+ rendered = render_git_describe(pieces)
+ elif style == "git-describe-long":
+ rendered = render_git_describe_long(pieces)
+ else:
+ raise ValueError("unknown style '%s'" % style)
+
+ return {"version": rendered, "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"], "error": None}
+
+
+class VersioneerBadRootError(Exception):
+ pass
+
+
+def get_versions(verbose=False):
+ # returns dict with two keys: 'version' and 'full'
+
+ if "versioneer" in sys.modules:
+ # see the discussion in cmdclass.py:get_cmdclass()
+ del sys.modules["versioneer"]
+
+ root = get_root()
+ cfg = get_config_from_root(root)
+
+ assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
+ handlers = HANDLERS.get(cfg.VCS)
+ assert handlers, "unrecognized VCS '%s'" % cfg.VCS
+ verbose = verbose or cfg.verbose
+ assert cfg.versionfile_source is not None, \
+ "please set versioneer.versionfile_source"
+ assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
+
+ versionfile_abs = os.path.join(root, cfg.versionfile_source)
+
+ # extract version from first of: _version.py, VCS command (e.g. 'git
+ # describe'), parentdir. This is meant to work for developers using a
+ # source checkout, for users of a tarball created by 'setup.py sdist',
+ # and for users of a tarball/zipball created by 'git archive' or github's
+ # download-from-tag feature or the equivalent in other VCSes.
+
+ get_keywords_f = handlers.get("get_keywords")
+ from_keywords_f = handlers.get("keywords")
+ if get_keywords_f and from_keywords_f:
+ try:
+ keywords = get_keywords_f(versionfile_abs)
+ ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
+ if verbose:
+ print("got version from expanded keyword %s" % ver)
+ return ver
+ except NotThisMethod:
+ pass
+
+ try:
+ ver = versions_from_file(versionfile_abs)
+ if verbose:
+ print("got version from file %s %s" % (versionfile_abs, ver))
+ return ver
+ except NotThisMethod:
+ pass
+
+ from_vcs_f = handlers.get("pieces_from_vcs")
+ if from_vcs_f:
+ try:
+ pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
+ ver = render(pieces, cfg.style)
+ if verbose:
+ print("got version from VCS %s" % ver)
+ return ver
+ except NotThisMethod:
+ pass
+
+ try:
+ if cfg.parentdir_prefix:
+ ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
+ if verbose:
+ print("got version from parentdir %s" % ver)
+ return ver
+ except NotThisMethod:
+ pass
+
+ if verbose:
+ print("unable to compute version")
+
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None, "error": "unable to compute version"}
+
+
+def get_version():
+ return get_versions()["version"]
+
+
+def get_cmdclass():
+ if "versioneer" in sys.modules:
+ del sys.modules["versioneer"]
+ # this fixes the "python setup.py develop" case (also 'install' and
+ # 'easy_install .'), in which subdependencies of the main project are
+ # built (using setup.py bdist_egg) in the same python process. Assume
+ # a main project A and a dependency B, which use different versions
+ # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
+ # sys.modules by the time B's setup.py is executed, causing B to run
+ # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
+ # sandbox that restores sys.modules to it's pre-build state, so the
+ # parent is protected against the child's "import versioneer". By
+ # removing ourselves from sys.modules here, before the child build
+ # happens, we protect the child from the parent's versioneer too.
+ # Also see https://github.com/warner/python-versioneer/issues/52
+
+ cmds = {}
+
+ # we add "version" to both distutils and setuptools
+ from distutils.core import Command
+
+ class cmd_version(Command):
+ description = "report generated version string"
+ user_options = []
+ boolean_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ vers = get_versions(verbose=True)
+ print("Version: %s" % vers["version"])
+ print(" full-revisionid: %s" % vers.get("full-revisionid"))
+ print(" dirty: %s" % vers.get("dirty"))
+ if vers["error"]:
+ print(" error: %s" % vers["error"])
+ cmds["version"] = cmd_version
+
+ # we override "build_py" in both distutils and setuptools
+ #
+ # most invocation pathways end up running build_py:
+ # distutils/build -> build_py
+ # distutils/install -> distutils/build ->..
+ # setuptools/bdist_wheel -> distutils/install ->..
+ # setuptools/bdist_egg -> distutils/install_lib -> build_py
+ # setuptools/install -> bdist_egg ->..
+ # setuptools/develop -> ?
+
+ from distutils.command.build_py import build_py as _build_py
+
+ class cmd_build_py(_build_py):
+ def run(self):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ versions = get_versions()
+ _build_py.run(self)
+ # now locate _version.py in the new build/ directory and replace
+ # it with an updated value
+ if cfg.versionfile_build:
+ target_versionfile = os.path.join(self.build_lib,
+ cfg.versionfile_build)
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile, versions)
+ cmds["build_py"] = cmd_build_py
+
+ if "cx_Freeze" in sys.modules: # cx_freeze enabled?
+ from cx_Freeze.dist import build_exe as _build_exe
+
+ class cmd_build_exe(_build_exe):
+ def run(self):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ versions = get_versions()
+ target_versionfile = cfg.versionfile_source
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile, versions)
+
+ _build_exe.run(self)
+ os.unlink(target_versionfile)
+ with open(cfg.versionfile_source, "w") as f:
+ LONG = LONG_VERSION_PY[cfg.VCS]
+ f.write(LONG %
+ {"DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ })
+ cmds["build_exe"] = cmd_build_exe
+ del cmds["build_py"]
+
+ # we override different "sdist" commands for both environments
+ if "setuptools" in sys.modules:
+ from setuptools.command.sdist import sdist as _sdist
+ else:
+ from distutils.command.sdist import sdist as _sdist
+
+ class cmd_sdist(_sdist):
+ def run(self):
+ versions = get_versions()
+ self._versioneer_generated_versions = versions
+ # unless we update this, the command will keep using the old
+ # version
+ self.distribution.metadata.version = versions["version"]
+ return _sdist.run(self)
+
+ def make_release_tree(self, base_dir, files):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ _sdist.make_release_tree(self, base_dir, files)
+ # now locate _version.py in the new base_dir directory
+ # (remembering that it may be a hardlink) and replace it with an
+ # updated value
+ target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile,
+ self._versioneer_generated_versions)
+ cmds["sdist"] = cmd_sdist
+
+ return cmds
+
+
+CONFIG_ERROR = """
+setup.cfg is missing the necessary Versioneer configuration. You need
+a section like:
+
+ [versioneer]
+ VCS = git
+ style = pep440
+ versionfile_source = src/myproject/_version.py
+ versionfile_build = myproject/_version.py
+ tag_prefix = ""
+ parentdir_prefix = myproject-
+
+You will also need to edit your setup.py to use the results:
+
+ import versioneer
+ setup(version=versioneer.get_version(),
+ cmdclass=versioneer.get_cmdclass(), ...)
+
+Please read the docstring in ./versioneer.py for configuration instructions,
+edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
+"""
+
+SAMPLE_CONFIG = """
+# See the docstring in versioneer.py for instructions. Note that you must
+# re-run 'versioneer.py setup' after changing this section, and commit the
+# resulting files.
+
+[versioneer]
+#VCS = git
+#style = pep440
+#versionfile_source =
+#versionfile_build =
+#tag_prefix =
+#parentdir_prefix =
+
+"""
+
+INIT_PY_SNIPPET = """
+from ._version import get_versions
+__version__ = get_versions()['version']
+del get_versions
+"""
+
+
+def do_setup():
+ root = get_root()
+ try:
+ cfg = get_config_from_root(root)
+ except (EnvironmentError, configparser.NoSectionError,
+ configparser.NoOptionError) as e:
+ if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
+ print("Adding sample versioneer config to setup.cfg",
+ file=sys.stderr)
+ with open(os.path.join(root, "setup.cfg"), "a") as f:
+ f.write(SAMPLE_CONFIG)
+ print(CONFIG_ERROR, file=sys.stderr)
+ return 1
+
+ print(" creating %s" % cfg.versionfile_source)
+ with open(cfg.versionfile_source, "w") as f:
+ LONG = LONG_VERSION_PY[cfg.VCS]
+ f.write(LONG % {"DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ })
+
+ ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
+ "__init__.py")
+ if os.path.exists(ipy):
+ try:
+ with open(ipy, "r") as f:
+ old = f.read()
+ except EnvironmentError:
+ old = ""
+ if INIT_PY_SNIPPET not in old:
+ print(" appending to %s" % ipy)
+ with open(ipy, "a") as f:
+ f.write(INIT_PY_SNIPPET)
+ else:
+ print(" %s unmodified" % ipy)
+ else:
+ print(" %s doesn't exist, ok" % ipy)
+ ipy = None
+
+ # Make sure both the top-level "versioneer.py" and versionfile_source
+ # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
+ # they'll be copied into source distributions. Pip won't be able to
+ # install the package without this.
+ manifest_in = os.path.join(root, "MANIFEST.in")
+ simple_includes = set()
+ try:
+ with open(manifest_in, "r") as f:
+ for line in f:
+ if line.startswith("include "):
+ for include in line.split()[1:]:
+ simple_includes.add(include)
+ except EnvironmentError:
+ pass
+ # That doesn't cover everything MANIFEST.in can do
+ # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
+ # it might give some false negatives. Appending redundant 'include'
+ # lines is safe, though.
+ if "versioneer.py" not in simple_includes:
+ print(" appending 'versioneer.py' to MANIFEST.in")
+ with open(manifest_in, "a") as f:
+ f.write("include versioneer.py\n")
+ else:
+ print(" 'versioneer.py' already in MANIFEST.in")
+ if cfg.versionfile_source not in simple_includes:
+ print(" appending versionfile_source ('%s') to MANIFEST.in" %
+ cfg.versionfile_source)
+ with open(manifest_in, "a") as f:
+ f.write("include %s\n" % cfg.versionfile_source)
+ else:
+ print(" versionfile_source already in MANIFEST.in")
+
+ # Make VCS-specific changes. For git, this means creating/changing
+ # .gitattributes to mark _version.py for export-time keyword
+ # substitution.
+ do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
+ return 0
+
+
+def scan_setup_py():
+ found = set()
+ setters = False
+ errors = 0
+ with open("setup.py", "r") as f:
+ for line in f.readlines():
+ if "import versioneer" in line:
+ found.add("import")
+ if "versioneer.get_cmdclass()" in line:
+ found.add("cmdclass")
+ if "versioneer.get_version()" in line:
+ found.add("get_version")
+ if "versioneer.VCS" in line:
+ setters = True
+ if "versioneer.versionfile_source" in line:
+ setters = True
+ if len(found) != 3:
+ print("")
+ print("Your setup.py appears to be missing some important items")
+ print("(but I might be wrong). Please make sure it has something")
+ print("roughly like the following:")
+ print("")
+ print(" import versioneer")
+ print(" setup( version=versioneer.get_version(),")
+ print(" cmdclass=versioneer.get_cmdclass(), ...)")
+ print("")
+ errors += 1
+ if setters:
+ print("You should remove lines like 'versioneer.VCS = ' and")
+ print("'versioneer.versionfile_source = ' . This configuration")
+ print("now lives in setup.cfg, and should be removed from setup.py")
+ print("")
+ errors += 1
+ return errors
+
+if __name__ == "__main__":
+ cmd = sys.argv[1]
+ if cmd == "setup":
+ errors = do_setup()
+ errors += scan_setup_py()
+ if errors:
+ sys.exit(1)
| - use versioneer, for PEP440 version strings, closes #9518
- use binstar build
- OSX stack testing, closes #7127
This removes all of the original manual-ish logic to create/manage the versioning in `setup.py`, and replaces with `versioneer.py` (bundled inline) to handle correct versioning for release, development, and conda builds.
This actually was tricky simply because of the way the tags are setup, they are like: `v0.16.2` which is pretty non-standard, e.g. most would simply have `0.16.0`. This messes with comparision using `LooseVersion` (of course you _can_ replace this tag as versioneer does, its called the `tag_prefix`, but his makes other things require a slightly different method).
So bottom line we get PEP compliant versions like this:
```
In [1]: pd.__version__
Out[1]: '0.16.2+10.gc01927b'
```
IOW, 10 commits past the last tag and the git hash.
Now also able to build on `anaconda.org` for all platforms (`linux-32,linux-64,OSX,win-32,win-64`). This part is not 100% done, but quite close. The intention is to have testing and artifact building (well at least for say 2.7/3.4) for linux,osx,win that can be directly `conda install pandas -c pandas` to get the dev version.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10370 | 2015-06-17T01:11:24Z | 2015-07-16T00:08:25Z | 2015-07-16T00:08:25Z | 2015-07-16T11:44:37Z |
BUG: drop_duplicates drops name(s). | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index fc2e6b1cb936f..d4c49b82ed2e4 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -63,7 +63,7 @@ Bug Fixes
~~~~~~~~~
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
-
+- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`)
- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index fad71c94cc417..dc2da1177330e 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2573,14 +2573,12 @@ def drop(self, labels, errors='raise'):
@Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, take_last=False):
- result = super(Index, self).drop_duplicates(take_last=take_last)
- return self._constructor(result)
+ return super(Index, self).drop_duplicates(take_last=take_last)
@Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, take_last=False):
return super(Index, self).duplicated(take_last=take_last)
-
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index f422c3b49b691..7dab82ea2b63d 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -207,10 +207,19 @@ def test_duplicates(self):
if not len(ind):
continue
+ if isinstance(ind, MultiIndex):
+ continue
idx = self._holder([ind[0]]*5)
self.assertFalse(idx.is_unique)
self.assertTrue(idx.has_duplicates)
+ # GH 10115
+ # preserve names
+ idx.name = 'foo'
+ result = idx.drop_duplicates()
+ self.assertEqual(result.name, 'foo')
+ self.assert_index_equal(result, Index([ind[0]],name='foo'))
+
def test_sort(self):
for ind in self.indices.values():
self.assertRaises(TypeError, ind.sort)
@@ -1830,10 +1839,13 @@ def test_reindexing(self):
def test_duplicates(self):
- idx = CategoricalIndex([0, 0, 0])
+ idx = CategoricalIndex([0, 0, 0], name='foo')
self.assertFalse(idx.is_unique)
self.assertTrue(idx.has_duplicates)
+ expected = CategoricalIndex([0], name='foo')
+ self.assert_index_equal(idx.drop_duplicates(), expected)
+
def test_get_indexer(self):
idx1 = CategoricalIndex(list('aabcde'),categories=list('edabc'))
@@ -4603,6 +4615,19 @@ def check(nlevels, with_nulls):
self.assert_array_equal(mi.duplicated(),
np.zeros(len(mi), dtype='bool'))
+ def test_duplicate_meta_data(self):
+ # GH 10115
+ index = MultiIndex(levels=[[0, 1], [0, 1, 2]],
+ labels=[[0, 0, 0, 0, 1, 1, 1],
+ [0, 1, 2, 0, 0, 1, 2]])
+ for idx in [index,
+ index.set_names([None, None]),
+ index.set_names([None, 'Num']),
+ index.set_names(['Upper','Num']),
+ ]:
+ self.assertTrue(idx.has_duplicates)
+ self.assertEqual(idx.drop_duplicates().names, idx.names)
+
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index fc432d5236f62..dc0bc14ce1ea6 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -330,6 +330,20 @@ def test_getitem(self):
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
+ def test_drop_duplicates_metadata(self):
+ #GH 10115
+ idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
+ result = idx.drop_duplicates()
+ self.assert_index_equal(idx, result)
+ self.assertEqual(idx.freq, result.freq)
+
+ idx_dup = idx.append(idx)
+ self.assertIsNone(idx_dup.freq) # freq is reset
+ result = idx_dup.drop_duplicates()
+ self.assert_index_equal(idx, result)
+ self.assertIsNone(result.freq)
+
+
class TestTimedeltaIndexOps(Ops):
def setUp(self):
@@ -802,6 +816,20 @@ def test_getitem(self):
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
+ def test_drop_duplicates_metadata(self):
+ #GH 10115
+ idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
+ result = idx.drop_duplicates()
+ self.assert_index_equal(idx, result)
+ self.assertEqual(idx.freq, result.freq)
+
+ idx_dup = idx.append(idx)
+ self.assertIsNone(idx_dup.freq) # freq is reset
+ result = idx_dup.drop_duplicates()
+ self.assert_index_equal(idx, result)
+ self.assertIsNone(result.freq)
+
+
class TestPeriodIndexOps(Ops):
def setUp(self):
@@ -1228,6 +1256,18 @@ def test_value_counts_unique(self):
tm.assert_index_equal(idx.unique(), exp_idx)
+ def test_drop_duplicates_metadata(self):
+ #GH 10115
+ idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
+ result = idx.drop_duplicates()
+ self.assert_index_equal(idx, result)
+ self.assertEqual(idx.freq, result.freq)
+
+ idx_dup = idx.append(idx) # freq will not be reset
+ result = idx_dup.drop_duplicates()
+ self.assert_index_equal(idx, result)
+ self.assertEqual(idx.freq, result.freq)
+
if __name__ == '__main__':
import nose
| Closes #10115. Closes #10116.
Based on @seth-p and @jreback 's fix, added tets for datetime-likes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10367 | 2015-06-16T21:58:38Z | 2015-06-23T14:11:23Z | 2015-06-23T14:11:22Z | 2015-06-23T14:12:49Z |
DOC: consistent imports (GH9886) part III | diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 9221f2685d79b..5a62e7dccea34 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -1,18 +1,23 @@
.. currentmodule:: pandas
-.. _dsintro:
-
.. ipython:: python
:suppress:
import numpy as np
- from pandas import *
- randn = np.random.randn
np.set_printoptions(precision=4, suppress=True)
- set_option('display.precision', 4, 'display.max_columns', 8)
- options.display.max_rows=15
import pandas as pd
+ pd.set_option('display.precision', 4, 'display.max_columns', 8)
+ pd.options.display.max_rows = 15
+
+ import matplotlib
+ try:
+ matplotlib.style.use('ggplot')
+ except AttributeError:
+ pd.options.display.mpl_style = 'default'
+ import matplotlib.pyplot as plt
+ plt.close('all')
+.. _dsintro:
************************
Intro to Data Structures
@@ -26,9 +31,7 @@ objects. To get started, import numpy and load pandas into your namespace:
.. ipython:: python
import numpy as np
- # will use a lot in examples
- randn = np.random.randn
- from pandas import *
+ import pandas as pd
Here is a basic tenet to keep in mind: **data alignment is intrinsic**. The link
between labels and data will not be broken unless done so explicitly by you.
@@ -36,13 +39,6 @@ between labels and data will not be broken unless done so explicitly by you.
We'll give a brief intro to the data structures, then consider all of the broad
categories of functionality and methods in separate sections.
-When using pandas, we recommend the following import convention:
-
-.. code-block:: python
-
- import pandas as pd
-
-
.. _basics.series:
Series
@@ -60,7 +56,7 @@ labels are collectively referred to as the **index**. The basic method to create
::
- >>> s = Series(data, index=index)
+ >>> s = pd.Series(data, index=index)
Here, ``data`` can be many different things:
@@ -78,11 +74,11 @@ index is passed, one will be created having values ``[0, ..., len(data) - 1]``.
.. ipython:: python
- s = Series(randn(5), index=['a', 'b', 'c', 'd', 'e'])
+ s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
s
s.index
- Series(randn(5))
+ pd.Series(np.random.randn(5))
.. note::
@@ -101,8 +97,8 @@ constructed from the sorted keys of the dict, if possible.
.. ipython:: python
d = {'a' : 0., 'b' : 1., 'c' : 2.}
- Series(d)
- Series(d, index=['b', 'c', 'd', 'a'])
+ pd.Series(d)
+ pd.Series(d, index=['b', 'c', 'd', 'a'])
.. note::
@@ -113,7 +109,7 @@ provided. The value will be repeated to match the length of **index**
.. ipython:: python
- Series(5., index=['a', 'b', 'c', 'd', 'e'])
+ pd.Series(5., index=['a', 'b', 'c', 'd', 'e'])
Series is ndarray-like
~~~~~~~~~~~~~~~~~~~~~~
@@ -211,7 +207,7 @@ Series can also have a ``name`` attribute:
.. ipython:: python
- s = Series(np.random.randn(5), name='something')
+ s = pd.Series(np.random.randn(5), name='something')
s
s.name
@@ -254,13 +250,13 @@ keys.
.. ipython:: python
- d = {'one' : Series([1., 2., 3.], index=['a', 'b', 'c']),
- 'two' : Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
- df = DataFrame(d)
+ d = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
+ 'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
+ df = pd.DataFrame(d)
df
- DataFrame(d, index=['d', 'b', 'a'])
- DataFrame(d, index=['d', 'b', 'a'], columns=['two', 'three'])
+ pd.DataFrame(d, index=['d', 'b', 'a'])
+ pd.DataFrame(d, index=['d', 'b', 'a'], columns=['two', 'three'])
The row and column labels can be accessed respectively by accessing the
**index** and **columns** attributes:
@@ -286,8 +282,8 @@ result will be ``range(n)``, where ``n`` is the array length.
d = {'one' : [1., 2., 3., 4.],
'two' : [4., 3., 2., 1.]}
- DataFrame(d)
- DataFrame(d, index=['a', 'b', 'c', 'd'])
+ pd.DataFrame(d)
+ pd.DataFrame(d, index=['a', 'b', 'c', 'd'])
From structured or record array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -296,12 +292,12 @@ This case is handled identically to a dict of arrays.
.. ipython:: python
- data = np.zeros((2,),dtype=[('A', 'i4'),('B', 'f4'),('C', 'a10')])
- data[:] = [(1,2.,'Hello'),(2,3.,"World")]
+ data = np.zeros((2,), dtype=[('A', 'i4'),('B', 'f4'),('C', 'a10')])
+ data[:] = [(1,2.,'Hello'), (2,3.,"World")]
- DataFrame(data)
- DataFrame(data, index=['first', 'second'])
- DataFrame(data, columns=['C', 'A', 'B'])
+ pd.DataFrame(data)
+ pd.DataFrame(data, index=['first', 'second'])
+ pd.DataFrame(data, columns=['C', 'A', 'B'])
.. note::
@@ -316,9 +312,9 @@ From a list of dicts
.. ipython:: python
data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
- DataFrame(data2)
- DataFrame(data2, index=['first', 'second'])
- DataFrame(data2, columns=['a', 'b'])
+ pd.DataFrame(data2)
+ pd.DataFrame(data2, index=['first', 'second'])
+ pd.DataFrame(data2, columns=['a', 'b'])
.. _basics.dataframe.from_dict_of_tuples:
@@ -329,11 +325,11 @@ You can automatically create a multi-indexed frame by passing a tuples dictionar
.. ipython:: python
- DataFrame({('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
- ('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
- ('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
- ('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
- ('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}})
+ pd.DataFrame({('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
+ ('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
+ ('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
+ ('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
+ ('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}})
.. _basics.dataframe.from_series:
@@ -376,7 +372,7 @@ For example:
.. ipython:: python
data
- DataFrame.from_records(data, index='C')
+ pd.DataFrame.from_records(data, index='C')
.. _basics.dataframe.from_items:
@@ -391,15 +387,15 @@ of columns:
.. ipython:: python
- DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
+ pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
If you pass ``orient='index'``, the keys will be the row labels. But in this
case you must also pass the desired column names:
.. ipython:: python
- DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
- orient='index', columns=['one', 'two', 'three'])
+ pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
+ orient='index', columns=['one', 'two', 'three'])
Column selection, addition, deletion
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -465,7 +461,7 @@ derived from existing columns.
.. ipython:: python
- iris = read_csv('data/iris.data')
+ iris = pd.read_csv('data/iris.data')
iris.head()
(iris.assign(sepal_ratio = iris['SepalWidth'] / iris['SepalLength'])
@@ -564,8 +560,8 @@ union of the column and row labels.
.. ipython:: python
- df = DataFrame(randn(10, 4), columns=['A', 'B', 'C', 'D'])
- df2 = DataFrame(randn(7, 3), columns=['A', 'B', 'C'])
+ df = pd.DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
+ df2 = pd.DataFrame(np.random.randn(7, 3), columns=['A', 'B', 'C'])
df + df2
When doing an operation between DataFrame and Series, the default behavior is
@@ -583,8 +579,8 @@ also contains dates, the broadcasting will be column-wise:
.. ipython:: python
:okwarning:
- index = date_range('1/1/2000', periods=8)
- df = DataFrame(randn(8, 3), index=index, columns=list('ABC'))
+ index = pd.date_range('1/1/2000', periods=8)
+ df = pd.DataFrame(np.random.randn(8, 3), index=index, columns=list('ABC'))
df
type(df['A'])
df - df['A']
@@ -619,8 +615,8 @@ Boolean operators work as well:
.. ipython:: python
- df1 = DataFrame({'a' : [1, 0, 1], 'b' : [0, 1, 1] }, dtype=bool)
- df2 = DataFrame({'a' : [0, 1, 1], 'b' : [1, 1, 0] }, dtype=bool)
+ df1 = pd.DataFrame({'a' : [1, 0, 1], 'b' : [0, 1, 1] }, dtype=bool)
+ df2 = pd.DataFrame({'a' : [0, 1, 1], 'b' : [1, 1, 0] }, dtype=bool)
df1 & df2
df1 | df2
df1 ^ df2
@@ -660,7 +656,7 @@ Similarly, the dot method on Series implements dot product:
.. ipython:: python
- s1 = Series(np.arange(5,10))
+ s1 = pd.Series(np.arange(5,10))
s1.dot(s1)
DataFrame is not intended to be a drop-in replacement for ndarray as its
@@ -682,7 +678,7 @@ R package):
.. ipython:: python
- baseball = read_csv('data/baseball.csv')
+ baseball = pd.read_csv('data/baseball.csv')
print(baseball)
baseball.info()
@@ -704,21 +700,21 @@ default:
.. ipython:: python
- DataFrame(randn(3, 12))
+ pd.DataFrame(np.random.randn(3, 12))
You can change how much to print on a single row by setting the ``display.width``
option:
.. ipython:: python
- set_option('display.width', 40) # default is 80
+ pd.set_option('display.width', 40) # default is 80
- DataFrame(randn(3, 12))
+ pd.DataFrame(np.random.randn(3, 12))
.. ipython:: python
:suppress:
- reset_option('display.width')
+ pd.reset_option('display.width')
You can also disable this feature via the ``expand_frame_repr`` option.
This will print the table in one block.
@@ -731,8 +727,8 @@ accessed like attributes:
.. ipython:: python
- df = DataFrame({'foo1' : np.random.randn(5),
- 'foo2' : np.random.randn(5)})
+ df = pd.DataFrame({'foo1' : np.random.randn(5),
+ 'foo2' : np.random.randn(5)})
df
df.foo1
@@ -770,9 +766,9 @@ From 3D ndarray with optional axis labels
.. ipython:: python
- wp = Panel(randn(2, 5, 4), items=['Item1', 'Item2'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
+ wp = pd.Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
wp
@@ -781,9 +777,9 @@ From dict of DataFrame objects
.. ipython:: python
- data = {'Item1' : DataFrame(randn(4, 3)),
- 'Item2' : DataFrame(randn(4, 2))}
- Panel(data)
+ data = {'Item1' : pd.DataFrame(np.random.randn(4, 3)),
+ 'Item2' : pd.DataFrame(np.random.randn(4, 2))}
+ pd.Panel(data)
Note that the values in the dict need only be **convertible to
DataFrame**. Thus, they can be any of the other valid inputs to DataFrame as
@@ -803,7 +799,7 @@ For example, compare to the construction above:
.. ipython:: python
- Panel.from_dict(data, orient='minor')
+ pd.Panel.from_dict(data, orient='minor')
Orient is especially useful for mixed-type DataFrames. If you pass a dict of
DataFrame objects with mixed-type columns, all of the data will get upcasted to
@@ -811,11 +807,11 @@ DataFrame objects with mixed-type columns, all of the data will get upcasted to
.. ipython:: python
- df = DataFrame({'a': ['foo', 'bar', 'baz'],
- 'b': np.random.randn(3)})
+ df = pd.DataFrame({'a': ['foo', 'bar', 'baz'],
+ 'b': np.random.randn(3)})
df
data = {'item1': df, 'item2': df}
- panel = Panel.from_dict(data, orient='minor')
+ panel = pd.Panel.from_dict(data, orient='minor')
panel['a']
panel['b']
panel['b'].dtypes
@@ -838,8 +834,8 @@ a DataFrame with a two-level index to a Panel.
.. ipython:: python
- midx = MultiIndex(levels=[['one', 'two'], ['x','y']], labels=[[1,1,0,0],[1,0,1,0]])
- df = DataFrame({'A' : [1, 2, 3, 4], 'B': [5, 6, 7, 8]}, index=midx)
+ midx = pd.MultiIndex(levels=[['one', 'two'], ['x','y']], labels=[[1,1,0,0],[1,0,1,0]])
+ df = pd.DataFrame({'A' : [1, 2, 3, 4], 'B': [5, 6, 7, 8]}, index=midx)
df.to_panel()
.. _dsintro.panel_item_selection:
@@ -897,7 +893,7 @@ Another way to change the dimensionality of an object is to ``squeeze`` a 1-len
.. ipython:: python
wp.reindex(items=['Item1']).squeeze()
- wp.reindex(items=['Item1'],minor=['B']).squeeze()
+ wp.reindex(items=['Item1'], minor=['B']).squeeze()
Conversion to DataFrame
@@ -910,9 +906,9 @@ method:
.. ipython:: python
- panel = Panel(np.random.randn(3, 5, 4), items=['one', 'two', 'three'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['a', 'b', 'c', 'd'])
+ panel = pd.Panel(np.random.randn(3, 5, 4), items=['one', 'two', 'three'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['a', 'b', 'c', 'd'])
panel.to_frame()
@@ -931,7 +927,6 @@ containers.
DataFrames
- **minor_axis**: axis 3, it is the **columns** of each of the DataFrames
-
``Panel4D`` is a sub-class of ``Panel``, so most methods that work on Panels are
applicable to Panel4D. The following methods are disabled:
@@ -944,11 +939,11 @@ From 4D ndarray with optional axis labels
.. ipython:: python
- p4d = Panel4D(randn(2, 2, 5, 4),
- labels=['Label1','Label2'],
- items=['Item1', 'Item2'],
- major_axis=date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
+ p4d = pd.Panel4D(np.random.randn(2, 2, 5, 4),
+ labels=['Label1','Label2'],
+ items=['Item1', 'Item2'],
+ major_axis=pd.date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
p4d
@@ -957,9 +952,9 @@ From dict of Panel objects
.. ipython:: python
- data = { 'Label1' : Panel({ 'Item1' : DataFrame(randn(4, 3)) }),
- 'Label2' : Panel({ 'Item2' : DataFrame(randn(4, 2)) }) }
- Panel4D(data)
+ data = { 'Label1' : pd.Panel({ 'Item1' : pd.DataFrame(np.random.randn(4, 3)) }),
+ 'Label2' : pd.Panel({ 'Item2' : pd.DataFrame(np.random.randn(4, 2)) }) }
+ pd.Panel4D(data)
Note that the values in the dict need only be **convertible to Panels**.
Thus, they can be any of the other valid inputs to Panel as per above.
@@ -1022,7 +1017,7 @@ Here we slice to a Panel4D.
orders = [ 'cool', 'labels','items','major_axis','minor_axis'],
slices = { 'labels' : 'labels', 'items' : 'items',
'major_axis' : 'major_axis', 'minor_axis' : 'minor_axis' },
- slicer = Panel4D,
+ slicer = pd.Panel4D,
aliases = { 'major' : 'major_axis', 'minor' : 'minor_axis' },
stat_axis = 2)
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index 517c91c93d821..4ada4d4bbdfe5 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -5,17 +5,14 @@
.. ipython:: python
:suppress:
- import os
- import csv
- from pandas import DataFrame, Series
- import pandas as pd
- pd.options.display.max_rows=15
-
import numpy as np
np.random.seed(123456)
- randn = np.random.randn
- randint = np.random.randint
np.set_printoptions(precision=4, suppress=True)
+ import pandas as pd
+ pd.options.display.max_rows=15
+
+ import os
+ import csv
*********************
@@ -49,7 +46,10 @@ We have a DataFrame to which we want to apply a function row-wise.
.. ipython:: python
- df = DataFrame({'a': randn(1000), 'b': randn(1000),'N': randint(100, 1000, (1000)), 'x': 'x'})
+ df = pd.DataFrame({'a': np.random.randn(1000),
+ 'b': np.random.randn(1000),
+ 'N': np.random.randint(100, 1000, (1000)),
+ 'x': 'x'})
df
Here's the function in pure python:
@@ -94,7 +94,8 @@ hence we'll concentrate our efforts cythonizing these two functions.
Plain cython
~~~~~~~~~~~~
-First we're going to need to import the cython magic function to ipython:
+First we're going to need to import the cython magic function to ipython (for
+cython versions >=0.21 you can use ``%load_ext Cython``):
.. ipython:: python
@@ -335,7 +336,7 @@ We simply take the plain python code from above and annotate with the ``@jit`` d
def compute_numba(df):
result = apply_integrate_f_numba(df['a'].values, df['b'].values, df['N'].values)
- return Series(result, index=df.index, name='result')
+ return pd.Series(result, index=df.index, name='result')
Similar to above, we directly pass ``numpy`` arrays directly to the numba function. Further
we are wrapping the results to provide a nice interface by passing/returning pandas objects.
@@ -433,18 +434,13 @@ First let's create a few decent-sized arrays to play with:
.. ipython:: python
- import pandas as pd
- from pandas import DataFrame, Series
- from numpy.random import randn
- import numpy as np
nrows, ncols = 20000, 100
- df1, df2, df3, df4 = [DataFrame(randn(nrows, ncols)) for _ in range(4)]
+ df1, df2, df3, df4 = [pd.DataFrame(np.random.randn(nrows, ncols)) for _ in range(4)]
Now let's compare adding them together using plain ol' Python versus
:func:`~pandas.eval`:
-
.. ipython:: python
%timeit df1 + df2 + df3 + df4
@@ -467,10 +463,9 @@ Now let's do the same thing but with comparisons:
:func:`~pandas.eval` also works with unaligned pandas objects:
-
.. ipython:: python
- s = Series(randn(50))
+ s = pd.Series(np.random.randn(50))
%timeit df1 + df2 + df3 + df4 + s
.. ipython:: python
@@ -515,7 +510,7 @@ evaluate an expression in the "context" of a :class:`~pandas.DataFrame`.
.. ipython:: python
- df = DataFrame(randn(5, 2), columns=['a', 'b'])
+ df = pd.DataFrame(np.random.randn(5, 2), columns=['a', 'b'])
df.eval('a + b')
Any expression that is a valid :func:`pandas.eval` expression is also a valid
@@ -530,7 +525,7 @@ it must be a valid Python identifier.
.. ipython:: python
- df = DataFrame(dict(a=range(5), b=range(5, 10)))
+ df = pd.DataFrame(dict(a=range(5), b=range(5, 10)))
df.eval('c = a + b')
df.eval('d = a + b + c')
df.eval('a = 1')
@@ -540,7 +535,7 @@ The equivalent in standard Python would be
.. ipython:: python
- df = DataFrame(dict(a=range(5), b=range(5, 10)))
+ df = pd.DataFrame(dict(a=range(5), b=range(5, 10)))
df['c'] = df.a + df.b
df['d'] = df.a + df.b + df.c
df['a'] = 1
@@ -555,8 +550,8 @@ For example,
.. code-block:: python
- df = DataFrame(randn(5, 2), columns=['a', 'b'])
- newcol = randn(len(df))
+ df = pd.DataFrame(np.random.randn(5, 2), columns=['a', 'b'])
+ newcol = np.random.randn(len(df))
df.eval('b + newcol')
UndefinedVariableError: name 'newcol' is not defined
@@ -567,8 +562,8 @@ expression by placing the ``@`` character in front of the name. For example,
.. ipython:: python
- df = DataFrame(randn(5, 2), columns=list('ab'))
- newcol = randn(len(df))
+ df = pd.DataFrame(np.random.randn(5, 2), columns=list('ab'))
+ newcol = np.random.randn(len(df))
df.eval('b + @newcol')
df.query('b < @newcol')
@@ -582,7 +577,7 @@ name in an expression.
.. ipython:: python
- a = randn()
+ a = np.random.randn()
df.query('@a < a')
df.loc[a < df.a] # same as the previous expression
@@ -710,8 +705,8 @@ you have an expression--for example
.. ipython:: python
- df = DataFrame({'strings': np.repeat(list('cba'), 3),
- 'nums': np.repeat(range(3), 3)})
+ df = pd.DataFrame({'strings': np.repeat(list('cba'), 3),
+ 'nums': np.repeat(range(3), 3)})
df
df.query('strings == "a" and nums == 1')
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 32290839ad71d..0ac33db8495c8 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -8,26 +8,18 @@ Frequently Asked Questions (FAQ)
.. ipython:: python
:suppress:
- from datetime import datetime
import numpy as np
np.random.seed(123456)
- from pandas import *
- options.display.max_rows=15
- randn = np.random.randn
- randint = np.random.randint
np.set_printoptions(precision=4, suppress=True)
- from dateutil.relativedelta import relativedelta
- from pandas.tseries.api import *
- from pandas.tseries.offsets import *
- import matplotlib.pyplot as plt
- plt.close('all')
+ import pandas as pd
+ pd.options.display.max_rows = 15
import matplotlib
try:
matplotlib.style.use('ggplot')
except AttributeError:
- options.display.mpl_style = 'default'
- from pandas.compat import lrange
-
+ pd.options.display.mpl_style = 'default'
+ import matplotlib.pyplot as plt
+ plt.close('all')
.. _df-memory-usage:
@@ -45,11 +37,11 @@ when calling ``df.info()``:
.. ipython:: python
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
- 'complex128', 'object', 'bool']
+ 'complex128', 'object', 'bool']
n = 5000
data = dict([ (t, np.random.randint(100, size=n).astype(t))
for t in dtypes])
- df = DataFrame(data)
+ df = pd.DataFrame(data)
df['categorical'] = df['object'].astype('category')
df.info()
@@ -126,14 +118,14 @@ pandas ``Period`` and ``PeriodIndex``:
.. ipython:: python
- pnow('D') # scikits.timeseries.now()
- Period(year=2007, month=3, day=15, freq='D')
- p = Period('1984Q3')
+ pd.pnow('D') # scikits.timeseries.now()
+ pd.Period(year=2007, month=3, day=15, freq='D')
+ p = pd.Period('1984Q3')
p
p.asfreq('D', 'start')
p.asfreq('D', 'end')
(p + 3).asfreq('T') + 6 * 60 + 30
- rng = period_range('1990', '2010', freq='A')
+ rng = pd.period_range('1990', '2010', freq='A')
rng
rng.asfreq('B', 'end') - 3
@@ -173,8 +165,8 @@ works on panels (3D). Here is some code that resamples daily data to montly:
.. ipython:: python
- rng = period_range('Jan-2000', periods=50, freq='M')
- data = Series(np.random.randn(50), index=rng)
+ rng = pd.period_range('Jan-2000', periods=50, freq='M')
+ data = pd.Series(np.random.randn(50), index=rng)
data
data.resample('A', how=np.mean)
@@ -186,11 +178,11 @@ adopted to pandas's data structures. For example:
.. ipython:: python
- rng = period_range('1987Q2', periods=10, freq='Q-DEC')
- data = Series(np.random.randn(10), index=rng)
+ rng = pd.period_range('1987Q2', periods=10, freq='Q-DEC')
+ data = pd.Series(np.random.randn(10), index=rng)
@savefig skts_ts_plot.png
- plt.figure(); data.plot()
+ data.plot()
Converting to and from period format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -215,8 +207,8 @@ with a DatetimeIndex to PeriodIndex:
.. ipython:: python
- rng = date_range('1/1/2000', periods=200, freq='D')
- data = Series(np.random.randn(200), index=rng)
+ rng = pd.date_range('1/1/2000', periods=200, freq='D')
+ data = pd.Series(np.random.randn(200), index=rng)
data[:10]
data.index
data.resample('M', kind='period')
@@ -226,8 +218,8 @@ interval (``'start'`` or ``'end'``) convention:
.. ipython:: python
- rng = period_range('Jan-2000', periods=50, freq='M')
- data = Series(np.random.randn(50), index=rng)
+ rng = pd.period_range('Jan-2000', periods=50, freq='M')
+ data = pd.Series(np.random.randn(50), index=rng)
resampled = data.resample('A', kind='timestamp', convention='end')
resampled.index
@@ -244,7 +236,7 @@ using something similar to the following:
x = np.array(list(range(10)), '>i4') # big endian
newx = x.byteswap().newbyteorder() # force native byteorder
- s = Series(newx)
+ s = pd.Series(newx)
See `the NumPy documentation on byte order
<http://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index addeddcb0bdde..cf4a86d530180 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -4,13 +4,11 @@
.. ipython:: python
:suppress:
- import os
import numpy as np
- from pandas import *
- options.display.max_rows=15
- randn = np.random.randn
np.set_printoptions(precision=4, suppress=True)
- from pandas.compat import lrange
+ import pandas as pd
+ pd.options.display.max_rows=15
+
*******************
Caveats and Gotchas
@@ -27,7 +25,7 @@ what the result of
.. code-block:: python
- >>> if Series([False, True, False]):
+ >>> if pd.Series([False, True, False]):
...
should be. Should it be ``True`` because it's not zero-length? ``False`` because there are ``False`` values?
@@ -64,10 +62,10 @@ To evaluate single-element pandas objects in a boolean context, use the method `
.. ipython:: python
- Series([True]).bool()
- Series([False]).bool()
- DataFrame([[True]]).bool()
- DataFrame([[False]]).bool()
+ pd.Series([True]).bool()
+ pd.Series([False]).bool()
+ pd.DataFrame([[True]]).bool()
+ pd.DataFrame([[False]]).bool()
Bitwise boolean
~~~~~~~~~~~~~~~
@@ -147,7 +145,7 @@ arrays. For example:
.. ipython:: python
- s = Series([1, 2, 3, 4, 5], index=list('abcde'))
+ s = pd.Series([1, 2, 3, 4, 5], index=list('abcde'))
s
s.dtype
@@ -228,9 +226,9 @@ following code will generate exceptions:
.. code-block:: python
- s = Series(range(5))
+ s = pd.Series(range(5))
s[-1]
- df = DataFrame(np.random.randn(5, 4))
+ df = pd.DataFrame(np.random.randn(5, 4))
df
df.ix[-2:]
@@ -255,7 +253,7 @@ consider the following Series:
.. ipython:: python
- s = Series(randn(6), index=list('abcdef'))
+ s = pd.Series(np.random.randn(6), index=list('abcdef'))
s
Suppose we wished to slice from ``c`` to ``e``, using integers this would be
@@ -294,8 +292,8 @@ concise means of selecting data from a pandas object:
.. ipython:: python
- df = DataFrame(randn(6, 4), columns=['one', 'two', 'three', 'four'],
- index=list('abcdef'))
+ df = pd.DataFrame(np.random.randn(6, 4), columns=['one', 'two', 'three', 'four'],
+ index=list('abcdef'))
df
df.ix[['b', 'c', 'e']]
@@ -326,7 +324,7 @@ cases where an index contains, say, both integers and strings:
.. ipython:: python
- s = Series([1, 2, 3], index=['a', 0, 1])
+ s = pd.Series([1, 2, 3], index=['a', 0, 1])
s
s.ix[[0, 1]]
s.reindex([0, 1])
@@ -345,10 +343,10 @@ The use of ``reindex_like`` can potentially change the dtype of a ``Series``.
.. ipython:: python
- series = Series([1, 2, 3])
- x = Series([True])
+ series = pd.Series([1, 2, 3])
+ x = pd.Series([True])
x.dtype
- x = Series([True]).reindex_like(series)
+ x = pd.Series([True]).reindex_like(series)
x.dtype
This is because ``reindex_like`` silently inserts ``NaNs`` and the ``dtype``
@@ -371,10 +369,10 @@ can be represented using a 64-bit integer is limited to approximately 584 years:
.. ipython:: python
- begin = Timestamp.min
+ begin = pd.Timestamp.min
begin
- end = Timestamp.max
+ end = pd.Timestamp.max
end
See :ref:`here <timeseries.oob>` for ways to represent data outside these bound.
@@ -404,10 +402,10 @@ of the new set of columns rather than the original ones:
print(open('tmp.csv').read())
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
- df = read_csv('tmp.csv', header=None,
- parse_dates=date_spec,
- keep_date_col=True,
- index_col=0)
+ df = pd.read_csv('tmp.csv', header=None,
+ parse_dates=date_spec,
+ keep_date_col=True,
+ index_col=0)
# index_col=0 refers to the combined column "nominal" and not the original
# first column of 'KORD' strings
@@ -417,6 +415,7 @@ of the new set of columns rather than the original ones:
.. ipython:: python
:suppress:
+ import os
os.remove('tmp.csv')
@@ -569,7 +568,7 @@ using something similar to the following:
x = np.array(list(range(10)), '>i4') # big endian
newx = x.byteswap().newbyteorder() # force native byteorder
- s = Series(newx)
+ s = pd.Series(newx)
See `the NumPy documentation on byte order
<http://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more
| Further work on #9886
| https://api.github.com/repos/pandas-dev/pandas/pulls/10359 | 2015-06-15T14:58:10Z | 2015-06-23T12:42:59Z | 2015-06-23T12:42:59Z | 2015-06-23T12:43:04Z |
Fixing the code sample in pandas/core/categorical.py | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index a9e5d1f3f0ebd..05f2e6516292a 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -183,6 +183,7 @@ class Categorical(PandasObject):
Categories (3, object): [a < b < c]
>>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'])
+ >>> a = a.as_ordered()
>>> a.min()
'c'
"""
| Fix for #10356
| https://api.github.com/repos/pandas-dev/pandas/pulls/10358 | 2015-06-15T12:19:49Z | 2015-08-19T21:40:10Z | null | 2015-08-19T21:40:11Z |
BUG: closes bug in apply when function returns categorical | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 164ab73def894..4a513f3122390 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -58,3 +58,4 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 7d83e45098ae1..4c4d940f8077c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1670,6 +1670,9 @@ def is_view(self):
def to_dense(self):
return self.values.to_dense().view()
+ def convert(self, copy=True, **kwargs):
+ return [self.copy() if copy else self]
+
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx
index add9a03642bed..09f8e0ab42924 100644
--- a/pandas/src/reduce.pyx
+++ b/pandas/src/reduce.pyx
@@ -6,6 +6,18 @@ from distutils.version import LooseVersion
is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2'
+cdef _get_result_array(object obj,
+ Py_ssize_t size,
+ Py_ssize_t cnt):
+
+ if isinstance(obj, np.ndarray) \
+ or isinstance(obj, list) and len(obj) == cnt \
+ or getattr(obj, 'shape', None) == (cnt,):
+ raise ValueError('function does not reduce')
+
+ return np.empty(size, dtype='O')
+
+
cdef class Reducer:
'''
Performs generic reduction operation on a C or Fortran-contiguous ndarray
@@ -124,7 +136,9 @@ cdef class Reducer:
if hasattr(res,'values'):
res = res.values
if i == 0:
- result = self._get_result_array(res)
+ result = _get_result_array(res,
+ self.nresults,
+ len(self.dummy))
it = <flatiter> PyArray_IterNew(result)
PyArray_SETITEM(result, PyArray_ITER_DATA(it), res)
@@ -143,17 +157,6 @@ cdef class Reducer:
return result
- def _get_result_array(self, object res):
- try:
- assert(not isinstance(res, np.ndarray))
- assert(not (isinstance(res, list) and len(res) == len(self.dummy)))
-
- result = np.empty(self.nresults, dtype='O')
- result[0] = res
- except Exception:
- raise ValueError('function does not reduce')
- return result
-
cdef class SeriesBinGrouper:
'''
@@ -257,8 +260,10 @@ cdef class SeriesBinGrouper:
res = self.f(cached_typ)
res = _extract_result(res)
if not initialized:
- result = self._get_result_array(res)
initialized = 1
+ result = _get_result_array(res,
+ self.ngroups,
+ len(self.dummy_arr))
util.assign_value_1d(result, i, res)
@@ -277,16 +282,6 @@ cdef class SeriesBinGrouper:
return result, counts
- def _get_result_array(self, object res):
- try:
- assert(not isinstance(res, np.ndarray))
- assert(not (isinstance(res, list) and len(res) == len(self.dummy_arr)))
-
- result = np.empty(self.ngroups, dtype='O')
- except Exception:
- raise ValueError('function does not reduce')
- return result
-
cdef class SeriesGrouper:
'''
@@ -388,8 +383,10 @@ cdef class SeriesGrouper:
res = self.f(cached_typ)
res = _extract_result(res)
if not initialized:
- result = self._get_result_array(res)
initialized = 1
+ result = _get_result_array(res,
+ self.ngroups,
+ len(self.dummy_arr))
util.assign_value_1d(result, lab, res)
counts[lab] = group_size
@@ -410,15 +407,6 @@ cdef class SeriesGrouper:
return result, counts
- def _get_result_array(self, object res):
- try:
- assert(not isinstance(res, np.ndarray))
- assert(not (isinstance(res, list) and len(res) == len(self.dummy_arr)))
-
- result = np.empty(self.ngroups, dtype='O')
- except Exception:
- raise ValueError('function does not reduce')
- return result
cdef inline _extract_result(object res):
''' extract the result object, it might be a 0-dim ndarray
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 4b1954a3be64e..a4abe481cfe81 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10382,6 +10382,13 @@ def test_apply(self):
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
self.assertRaises(ValueError, df.apply, lambda x: x, 2)
+ # GH9573
+ df = DataFrame({'c0':['A','A','B','B'], 'c1':['C','C','D','D']})
+ df = df.apply(lambda ts: ts.astype('category'))
+ self.assertEqual(df.shape, (4, 2))
+ self.assertTrue(isinstance(df['c0'].dtype, com.CategoricalDtype))
+ self.assertTrue(isinstance(df['c1'].dtype, com.CategoricalDtype))
+
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
| closes https://github.com/pydata/pandas/issues/9573
| https://api.github.com/repos/pandas-dev/pandas/pulls/10354 | 2015-06-15T02:53:41Z | 2015-06-15T10:44:29Z | 2015-06-15T10:44:29Z | 2015-08-22T11:33:42Z |
CI: run doc-tests always | diff --git a/ci/script.sh b/ci/script.sh
index d5082234024d5..1126e8249646c 100755
--- a/ci/script.sh
+++ b/ci/script.sh
@@ -15,8 +15,8 @@ fi
if [ "$BUILD_TEST" ]; then
echo "We are not running nosetests as this is simply a build test."
else
- echo nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
- nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
+ echo nosetests --exe -A "$NOSE_ARGS" pandas --doctest-tests --with-xunit --xunit-file=/tmp/nosetests.xml
+ nosetests --exe -A "$NOSE_ARGS" pandas --doctest-tests --with-xunit --xunit-file=/tmp/nosetests.xml
fi
RET="$?"
| https://api.github.com/repos/pandas-dev/pandas/pulls/10352 | 2015-06-14T14:35:23Z | 2015-06-14T14:36:19Z | 2015-06-14T14:36:19Z | 2015-06-14T15:57:40Z | |
BUG: frequencies.get_freq_code raises an error against offset with n != 1 | diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 71ff0f6c9c56c..15f69b38febce 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -318,8 +318,7 @@ def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
- from pandas.tseries.frequencies import get_reso_string
- return get_reso_string(self._resolution)
+ return Resolution.get_str(self._resolution)
def _convert_scalar_indexer(self, key, kind=None):
"""
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 4af8c68110978..9fb06d2854b11 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -32,6 +32,8 @@ class FreqGroup(object):
class Resolution(object):
+ # defined in period.pyx
+ # note that these are different from freq codes
RESO_US = period.US_RESO
RESO_MS = period.MS_RESO
RESO_SEC = period.S_RESO
@@ -65,30 +67,104 @@ class Resolution(object):
@classmethod
def get_str(cls, reso):
+ """
+ Return resolution str against resolution code.
+
+ Example
+ -------
+ >>> Resolution.get_str(Resolution.RESO_SEC)
+ 'second'
+ """
return cls._reso_str_map.get(reso, 'day')
@classmethod
def get_reso(cls, resostr):
+ """
+ Return resolution str against resolution code.
+
+ Example
+ -------
+ >>> Resolution.get_reso('second')
+ 2
+
+ >>> Resolution.get_reso('second') == Resolution.RESO_SEC
+ True
+ """
return cls._str_reso_map.get(resostr, cls.RESO_DAY)
+ @classmethod
+ def get_freq_group(cls, resostr):
+ """
+ Return frequency str against resolution str.
+
+ Example
+ -------
+ >>> f.Resolution.get_freq_group('day')
+ 4000
+ """
+ return get_freq_group(cls.get_freq(resostr))
+
@classmethod
def get_freq(cls, resostr):
+ """
+ Return frequency str against resolution str.
+
+ Example
+ -------
+ >>> f.Resolution.get_freq('day')
+ 'D'
+ """
return cls._reso_freq_map[resostr]
@classmethod
def get_str_from_freq(cls, freq):
+ """
+ Return resolution str against frequency str.
+
+ Example
+ -------
+ >>> Resolution.get_str_from_freq('H')
+ 'hour'
+ """
return cls._freq_reso_map.get(freq, 'day')
@classmethod
def get_reso_from_freq(cls, freq):
- return cls.get_reso(cls.get_str_from_freq(freq))
+ """
+ Return resolution code against frequency str.
+ Example
+ -------
+ >>> Resolution.get_reso_from_freq('H')
+ 4
-def get_reso_string(reso):
- return Resolution.get_str(reso)
+ >>> Resolution.get_reso_from_freq('H') == Resolution.RESO_HR
+ True
+ """
+ return cls.get_reso(cls.get_str_from_freq(freq))
def get_to_timestamp_base(base):
+ """
+ Return frequency code group used for base of to_timestamp against
+ frequency code.
+
+ Example
+ -------
+ # Return day freq code against longer freq than day
+ >>> get_to_timestamp_base(get_freq_code('D')[0])
+ 6000
+ >>> get_to_timestamp_base(get_freq_code('W')[0])
+ 6000
+ >>> get_to_timestamp_base(get_freq_code('M')[0])
+ 6000
+
+ # Return second freq code against hour between second
+ >>> get_to_timestamp_base(get_freq_code('H')[0])
+ 9000
+ >>> get_to_timestamp_base(get_freq_code('S')[0])
+ 9000
+ """
if base < FreqGroup.FR_BUS:
return FreqGroup.FR_DAY
if FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC:
@@ -97,6 +173,17 @@ def get_to_timestamp_base(base):
def get_freq_group(freq):
+ """
+ Return frequency code group of given frequency str.
+
+ Example
+ -------
+ >>> get_freq_group('W-MON')
+ 4000
+
+ >>> get_freq_group('W-FRI')
+ 4000
+ """
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
@@ -104,6 +191,18 @@ def get_freq_group(freq):
def get_freq(freq):
+ """
+ Return frequency code of given frequency str.
+ If input is not string, return input as it is.
+
+ Example
+ -------
+ >>> get_freq('A')
+ 1000
+
+ >>> get_freq('3A')
+ 1000
+ """
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
@@ -112,15 +211,29 @@ def get_freq(freq):
def get_freq_code(freqstr):
"""
+ Return freq str or tuple to freq code and stride (mult)
Parameters
----------
+ freqstr : str or tuple
Returns
-------
+ return : tuple of base frequency code and stride (mult)
+
+ Example
+ -------
+ >>> get_freq_code('3D')
+ (6000, 3)
+
+ >>> get_freq_code('D')
+ (6000, 1)
+
+ >>> get_freq_code(('D', 3))
+ (6000, 3)
"""
if isinstance(freqstr, DateOffset):
- freqstr = (get_offset_name(freqstr), freqstr.n)
+ freqstr = (freqstr.rule_code, freqstr.n)
if isinstance(freqstr, tuple):
if (com.is_integer(freqstr[0]) and
@@ -386,6 +499,7 @@ def get_base_alias(freqstr):
"""
return _base_and_stride(freqstr)[0]
+
_dont_uppercase = set(('MS', 'ms'))
@@ -637,14 +751,6 @@ def _period_alias_dictionary():
return alias_dict
-def _infer_period_group(freqstr):
- return _period_group(Resolution._reso_freq_map[freqstr])
-
-
-def _period_group(freqstr):
- base, mult = get_freq_code(freqstr)
- return base // 1000 * 1000
-
_period_alias_dict = _period_alias_dictionary()
@@ -671,7 +777,7 @@ def _period_str_to_code(freqstr):
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
- uncertain, a warning will be printed.
+ uncertain, a warning will be printed.
Parameters
----------
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 95bbc5016237c..7606bd0bd86b8 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -528,8 +528,8 @@ def get_value(self, series, key):
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
- grp = frequencies._infer_period_group(reso)
- freqn = frequencies._period_group(self.freq)
+ grp = frequencies.Resolution.get_freq_group(reso)
+ freqn = frequencies.get_freq_group(self.freq)
vals = self.values
@@ -655,8 +655,8 @@ def _get_string_slice(self, key):
key, parsed, reso = parse_time_string(key, self.freq)
- grp = frequencies._infer_period_group(reso)
- freqn = frequencies._period_group(self.freq)
+ grp = frequencies.Resolution.get_freq_group(reso)
+ freqn = frequencies.get_freq_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index 823c762c692e5..29152551f5ddf 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -132,6 +132,117 @@ def test_anchored_shortcuts():
expected = frequencies.to_offset('Q-DEC')
assert(result == expected)
+class TestFrequencyCode(tm.TestCase):
+
+ def test_freq_code(self):
+ self.assertEqual(frequencies.get_freq('A'), 1000)
+ self.assertEqual(frequencies.get_freq('3A'), 1000)
+ self.assertEqual(frequencies.get_freq('-1A'), 1000)
+
+ self.assertEqual(frequencies.get_freq('W'), 4000)
+ self.assertEqual(frequencies.get_freq('W-MON'), 4001)
+ self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
+
+ for freqstr, code in compat.iteritems(frequencies._period_code_map):
+ result = frequencies.get_freq(freqstr)
+ self.assertEqual(result, code)
+
+ result = frequencies.get_freq_group(freqstr)
+ self.assertEqual(result, code // 1000 * 1000)
+
+ result = frequencies.get_freq_group(code)
+ self.assertEqual(result, code // 1000 * 1000)
+
+ def test_get_to_timestamp_base(self):
+ tsb = frequencies.get_to_timestamp_base
+
+ self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
+ frequencies.get_freq_code('D')[0])
+ self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
+ frequencies.get_freq_code('D')[0])
+ self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
+ frequencies.get_freq_code('D')[0])
+
+ self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
+ frequencies.get_freq_code('S')[0])
+ self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
+ frequencies.get_freq_code('S')[0])
+ self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
+ frequencies.get_freq_code('S')[0])
+
+
+ def test_freq_to_reso(self):
+ Reso = frequencies.Resolution
+
+ self.assertEqual(Reso.get_str_from_freq('A'), 'year')
+ self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
+ self.assertEqual(Reso.get_str_from_freq('M'), 'month')
+ self.assertEqual(Reso.get_str_from_freq('D'), 'day')
+ self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
+ self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
+ self.assertEqual(Reso.get_str_from_freq('S'), 'second')
+ self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
+ self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
+ self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
+
+ for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
+ # check roundtrip
+ result = Reso.get_freq(Reso.get_str_from_freq(freq))
+ self.assertEqual(freq, result)
+
+ for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
+ result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
+ self.assertEqual(freq, result)
+
+ def test_get_freq_code(self):
+ # freqstr
+ self.assertEqual(frequencies.get_freq_code('A'),
+ (frequencies.get_freq('A'), 1))
+ self.assertEqual(frequencies.get_freq_code('3D'),
+ (frequencies.get_freq('D'), 3))
+ self.assertEqual(frequencies.get_freq_code('-2M'),
+ (frequencies.get_freq('M'), -2))
+
+ # tuple
+ self.assertEqual(frequencies.get_freq_code(('D', 1)),
+ (frequencies.get_freq('D'), 1))
+ self.assertEqual(frequencies.get_freq_code(('A', 3)),
+ (frequencies.get_freq('A'), 3))
+ self.assertEqual(frequencies.get_freq_code(('M', -2)),
+ (frequencies.get_freq('M'), -2))
+ # numeric tuple
+ self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
+
+ # offsets
+ self.assertEqual(frequencies.get_freq_code(offsets.Day()),
+ (frequencies.get_freq('D'), 1))
+ self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
+ (frequencies.get_freq('D'), 3))
+ self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
+ (frequencies.get_freq('D'), -2))
+
+ self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
+ (frequencies.get_freq('M'), 1))
+ self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
+ (frequencies.get_freq('M'), 3))
+ self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
+ (frequencies.get_freq('M'), -2))
+
+ self.assertEqual(frequencies.get_freq_code(offsets.Week()),
+ (frequencies.get_freq('W'), 1))
+ self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
+ (frequencies.get_freq('W'), 3))
+ self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
+ (frequencies.get_freq('W'), -2))
+
+ # monday is weekday=0
+ self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
+ (frequencies.get_freq('W-TUE'), 1))
+ self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
+ (frequencies.get_freq('W-MON'), 3))
+ self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
+ (frequencies.get_freq('W-FRI'), -2))
+
_dti = DatetimeIndex
@@ -333,7 +444,6 @@ def test_infer_freq_tz_transition(self):
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
- print(idx)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index a051560617604..275fcd4d987ed 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -19,7 +19,7 @@
get_offset, get_offset_name, get_standard_freq)
from pandas import Series
-from pandas.tseries.frequencies import _offset_map
+from pandas.tseries.frequencies import _offset_map, get_freq_code, _get_freq_str
from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache, date_range
from pandas.tseries.tools import parse_time_string, DateParseError
import pandas.tseries.offsets as offsets
@@ -211,6 +211,27 @@ def test_return_type(self):
self.assertTrue(NaT - offset is NaT)
self.assertTrue((-offset).apply(NaT) is NaT)
+ def test_offset_n(self):
+ for offset_klass in self.offset_types:
+ offset = self._get_offset(offset_klass)
+ self.assertEqual(offset.n, 1)
+
+ neg_offset = offset * -1
+ self.assertEqual(neg_offset.n, -1)
+
+ mul_offset = offset * 3
+ self.assertEqual(mul_offset.n, 3)
+
+ def test_offset_freqstr(self):
+ for offset_klass in self.offset_types:
+ offset = self._get_offset(offset_klass)
+
+ freqstr = offset.freqstr
+ if freqstr not in ('<Easter>', "<DateOffset: kwds={'days': 1}>",
+ 'LWOM-SAT', ):
+ code = get_offset(freqstr)
+ self.assertEqual(offset.rule_code, code)
+
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
offset_s = self._get_offset(offset, normalize=normalize)
@@ -3695,6 +3716,12 @@ def test_rule_code(self):
self.assertEqual(alias, get_offset(alias).rule_code)
self.assertEqual(alias, (get_offset(alias) * 5).rule_code)
+ lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
+ for k in lst:
+ code, stride = get_freq_code('3' + k)
+ self.assertTrue(isinstance(code, int))
+ self.assertEqual(stride, 3)
+ self.assertEqual(k, _get_freq_str(code))
def test_apply_ticks():
result = offsets.Hour(3).apply(offsets.Hour(4))
| Fixed a `frequencies.get_freq_code` raises `ValueError` against offset instance with n!=1. This is needed for #7832.
```
# OK
>>> import pandas.tseries.frequencies as f
>>> f.get_freq_code(('D', 3))
(6000, 3)
>>> import pandas.tseries.offsets as offsets
>>> f.get_freq_code(offsets.Day())
(6000, 1)
# NG!
>>> f.get_freq_code(offsets.Day(3))
ValueError: Unknown freqstr: 3D
```
Also, added docstrings and tests which make frequencies behavior understandable.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10350 | 2015-06-13T22:54:48Z | 2015-06-15T12:57:04Z | 2015-06-15T12:57:03Z | 2015-06-20T02:09:55Z |
DOC: fix docstrings | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7e7479b2c464c..399286e6dc58b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -53,6 +53,11 @@ pandas 0.16.2
This is a minor release from 0.16.1 and includes a large number of bug fixes
along with several new features, enhancements, and performance improvements.
+Highlights include:
+
+- A new ``pipe`` method, see :ref:`here <whatsnew_0162.enhancements.pipe>`
+- Documentation on how to use `numba <http://numba.pydata.org>`_ with *pandas*, see :ref:`here <enhancingperf.numba>`
+
See the :ref:`v0.16.2 Whatsnew <whatsnew_0162>` overview for an extensive list
of all enhancements and bugs that have been fixed in 0.16.2.
diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 8408412d66271..bfe44290e49d2 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -13,7 +13,6 @@ Highlights include:
- A new ``pipe`` method, see :ref:`here <whatsnew_0162.enhancements.pipe>`
- Documentation on how to use numba_ with *pandas*, see :ref:`here <enhancingperf.numba>`
-Check the :ref:`API Changes <whatsnew_0162.api>` before updating.
.. contents:: What's new in v0.16.2
:local:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 112ace3df08e2..35db4051c60c8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2050,7 +2050,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
return self.take(locs, axis=axis)
_shared_docs['pipe'] = ("""
- Apply func(self, *args, **kwargs)
+ Apply func(self, \*args, \*\*kwargs)
.. versionadded:: 0.16.2
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 0659e34c3f27b..2c1333326b701 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -640,34 +640,39 @@ def json_normalize(data, record_path=None, meta=None,
path to records is ['foo', 'bar']
meta_prefix : string, default None
+ Returns
+ -------
+ frame : DataFrame
+
Examples
--------
- data = [{'state': 'Florida',
- 'shortname': 'FL',
- 'info': {
- 'governor': 'Rick Scott'
- },
- 'counties': [{'name': 'Dade', 'population': 12345},
- {'name': 'Broward', 'population': 40000},
- {'name': 'Palm Beach', 'population': 60000}]},
- {'state': 'Ohio',
- 'shortname': 'OH',
- 'info': {
- 'governor': 'John Kasich'
- },
- 'counties': [{'name': 'Summit', 'population': 1234},
- {'name': 'Cuyahoga', 'population': 1337}]}]
-
- result = json_normalize(data, 'counties', ['state', 'shortname',
- ['info', 'governor']])
-
- state governor
- Florida Rick Scott
+ >>> data = [{'state': 'Florida',
+ ... 'shortname': 'FL',
+ ... 'info': {
+ ... 'governor': 'Rick Scott'
+ ... },
+ ... 'counties': [{'name': 'Dade', 'population': 12345},
+ ... {'name': 'Broward', 'population': 40000},
+ ... {'name': 'Palm Beach', 'population': 60000}]},
+ ... {'state': 'Ohio',
+ ... 'shortname': 'OH',
+ ... 'info': {
+ ... 'governor': 'John Kasich'
+ ... },
+ ... 'counties': [{'name': 'Summit', 'population': 1234},
+ ... {'name': 'Cuyahoga', 'population': 1337}]}]
+ >>> from pandas.io.json import json_normalize
+ >>> result = json_normalize(data, 'counties', ['state', 'shortname',
+ ... ['info', 'governor']])
+ >>> result
+ name population info.governor state shortname
+ 0 Dade 12345 Rick Scott Florida FL
+ 1 Broward 40000 Rick Scott Florida FL
+ 2 Palm Beach 60000 Rick Scott Florida FL
+ 3 Summit 1234 John Kasich Ohio OH
+ 4 Cuyahoga 1337 John Kasich Ohio OH
- Returns
- -------
- frame : DataFrame
"""
def _pull_field(js, spec):
result = js
| Some errors in newly build docstrings + add highlights in release notes.
This is applied on the online docs, but can be merged normally now
| https://api.github.com/repos/pandas-dev/pandas/pulls/10348 | 2015-06-13T13:49:36Z | 2015-06-13T13:49:53Z | 2015-06-13T13:49:53Z | 2015-06-13T13:49:53Z |
BUG: df.to_html(index=False) renders index.name | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index ad3b966575427..00290bb9a1484 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -464,6 +464,8 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
+- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`)
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
- Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`)
- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 6a05f819908af..81d47d9e1f36c 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1037,7 +1037,7 @@ def _column_header():
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
- if self.fmt.has_index_names:
+ if self.fmt.has_index_names and self.fmt.index:
row = [
x if x is not None else '' for x in self.frame.index.names
] + [''] * min(len(self.columns), self.max_cols)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index b733cacc01e05..5af12070f5a2d 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -632,6 +632,10 @@ def test_to_html_multiindex_index_false(self):
</table>"""
self.assertEqual(result, expected)
+ df.index = Index(df.index.values, name='idx')
+ result = df.to_html(index=False)
+ self.assertEqual(result, expected)
+
def test_to_html_multiindex_sparsify_false_multi_sparse(self):
with option_context('display.multi_sparse', False):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
@@ -1922,15 +1926,195 @@ def test_to_html_index(self):
'C': ['one', 'two', np.NaN]},
columns=['A', 'B', 'C'],
index=index)
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th>foo</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>baz</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+
+ expected_without_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
result = df.to_html(index=False)
for i in index:
self.assertNotIn(i, result)
+ self.assertEqual(result, expected_without_index)
+ df.index = Index(['foo', 'bar', 'baz'], name='idx')
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>idx</th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th>foo</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>baz</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+ self.assertEqual(df.to_html(index=False), expected_without_index)
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
+
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th rowspan="2" valign="top">foo</th>\n'
+ ' <th>car</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bike</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <th>car</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
self.assertNotIn(i, result)
+ # must be the same result as normal index
+ self.assertEqual(result, expected_without_index)
+
+ df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
+ expected_with_index = ('<table border="1" class="dataframe">\n'
+ ' <thead>\n'
+ ' <tr style="text-align: right;">\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th>A</th>\n'
+ ' <th>B</th>\n'
+ ' <th>C</th>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>idx1</th>\n'
+ ' <th>idx2</th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' <th></th>\n'
+ ' </tr>\n'
+ ' </thead>\n'
+ ' <tbody>\n'
+ ' <tr>\n'
+ ' <th rowspan="2" valign="top">foo</th>\n'
+ ' <th>car</th>\n'
+ ' <td>1</td>\n'
+ ' <td>1.2</td>\n'
+ ' <td>one</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bike</th>\n'
+ ' <td>2</td>\n'
+ ' <td>3.4</td>\n'
+ ' <td>two</td>\n'
+ ' </tr>\n'
+ ' <tr>\n'
+ ' <th>bar</th>\n'
+ ' <th>car</th>\n'
+ ' <td>3</td>\n'
+ ' <td>5.6</td>\n'
+ ' <td>NaN</td>\n'
+ ' </tr>\n'
+ ' </tbody>\n'
+ '</table>')
+ self.assertEqual(df.to_html(), expected_with_index)
+ self.assertEqual(df.to_html(index=False), expected_without_index)
def test_repr_html(self):
self.frame._repr_html_()
| Closes #10344.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10346 | 2015-06-13T11:14:58Z | 2015-08-05T10:29:55Z | 2015-08-05T10:29:55Z | 2015-08-05T10:30:19Z |
DEPR: deprecating series asof GH10343 | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 1cbe55ddbacb6..620cef3f7fc74 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -449,7 +449,6 @@ Time series-related
:toctree: generated/
Series.asfreq
- Series.asof
Series.shift
Series.first_valid_index
Series.last_valid_index
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 921decdd03ca9..2cd096261a931 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -521,6 +521,11 @@ Deprecations
- ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`).
+- :meth:`Series.asof` is deprecated. Please use :meth:`Series.reindex(where, method='ffill')`
+ instead. This method does not drop missing values in the original series.
+
+ So `Series.asof(where)` is equivalent to `Series.dropna().reindex(where, method='ffill')`.
+
.. _whatsnew_0170.prior_deprecations:
Removal of prior version deprecations/changes
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6586fa10935e6..c8ae78926b28b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2452,6 +2452,11 @@ def last_valid_index(self):
def asof(self, where):
"""
+ DEPRECATED. Please use :meth:`Series.reindex` instead.
+
+ So a `Series.asof(where)` can be replaced by
+ `Series.dropna().reindex(where, method='ffill')`.
+
Return last good (non-NaN) value in TimeSeries if value is NaN for
requested date.
@@ -2468,7 +2473,14 @@ def asof(self, where):
Returns
-------
value or NaN
+
+ See Also
+ --------
+ pandas.Series.reindex
+
"""
+ warnings.warn("`Series.asof` is deprecated, use "
+ "`Series.reindex` instead.", FutureWarning)
if isinstance(where, compat.string_types):
where = datetools.to_datetime(where)
| closes #10343 and discussion on #10266 - deprecating `Series.asof(where)` in favour of `Series.reindex(where, method='ffill')`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10345 | 2015-06-13T03:10:35Z | 2015-08-13T10:20:30Z | null | 2015-08-13T21:55:58Z |
BUG: Check complib values | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 8314c1cfff0dd..8d62fce0130b9 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -174,3 +174,5 @@ Bug Fixes
- Bug in ``read_hdf`` where ``auto_close`` could not be passed (:issue:`9327`).
- Bug in ``read_hdf`` where open stores could not be used (:issue:`10330`).
+
+- Bug in ``to_hdf`` and ``HDFStore`` which did not check that complib choices were valid (:issue:`4582`, :issue:`8874`).
\ No newline at end of file
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index eca855a38d725..31f649c498c14 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -389,6 +389,10 @@ def __init__(self, path, mode=None, complevel=None, complib=None,
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex}" problem importing'.format(ex=str(ex)))
+ if complib not in (None, 'blosc', 'bzip2', 'lzo', 'zlib'):
+ raise ValueError("complib only supports 'blosc', 'bzip2', lzo' "
+ "or 'zlib' compression.")
+
self._path = path
if mode is None:
mode = 'a'
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 4fc24c4cd1870..6aaeb6652f2b6 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -4718,6 +4718,13 @@ def test_read_hdf_errors(self):
with open(path, mode='r') as store:
self.assertRaises(NotImplementedError, read_hdf, store, 'df')
+ def test_invalid_complib(self):
+ df = DataFrame(np.random.rand(4, 5),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+ with ensure_clean_path(self.path) as path:
+ self.assertRaises(ValueError, df.to_hdf, path, 'df', complib='blosc:zlib')
+
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
| Add check for complib when opening a HDFStore
closes #4582
closes #8874
| https://api.github.com/repos/pandas-dev/pandas/pulls/10341 | 2015-06-12T19:17:00Z | 2015-06-12T21:49:39Z | null | 2015-06-15T13:56:20Z |
DOC: add versionadded to docstrings for 0.16.0 / 0.16.1 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed01323eb9a27..2411fde2d07af 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1168,6 +1168,9 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for European data
+
+ .. versionadded:: 0.16.0
+
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
@@ -3613,6 +3616,9 @@ def diff(self, periods=1, axis=0):
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
+ Take difference over rows (0) or columns (1).
+
+ .. versionadded:: 0.16.1
Returns
-------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b747f0a2ceacb..7012654b43d2b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1577,6 +1577,8 @@ def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'):
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
+ .. versionadded:: 0.16.1
+
Returns
-------
dropped : type of caller
@@ -1953,6 +1955,8 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No
"""
Returns a random sample of items from an axis of object.
+ .. versionadded:: 0.16.1
+
Parameters
----------
n : int, optional
diff --git a/pandas/core/index.py b/pandas/core/index.py
index de30fee4009f4..4a7031fefd794 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2793,6 +2793,8 @@ class CategoricalIndex(Index, PandasDelegate):
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
+ .. versionadded:: 0.16.1
+
Parameters
----------
data : array-like or Categorical, (1-dimensional)
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 3225b4aa33ac2..fd786fa30f842 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -959,6 +959,8 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
sparse : bool, default False
Whether the returned DataFrame should be sparse or not.
+ .. versionadded:: 0.16.1
+
Returns
-------
dummies : DataFrame
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index f4ac0166cf44b..7f0ed79f638bd 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -710,6 +710,8 @@ def str_split(arr, pat=None, n=None):
expand : bool, default False
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
+
+ .. versionadded:: 0.16.1
return_type : deprecated, use `expand`
Returns
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index 2c328e51b5090..14a97a01f1d0a 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -667,6 +667,8 @@ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
row_levels and column_levels are the names (labels) or numbers of the levels.
{row_levels, column_levels} must be a partition of the MultiIndex level names (or numbers).
+ .. versionadded:: 0.16.0
+
Parameters
----------
row_levels : tuple/list
@@ -716,6 +718,8 @@ def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
+ .. versionadded:: 0.16.0
+
Parameters
----------
A : scipy.sparse.coo_matrix
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index bd0869b9525b7..d3a4917a7ea66 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1397,7 +1397,7 @@ def _set_freq(self, value):
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy', "The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
- days_in_month = _field_accessor('days_in_month', 'dim', "The number of days in the month")
+ days_in_month = _field_accessor('days_in_month', 'dim', "The number of days in the month\n\n.. versionadded:: 0.16.0")
daysinmonth = days_in_month
is_month_start = _field_accessor('is_month_start', 'is_month_start', "Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor('is_month_end', 'is_month_end', "Logical indicating if last day of month (defined by frequency)")
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 67e27bbffbf73..3bebd0daa6d29 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -61,7 +61,7 @@ def wrapper(self, other):
result = func(self, other)
if self._adjust_dst:
result = tslib._localize_pydatetime(result, tz)
-
+
result = Timestamp(result)
if self.normalize:
result = result.normalize()
@@ -387,7 +387,7 @@ def freqstr(self):
return fstr
class SingleConstructorOffset(DateOffset):
-
+
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
@@ -538,6 +538,9 @@ def onOffset(self, dt):
class BusinessHour(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
+
+ .. versionadded:: 0.16.1
+
"""
_prefix = 'BH'
_anchor = 0
@@ -923,7 +926,7 @@ def onOffset(self, dt):
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
-
+
@property
def name(self):
if self.isAnchored:
@@ -1270,9 +1273,9 @@ class WeekOfMonth(DateOffset):
5: Saturdays
6: Sundays
"""
-
+
_adjust_dst = True
-
+
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
@@ -2210,7 +2213,7 @@ class Easter(DateOffset):
1583-4099.
'''
_adjust_dst = True
-
+
def __init__(self, n=1, **kwds):
super(Easter, self).__init__(n, **kwds)
| part of #10215
| https://api.github.com/repos/pandas-dev/pandas/pulls/10339 | 2015-06-12T11:21:17Z | 2015-06-12T13:17:23Z | 2015-06-12T13:17:23Z | 2015-06-12T13:18:00Z |
DOC: small clean up whatsnew 0.16.2 | diff --git a/doc/source/options.rst b/doc/source/options.rst
index 4b69015353612..9ede87422b21c 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -142,6 +142,7 @@ More information can be found in the `ipython documentation
pd.set_option('display.max_rows', 999)
pd.set_option('precision', 5)
+.. _options.frequently_used:
Frequently Used Options
-----------------------
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 1c861ae60dcaf..d8f8d8cef7682 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -52,10 +52,9 @@ pandas 0.16.2
This is a minor release from 0.16.1 and includes a large number of bug fixes
along with several new features, enhancements, and performance improvements.
-A small number of API changes were necessary to fix existing bugs.
See the :ref:`v0.16.2 Whatsnew <whatsnew_0162>` overview for an extensive list
-of all API changes, enhancements and bugs that have been fixed in 0.16.2.
+of all enhancements and bugs that have been fixed in 0.16.2.
Thanks
~~~~~~
diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 8314c1cfff0dd..a7f523505fd39 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -4,13 +4,14 @@ v0.16.2 (June 12, 2015)
-----------------------
This is a minor bug-fix release from 0.16.1 and includes a large number of
-bug fixes along several new features, enhancements, and performance improvements.
+bug fixes along with some new features (:meth:`~DataFrame.pipe` method), enhancements, and performance improvements.
+
We recommend that all users upgrade to this version.
Highlights include:
-- Documentation on how to use ``numba`` with *pandas*, see :ref:`here <enhancingperf.numba>`
- A new ``pipe`` method, see :ref:`here <whatsnew_0162.enhancements.pipe>`
+- Documentation on how to use ``numba`` with *pandas*, see :ref:`here <enhancingperf.numba>`
Check the :ref:`API Changes <whatsnew_0162.api>` before updating.
@@ -86,17 +87,26 @@ See the :ref:`documentation <basics.pipe>` for more. (:issue:`10129`)
Other Enhancements
^^^^^^^^^^^^^^^^^^
-- Removed the duplicate scroll bars in the ``DataFrame`` HTML representation when displaying in an ``IPython notebook`` v3.0 or greater. Note that the notebook has a ``toggle output scrolling`` feature to automate the display of very large frames. (:issue:`10231`)
+- Removed the hard-coded size limits on the ``DataFrame`` HTML representation
+ in the IPython notebook, and leave this to IPython itself (only for IPython
+ v3.0 or greater). This eliminates the duplicate scroll bars that appeared in
+ the notebook with large frames (:issue:`10231`).
+
+ Note that the notebook has a ``toggle output scrolling`` feature to limit the
+ display of very large frames (by clicking left of the output).
+ You can also configure the way DataFrames are displayed using the pandas
+ options, see here :ref:`here <options.frequently_used>`.
+
+- ``axis`` parameter of ``DataFrame.quantile`` now accepts also ``index``
+ and ``column``. (:issue:`9543`)
.. _whatsnew_0162.api:
-Other API Changes
-^^^^^^^^^^^^^^^^^
+API Changes
+~~~~~~~~~~~
- ``Holiday`` now raises ``NotImplementedError`` if both ``offset`` and ``observance`` are used in constructor instead of returning an incorrect result (:issue:`10217`).
-- ``axis`` parameter of ``DataFrame.quantile`` now accepts also ``index`` and``column``. (:issue:`9543`)
-- Adding empty ``DataFrame``s results in a ``DataFrame`` that ``.equals`` an empty ``DataFrame`` (:issue:`10181`)
.. _whatsnew_0162.performance:
@@ -116,61 +126,42 @@ Bug Fixes
multi-indexed (:issue:`7212`)
- Bug in ``Categorical`` repr with ``display.width`` of ``None`` in Python 3 (:issue:`10087`)
- Bug in ``to_json`` with certain orients and a ``CategoricalIndex`` would segfault (:issue:`10317`)
-
- Bug where some of the nan funcs do not have consistent return dtypes (:issue:`10251`)
-
- Bug in ``DataFrame.quantile`` on checking that a valid axis was passed (:issue:`9543`)
-
- Bug in groupby.apply aggregation for Categorical not preserving categories (:issue:`10138`)
- Bug in ``to_csv`` where ``date_format`` is ignored if the ``datetime`` is fractional (:issue:`10209`)
- Bug in ``DataFrame.to_json`` with mixed data types (:issue:`10289`)
-
- Bug in cache updating when consolidating (:issue:`10264`)
-
- Bug in ``mean()`` where integer dtypes can overflow (:issue:`10172`)
- Bug where Panel.from_dict does not set dtype when specified (:issue:`10058`)
- Bug in ``Index.union`` raises ``AttributeError`` when passing array-likes. (:issue:`10149`)
- Bug in ``Timestamp``'s' ``microsecond``, ``quarter``, ``dayofyear``, ``week`` and ``daysinmonth`` properties return ``np.int`` type, not built-in ``int``. (:issue:`10050`)
- Bug in ``NaT`` raises ``AttributeError`` when accessing to ``daysinmonth``, ``dayofweek`` properties. (:issue:`10096`)
-
- Bug in Index repr when using the ``max_seq_items=None`` setting (:issue:`10182`).
-
- Bug in getting timezone data with ``dateutil`` on various platforms ( :issue:`9059`, :issue:`8639`, :issue:`9663`, :issue:`10121`)
- Bug in display datetimes with mixed frequencies uniformly; display 'ms' datetimes to the proper precision. (:issue:`10170`)
- Bug in ``setitem`` where type pormotion is applied to entire block (:issue:`10280`)
-
- Bug in ``Series`` arithmetic methods may incorrectly hold names (:issue:`10068`)
-
- Bug in ``GroupBy.get_group`` when grouping on multiple keys, one of which is categorical. (:issue:`10132`)
-
- Bug in ``DatetimeIndex`` and ``TimedeltaIndex`` names are lost after timedelta arithmetics ( :issue:`9926`)
- Bug in ``DataFrame`` construction from nested ``dict`` with ``datetime64`` (:issue:`10160`)
- Bug in ``Series`` construction from ``dict`` with ``datetime64`` keys (:issue:`9456`)
- Bug in `Series.plot(label="LABEL")` not correctly setting the label (:issue:`10119`)
-
- Bug in `plot` not defaulting to matplotlib `axes.grid` setting (:issue:`9792`)
-
- Bug causing strings containing an exponent but no decimal to be parsed as ints instead of floats in python csv parser. (:issue:`9565`)
-
- Bug in ``Series.align`` resets ``name`` when ``fill_value`` is specified (:issue:`10067`)
- Bug in ``read_csv`` causing index name not to be set on an empty DataFrame (:issue:`10184`)
- Bug in ``SparseSeries.abs`` resets ``name`` (:issue:`10241`)
- Bug in ``TimedeltaIndex`` slicing may reset freq (:issue:`10292`)
-
- Bug in GroupBy.get_group raises ValueError when group key contains NaT (:issue:`6992`)
- Bug in ``SparseSeries`` constructor ignores input data name (:issue:`10258`)
-
- Bug in ``Categorical.remove_categories`` causing a ValueError when removing the ``NaN`` category if underlying dtype is floating-point (:issue:`10156`)
-
- Bug where infer_freq infers timerule (WOM-5XXX) unsupported by to_offset (:issue:`9425`)
- Bug in ``DataFrame.to_hdf()`` where table format would raise a seemingly unrelated error for invalid (non-string) column names. This is now explicitly forbidden. (:issue:`9057`)
-- Bug to handle masking empty ``DataFrame``(:issue:`10126`)
-
+- Bug to handle masking empty ``DataFrame`` (:issue:`10126`).
- Bug where MySQL interface could not handle numeric table/column names (:issue:`10255`)
-
- Bug in ``read_csv`` with a ``date_parser`` that returned a ``datetime64`` array of other time resolution than ``[ns]`` (:issue:`10245`)
-
- Bug in ``Panel.apply`` when the result has ndim = 0 (:issue:`10332`)
-
- Bug in ``read_hdf`` where ``auto_close`` could not be passed (:issue:`9327`).
- Bug in ``read_hdf`` where open stores could not be used (:issue:`10330`).
+- Bug in adding empty ``DataFrame``s, now results in a ``DataFrame`` that ``.equals`` an empty ``DataFrame`` (:issue:`10181`).
| https://api.github.com/repos/pandas-dev/pandas/pulls/10338 | 2015-06-12T08:19:54Z | 2015-06-12T21:25:13Z | 2015-06-12T21:25:12Z | 2015-06-12T21:25:19Z | |
BUG #10228: segfault due to out-of-bounds in binning | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 4a513f3122390..cc1e4b09291cc 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -59,3 +59,5 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
+- Bug that caused segfault when resampling an empty Series (:issue:`10228`)
+
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 5f68c1ee26e87..9b6bdf57d4509 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -2157,6 +2157,8 @@ def group_nth_bin_object(ndarray[object, ndim=2] out,
nobs = np.zeros((<object> out).shape, dtype=np.float64)
resx = np.empty((<object> out).shape, dtype=object)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -2247,6 +2249,8 @@ def group_last_bin_object(ndarray[object, ndim=2] out,
nobs = np.zeros((<object> out).shape, dtype=np.float64)
resx = np.empty((<object> out).shape, dtype=object)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 598cdff30e4f7..5d4b18b36050f 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -751,6 +751,8 @@ def group_last_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -797,6 +799,8 @@ def group_nth_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -948,6 +952,8 @@ def group_add_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -1064,6 +1070,8 @@ def group_prod_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
nobs = np.zeros_like(out)
prodx = np.ones_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -1184,6 +1192,8 @@ def group_var_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
sumx = np.zeros_like(out)
sumxx = np.zeros_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -1285,6 +1295,8 @@ def group_count_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
dtype=np.int64)
+ if len(bins) == 0:
+ return
ngroups = len(bins) + (bins[len(bins) - 1] != N)
for i in range(N):
@@ -1329,6 +1341,8 @@ def group_min_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
minx = np.empty_like(out)
minx.fill(%(inf_val)s)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -1453,6 +1467,8 @@ def group_max_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
maxx = np.empty_like(out)
maxx.fill(-%(inf_val)s)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -1629,6 +1645,8 @@ def group_mean_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
sumx = np.zeros_like(out)
N, K = (<object> values).shape
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -1685,6 +1703,8 @@ def group_ohlc_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
%(dest_type2)s vopen, vhigh, vlow, vclose, NA
bint got_first = 0
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index 428decd4dca10..83dfacba45211 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -6725,6 +6725,8 @@ def group_add_bin_float64(ndarray[float64_t, ndim=2] out,
nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -6781,6 +6783,8 @@ def group_add_bin_float32(ndarray[float32_t, ndim=2] out,
nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -6951,6 +6955,8 @@ def group_prod_bin_float64(ndarray[float64_t, ndim=2] out,
nobs = np.zeros_like(out)
prodx = np.ones_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7007,6 +7013,8 @@ def group_prod_bin_float32(ndarray[float32_t, ndim=2] out,
nobs = np.zeros_like(out)
prodx = np.ones_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7186,6 +7194,8 @@ def group_var_bin_float64(ndarray[float64_t, ndim=2] out,
sumx = np.zeros_like(out)
sumxx = np.zeros_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7247,6 +7257,8 @@ def group_var_bin_float32(ndarray[float32_t, ndim=2] out,
sumx = np.zeros_like(out)
sumxx = np.zeros_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7412,6 +7424,8 @@ def group_mean_bin_float64(ndarray[float64_t, ndim=2] out,
sumx = np.zeros_like(out)
N, K = (<object> values).shape
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7465,6 +7479,8 @@ def group_mean_bin_float32(ndarray[float32_t, ndim=2] out,
sumx = np.zeros_like(out)
N, K = (<object> values).shape
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7520,6 +7536,8 @@ def group_ohlc_float64(ndarray[float64_t, ndim=2] out,
float64_t vopen, vhigh, vlow, vclose, NA
bint got_first = 0
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7594,6 +7612,8 @@ def group_ohlc_float32(ndarray[float32_t, ndim=2] out,
float32_t vopen, vhigh, vlow, vclose, NA
bint got_first = 0
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7801,6 +7821,8 @@ def group_last_bin_float64(ndarray[float64_t, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7845,6 +7867,8 @@ def group_last_bin_float32(ndarray[float32_t, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -7889,6 +7913,8 @@ def group_last_bin_int64(ndarray[int64_t, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8067,6 +8093,8 @@ def group_nth_bin_float64(ndarray[float64_t, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bin) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8112,6 +8140,8 @@ def group_nth_bin_float32(ndarray[float32_t, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bin) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8157,6 +8187,8 @@ def group_nth_bin_int64(ndarray[int64_t, ndim=2] out,
nobs = np.zeros_like(out)
resx = np.empty_like(out)
+ if len(bin) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8386,6 +8418,8 @@ def group_min_bin_float64(ndarray[float64_t, ndim=2] out,
minx = np.empty_like(out)
minx.fill(np.inf)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8447,6 +8481,8 @@ def group_min_bin_float32(ndarray[float32_t, ndim=2] out,
minx = np.empty_like(out)
minx.fill(np.inf)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8508,6 +8544,8 @@ def group_min_bin_int64(ndarray[int64_t, ndim=2] out,
minx = np.empty_like(out)
minx.fill(9223372036854775807)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8750,6 +8788,8 @@ def group_max_bin_float64(ndarray[float64_t, ndim=2] out,
maxx = np.empty_like(out)
maxx.fill(-np.inf)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8810,6 +8850,8 @@ def group_max_bin_float32(ndarray[float32_t, ndim=2] out,
maxx = np.empty_like(out)
maxx.fill(-np.inf)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -8870,6 +8912,8 @@ def group_max_bin_int64(ndarray[int64_t, ndim=2] out,
maxx = np.empty_like(out)
maxx.fill(-9223372036854775807)
+ if len(bins) == 0:
+ return
if bins[len(bins) - 1] == len(values):
ngroups = len(bins)
else:
@@ -9110,6 +9154,8 @@ def group_count_bin_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
dtype=np.int64)
+ if len(bins) == 0:
+ return
ngroups = len(bins) + (bins[len(bins) - 1] != N)
for i in range(N):
@@ -9144,6 +9190,8 @@ def group_count_bin_float32(ndarray[float32_t, ndim=2] out,
ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
dtype=np.int64)
+ if len(bins) == 0:
+ return
ngroups = len(bins) + (bins[len(bins) - 1] != N)
for i in range(N):
@@ -9178,6 +9226,8 @@ def group_count_bin_int64(ndarray[int64_t, ndim=2] out,
ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
dtype=np.int64)
+ if len(bins) == 0:
+ return
ngroups = len(bins) + (bins[len(bins) - 1] != N)
for i in range(N):
@@ -9212,6 +9262,8 @@ def group_count_bin_object(ndarray[object, ndim=2] out,
ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
dtype=np.int64)
+ if len(bins) == 0:
+ return
ngroups = len(bins) + (bins[len(bins) - 1] != N)
for i in range(N):
@@ -9246,6 +9298,8 @@ def group_count_bin_int64(ndarray[int64_t, ndim=2] out,
ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
dtype=np.int64)
+ if len(bins) == 0:
+ return
ngroups = len(bins) + (bins[len(bins) - 1] != N)
for i in range(N):
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 202ccb9438db5..3927caef58d2b 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -9,6 +9,7 @@
from pandas import (Series, TimeSeries, DataFrame, Panel, Index,
isnull, notnull, Timestamp)
+from pandas.core.groupby import DataError
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
@@ -660,6 +661,20 @@ def test_resample_empty(self):
rs = xp.resample('A')
assert_frame_equal(xp, rs)
+ # Empty series were sometimes causing a segfault (for the functions
+ # with Cython bounds-checking disabled) or an IndexError. We just run
+ # them to ensure they no longer do. (GH #10228)
+ for index in tm.all_timeseries_index_generator(0):
+ for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
+ for how in ('count', 'mean', 'min', 'ohlc', 'last', 'prod'):
+ empty_series = pd.Series([], index, dtype)
+ try:
+ empty_series.resample('d', how)
+ except DataError:
+ # Ignore these since some combinations are invalid
+ # (ex: doing mean with dtype of np.object)
+ pass
+
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 25f5f84b0b1d9..83d6b97788e91 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -856,6 +856,33 @@ def makePeriodIndex(k=10):
dr = PeriodIndex(start=dt, periods=k, freq='B')
return dr
+def all_index_generator(k=10):
+ """Generator which can be iterated over to get instances of all the various
+ index classes.
+
+ Parameters
+ ----------
+ k: length of each of the index instances
+ """
+ all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
+ makeUnicodeIndex, makeDateIndex, makePeriodIndex,
+ makeTimedeltaIndex, makeBoolIndex,
+ makeCategoricalIndex]
+ for make_index_func in all_make_index_funcs:
+ yield make_index_func(k=k)
+
+def all_timeseries_index_generator(k=10):
+ """Generator which can be iterated over to get instances of all the classes
+ which represent time-seires.
+
+ Parameters
+ ----------
+ k: length of each of the index instances
+ """
+ make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
+ for make_index_func in make_index_funcs:
+ yield make_index_func(k=k)
+
# make series
def makeFloatSeries():
| Closes #10228. I also deleted some duplicated code while I was at it.
So, I wasn't sure if I should be including a unit test for this. This issue was a segfault, which was happening when you do:
```
s = pd.Series([], index=pd.DatetimeIndex([]), dtype=np.object)
s.resample('d', how='count')
```
so I suppose I could add this code into a test, and just verify it doesn't segfault, but that seemed like a bad idea. It would be testing for something that's undefined behavior and therefore be a non-deterministic test.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10337 | 2015-06-12T08:13:02Z | 2015-06-26T23:24:27Z | null | 2015-06-27T18:29:01Z |
BUG: GH10332 where Panel.apply does not handle result with ndim=0 | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index b4a1bc72ed386..8fea72089a97f 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -169,3 +169,5 @@ Bug Fixes
- Bug where MySQL interface could not handle numeric table/column names (:issue:`10255`)
- Bug in ``read_csv`` with a ``date_parser`` that returned a ``datetime64`` array of other time resolution than ``[ns]`` (:issue:`10245`)
+
+- Bug in ``Panel.apply`` when the result has ndim = 0 (:issue:`10332`)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 580510829baff..bc342d5919bb8 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1093,14 +1093,10 @@ def _construct_return_type(self, result, axes=None):
# need to assume they are the same
if ndim is None:
if isinstance(result,dict):
- ndim = getattr(list(compat.itervalues(result))[0],'ndim',None)
-
- # a saclar result
- if ndim is None:
- ndim = 0
+ ndim = getattr(list(compat.itervalues(result))[0],'ndim',0)
# have a dict, so top-level is +1 dim
- else:
+ if ndim != 0:
ndim += 1
# scalar
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index e86551c6b9158..529d3ed68e24d 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1233,6 +1233,25 @@ def test_apply_slabs(self):
expected = p.sum(0)
assert_frame_equal(result,expected)
+ def test_apply_no_or_zero_ndim(self):
+ # GH10332
+ self.panel = Panel(np.random.rand(5, 5, 5))
+
+ result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
+ result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
+ result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
+ result_float64 = self.panel.apply(lambda df: np.float64(0.0),
+ axis=[1, 2])
+
+ expected_int = expected_int64 = Series([0] * 5)
+ expected_float = expected_float64 = Series([0.0] * 5)
+
+ assert_series_equal(result_int, expected_int)
+ assert_series_equal(result_int64, expected_int64)
+ assert_series_equal(result_float, expected_float)
+ assert_series_equal(result_float64, expected_float64)
+
+
def test_reindex(self):
ref = self.panel['ItemB']
| This is to close #10332
| https://api.github.com/repos/pandas-dev/pandas/pulls/10335 | 2015-06-12T01:35:25Z | 2015-06-12T14:28:08Z | 2015-06-12T14:28:08Z | 2015-06-12T14:28:13Z |
DOC: Remove auto_close option form read_hdf | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index b4a1bc72ed386..15a1842c8c266 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -168,4 +168,8 @@ Bug Fixes
- Bug where MySQL interface could not handle numeric table/column names (:issue:`10255`)
-- Bug in ``read_csv`` with a ``date_parser`` that returned a ``datetime64`` array of other time resolution than ``[ns]`` (:issue:`10245`)
+
+- Bug in ``read_csv`` with a ``date_parser`` that returned a ``datetime64`` array of other time resolution than ``[ns]`` (:issue:`10245`).
+
+- Bug in ``read_hdf`` where ``auto_close`` could not be passed (:issue:`9327`).
+- Bug in ``read_hdf`` where open stores could not be used (:issue:`10330`).
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 8948592358636..eca855a38d725 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -290,8 +290,6 @@ def read_hdf(path_or_buf, key, **kwargs):
return columns
iterator : optional, boolean, return an iterator, default False
chunksize : optional, nrows to include in iteration, return an iterator
- auto_close : optional, boolean, should automatically close the store
- when finished, default is False
Returns
-------
@@ -303,9 +301,6 @@ def read_hdf(path_or_buf, key, **kwargs):
if 'where' in kwargs:
kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)
- f = lambda store, auto_close: store.select(
- key, auto_close=auto_close, **kwargs)
-
if isinstance(path_or_buf, string_types):
try:
@@ -321,20 +316,28 @@ def read_hdf(path_or_buf, key, **kwargs):
# can't auto open/close if we are using an iterator
# so delegate to the iterator
store = HDFStore(path_or_buf, **kwargs)
- try:
- return f(store, True)
- except:
+ auto_close = True
- # if there is an error, close the store
- try:
- store.close()
- except:
- pass
+ elif isinstance(path_or_buf, HDFStore):
+ if not path_or_buf.is_open:
+ raise IOError('The HDFStore must be open for reading.')
- raise
+ store = path_or_buf
+ auto_close = False
+ else:
+ raise NotImplementedError('Support for generic buffers has not been '
+ 'implemented.')
+
+ try:
+ return store.select(key, auto_close=auto_close, **kwargs)
+ except:
+ # if there is an error, close the store
+ try:
+ store.close()
+ except:
+ pass
- # a passed store; user controls open/close
- f(path_or_buf, False)
+ raise
class HDFStore(StringMixin):
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index f671e61e90084..720ebc1db1466 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -13,7 +13,7 @@
from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex, TimedeltaIndex, isnull)
-from pandas.io.pytables import _tables
+from pandas.io.pytables import _tables, TableIterator
try:
_tables()
except ImportError as e:
@@ -4670,6 +4670,53 @@ def test_to_hdf_with_object_column_names(self):
assert(len(result))
+ def test_read_hdf_open_store(self):
+ # GH10330
+ # No check for non-string path_or-buf, and no test of open store
+ df = DataFrame(np.random.rand(4, 5),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+ df.index.name = 'letters'
+ df = df.set_index(keys='E', append=True)
+
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df', mode='w')
+ direct = read_hdf(path, 'df')
+ store = HDFStore(path, mode='r')
+ indirect = read_hdf(store, 'df')
+ tm.assert_frame_equal(direct, indirect)
+ self.assertTrue(store.is_open)
+ store.close()
+
+ def test_read_hdf_iterator(self):
+ df = DataFrame(np.random.rand(4, 5),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+ df.index.name = 'letters'
+ df = df.set_index(keys='E', append=True)
+
+ with ensure_clean_path(self.path) as path:
+ df.to_hdf(path, 'df', mode='w', format='t')
+ direct = read_hdf(path, 'df')
+ iterator = read_hdf(path, 'df', iterator=True)
+ self.assertTrue(isinstance(iterator, TableIterator))
+ indirect = next(iterator.__iter__())
+ tm.assert_frame_equal(direct, indirect)
+
+ def test_read_hdf_errors(self):
+ df = DataFrame(np.random.rand(4, 5),
+ index=list('abcd'),
+ columns=list('ABCDE'))
+
+ with ensure_clean_path(self.path) as path:
+ self.assertRaises(IOError, read_hdf, path, 'key')
+ df.to_hdf(path, 'df')
+ store = HDFStore(path, mode='r')
+ store.close()
+ self.assertRaises(IOError, read_hdf, store, 'df')
+ with open(path, mode='r') as store:
+ self.assertRaises(NotImplementedError, read_hdf, store, 'df')
+
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
| Remove docstring indicating auto_close can be used in read_hdf.
This value is always ignored.
Also removes unreachable code.
xref #9327
| https://api.github.com/repos/pandas-dev/pandas/pulls/10330 | 2015-06-11T12:37:05Z | 2015-06-12T14:47:53Z | null | 2015-06-15T13:56:20Z |
BUG/ENH: GH10319 added higher_precision argument to rolling_mean/sum | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index ae6680852ebeb..68cf7ab397457 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -86,6 +86,8 @@ See the :ref:`documentation <basics.pipe>` for more. (:issue:`10129`)
Other enhancements
^^^^^^^^^^^^^^^^^^
+- ``rolling_mean`` and ``rolling_sum`` accept ``higher_precision`` (``True``/``False``) argument (:issue:`10319`)
+
.. _whatsnew_0162.api:
Backwards incompatible API changes
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 5f68c1ee26e87..e244379962a95 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -861,14 +861,39 @@ def min_subseq(ndarray[double_t] arr):
(s, e, m) = max_subseq(-arr)
return (s, e, -m)
+#-------------------------------------------------------------------------------
+# double-double precision
+cdef class _DoubleDouble:
+ cdef:
+ double value
+ double roundoff
+
+cdef DoubleDouble_add_double(_DoubleDouble dd, double d):
+ # Shewchuk. Adaptive Precision Floating-Point Arithmetic and
+ # Fast Robust Geometric Predicates. 1997.
+ # This is not the main result of the paper. Shewchuk credits
+ # Knuth, but I was unable to find the algorithm in his Volume 2.
+ cdef double part_sum = dd.value + d
+ cdef double d_virtual = part_sum - dd.value
+ cdef double dd_virtual = part_sum - d_virtual
+ cdef double d_roundoff = d - d_virtual
+ cdef double dd_roundoff = dd.value - dd_virtual
+ cdef double roundoff = (d_roundoff + dd_roundoff) + dd.roundoff
+ dd.value = part_sum + roundoff
+ dd.roundoff = roundoff - (dd.value - part_sum)
#-------------------------------------------------------------------------------
# Rolling sum
-def roll_sum(ndarray[double_t] input, int win, int minp):
+def roll_sum(ndarray[double_t] input, int win, int minp,
+ bint higher_precision=False):
cdef double val, prev, sum_x = 0
cdef int nobs = 0, i
cdef int N = len(input)
+ cdef _DoubleDouble sum_dd_x = _DoubleDouble()
+
+ sum_dd_x.value = 0
+ sum_dd_x.roundoff = 0
cdef ndarray[double_t] output = np.empty(N, dtype=float)
@@ -880,7 +905,10 @@ def roll_sum(ndarray[double_t] input, int win, int minp):
# Not NaN
if val == val:
nobs += 1
- sum_x += val
+ if (higher_precision):
+ DoubleDouble_add_double(sum_dd_x, val)
+ else:
+ sum_x += val
output[i] = NaN
@@ -889,16 +917,25 @@ def roll_sum(ndarray[double_t] input, int win, int minp):
if val == val:
nobs += 1
- sum_x += val
+ if (higher_precision):
+ DoubleDouble_add_double(sum_dd_x, val)
+ else:
+ sum_x += val
if i > win - 1:
prev = input[i - win]
if prev == prev:
- sum_x -= prev
+ if (higher_precision):
+ DoubleDouble_add_double(sum_dd_x, -prev)
+ else:
+ sum_x -= prev
nobs -= 1
if nobs >= minp:
- output[i] = sum_x
+ if (higher_precision):
+ output[i] = sum_dd_x.value
+ else:
+ output[i] = sum_x
else:
output[i] = NaN
@@ -908,13 +945,18 @@ def roll_sum(ndarray[double_t] input, int win, int minp):
# Rolling mean
def roll_mean(ndarray[double_t] input,
- int win, int minp):
+ int win, int minp, bint higher_precision=False):
cdef:
double val, prev, result, sum_x = 0
Py_ssize_t nobs = 0, i, neg_ct = 0
Py_ssize_t N = len(input)
cdef ndarray[double_t] output = np.empty(N, dtype=float)
+ cdef _DoubleDouble sum_dd_x = _DoubleDouble()
+
+ sum_dd_x.value = 0
+ sum_dd_x.roundoff = 0
+
minp = _check_minp(win, minp, N)
for i from 0 <= i < minp - 1:
@@ -923,7 +965,10 @@ def roll_mean(ndarray[double_t] input,
# Not NaN
if val == val:
nobs += 1
- sum_x += val
+ if (higher_precision):
+ DoubleDouble_add_double(sum_dd_x, val)
+ else:
+ sum_x += val
if signbit(val):
neg_ct += 1
@@ -934,20 +979,29 @@ def roll_mean(ndarray[double_t] input,
if val == val:
nobs += 1
- sum_x += val
+ if (higher_precision):
+ DoubleDouble_add_double(sum_dd_x, val)
+ else:
+ sum_x += val
if signbit(val):
neg_ct += 1
if i > win - 1:
prev = input[i - win]
if prev == prev:
- sum_x -= prev
+ if (higher_precision):
+ DoubleDouble_add_double(sum_dd_x, -prev)
+ else:
+ sum_x -= prev
nobs -= 1
if signbit(prev):
neg_ct -= 1
if nobs >= minp:
- result = sum_x / nobs
+ if (higher_precision):
+ result = sum_dd_x.value / nobs
+ else:
+ result = sum_x / nobs
if neg_ct == 0 and result < 0:
# all positive
output[i] = 0
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index c80cea3ab7a7d..c58a4682e1c94 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -307,6 +307,33 @@ def test_unique_label_indices():
right = np.unique(a, return_index=True)[1][1:]
tm.assert_array_equal(left, right)
+
+class TestNumericalAccuracy(tm.TestCase):
+
+ def test_roll_mean_accuracy(self):
+ # GH10319
+ values = [1, 0.0003, -0.0, -0.0]
+ values_expected = [x + 0 for x in values]
+ dates = pd.date_range('1999-02-03', '1999-02-06')
+ s = pd.Series(data=values, index=dates)
+
+ roll_mean = pd.rolling_mean(s, 1, higher_precision=True)
+ expected = pd.Series(data=values_expected, index=dates)
+
+ tm.assert_series_equal(roll_mean, expected, check_exact=True)
+
+ def test_roll_sum_accuracy(self):
+ # GH10319
+ values = [1, 0.0003, -0.0, -0.0]
+ values_expected = [x + 0 for x in values]
+ dates = pd.date_range('1999-02-03', '1999-02-06')
+ s = pd.Series(data=values, index=dates)
+
+ roll_sum = pd.rolling_sum(s, 1, higher_precision=True)
+ expected = pd.Series(data=values_expected, index=dates)
+
+ tm.assert_series_equal(roll_sum, expected, check_exact=True)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| This is not specifically for #10319, but should close it for most cases, should users pass in `higher_precision=True`. #10319 is not really a bug in the Cython code, just precision loss when you do finite-precision arithmetic naively.
```
>>> sum([0.00012456, 0.0003, -0.00012456, -0.0003])
-5.421010862427522e-20
```
This is `2**-64` and quite negligible, although it can be made much bigger with carefully chosen numbers. I think rolling_\* functions that require summation could use some more precision, since doing everything in one pass accumulates rounding error, even if the rolling window is narrow.
The only reason why [0.00012456, 0.0003, 0.0, 0.0] currently returns the 'correct' result while [0.00012456, 0.0003, -0.0, -0.0] doesn't is because of explicit negative entry counting. This is only done in rolling_mean but not rolling_sum, and the lack of symmetry doesn't seem desirable. Perhaps we could remove the counting, or add it to rolling_sum as well?
| https://api.github.com/repos/pandas-dev/pandas/pulls/10328 | 2015-06-11T07:17:26Z | 2015-06-11T23:57:33Z | null | 2015-06-11T23:57:33Z |
Bug in to_json causing segfault with a CategoricalIndex (GH #10317) | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index feccc19d8f70b..407699f703861 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -120,6 +120,7 @@ Bug Fixes
- Bug where read_hdf store.select modifies the passed columns list when
multi-indexed (:issue:`7212`)
- Bug in ``Categorical`` repr with ``display.width`` of ``None`` in Python 3 (:issue:`10087`)
+- Bug in ``to_json`` with certain orients and a ``CategoricalIndex`` would segfault (:issue:`10317`)
- Bug where some of the nan funcs do not have consistent return dtypes (:issue:`10251`)
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index be9e0eccda8a1..bb0ad58a47d88 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -4,7 +4,7 @@
import os
import numpy as np
-from pandas import Series, DataFrame, DatetimeIndex, Timestamp
+from pandas import Series, DataFrame, DatetimeIndex, Timestamp, CategoricalIndex
from datetime import timedelta
import pandas as pd
read_json = pd.read_json
@@ -23,6 +23,11 @@
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
+_cat_frame = _frame.copy()
+cat = ['bah']*5 + ['bar']*5 + ['baz']*5 + ['foo']*(len(_cat_frame)-15)
+_cat_frame.index = pd.CategoricalIndex(cat,name='E')
+_cat_frame['E'] = list(reversed(cat))
+_cat_frame['sort'] = np.arange(len(_cat_frame))
_mixed_frame = _frame.copy()
@@ -48,6 +53,7 @@ def setUp(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
+ self.categorical = _cat_frame.copy()
def tearDown(self):
del self.dirpath
@@ -128,8 +134,22 @@ def _check(df):
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
- convert_axes=True, check_dtype=True, raise_ok=None):
- df = df.sort()
+ convert_axes=True, check_dtype=True, raise_ok=None,
+ sort=None):
+ if sort is not None:
+ df = df.sort(sort)
+ else:
+ df = df.sort()
+
+ # if we are not unique, then check that we are raising ValueError
+ # for the appropriate orients
+ if not df.index.is_unique and orient in ['index','columns']:
+ self.assertRaises(ValueError, lambda : df.to_json(orient=orient))
+ return
+ if not df.columns.is_unique and orient in ['index','columns','records']:
+ self.assertRaises(ValueError, lambda : df.to_json(orient=orient))
+ return
+
dfjson = df.to_json(orient=orient)
try:
@@ -141,7 +161,10 @@ def _check_orient(df, orient, dtype=None, numpy=False,
return
raise
- unser = unser.sort()
+ if sort is not None and sort in unser.columns:
+ unser = unser.sort(sort)
+ else:
+ unser = unser.sort()
if dtype is False:
check_dtype=False
@@ -160,7 +183,9 @@ def _check_orient(df, orient, dtype=None, numpy=False,
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
- unser = unser.sort()
+
+ if sort is None:
+ unser = unser.sort()
assert_almost_equal(df.values, unser.values)
else:
if convert_axes:
@@ -169,45 +194,45 @@ def _check_orient(df, orient, dtype=None, numpy=False,
assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
- def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None):
+ def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None, sort=None):
# numpy=False
if convert_axes:
- _check_orient(df, "columns", dtype=dtype)
- _check_orient(df, "records", dtype=dtype)
- _check_orient(df, "split", dtype=dtype)
- _check_orient(df, "index", dtype=dtype)
- _check_orient(df, "values", dtype=dtype)
-
- _check_orient(df, "columns", dtype=dtype, convert_axes=False)
- _check_orient(df, "records", dtype=dtype, convert_axes=False)
- _check_orient(df, "split", dtype=dtype, convert_axes=False)
- _check_orient(df, "index", dtype=dtype, convert_axes=False)
- _check_orient(df, "values", dtype=dtype ,convert_axes=False)
+ _check_orient(df, "columns", dtype=dtype, sort=sort)
+ _check_orient(df, "records", dtype=dtype, sort=sort)
+ _check_orient(df, "split", dtype=dtype, sort=sort)
+ _check_orient(df, "index", dtype=dtype, sort=sort)
+ _check_orient(df, "values", dtype=dtype, sort=sort)
+
+ _check_orient(df, "columns", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "records", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "split", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "index", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "values", dtype=dtype ,convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "columns", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
@@ -233,6 +258,9 @@ def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None):
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
+ # categorical
+ _check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
+
# empty
_check_all_orients(self.empty_frame)
diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c
index dcf107d7965e6..ac415f4d5f195 100644
--- a/pandas/src/ujson/python/objToJSON.c
+++ b/pandas/src/ujson/python/objToJSON.c
@@ -1814,7 +1814,7 @@ char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_in
void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc)
{
- PyObject *obj, *exc, *toDictFunc, *tmpObj;
+ PyObject *obj, *exc, *toDictFunc, *tmpObj, *getValuesFunc;
TypeContext *pc;
PyObjectEncoder *enc;
double val;
@@ -2082,14 +2082,25 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc)
return;
}
- PRINTMARK();
- tc->type = JT_ARRAY;
- pc->newObj = PyObject_GetAttrString(obj, "values");
- pc->iterBegin = NpyArr_iterBegin;
- pc->iterEnd = NpyArr_iterEnd;
- pc->iterNext = NpyArr_iterNext;
- pc->iterGetValue = NpyArr_iterGetValue;
- pc->iterGetName = NpyArr_iterGetName;
+ PyObject* getValuesFunc = PyObject_GetAttrString(obj, "get_values");
+ if (getValuesFunc)
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->newObj = PyObject_CallObject(getValuesFunc, NULL);
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+
+ Py_DECREF(getValuesFunc);
+ }
+ else
+ {
+ goto INVALID;
+ }
+
return;
}
else
| Fixed GH #10317
Stole tests from #10321
| https://api.github.com/repos/pandas-dev/pandas/pulls/10322 | 2015-06-10T01:04:09Z | 2015-06-10T10:28:59Z | 2015-06-10T10:28:59Z | 2015-09-19T00:38:31Z |
BUG: Bug in to_json with certain orients and a CategoricalIndex would segfault, closes #10317 | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index feccc19d8f70b..2c954f33e26b7 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -120,7 +120,7 @@ Bug Fixes
- Bug where read_hdf store.select modifies the passed columns list when
multi-indexed (:issue:`7212`)
- Bug in ``Categorical`` repr with ``display.width`` of ``None`` in Python 3 (:issue:`10087`)
-
+- Bug in ``to_json`` with certain orients and a ``CategoricalIndex`` would segfault (:issue:`10307`)
- Bug where some of the nan funcs do not have consistent return dtypes (:issue:`10251`)
- Bug in ``DataFrame.quantile`` on checking that a valid axis was passed (:issue:`9543`)
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 0659e34c3f27b..4291c4544a074 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -11,7 +11,7 @@
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime
from pandas.io.common import get_filepath_or_buffer
-from pandas.core.common import AbstractMethodError
+from pandas.core.common import AbstractMethodError, is_categorical_dtype
import pandas.core.common as com
loads = _json.loads
@@ -60,11 +60,32 @@ def __init__(self, obj, orient, date_format, double_precision,
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
+ self._coerce_axes()
+ self._coerce_data()
- self.is_copy = None
- self._format_axes()
+ def _coerce_axes(self):
+ for i in range(self.obj._AXIS_LEN):
+ self._coerce_axis(i)
- def _format_axes(self):
+ def _coerce_axis(self, axis):
+ """
+ Parameters
+ ----------
+ axis : axis number
+
+ if the axis needs coercion, then copy the .obj
+ and set the index
+
+ """
+
+ # GH 10317
+ # coerce CategoricalIndexes to Index dtypes
+ ax = self.obj._get_axis(axis)
+ if is_categorical_dtype(ax):
+ self.obj = self.obj.copy()
+ self.obj.set_axis(axis, np.array(ax))
+
+ def _coerce_data(self):
raise AbstractMethodError(self)
def write(self):
@@ -81,16 +102,20 @@ def write(self):
class SeriesWriter(Writer):
_default_orient = 'index'
- def _format_axes(self):
+ def _coerce_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
+ super(SeriesWriter, self)._coerce_axes()
+ def _coerce_data(self):
+ if is_categorical_dtype(self.obj):
+ self.obj = np.array(self.obj)
class FrameWriter(Writer):
_default_orient = 'columns'
- def _format_axes(self):
+ def _coerce_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
@@ -100,7 +125,16 @@ def _format_axes(self):
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
+ super(FrameWriter, self)._coerce_axes()
+
+ def _coerce_data(self):
+ is_copy = False
+ for c, col in self.obj.iteritems():
+ if is_categorical_dtype(col):
+ if not is_copy:
+ is_copy, self.obj = True, self.obj.copy()
+ self.obj[c] = np.array(col)
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index be9e0eccda8a1..bb0ad58a47d88 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -4,7 +4,7 @@
import os
import numpy as np
-from pandas import Series, DataFrame, DatetimeIndex, Timestamp
+from pandas import Series, DataFrame, DatetimeIndex, Timestamp, CategoricalIndex
from datetime import timedelta
import pandas as pd
read_json = pd.read_json
@@ -23,6 +23,11 @@
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
+_cat_frame = _frame.copy()
+cat = ['bah']*5 + ['bar']*5 + ['baz']*5 + ['foo']*(len(_cat_frame)-15)
+_cat_frame.index = pd.CategoricalIndex(cat,name='E')
+_cat_frame['E'] = list(reversed(cat))
+_cat_frame['sort'] = np.arange(len(_cat_frame))
_mixed_frame = _frame.copy()
@@ -48,6 +53,7 @@ def setUp(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
+ self.categorical = _cat_frame.copy()
def tearDown(self):
del self.dirpath
@@ -128,8 +134,22 @@ def _check(df):
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
- convert_axes=True, check_dtype=True, raise_ok=None):
- df = df.sort()
+ convert_axes=True, check_dtype=True, raise_ok=None,
+ sort=None):
+ if sort is not None:
+ df = df.sort(sort)
+ else:
+ df = df.sort()
+
+ # if we are not unique, then check that we are raising ValueError
+ # for the appropriate orients
+ if not df.index.is_unique and orient in ['index','columns']:
+ self.assertRaises(ValueError, lambda : df.to_json(orient=orient))
+ return
+ if not df.columns.is_unique and orient in ['index','columns','records']:
+ self.assertRaises(ValueError, lambda : df.to_json(orient=orient))
+ return
+
dfjson = df.to_json(orient=orient)
try:
@@ -141,7 +161,10 @@ def _check_orient(df, orient, dtype=None, numpy=False,
return
raise
- unser = unser.sort()
+ if sort is not None and sort in unser.columns:
+ unser = unser.sort(sort)
+ else:
+ unser = unser.sort()
if dtype is False:
check_dtype=False
@@ -160,7 +183,9 @@ def _check_orient(df, orient, dtype=None, numpy=False,
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
- unser = unser.sort()
+
+ if sort is None:
+ unser = unser.sort()
assert_almost_equal(df.values, unser.values)
else:
if convert_axes:
@@ -169,45 +194,45 @@ def _check_orient(df, orient, dtype=None, numpy=False,
assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
- def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None):
+ def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None, sort=None):
# numpy=False
if convert_axes:
- _check_orient(df, "columns", dtype=dtype)
- _check_orient(df, "records", dtype=dtype)
- _check_orient(df, "split", dtype=dtype)
- _check_orient(df, "index", dtype=dtype)
- _check_orient(df, "values", dtype=dtype)
-
- _check_orient(df, "columns", dtype=dtype, convert_axes=False)
- _check_orient(df, "records", dtype=dtype, convert_axes=False)
- _check_orient(df, "split", dtype=dtype, convert_axes=False)
- _check_orient(df, "index", dtype=dtype, convert_axes=False)
- _check_orient(df, "values", dtype=dtype ,convert_axes=False)
+ _check_orient(df, "columns", dtype=dtype, sort=sort)
+ _check_orient(df, "records", dtype=dtype, sort=sort)
+ _check_orient(df, "split", dtype=dtype, sort=sort)
+ _check_orient(df, "index", dtype=dtype, sort=sort)
+ _check_orient(df, "values", dtype=dtype, sort=sort)
+
+ _check_orient(df, "columns", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "records", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "split", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "index", dtype=dtype, convert_axes=False, sort=sort)
+ _check_orient(df, "values", dtype=dtype ,convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
- raise_ok=raise_ok)
+ raise_ok=raise_ok, sort=sort)
_check_orient(df, "columns", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
- convert_axes=False, raise_ok=raise_ok)
+ convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
@@ -233,6 +258,9 @@ def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None):
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
+ # categorical
+ _check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
+
# empty
_check_all_orients(self.empty_frame)
| xref #10317
```
In [17]: df = DataFrame({ 'A' : pd.Series([3,2,2],index=pd.Categorical([1,2,3],categories=[1,2,3])), 'B' : pd.Categorical(list('aab')) })
In [18]: df
Out[18]:
A B
1 3 a
2 2 a
3 2 b
In [19]: df.index
Out[19]: CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')
In [20]: df.dtypes
Out[20]:
A int64
B category
dtype: object
In [31]: def f(orient):
print "orient->%s" % orient
print df.to_json(orient=orient)
....:
In [32]: f('columns')
orient->columns
{"A":{"1":3,"2":2,"3":2},"B":{"1":"a","2":"a","3":"b"}}
In [33]: f('index')
orient->index
{"1":{"A":3,"B":"a"},"2":{"A":2,"B":"a"},"3":{"A":2,"B":"b"}}
In [34]: f('split')
orient->split
{"columns":["A","B"],"index":[1,2,3],"data":[[3,"a"],[2,"a"],[2,"b"]]}
In [35]: f('records')
orient->records
[{"A":3,"B":"a"},{"A":2,"B":"a"},{"A":2,"B":"b"}]
In [36]: f('values')
orient->values
[[3,"a"],[2,"a"],[2,"b"]]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10321 | 2015-06-10T00:42:11Z | 2015-06-10T01:07:41Z | null | 2015-06-10T01:07:41Z |
Update v0.16.2.txt | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index b5394ab817cdf..4041f9f059bca 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -45,7 +45,7 @@ This can be rewritten as
(df.pipe(h)
.pipe(g, arg1=1)
- .pipe(f, arg2=2)
+ .pipe(f, arg2=2, arg3=3)
)
Now both the code and the logic flow from top to bottom. Keyword arguments are next to
| Add arg3=3 to pipe example
| https://api.github.com/repos/pandas-dev/pandas/pulls/10314 | 2015-06-08T17:01:00Z | 2015-06-08T18:11:10Z | 2015-06-08T18:11:10Z | 2015-06-08T18:13:00Z |
BUG: bug in setitem where type promotion is applied to entire block | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index eba8f8af7c00e..b5fb8218cb771 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -139,6 +139,7 @@ Bug Fixes
- Bug in getting timezone data with ``dateutil`` on various platforms ( :issue:`9059`, :issue:`8639`, :issue:`9663`, :issue:`10121`)
- Bug in display datetimes with mixed frequencies uniformly; display 'ms' datetimes to the proper precision. (:issue:`10170`)
+- Bug in ``setitem`` where type pormotion is applied to entire block (:issue:`10280`)
- Bug in ``Series`` arithmetic methods may incorrectly hold names (:issue:`10068`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index c23b691e0fe3a..02309e6e4e3b5 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -204,6 +204,15 @@ def _setitem_with_indexer(self, indexer, value):
# maybe partial set
take_split_path = self.obj._is_mixed_type
+
+ # if there is only one block/type, still have to take split path
+ # unless the block is one-dimensional or it can hold the value
+ if not take_split_path and self.obj._data.blocks:
+ blk, = self.obj._data.blocks
+ if 1 < blk.ndim: # in case of dict, keys are indices
+ val = list(value.values()) if isinstance(value,dict) else value
+ take_split_path = not blk._can_hold_element(val)
+
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 24741d99691a3..3980fb6938f93 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -2330,6 +2330,31 @@ def test_setitem_dtype_upcast(self):
expected = DataFrame([{"a": 1, "c" : 'foo'}, {"a": 3, "b": 2, "c" : np.nan}])
assert_frame_equal(df,expected)
+ # GH10280
+ df = DataFrame(np.arange(6).reshape(2, 3), index=list('ab'),
+ columns=['foo', 'bar', 'baz'])
+
+ for val in [3.14, 'wxyz']:
+ left = df.copy()
+ left.loc['a', 'bar'] = val
+ right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
+ columns=['foo', 'bar', 'baz'])
+
+ assert_frame_equal(left, right)
+ self.assertTrue(com.is_integer_dtype(left['foo']))
+ self.assertTrue(com.is_integer_dtype(left['baz']))
+
+ left = DataFrame(np.arange(6).reshape(2, 3) / 10.0, index=list('ab'),
+ columns=['foo', 'bar', 'baz'])
+ left.loc['a', 'bar'] = 'wxyz'
+
+ right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
+ columns=['foo', 'bar', 'baz'])
+
+ assert_frame_equal(left, right)
+ self.assertTrue(com.is_float_dtype(left['foo']))
+ self.assertTrue(com.is_float_dtype(left['baz']))
+
def test_setitem_iloc(self):
| closes https://github.com/pydata/pandas/issues/10280
on master:
``` python
>>> df
foo bar baz
a 0 1 2
b 3 4 5
>>> df.dtypes
foo int64
bar int64
baz int64
dtype: object
>>> df.loc['a', 'bar'] = 3.14
>>> df.dtypes
foo float64
bar float64
baz float64
dtype: object
```
on branch:
``` python
>>> df.dtypes
foo int64
bar int64
baz int64
dtype: object
>>> df.loc['a', 'bar'] = 3.14
>>> df.dtypes
foo int64
bar float64
baz int64
dtype: object
>>> df
foo bar baz
a 0 3.14 2
b 3 4.00 5
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10308 | 2015-06-07T23:34:30Z | 2015-06-09T14:43:27Z | 2015-06-09T14:43:27Z | 2015-06-09T23:30:23Z |
BUG: bug in json serialization when frame has mixed types | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 9421ab0f841ac..d991e639779f9 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -117,6 +117,7 @@ Bug Fixes
- Bug where some of the nan funcs do not have consistent return dtypes (:issue:`10251`)
- Bug in groupby.apply aggregation for Categorical not preserving categories (:issue:`10138`)
- Bug in ``to_csv`` where ``date_format`` is ignored if the ``datetime`` is fractional (:issue:`10209`)
+- Bug in ``DataFrame.to_json`` with mixed data types (:issue:`10289`)
- Bug in cache updating when consolidating (:issue:`10264`)
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index 39f645aef0154..be9e0eccda8a1 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -333,6 +333,33 @@ def test_frame_empty_mixedtype(self):
self.assertTrue(df._is_mixed_type)
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df)
+ def test_frame_mixedtype_orient(self): # GH10289
+ vals = [[10, 1, 'foo', .1, .01],
+ [20, 2, 'bar', .2, .02],
+ [30, 3, 'baz', .3, .03],
+ [40, 4, 'qux', .4, .04]]
+
+ df = DataFrame(vals, index=list('abcd'),
+ columns=['1st', '2nd', '3rd', '4th', '5th'])
+
+ self.assertTrue(df._is_mixed_type)
+ right = df.copy()
+
+ for orient in ['split', 'index', 'columns']:
+ inp = df.to_json(orient=orient)
+ left = read_json(inp, orient=orient, convert_axes=False)
+ assert_frame_equal(left, right)
+
+ right.index = np.arange(len(df))
+ inp = df.to_json(orient='records')
+ left = read_json(inp, orient='records', convert_axes=False)
+ assert_frame_equal(left, right)
+
+ right.columns = np.arange(df.shape[1])
+ inp = df.to_json(orient='values')
+ left = read_json(inp, orient='values', convert_axes=False)
+ assert_frame_equal(left, right)
+
def test_v12_compat(self):
df = DataFrame(
[[1.56808523, 0.65727391, 1.81021139, -0.17251653],
diff --git a/pandas/src/ujson/lib/ultrajson.h b/pandas/src/ujson/lib/ultrajson.h
index 4d7af3dde1f02..ba1958723fa94 100644
--- a/pandas/src/ujson/lib/ultrajson.h
+++ b/pandas/src/ujson/lib/ultrajson.h
@@ -309,5 +309,6 @@ typedef struct __JSONObjectDecoder
} JSONObjectDecoder;
EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer);
+EXPORTFUNCTION void encode(JSOBJ, JSONObjectEncoder *, const char *, size_t);
#endif
diff --git a/pandas/src/ujson/lib/ultrajsondec.c b/pandas/src/ujson/lib/ultrajsondec.c
index bae075b4376b1..9c2bb21612745 100644
--- a/pandas/src/ujson/lib/ultrajsondec.c
+++ b/pandas/src/ujson/lib/ultrajsondec.c
@@ -803,7 +803,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_object( struct DecoderState *ds)
return NULL;
}
- if (!ds->dec->objectAddKey (ds->prv, newObj, itemName, itemValue))
+ if (!ds->dec->objectAddKey (ds->prv, newObj, itemName, itemValue))
{
ds->dec->releaseObject(ds->prv, newObj, ds->dec);
ds->dec->releaseObject(ds->prv, itemName, ds->dec);
@@ -907,7 +907,7 @@ JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuf
setlocale(LC_NUMERIC, locale);
free(locale);
}
- else
+ else
{
ret = decode_any (&ds);
}
diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c
index 38ce67e0fc28e..dcf107d7965e6 100644
--- a/pandas/src/ujson/python/objToJSON.c
+++ b/pandas/src/ujson/python/objToJSON.c
@@ -161,6 +161,8 @@ enum PANDAS_FORMAT
//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
#define PRINTMARK()
+int PdBlock_iterNext(JSOBJ, JSONTypeContext *);
+
// import_array() compat
#if (PY_VERSION_HEX >= 0x03000000)
void *initObjToJSON(void)
@@ -835,7 +837,10 @@ char *PdBlock_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
}
else
{
- idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1;
+ idx = GET_TC(tc)->iterNext != PdBlock_iterNext
+ ? npyarr->index[npyarr->stridedim - npyarr->inc] - 1
+ : npyarr->index[npyarr->stridedim];
+
NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels);
}
return NULL;
@@ -2374,7 +2379,7 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc)
}
goto INVALID;
}
- encode (tmpObj, enc, NULL, 0);
+ encode (tmpObj, (JSONObjectEncoder*) enc, NULL, 0);
Py_DECREF(tmpObj);
goto INVALID;
}
| closes https://github.com/pydata/pandas/issues/10289
on master:
``` python
>>> df
1st 2nd 3rd 4th 5th
a 10 1 foo 0.1 0.01
b 20 2 bar 0.2 0.02
c 30 3 baz 0.3 0.03
d 40 4 qux 0.4 0.04
>>> read_json(df.to_json(orient='index'), orient='index', convert_axes=False)
1st 2nd 3rd 4th 5th
a 40 4 qux 0.4 0.04
```
on branch:
``` python
>>> read_json(df.to_json(orient='index'), orient='index', convert_axes=False)
1st 2nd 3rd 4th 5th
a 10 1 foo 0.1 0.01
b 20 2 bar 0.2 0.02
c 30 3 baz 0.3 0.03
d 40 4 qux 0.4 0.04
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10306 | 2015-06-07T01:41:01Z | 2015-06-07T23:05:24Z | 2015-06-07T23:05:24Z | 2015-08-22T11:36:04Z |
BUG: PeriodIndex.order doesnt preserve freq | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 13d61957eea00..770ad8a268f11 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -612,5 +612,8 @@ Bug Fixes
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
+
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue: `9431`)
+- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
+- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index ce6c60df2fd94..a9878f493251b 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2486,7 +2486,7 @@ def get_slice_bound(self, label, side, kind):
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
- slc = lib.maybe_indices_to_slice(slc.astype('i8'))
+ slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError(
"Cannot get %s slice bound for non-unique label:"
@@ -5108,7 +5108,7 @@ def _maybe_to_slice(loc):
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
- loc = lib.maybe_indices_to_slice(loc)
+ loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 4805a33e5b496..e839210fbbada 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -633,17 +633,42 @@ def convert_timestamps(ndarray values):
return out
-def maybe_indices_to_slice(ndarray[int64_t] indices):
+
+def maybe_indices_to_slice(ndarray[int64_t] indices, int max_len):
cdef:
Py_ssize_t i, n = len(indices)
+ int k, vstart, vlast, v
+
+ if n == 0:
+ return slice(0, 0)
- if not n or indices[0] < 0:
+ vstart = indices[0]
+ if vstart < 0 or max_len <= vstart:
return indices
- for i in range(1, n):
- if indices[i] - indices[i - 1] != 1:
- return indices
- return slice(indices[0], indices[n - 1] + 1)
+ if n == 1:
+ return slice(vstart, vstart + 1)
+
+ vlast = indices[n - 1]
+ if vlast < 0 or max_len <= vlast:
+ return indices
+
+ k = indices[1] - indices[0]
+ if k == 0:
+ return indices
+ else:
+ for i in range(2, n):
+ v = indices[i]
+ if v - indices[i - 1] != k:
+ return indices
+
+ if k > 0:
+ return slice(vstart, vlast + 1, k)
+ else:
+ if vlast == 0:
+ return slice(vstart, None, k)
+ else:
+ return slice(vstart, vlast - 1, k)
def maybe_booleans_to_slice(ndarray[uint8_t] mask):
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 2699e780f0edb..15023b77694e6 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -2266,6 +2266,16 @@ def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
+
+ # representable by slice [0:2:2]
+ # self.assertRaises(KeyError, idx.slice_locs, np.nan)
+ sliced = idx.slice_locs(np.nan)
+ self.assertTrue(isinstance(sliced, tuple))
+ self.assertEqual(sliced, (0, 3))
+
+ # not representable by slice
+ idx = Float64Index([np.nan, 1, np.nan, np.nan])
+ self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index 6d9bea29cf44d..cfc98f5c20360 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -4,7 +4,7 @@
import numpy as np
import pandas as pd
-from pandas.lib import isscalar, item_from_zerodim, max_len_string_array
+import pandas.lib as lib
import pandas.util.testing as tm
from pandas.compat import u, PY2
@@ -14,19 +14,19 @@ class TestMisc(tm.TestCase):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
- self.assertTrue(max_len_string_array(arr), 3)
+ self.assertTrue(lib.max_len_string_array(arr), 3)
# unicode
arr = a.astype('U').astype(object)
- self.assertTrue(max_len_string_array(arr), 3)
+ self.assertTrue(lib.max_len_string_array(arr), 3)
# bytes for python3
arr = a.astype('S').astype(object)
- self.assertTrue(max_len_string_array(arr), 3)
+ self.assertTrue(lib.max_len_string_array(arr), 3)
# raises
tm.assertRaises(TypeError,
- lambda: max_len_string_array(arr.astype('U')))
+ lambda: lib.max_len_string_array(arr.astype('U')))
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
@@ -39,68 +39,197 @@ def test_infer_dtype_bytes(self):
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
-
-class TestIsscalar(tm.TestCase):
+ def test_maybe_indices_to_slice_left_edge(self):
+ target = np.arange(100)
+
+ # slice
+ indices = np.array([], dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ for end in [1, 2, 5, 20, 99]:
+ for step in [1, 2, 4]:
+ indices = np.arange(0, end, step, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # reverse
+ indices = indices[::-1]
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # not slice
+ for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2], [2, 0, -2]]:
+ indices = np.array(case, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertFalse(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(maybe_slice, indices)
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ def test_maybe_indices_to_slice_right_edge(self):
+ target = np.arange(100)
+
+ # slice
+ for start in [0, 2, 5, 20, 97, 98]:
+ for step in [1, 2, 4]:
+ indices = np.arange(start, 99, step, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # reverse
+ indices = indices[::-1]
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # not slice
+ indices = np.array([97, 98, 99, 100], dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertFalse(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(maybe_slice, indices)
+ with self.assertRaises(IndexError):
+ target[indices]
+ with self.assertRaises(IndexError):
+ target[maybe_slice]
+
+ indices = np.array([100, 99, 98, 97], dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertFalse(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(maybe_slice, indices)
+ with self.assertRaises(IndexError):
+ target[indices]
+ with self.assertRaises(IndexError):
+ target[maybe_slice]
+
+ for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
+ indices = np.array(case, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertFalse(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(maybe_slice, indices)
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ def test_maybe_indices_to_slice_both_edges(self):
+ target = np.arange(10)
+
+ # slice
+ for step in [1, 2, 4, 5, 8, 9]:
+ indices = np.arange(0, 9, step, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # reverse
+ indices = indices[::-1]
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # not slice
+ for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
+ indices = np.array(case, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertFalse(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(maybe_slice, indices)
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ def test_maybe_indices_to_slice_middle(self):
+ target = np.arange(100)
+
+ # slice
+ for start, end in [(2, 10), (5, 25), (65, 97)]:
+ for step in [1, 2, 4, 20]:
+ indices = np.arange(start, end, step, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # reverse
+ indices = indices[::-1]
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertTrue(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+ # not slice
+ for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
+ indices = np.array(case, dtype=np.int64)
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
+ self.assertFalse(isinstance(maybe_slice, slice))
+ self.assert_numpy_array_equal(maybe_slice, indices)
+ self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+
+
+class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
- self.assertTrue(isscalar(None))
- self.assertTrue(isscalar(True))
- self.assertTrue(isscalar(False))
- self.assertTrue(isscalar(0.))
- self.assertTrue(isscalar(np.nan))
- self.assertTrue(isscalar('foobar'))
- self.assertTrue(isscalar(b'foobar'))
- self.assertTrue(isscalar(u('efoobar')))
- self.assertTrue(isscalar(datetime(2014, 1, 1)))
- self.assertTrue(isscalar(date(2014, 1, 1)))
- self.assertTrue(isscalar(time(12, 0)))
- self.assertTrue(isscalar(timedelta(hours=1)))
- self.assertTrue(isscalar(pd.NaT))
+ self.assertTrue(lib.isscalar(None))
+ self.assertTrue(lib.isscalar(True))
+ self.assertTrue(lib.isscalar(False))
+ self.assertTrue(lib.isscalar(0.))
+ self.assertTrue(lib.isscalar(np.nan))
+ self.assertTrue(lib.isscalar('foobar'))
+ self.assertTrue(lib.isscalar(b'foobar'))
+ self.assertTrue(lib.isscalar(u('efoobar')))
+ self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
+ self.assertTrue(lib.isscalar(date(2014, 1, 1)))
+ self.assertTrue(lib.isscalar(time(12, 0)))
+ self.assertTrue(lib.isscalar(timedelta(hours=1)))
+ self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
- self.assertFalse(isscalar({}))
- self.assertFalse(isscalar([]))
- self.assertFalse(isscalar([1]))
- self.assertFalse(isscalar(()))
- self.assertFalse(isscalar((1,)))
- self.assertFalse(isscalar(slice(None)))
- self.assertFalse(isscalar(Ellipsis))
+ self.assertFalse(lib.isscalar({}))
+ self.assertFalse(lib.isscalar([]))
+ self.assertFalse(lib.isscalar([1]))
+ self.assertFalse(lib.isscalar(()))
+ self.assertFalse(lib.isscalar((1,)))
+ self.assertFalse(lib.isscalar(slice(None)))
+ self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
- self.assertTrue(isscalar(np.int64(1)))
- self.assertTrue(isscalar(np.float64(1.)))
- self.assertTrue(isscalar(np.int32(1)))
- self.assertTrue(isscalar(np.object_('foobar')))
- self.assertTrue(isscalar(np.str_('foobar')))
- self.assertTrue(isscalar(np.unicode_(u('foobar'))))
- self.assertTrue(isscalar(np.bytes_(b'foobar')))
- self.assertTrue(isscalar(np.datetime64('2014-01-01')))
- self.assertTrue(isscalar(np.timedelta64(1, 'h')))
+ self.assertTrue(lib.isscalar(np.int64(1)))
+ self.assertTrue(lib.isscalar(np.float64(1.)))
+ self.assertTrue(lib.isscalar(np.int32(1)))
+ self.assertTrue(lib.isscalar(np.object_('foobar')))
+ self.assertTrue(lib.isscalar(np.str_('foobar')))
+ self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
+ self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
+ self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
+ self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1),
np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h'))]:
- self.assertFalse(isscalar(zerodim))
- self.assertTrue(isscalar(item_from_zerodim(zerodim)))
+ self.assertFalse(lib.isscalar(zerodim))
+ self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
- self.assertFalse(isscalar(np.array([])))
- self.assertFalse(isscalar(np.array([[]])))
- self.assertFalse(isscalar(np.matrix('1; 2')))
+ self.assertFalse(lib.isscalar(np.array([])))
+ self.assertFalse(lib.isscalar(np.array([[]])))
+ self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
- self.assertTrue(isscalar(pd.Timestamp('2014-01-01')))
- self.assertTrue(isscalar(pd.Timedelta(hours=1)))
- self.assertTrue(isscalar(pd.Period('2014-01-01')))
-
- def test_isscalar_pandas_containers(self):
- self.assertFalse(isscalar(pd.Series()))
- self.assertFalse(isscalar(pd.Series([1])))
- self.assertFalse(isscalar(pd.DataFrame()))
- self.assertFalse(isscalar(pd.DataFrame([[1]])))
- self.assertFalse(isscalar(pd.Panel()))
- self.assertFalse(isscalar(pd.Panel([[[1]]])))
- self.assertFalse(isscalar(pd.Index([])))
- self.assertFalse(isscalar(pd.Index([1])))
+ self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
+ self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
+ self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
+
+ def test_lisscalar_pandas_containers(self):
+ self.assertFalse(lib.isscalar(pd.Series()))
+ self.assertFalse(lib.isscalar(pd.Series([1])))
+ self.assertFalse(lib.isscalar(pd.DataFrame()))
+ self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
+ self.assertFalse(lib.isscalar(pd.Panel()))
+ self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
+ self.assertFalse(lib.isscalar(pd.Index([])))
+ self.assertFalse(lib.isscalar(pd.Index([1])))
+
+
+if __name__ == '__main__':
+ import nose
+
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
\ No newline at end of file
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index b3d10a80e0b50..6d20b0128f164 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -163,17 +163,26 @@ def order(self, return_indexer=False, ascending=True):
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
+ attribs = self._get_attributes_dict()
+ freq = attribs['freq']
+ from pandas.tseries.period import PeriodIndex
+ if freq is not None and not isinstance(self, PeriodIndex):
+ if freq.n > 0 and not ascending:
+ freq = freq * -1
+ elif freq.n < 0 and ascending:
+ freq = freq * -1
+ attribs['freq'] = freq
+
if not ascending:
sorted_values = sorted_values[::-1]
- attribs = self._get_attributes_dict()
- attribs['freq'] = None
+
return self._simple_new(sorted_values, **attribs)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
- maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
+ maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices), len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
return super(DatetimeIndexOpsMixin, self).take(indices, axis)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 8ee6a1bc64e4e..5471bc076341b 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -180,8 +180,8 @@ def _join_i8_wrapper(joinf, **kwargs):
tz = None
offset = None
- _comparables = ['name','freqstr','tz']
- _attributes = ['name','freq','tz']
+ _comparables = ['name', 'freqstr', 'tz']
+ _attributes = ['name', 'freq', 'tz']
_datetimelike_ops = ['year','month','day','hour','minute','second',
'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'days_in_month', 'daysinmonth',
'date','time','microsecond','nanosecond','is_month_start','is_month_end',
@@ -1550,7 +1550,7 @@ def delete(self, loc):
freq = self.freq
else:
if com.is_list_like(loc):
- loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)))
+ loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 6413ce9cd5a03..bb0eda8260704 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -267,7 +267,11 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs):
result = object.__new__(cls)
result._data = values
result.name = name
+
+ if freq is None:
+ raise ValueError('freq not specified')
result.freq = freq
+
result._reset_identity()
return result
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index f1871e78e21a1..d7172dd304b6b 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -126,8 +126,8 @@ def _join_i8_wrapper(joinf, **kwargs):
_engine_type = _index.TimedeltaEngine
- _comparables = ['name','freq']
- _attributes = ['name','freq']
+ _comparables = ['name', 'freq']
+ _attributes = ['name', 'freq']
_is_numeric_dtype = True
freq = None
@@ -853,7 +853,7 @@ def delete(self, loc):
freq = self.freq
else:
if com.is_list_like(loc):
- loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)))
+ loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 1b38f51ed4f71..3d9e80f351c44 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -8,6 +8,7 @@
from pandas.tseries.common import is_datetimelike
from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
+import pandas.tseries.offsets as offsets
import pandas.tslib as tslib
import nose
@@ -297,6 +298,72 @@ def test_nonunique_contains(self):
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
+ def test_order(self):
+ # with freq
+ idx1 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D', name='idx')
+ idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
+ freq='H', tz='Asia/Tokyo', name='tzidx')
+
+ for idx in [idx1, idx2]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, idx)
+ self.assertEqual(ordered.freq, idx.freq)
+
+ ordered = idx.order(ascending=False)
+ expected = idx[::-1]
+ self.assert_index_equal(ordered, expected)
+ self.assertEqual(ordered.freq, expected.freq)
+ self.assertEqual(ordered.freq.n, -1)
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, idx)
+ self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
+ self.assertEqual(ordered.freq, idx.freq)
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ expected = idx[::-1]
+ self.assert_index_equal(ordered, expected)
+ self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
+ self.assertEqual(ordered.freq, expected.freq)
+ self.assertEqual(ordered.freq.n, -1)
+
+ # without freq
+ idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
+ '2011-01-02', '2011-01-01'], name='idx1')
+ exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
+ '2011-01-03', '2011-01-05'], name='idx1')
+
+ idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
+ '2011-01-02', '2011-01-01'],
+ tz='Asia/Tokyo', name='idx2')
+ exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
+ '2011-01-03', '2011-01-05'],
+ tz='Asia/Tokyo', name='idx2')
+
+ idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
+ '2011-01-02', pd.NaT], name='idx3')
+ exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
+ '2011-01-05'], name='idx3')
+
+ for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, expected)
+ self.assertIsNone(ordered.freq)
+
+ ordered = idx.order(ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ self.assertIsNone(ordered.freq)
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, expected)
+ self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
+ self.assertIsNone(ordered.freq)
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
+ self.assertIsNone(ordered.freq)
+
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
@@ -318,7 +385,7 @@ def test_getitem(self):
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
- expected = pd.date_range('2011-01-12', '2011-01-25', freq='3D',
+ expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
@@ -343,6 +410,45 @@ def test_drop_duplicates_metadata(self):
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
+ def test_take(self):
+ #GH 10295
+ idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
+ idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
+
+ for idx in [idx1, idx2]:
+ result = idx.take([0])
+ self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
+
+ result = idx.take([0, 1, 2])
+ expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
+ tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([0, 2, 4])
+ expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
+ tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([7, 4, 1])
+ expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
+ tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([3, 2, 5])
+ expected = DatetimeIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
+ freq=None, tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertIsNone(result.freq)
+
+ result = idx.take([-3, 2, 5])
+ expected = DatetimeIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
+ freq=None, tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertIsNone(result.freq)
+
class TestTimedeltaIndexOps(Ops):
@@ -762,7 +868,7 @@ def test_value_counts_unique(self):
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00',
- '1 days 08:00:00', '1 days 08:00:00', pd.NaT])
+ '1 days 08:00:00', '1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
@@ -788,6 +894,66 @@ def test_unknown_attribute(self):
self.assertNotIn('foo',ts.__dict__.keys())
self.assertRaises(AttributeError,lambda : ts.foo)
+ def test_order(self):
+ #GH 10295
+ idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D', name='idx')
+ idx2 = TimedeltaIndex(['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
+
+ for idx in [idx1, idx2]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, idx)
+ self.assertEqual(ordered.freq, idx.freq)
+
+ ordered = idx.order(ascending=False)
+ expected = idx[::-1]
+ self.assert_index_equal(ordered, expected)
+ self.assertEqual(ordered.freq, expected.freq)
+ self.assertEqual(ordered.freq.n, -1)
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, idx)
+ self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
+ self.assertEqual(ordered.freq, idx.freq)
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ self.assert_index_equal(ordered, idx[::-1])
+ self.assertEqual(ordered.freq, expected.freq)
+ self.assertEqual(ordered.freq.n, -1)
+
+ idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
+ '2 hour ', '1 hour'], name='idx1')
+ exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
+ '3 hour', '5 hour'], name='idx1')
+
+ idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
+ '2 day', '1 day'], name='idx2')
+ exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
+ '3 day', '5 day'], name='idx2')
+
+ idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
+ '2 minute', pd.NaT], name='idx3')
+ exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
+ '5 minute'], name='idx3')
+
+ for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, expected)
+ self.assertIsNone(ordered.freq)
+
+ ordered = idx.order(ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ self.assertIsNone(ordered.freq)
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, expected)
+ self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
+ self.assertIsNone(ordered.freq)
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
+ self.assertIsNone(ordered.freq)
+
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
@@ -806,7 +972,7 @@ def test_getitem(self):
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
- expected = pd.timedelta_range('12 day', '25 day', freq='3D', name='idx')
+ expected = pd.timedelta_range('12 day', '24 day', freq='3D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
@@ -829,6 +995,42 @@ def test_drop_duplicates_metadata(self):
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
+ def test_take(self):
+ #GH 10295
+ idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
+
+ for idx in [idx1]:
+ result = idx.take([0])
+ self.assertEqual(result, pd.Timedelta('1 day'))
+
+ result = idx.take([-1])
+ self.assertEqual(result, pd.Timedelta('31 day'))
+
+ result = idx.take([0, 1, 2])
+ expected = pd.timedelta_range('1 day', '3 day', freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([0, 2, 4])
+ expected = pd.timedelta_range('1 day', '5 day', freq='2D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([7, 4, 1])
+ expected = pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([3, 2, 5])
+ expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertIsNone(result.freq)
+
+ result = idx.take([-3, 2, 5])
+ expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertIsNone(result.freq)
+
class TestPeriodIndexOps(Ops):
@@ -1268,6 +1470,209 @@ def test_drop_duplicates_metadata(self):
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
+ def test_order_compat(self):
+
+ def _check_freq(index, expected_index):
+ if isinstance(index, PeriodIndex):
+ self.assertEqual(index.freq, expected_index.freq)
+
+ pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
+ # for compatibility check
+ iidx = Index([2011, 2012, 2013], name='idx')
+ for idx in [pidx, iidx]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, idx)
+ _check_freq(ordered, idx)
+
+ ordered = idx.order(ascending=False)
+ self.assert_index_equal(ordered, idx[::-1])
+ _check_freq(ordered, idx[::-1])
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, idx)
+ self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
+ _check_freq(ordered, idx)
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ self.assert_index_equal(ordered, idx[::-1])
+ self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
+ _check_freq(ordered, idx[::-1])
+
+ pidx = PeriodIndex(['2011', '2013', '2015', '2012', '2011'], name='pidx', freq='A')
+ pexpected = PeriodIndex(['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
+ # for compatibility check
+ iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
+ iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
+ for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, expected)
+ _check_freq(ordered, idx)
+
+ ordered = idx.order(ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ _check_freq(ordered, idx)
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, expected)
+ self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
+ _check_freq(ordered, idx)
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
+ _check_freq(ordered, idx)
+
+ pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx', freq='D')
+
+ result = pidx.order()
+ expected = PeriodIndex(['NaT', '2011', '2011', '2013'], name='pidx', freq='D')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, 'D')
+
+ result = pidx.order(ascending=False)
+ expected = PeriodIndex(['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, 'D')
+
+ def test_order(self):
+ idx1 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
+ freq='D', name='idx')
+
+ for idx in [idx1]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, idx)
+ self.assertEqual(ordered.freq, idx.freq)
+
+ ordered = idx.order(ascending=False)
+ expected = idx[::-1]
+ self.assert_index_equal(ordered, expected)
+ self.assertEqual(ordered.freq, 'D')
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, idx)
+ self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
+ self.assertEqual(ordered.freq, 'D')
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ expected = idx[::-1]
+ self.assert_index_equal(ordered, expected)
+ self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
+ self.assertEqual(ordered.freq, 'D')
+
+ idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
+ '2011-01-02', '2011-01-01'], freq='D', name='idx1')
+ exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
+ '2011-01-03', '2011-01-05'], freq='D', name='idx1')
+
+ idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
+ '2011-01-02', '2011-01-01'],
+ freq='D', name='idx2')
+ exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
+ '2011-01-03', '2011-01-05'],
+ freq='D', name='idx2')
+
+ idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
+ '2011-01-02', pd.NaT], freq='D', name='idx3')
+ exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
+ '2011-01-05'], freq='D', name='idx3')
+
+ for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
+ ordered = idx.order()
+ self.assert_index_equal(ordered, expected)
+ self.assertEqual(ordered.freq, 'D')
+
+ ordered = idx.order(ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ self.assertEqual(ordered.freq, 'D')
+
+ ordered, indexer = idx.order(return_indexer=True)
+ self.assert_index_equal(ordered, expected)
+ self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
+ self.assertEqual(ordered.freq, 'D')
+
+ ordered, indexer = idx.order(return_indexer=True, ascending=False)
+ self.assert_index_equal(ordered, expected[::-1])
+ self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
+ self.assertEqual(ordered.freq, 'D')
+
+ def test_getitem(self):
+ idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
+
+ for idx in [idx1]:
+ result = idx[0]
+ self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
+
+ result = idx[-1]
+ self.assertEqual(result, pd.Period('2011-01-31', freq='D'))
+
+ result = idx[0:5]
+ expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
+ name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[0:10:2]
+ expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
+ '2011-01-07', '2011-01-09'],
+ freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[-20:-5:3]
+ expected = pd.PeriodIndex(['2011-01-12', '2011-01-15', '2011-01-18',
+ '2011-01-21', '2011-01-24'],
+ freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[4::-1]
+ expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
+ '2011-01-02', '2011-01-01'],
+ freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ def test_take(self):
+ #GH 10295
+ idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
+
+ for idx in [idx1]:
+ result = idx.take([0])
+ self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
+
+ result = idx.take([5])
+ self.assertEqual(result, pd.Period('2011-01-06', freq='D'))
+
+ result = idx.take([0, 1, 2])
+ expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
+ name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([0, 2, 4])
+ expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05'],
+ freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([7, 4, 1])
+ expected = pd.PeriodIndex(['2011-01-08', '2011-01-05', '2011-01-02'],
+ freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([3, 2, 5])
+ expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
+ freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx.take([-3, 2, 5])
+ expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
+ freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
if __name__ == '__main__':
import nose
| Closes #10295 (This is based on #10292). Changed `lib.maybe_indices_to_slice` to detect slice with `step` and also check upper bound of index.
However, above change affects to the behavior of `Index.slice_locs`. Internally, the behavoir of `Index.get_slice_bound` seems to assume `lib.maybe_indices_to_slice` returns slice only when its step is "+1". Options are:
1. Change `Index.get_slice_bound` to meet updated `lib.maybe_indices_to_slice` logic.
2. Leave `lib.maybe_indices_to_slice` as it is. Let `DatetimeIndex` to use separate logic, maybe `lib.maybe_indices_to_slice_with_step`
To avoid any unexpected result, I think option 2 is preferable?
| https://api.github.com/repos/pandas-dev/pandas/pulls/10305 | 2015-06-06T22:40:59Z | 2015-08-08T14:38:47Z | 2015-08-08T14:38:46Z | 2015-08-08T14:38:52Z |
BUG: Categorical.remove_categories(np.nan) fails when underlying dtype is float | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index c219818a62631..feccc19d8f70b 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -163,6 +163,8 @@ Bug Fixes
- Bug in GroupBy.get_group raises ValueError when group key contains NaT (:issue:`6992`)
- Bug in ``SparseSeries`` constructor ignores input data name (:issue:`10258`)
+- Bug in ``Categorical.remove_categories`` causing a ValueError when removing the ``NaN`` category if underlying dtype is floating-point (:issue:`10156`)
+
- Bug where infer_freq infers timerule (WOM-5XXX) unsupported by to_offset (:issue:`9425`)
- Bug in ``DataFrame.to_hdf()`` where table format would raise a seemingly unrelated error for invalid (non-string) column names. This is now explicitly forbidden. (:issue:`9057`)
- Bug to handle masking empty ``DataFrame``(:issue:`10126`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index c5cd8390359dc..74007d0127e4f 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -749,11 +749,19 @@ def remove_categories(self, removals, inplace=False):
"""
if not is_list_like(removals):
removals = [removals]
- removals = set(list(removals))
- not_included = removals - set(self._categories)
+
+ removal_set = set(list(removals))
+ not_included = removal_set - set(self._categories)
+ new_categories = [ c for c in self._categories if c not in removal_set ]
+
+ # GH 10156
+ if any(isnull(removals)):
+ not_included = [x for x in not_included if notnull(x)]
+ new_categories = [x for x in new_categories if notnull(x)]
+
if len(not_included) != 0:
raise ValueError("removals must all be in old categories: %s" % str(not_included))
- new_categories = [ c for c in self._categories if c not in removals ]
+
return self.set_categories(new_categories, ordered=self.ordered, rename=False,
inplace=inplace)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index bec688db99114..bc9279a8d1529 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -854,6 +854,28 @@ def test_nan_handling(self):
self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
self.assert_numpy_array_equal(c._codes , np.array([0,2,-1,0]))
+ # Remove null categories (GH 10156)
+ cases = [
+ ([1.0, 2.0, np.nan], [1.0, 2.0]),
+ (['a', 'b', None], ['a', 'b']),
+ ([pd.Timestamp('2012-05-01'), pd.NaT], [pd.Timestamp('2012-05-01')])
+ ]
+
+ null_values = [np.nan, None, pd.NaT]
+
+ for with_null, without in cases:
+ base = Categorical([], with_null)
+ expected = Categorical([], without)
+
+ for nullval in null_values:
+ result = base.remove_categories(nullval)
+ self.assert_categorical_equal(result, expected)
+
+ # Different null values are indistinguishable
+ for i, j in [(0, 1), (0, 2), (1, 2)]:
+ nulls = [null_values[i], null_values[j]]
+ self.assertRaises(ValueError, lambda: Categorical([], categories=nulls))
+
def test_isnull(self):
exp = np.array([False, False, True])
| Fixes GH #10156. This also makes different null values indistinguishable inside of remove_categories, but they're already indistinguishable in most other contexts:
``` .python
>>> pd.Categorical([], categories=[np.nan, None])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/categorical.py", line 289, in __init__
categories = self._validate_categories(categories)
File "pandas/core/categorical.py", line 447, in _validate_categories
raise ValueError('Categorical categories must be unique')
ValueError: Categorical categories must be unique
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10304 | 2015-06-06T18:53:16Z | 2015-06-09T23:45:58Z | 2015-06-09T23:45:58Z | 2015-06-10T13:40:04Z |
ENH: added rsplit to StringMethods | diff --git a/doc/source/api.rst b/doc/source/api.rst
index f5ba03afc9f19..5b6e536af0501 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -563,6 +563,7 @@ strings and apply several methods to it. These can be acccessed like
Series.str.slice
Series.str.slice_replace
Series.str.split
+ Series.str.rsplit
Series.str.startswith
Series.str.strip
Series.str.swapcase
diff --git a/doc/source/text.rst b/doc/source/text.rst
index d40445d8490f7..9bbb152f5a69b 100644
--- a/doc/source/text.rst
+++ b/doc/source/text.rst
@@ -88,6 +88,19 @@ Easy to expand this to return a DataFrame using ``expand``.
s2.str.split('_', expand=True)
+It is also possible to limit the number of splits:
+
+.. ipython:: python
+
+ s2.str.split('_', expand=True, n=1)
+
+``rsplit`` is similar to ``split`` except it works in the reverse direction,
+i.e., from the end of the string to the beginning of the string:
+
+.. ipython:: python
+
+ s2.str.rsplit('_', expand=True, n=1)
+
Methods like ``replace`` and ``findall`` take `regular expressions
<https://docs.python.org/2/library/re.html>`__, too:
@@ -239,6 +252,7 @@ Method Summary
:meth:`~Series.str.cat`,Concatenate strings
:meth:`~Series.str.split`,Split strings on delimiter
+ :meth:`~Series.str.rsplit`,Split strings on delimiter working from the end of the string
:meth:`~Series.str.get`,Index into each element (retrieve i-th element)
:meth:`~Series.str.join`,Join strings in each element of the Series with passed separator
:meth:`~Series.str.contains`,Return boolean array if each string contains pattern/regex
diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 9421ab0f841ac..f9214c175ca24 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -79,6 +79,8 @@ See the :ref:`documentation <basics.pipe>` for more. (:issue:`10129`)
.. _magrittr: https://github.com/smbache/magrittr
.. _R: http://www.r-project.org
+- Added `rsplit` to Index/Series StringMethods (:issue:`10303`)
+
.. _whatsnew_0162.enhancements.other:
Other enhancements
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 78ae4fba02033..59894d0800895 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -734,6 +734,35 @@ def str_split(arr, pat=None, n=None):
return res
+def str_rsplit(arr, pat=None, n=None):
+ """
+ Split each string in the Series/Index by the given delimiter
+ string, starting at the end of the string and working to the front.
+ Equivalent to :meth:`str.rsplit`.
+
+ .. versionadded:: 0.16.2
+
+ Parameters
+ ----------
+ pat : string, default None
+ Separator to split on. If None, splits on whitespace
+ n : int, default -1 (all)
+ None, 0 and -1 will be interpreted as return all splits
+ expand : bool, default False
+ * If True, return DataFrame/MultiIndex expanding dimensionality.
+ * If False, return Series/Index.
+
+ Returns
+ -------
+ split : Series/Index or DataFrame/MultiIndex of objects
+ """
+ if n is None or n == 0:
+ n = -1
+ f = lambda x: x.rsplit(pat, n)
+ res = _na_map(f, arr)
+ return res
+
+
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series/Index
@@ -1115,6 +1144,11 @@ def split(self, pat=None, n=-1, expand=False):
result = str_split(self.series, pat, n=n)
return self._wrap_result_expand(result, expand=expand)
+ @copy(str_rsplit)
+ def rsplit(self, pat=None, n=-1, expand=False):
+ result = str_rsplit(self.series, pat, n=n)
+ return self._wrap_result_expand(result, expand=expand)
+
_shared_docs['str_partition'] = ("""
Split the string at the %(side)s occurrence of `sep`, and return 3 elements
containing the part before the separator, the separator itself,
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index b0d8d89d65cf2..a66410320e816 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -676,6 +676,7 @@ def test_empty_str_methods(self):
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_list, empty.str.split('a'))
+ tm.assert_series_equal(empty_list, empty.str.rsplit('a'))
tm.assert_series_equal(empty_list, empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_list, empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
@@ -1212,15 +1213,15 @@ def test_split(self):
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(),
None, 1, 2.])
- rs = mixed.str.split('_')
- xp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA,
+ result = mixed.str.split('_')
+ exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA,
NA, NA, NA])
- tm.assert_isinstance(rs, Series)
- tm.assert_almost_equal(rs, xp)
+ tm.assert_isinstance(result, Series)
+ tm.assert_almost_equal(result, exp)
- rs = mixed.str.split('_', expand=False)
- tm.assert_isinstance(rs, Series)
- tm.assert_almost_equal(rs, xp)
+ result = mixed.str.split('_', expand=False)
+ tm.assert_isinstance(result, Series)
+ tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
@@ -1234,12 +1235,75 @@ def test_split(self):
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
+ # regex split
+ values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
+ result = values.str.split('[,_]')
+ exp = Series([[u('a'), u('b'), u('c')],
+ [u('c'), u('d'), u('e')], NA,
+ [u('f'), u('g'), u('h')]])
+ tm.assert_series_equal(result, exp)
+
+ def test_rsplit(self):
+ values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
+ result = values.str.rsplit('_')
+ exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
+ tm.assert_series_equal(result, exp)
+
+ # more than one char
+ values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
+ result = values.str.rsplit('__')
+ tm.assert_series_equal(result, exp)
+
+ result = values.str.rsplit('__', expand=False)
+ tm.assert_series_equal(result, exp)
+
+ # mixed
+ mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(),
+ None, 1, 2.])
+ result = mixed.str.rsplit('_')
+ exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA,
+ NA, NA, NA])
+ tm.assert_isinstance(result, Series)
+ tm.assert_almost_equal(result, exp)
+
+ result = mixed.str.rsplit('_', expand=False)
+ tm.assert_isinstance(result, Series)
+ tm.assert_almost_equal(result, exp)
+
+ # unicode
+ values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
+ result = values.str.rsplit('_')
+ exp = Series([[u('a'), u('b'), u('c')],
+ [u('c'), u('d'), u('e')], NA,
+ [u('f'), u('g'), u('h')]])
+ tm.assert_series_equal(result, exp)
+
+ result = values.str.rsplit('_', expand=False)
+ tm.assert_series_equal(result, exp)
+
+ # regex split is not supported by rsplit
+ values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
+ result = values.str.rsplit('[,_]')
+ exp = Series([[u('a,b_c')],
+ [u('c_d,e')],
+ NA,
+ [u('f,g,h')]])
+ tm.assert_series_equal(result, exp)
+
+ # setting max number of splits, make sure it's from reverse
+ values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
+ result = values.str.rsplit('_', n=1)
+ exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
+ tm.assert_series_equal(result, exp)
+
def test_split_noargs(self):
# #1859
s = Series(['Wes McKinney', 'Travis Oliphant'])
-
result = s.str.split()
- self.assertEqual(result[1], ['Travis', 'Oliphant'])
+ expected = ['Travis', 'Oliphant']
+ self.assertEqual(result[1], expected)
+ result = s.str.rsplit()
+ self.assertEqual(result[1], expected)
def test_split_maxsplit(self):
# re.split 0, str.split -1
@@ -1348,6 +1412,55 @@ def test_split_to_multiindex_expand(self):
with tm.assertRaisesRegexp(ValueError, "expand must be"):
idx.str.split('_', return_type="some_invalid_type")
+ def test_rsplit_to_dataframe_expand(self):
+ s = Series(['nosplit', 'alsonosplit'])
+ result = s.str.rsplit('_', expand=True)
+ exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
+ tm.assert_frame_equal(result, exp)
+
+ s = Series(['some_equal_splits', 'with_no_nans'])
+ result = s.str.rsplit('_', expand=True)
+ exp = DataFrame({0: ['some', 'with'], 1: ['equal', 'no'],
+ 2: ['splits', 'nans']})
+ tm.assert_frame_equal(result, exp)
+
+ result = s.str.rsplit('_', expand=True, n=2)
+ exp = DataFrame({0: ['some', 'with'], 1: ['equal', 'no'],
+ 2: ['splits', 'nans']})
+ tm.assert_frame_equal(result, exp)
+
+ result = s.str.rsplit('_', expand=True, n=1)
+ exp = DataFrame({0: ['some_equal', 'with_no'],
+ 1: ['splits', 'nans']})
+ tm.assert_frame_equal(result, exp)
+
+ s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
+ result = s.str.rsplit('_', expand=True)
+ exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
+ index=['preserve', 'me'])
+ tm.assert_frame_equal(result, exp)
+
+ def test_rsplit_to_multiindex_expand(self):
+ idx = Index(['nosplit', 'alsonosplit'])
+ result = idx.str.rsplit('_', expand=True)
+ exp = Index([np.array(['nosplit']), np.array(['alsonosplit'])])
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.nlevels, 1)
+
+ idx = Index(['some_equal_splits', 'with_no_nans'])
+ result = idx.str.rsplit('_', expand=True)
+ exp = MultiIndex.from_tuples([('some', 'equal', 'splits'),
+ ('with', 'no', 'nans')])
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.nlevels, 3)
+
+ idx = Index(['some_equal_splits', 'with_no_nans'])
+ result = idx.str.rsplit('_', expand=True, n=1)
+ exp = MultiIndex.from_tuples([('some_equal', 'splits'),
+ ('with_no', 'nans')])
+ tm.assert_index_equal(result, exp)
+ self.assertEqual(result.nlevels, 2)
+
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
| as a part of https://github.com/pydata/pandas/issues/9111
cc @sinhrks
| https://api.github.com/repos/pandas-dev/pandas/pulls/10303 | 2015-06-06T18:26:34Z | 2015-06-09T00:38:32Z | 2015-06-09T00:38:32Z | 2015-06-09T00:42:41Z |
BUG: read_csv does not set index name on an empty DataFrame | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 9421ab0f841ac..399402d243be7 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -142,6 +142,7 @@ Bug Fixes
- Bug in `plot` not defaulting to matplotlib `axes.grid` setting (:issue:`9792`)
- Bug in ``Series.align`` resets ``name`` when ``fill_value`` is specified (:issue:`10067`)
+- Bug in ``read_csv`` causing index name not to be set on an empty DataFrame (:issue:`10184`)
- Bug in ``SparseSeries.abs`` resets ``name`` (:issue:`10241`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 59ecb29146315..ce8ac1c93e596 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1170,7 +1170,9 @@ def read(self, nrows=None):
data = self._reader.read(nrows)
except StopIteration:
if nrows is None:
- return None, self.names, {}
+ return _get_empty_meta(self.orig_names,
+ self.index_col,
+ self.index_names)
else:
raise
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 1177149e7efa6..a8a5de38f257c 100755
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2294,6 +2294,13 @@ def test_chunk_begins_with_newline_whitespace(self):
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
+ def test_empty_with_index(self):
+ # GH 10184
+ data = 'x,y'
+ result = self.read_csv(StringIO(data), index_col=0)
+ expected = DataFrame([], columns=['y'], index=Index([], name='x'))
+ tm.assert_frame_equal(result, expected)
+
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
| Fixes GH #10184.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10302 | 2015-06-06T16:57:35Z | 2015-06-07T23:06:15Z | 2015-06-07T23:06:15Z | 2015-09-19T00:38:30Z |
DOC: #7828 Add json_normalize to api docs. | diff --git a/doc/source/api.rst b/doc/source/api.rst
index f5ba03afc9f19..29231f32c649a 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -47,11 +47,15 @@ Excel
JSON
~~~~
+.. currentmodule:: pandas.io.json
.. autosummary::
:toctree: generated/
read_json
+ json_normalize
+
+.. currentmodule:: pandas
HTML
~~~~
| closes #7828
Is this all we need?
| https://api.github.com/repos/pandas-dev/pandas/pulls/10301 | 2015-06-06T14:45:10Z | 2015-06-07T22:24:16Z | 2015-06-07T22:24:15Z | 2015-06-08T12:04:03Z |
ENH: #8750 add Series support for to_html and _repr_html_ | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 6bee0a1ceafb8..60e0e34c11db8 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -729,6 +729,7 @@ Serialization / IO / Conversion
Series.to_json
Series.to_sparse
Series.to_dense
+ Series.to_html
Series.to_string
Series.to_clipboard
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5106225cdd3c9..da9292a8f4758 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -952,6 +952,63 @@ def __unicode__(self):
return result
+ def _repr_footer(self):
+
+ namestr = u("Name: %s, ") % com.pprint_thing(
+ self.name) if self.name is not None else ""
+
+ # time series
+ if self.is_time_series:
+ if self.index.freq is not None:
+ freqstr = u('Freq: %s, ') % self.index.freqstr
+ else:
+ freqstr = u('')
+
+ return u('%s%sLength: %d') % (freqstr, namestr, len(self))
+
+ # Categorical
+ if com.is_categorical_dtype(self.dtype):
+ level_info = self.values._repr_categories_info()
+ return u('%sLength: %d, dtype: %s\n%s') % (namestr,
+ len(self),
+ str(self.dtype.name),
+ level_info)
+
+ # reg series
+ return u('%sLength: %d, dtype: %s') % (namestr,
+ len(self),
+ str(self.dtype.name))
+
+ def _repr_html_(self, *args, **kwargs):
+ df = self.to_frame()
+ if self.name is None:
+ df.columns = ['']
+ return df._repr_html_(*args, **kwargs)
+
+ def to_html(self, *args, **kwargs):
+ """
+ Render a Series as an HTML table.
+
+ `to_html`-specific options:
+
+ bold_rows : boolean, default True
+ Make the row labels bold in the output
+ classes : str or list or tuple, default None
+ CSS class(es) to apply to the resulting html table
+ escape : boolean, default True
+ Convert the characters <, >, and & to HTML-safe sequences.=
+ max_rows : int, optional
+ Maximum number of rows to show before truncating. If None, show
+ all.
+ max_cols : int, optional
+ Maximum number of columns to show before truncating. If None, show
+ all.
+ """
+ df = self.to_frame()
+ if self.name is None:
+ df.columns = ['']
+ return df.to_html(*args, **kwargs)
+
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
length=False, dtype=False, name=False, max_rows=None):
"""
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 22555a84c55de..5886d6452f9e7 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -3574,6 +3574,69 @@ def test_to_string_header(self):
exp = '0 0\n ..\n9 9'
self.assertEqual(res, exp)
+ def test_to_html(self):
+ expected_template = '''\
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>{column_name}</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>1</td>
+ </tr>
+ </tbody>
+</table>'''
+ column_representations = {
+ 'foo': 'foo',
+ None: '',
+ }
+ for series_name, column_name in column_representations.items():
+ s = pd.Series(range(2), dtype='int64', name=series_name)
+ result = s.to_html()
+ expected = expected_template.format(column_name=column_name)
+ self.assertEqual(result, expected)
+
+ def test_repr_html(self):
+ s = pd.Series(range(5), dtype='int64', name='foo')
+ self.assertTrue(hasattr(s, '_repr_html_'))
+ fmt.set_option('display.max_rows', 2)
+ result = s._repr_html_()
+ expected = u'''\
+<div{div_style}>
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>foo</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>...</th>
+ <td>...</td>
+ </tr>
+ <tr>
+ <th>4</th>
+ <td>4</td>
+ </tr>
+ </tbody>
+</table>
+<p>5 rows \xd7 1 columns</p>
+</div>'''.format(div_style=div_style)
+ self.assertEqual(result, expected)
+
class TestEngFormatter(tm.TestCase):
_multiprocess_can_split_ = True
| closes #5563
| https://api.github.com/repos/pandas-dev/pandas/pulls/10300 | 2015-06-06T09:07:44Z | 2016-07-21T16:16:11Z | null | 2023-05-11T01:13:01Z |
BUG: plotting grouped_hist with a single row frame #10214 | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 9421ab0f841ac..c3782402e499c 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -111,6 +111,7 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+- Bug where ``hist`` raises an error when a one row Series was given (:issue:`10214`)
- Bug where read_hdf store.select modifies the passed columns list when
multi-indexed (:issue:`7212`)
- Bug in ``Categorical`` repr with ``display.width`` of ``None`` in Python 3 (:issue:`10087`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 433645448fe2b..2c8123244c53c 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -3896,6 +3896,14 @@ def test_plotting_with_float_index_works(self):
df.groupby('def')['val'].apply(lambda x: x.plot())
tm.close()
+ def test_hist_single_row(self):
+ # GH10214
+ bins = np.arange(80, 100 + 2, 1)
+ df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
+ df["Mark"].hist(by=df["ByCol"], bins=bins)
+ df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
+ df["Mark"].hist(by=df["ByCol"], bins=bins)
+
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 76685e2589012..35893b9de8e75 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -2846,8 +2846,9 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
- if axes.ndim == 1 and len(axes) == 1:
- return axes[0]
+ if hasattr(axes, 'ndim'):
+ if axes.ndim == 1 and len(axes) == 1:
+ return axes[0]
return axes
| closes #10214
| https://api.github.com/repos/pandas-dev/pandas/pulls/10298 | 2015-06-06T05:29:13Z | 2015-06-09T11:54:08Z | null | 2015-06-09T11:54:08Z |
ENH: #3335 Pivot table support for setting name of margins column. | diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 89fe9463282b6..c5f090ea26271 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -1,6 +1,5 @@
# pylint: disable=E1103
-import warnings
from pandas import Series, DataFrame
from pandas.core.index import MultiIndex, Index
@@ -8,13 +7,16 @@
from pandas.tools.merge import concat
from pandas.tools.util import cartesian_product
from pandas.compat import range, lrange, zip
-from pandas.util.decorators import deprecate_kwarg
from pandas import compat
import pandas.core.common as com
import numpy as np
+DEFAULT_MARGIN_COLUMN_NAME = 'All'
+
+
def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
- fill_value=None, margins=False, dropna=True):
+ fill_value=None, margins=False, dropna=True,
+ margins_column=DEFAULT_MARGIN_COLUMN_NAME):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in the
pivot table will be stored in MultiIndex objects (hierarchical indexes) on
@@ -40,6 +42,9 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
+ margins_column : string, default 'All'
+ Name of the row / column that will contain the totals
+ when margins is True.
Examples
--------
@@ -127,7 +132,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
m = MultiIndex.from_arrays(cartesian_product(table.columns.levels))
table = table.reindex_axis(m, axis=1)
except AttributeError:
- pass # it's a single level or a series
+ pass # it's a single level or a series
if isinstance(table, DataFrame):
if isinstance(table.columns, MultiIndex):
@@ -140,7 +145,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
if margins:
table = _add_margins(table, data, values, rows=index,
- cols=columns, aggfunc=aggfunc)
+ cols=columns, aggfunc=aggfunc,
+ margins_column=margins_column)
# discard the top level
if values_passed and not values_multi:
@@ -155,28 +161,50 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
DataFrame.pivot_table = pivot_table
-def _add_margins(table, data, values, rows, cols, aggfunc):
+def _add_margins(table, data, values, rows, cols, aggfunc,
+ margins_column=DEFAULT_MARGIN_COLUMN_NAME):
+ exception_message = 'Must choose different value for margins_column'
+ for level in table.index.names:
+ if margins_column in table.index.get_level_values(level):
+ raise ValueError(exception_message)
+ # could be passed a Series object with no 'columns'
+ if hasattr(table, 'columns'):
+ for level in table.columns.names[1:]:
+ if margins_column in table.columns.get_level_values(level):
+ raise ValueError(exception_message)
- grand_margin = _compute_grand_margin(data, values, aggfunc)
+ grand_margin = _compute_grand_margin(data, values, aggfunc, margins_column)
if not values and isinstance(table, Series):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
- row_key = ('All',) + ('',) * (len(rows) - 1) if len(rows) > 1 else 'All'
- return table.append(Series({row_key: grand_margin['All']}))
+
+ if len(rows) > 1:
+ row_key = (margins_column,) + ('',) * (len(rows) - 1)
+ else:
+ row_key = margins_column
+
+ return table.append(Series({row_key: grand_margin[margins_column]}))
if values:
- marginal_result_set = _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin)
+ marginal_result_set = _generate_marginal_results(table, data, values,
+ rows, cols, aggfunc,
+ grand_margin,
+ margins_column)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
- marginal_result_set = _generate_marginal_results_without_values(table, data, rows, cols, aggfunc)
+ marginal_result_set = _generate_marginal_results_without_values(
+ table, data, rows, cols, aggfunc, margins_column)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
- key = ('All',) + ('',) * (len(rows) - 1) if len(rows) > 1 else 'All'
+ if len(rows) > 1:
+ key = (margins_column,) + ('',) * (len(rows) - 1)
+ else:
+ key = margins_column
row_margin = row_margin.reindex(result.columns)
# populate grand margin
@@ -195,7 +223,8 @@ def _add_margins(table, data, values, rows, cols, aggfunc):
return result
-def _compute_grand_margin(data, values, aggfunc):
+def _compute_grand_margin(data, values, aggfunc,
+ margins_column=DEFAULT_MARGIN_COLUMN_NAME):
if values:
grand_margin = {}
@@ -214,17 +243,19 @@ def _compute_grand_margin(data, values, aggfunc):
pass
return grand_margin
else:
- return {'All': aggfunc(data.index)}
+ return {margins_column: aggfunc(data.index)}
-def _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin):
+def _generate_marginal_results(table, data, values, rows, cols, aggfunc,
+ grand_margin,
+ margins_column=DEFAULT_MARGIN_COLUMN_NAME):
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
margin_keys = []
def _all_key(key):
- return (key, 'All') + ('',) * (len(cols) - 1)
+ return (key, margins_column) + ('',) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows + values].groupby(rows).agg(aggfunc)
@@ -269,15 +300,17 @@ def _all_key(key):
return result, margin_keys, row_margin
-def _generate_marginal_results_without_values(table, data, rows, cols, aggfunc):
+def _generate_marginal_results_without_values(
+ table, data, rows, cols, aggfunc,
+ margins_column=DEFAULT_MARGIN_COLUMN_NAME):
if len(cols) > 0:
# need to "interleave" the margins
margin_keys = []
def _all_key():
if len(cols) == 1:
- return 'All'
- return ('All', ) + ('', ) * (len(cols) - 1)
+ return margins_column
+ return (margins_column, ) + ('', ) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows].groupby(rows).apply(aggfunc)
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index bb95234657ec2..e82f6ba4e505a 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -7,6 +7,7 @@
from pandas import DataFrame, Series, Index, MultiIndex, Grouper
from pandas.tools.merge import concat
from pandas.tools.pivot import pivot_table, crosstab
+from pandas.tools.pivot import DEFAULT_MARGIN_COLUMN_NAME
from pandas.compat import range, u, product
import pandas.util.testing as tm
@@ -224,32 +225,44 @@ def test_pivot_with_tz(self):
tm.assert_frame_equal(pv, expected)
def test_margins(self):
- def _check_output(res, col, index=['A', 'B'], columns=['C']):
- cmarg = res['All'][:-1]
- exp = self.data.groupby(index)[col].mean()
- tm.assert_series_equal(cmarg, exp, check_names=False)
- self.assertEqual(cmarg.name, 'All')
-
- res = res.sortlevel()
- rmarg = res.xs(('All', ''))[:-1]
- exp = self.data.groupby(columns)[col].mean()
- tm.assert_series_equal(rmarg, exp, check_names=False)
- self.assertEqual(rmarg.name, ('All', ''))
-
- gmarg = res['All']['All', '']
- exp = self.data[col].mean()
- self.assertEqual(gmarg, exp)
+ def _check_output(result, values_col, index=['A', 'B'],
+ columns=['C'],
+ margins_col=DEFAULT_MARGIN_COLUMN_NAME):
+ col_margins = result.ix[:-1, margins_col]
+ expected_col_margins = self.data.groupby(index)[values_col].mean()
+ tm.assert_series_equal(col_margins, expected_col_margins,
+ check_names=False)
+ self.assertEqual(col_margins.name, margins_col)
+
+ result = result.sortlevel()
+ index_margins = result.ix[(margins_col, '')].iloc[:-1]
+ expected_ix_margins = self.data.groupby(columns)[values_col].mean()
+ tm.assert_series_equal(index_margins, expected_ix_margins,
+ check_names=False)
+ self.assertEqual(index_margins.name, (margins_col, ''))
+
+ grand_total_margins = result.loc[(margins_col, ''), margins_col]
+ expected_total_margins = self.data[values_col].mean()
+ self.assertEqual(grand_total_margins, expected_total_margins)
# column specified
- table = self.data.pivot_table('D', index=['A', 'B'], columns='C',
- margins=True, aggfunc=np.mean)
- _check_output(table, 'D')
+ result = self.data.pivot_table(values='D', index=['A', 'B'],
+ columns='C',
+ margins=True, aggfunc=np.mean)
+ _check_output(result, 'D')
+
+ # Set a different margins_column (not 'All')
+ result = self.data.pivot_table(values='D', index=['A', 'B'],
+ columns='C',
+ margins=True, aggfunc=np.mean,
+ margins_column='Totals')
+ _check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
- for valcol in table.columns.levels[0]:
- _check_output(table[valcol], valcol)
+ for value_col in table.columns.levels[0]:
+ _check_output(table[value_col], value_col)
# no col
@@ -257,49 +270,61 @@ def _check_output(res, col, index=['A', 'B'], columns=['C']):
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
- for valcol in table.columns:
- gmarg = table[valcol]['All', '']
- self.assertEqual(gmarg, self.data[valcol].mean())
-
- # this is OK
- table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
- aggfunc='mean')
+ for value_col in table.columns:
+ totals = table.loc[(DEFAULT_MARGIN_COLUMN_NAME, ''), value_col]
+ self.assertEqual(totals, self.data[value_col].mean())
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
tm.assert_isinstance(rtable, Series)
+
+ table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
+ aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
- gmarg = table[item]['All', '']
- self.assertEqual(gmarg, self.data[item].mean())
+ totals = table.loc[(DEFAULT_MARGIN_COLUMN_NAME, ''), item]
+ self.assertEqual(totals, self.data[item].mean())
# issue number #8349: pivot_table with margins and dictionary aggfunc
+ data = [
+ {'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2013,
+ 'MONTH': 12, 'DAYS': 3, 'SALARY': 17},
+ {'JOB': 'Employ', 'NAME':
+ 'Mary', 'YEAR': 2013, 'MONTH': 12, 'DAYS': 5, 'SALARY': 23},
+ {'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
+ 'MONTH': 1, 'DAYS': 10, 'SALARY': 100},
+ {'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
+ 'MONTH': 1, 'DAYS': 11, 'SALARY': 110},
+ {'JOB': 'Employ', 'NAME': 'Mary', 'YEAR': 2014,
+ 'MONTH': 1, 'DAYS': 15, 'SALARY': 200},
+ {'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
+ 'MONTH': 2, 'DAYS': 8, 'SALARY': 80},
+ {'JOB': 'Employ', 'NAME': 'Mary', 'YEAR': 2014,
+ 'MONTH': 2, 'DAYS': 5, 'SALARY': 190},
+ ]
- df=DataFrame([ {'JOB':'Worker','NAME':'Bob' ,'YEAR':2013,'MONTH':12,'DAYS': 3,'SALARY': 17},
- {'JOB':'Employ','NAME':'Mary','YEAR':2013,'MONTH':12,'DAYS': 5,'SALARY': 23},
- {'JOB':'Worker','NAME':'Bob' ,'YEAR':2014,'MONTH': 1,'DAYS':10,'SALARY':100},
- {'JOB':'Worker','NAME':'Bob' ,'YEAR':2014,'MONTH': 1,'DAYS':11,'SALARY':110},
- {'JOB':'Employ','NAME':'Mary','YEAR':2014,'MONTH': 1,'DAYS':15,'SALARY':200},
- {'JOB':'Worker','NAME':'Bob' ,'YEAR':2014,'MONTH': 2,'DAYS': 8,'SALARY': 80},
- {'JOB':'Employ','NAME':'Mary','YEAR':2014,'MONTH': 2,'DAYS': 5,'SALARY':190} ])
-
- df=df.set_index(['JOB','NAME','YEAR','MONTH'],drop=False,append=False)
-
- rs=df.pivot_table( index=['JOB','NAME'],
- columns=['YEAR','MONTH'],
- values=['DAYS','SALARY'],
- aggfunc={'DAYS':'mean','SALARY':'sum'},
- margins=True)
+ df = DataFrame(data)
- ex=df.pivot_table(index=['JOB','NAME'],columns=['YEAR','MONTH'],values=['DAYS'],aggfunc='mean',margins=True)
+ df = df.set_index(['JOB', 'NAME', 'YEAR', 'MONTH'], drop=False,
+ append=False)
- tm.assert_frame_equal(rs['DAYS'], ex['DAYS'])
+ result = df.pivot_table(index=['JOB', 'NAME'],
+ columns=['YEAR', 'MONTH'],
+ values=['DAYS', 'SALARY'],
+ aggfunc={'DAYS': 'mean', 'SALARY': 'sum'},
+ margins=True)
- ex=df.pivot_table(index=['JOB','NAME'],columns=['YEAR','MONTH'],values=['SALARY'],aggfunc='sum',margins=True)
+ expected = df.pivot_table(index=['JOB', 'NAME'],
+ columns=['YEAR', 'MONTH'], values=['DAYS'],
+ aggfunc='mean', margins=True)
- tm.assert_frame_equal(rs['SALARY'], ex['SALARY'])
+ tm.assert_frame_equal(result['DAYS'], expected['DAYS'])
+ expected = df.pivot_table(index=['JOB', 'NAME'],
+ columns=['YEAR', 'MONTH'], values=['SALARY'],
+ aggfunc='sum', margins=True)
+ tm.assert_frame_equal(result['SALARY'], expected['SALARY'])
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
@@ -402,6 +427,24 @@ def test_margins_no_values_two_row_two_cols(self):
result = self.data[['A', 'B', 'C', 'D']].pivot_table(index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
self.assertEqual(result.All.tolist(), [3.0, 1.0, 4.0, 3.0, 11.0])
+ def test_pivot_table_with_margins_set_margin_column(self):
+ for margin_column in ['foo', 'one']:
+ with self.assertRaises(ValueError):
+ # multi-index index
+ pivot_table(self.data, values='D', index=['A', 'B'],
+ columns=['C'], margins=True,
+ margins_column=margin_column)
+ with self.assertRaises(ValueError):
+ # multi-index column
+ pivot_table(self.data, values='D', index=['C'],
+ columns=['A', 'B'], margins=True,
+ margins_column=margin_column)
+ with self.assertRaises(ValueError):
+ # non-multi-index index/column
+ pivot_table(self.data, values='D', index=['A'],
+ columns=['B'], margins=True,
+ margins_column=margin_column)
+
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch' : 'A A A A A A A B'.split(),
@@ -678,17 +721,17 @@ def test_crosstab_margins(self):
self.assertEqual(result.index.names, ('a',))
self.assertEqual(result.columns.names, ['b', 'c'])
- all_cols = result['All', '']
+ all_cols = result[DEFAULT_MARGIN_COLUMN_NAME, '']
exp_cols = df.groupby(['a']).size().astype('i8')
- exp_cols = exp_cols.append(Series([len(df)], index=['All']))
- exp_cols.name = ('All', '')
+ exp_cols = exp_cols.append(Series([len(df)], index=[DEFAULT_MARGIN_COLUMN_NAME]))
+ exp_cols.name = (DEFAULT_MARGIN_COLUMN_NAME, '')
tm.assert_series_equal(all_cols, exp_cols)
- all_rows = result.ix['All']
+ all_rows = result.ix[DEFAULT_MARGIN_COLUMN_NAME]
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
- exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
- exp_rows.name = 'All'
+ exp_rows = exp_rows.append(Series([len(df)], index=[(DEFAULT_MARGIN_COLUMN_NAME, '')]))
+ exp_rows.name = DEFAULT_MARGIN_COLUMN_NAME
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
| ref #3335.
Adds margin_column parameter to pivot_table so that user can set it to
something other than 'All'.
Raises ValueError exception if there is a conflict between the value of
margin_column and one of the other values appearing in the indices of
the pivot table.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10296 | 2015-06-06T04:41:00Z | 2015-11-10T01:26:51Z | null | 2015-11-12T07:52:30Z |
ENH: #2679 - DataFrame.to_html() urls_as_links parameter. | diff --git a/pandas/core/format.py b/pandas/core/format.py
index 429051b5815fa..5d2009f79fe29 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -6,6 +6,7 @@
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
+from pandas.io.common import _is_url
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import(StringIO, lzip, range, map, zip, reduce, u,
@@ -307,7 +308,8 @@ def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
- max_cols=None, show_dimensions=False, **kwds):
+ max_cols=None, show_dimensions=False, urls_as_links=False,
+ **kwds):
self.frame = frame
self.buf = buf if buf is not None else StringIO()
self.show_index_names = index_names
@@ -329,6 +331,7 @@ def __init__(self, frame, buf=None, columns=None, col_space=None,
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
+ self.urls_as_links = urls_as_links
if justify is None:
self.justify = get_option("display.colheader_justify")
@@ -821,6 +824,7 @@ def __init__(self, formatter, classes=None, max_rows=None, max_cols=None):
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
+ self.urls_as_links = self.fmt.urls_as_links
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
@@ -853,6 +857,11 @@ def _write_cell(self, s, kind='td', indent=0, tags=None):
else:
esc = {}
rs = com.pprint_thing(s, escape_chars=esc).strip()
+ if self.urls_as_links and isinstance(s, compat.string_types):
+ s = s.strip()
+ if _is_url(s):
+ rs = '<a href="{url}">{escaped_url}</a>'.format(url=s,
+ escaped_url=rs)
self.write(
'%s%s</%s>' % (start_tag, rs, kind), indent)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ab6f11a4b8d5b..aec047c424748 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1349,7 +1349,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, bold_rows=True, classes=None, escape=True,
- max_rows=None, max_cols=None, show_dimensions=False):
+ max_rows=None, max_cols=None, show_dimensions=False,
+ urls_as_links=False):
"""
Render a DataFrame as an HTML table.
@@ -1367,6 +1368,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
+ urls_as_links : boolean, default False
+ Convert urls to HTML links.
"""
@@ -1387,7 +1390,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
- show_dimensions=show_dimensions)
+ show_dimensions=show_dimensions,
+ urls_as_links=urls_as_links)
formatter.to_html(classes=classes)
if buf is None:
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 4d21190e7a50d..fc119ba11f919 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -731,6 +731,77 @@ def test_to_html_multiindex_sparsify_false_multi_sparse(self):
</table>"""
self.assertEqual(result, expected)
+ def test_to_html_with_hyperlinks(self):
+ data = [
+ {
+ 'foo': 0,
+ 'bar': 'http://pandas.pydata.org/',
+ None: 'pydata.org',
+ },
+ {
+ 'foo': 0,
+ 'bar': 'http://pandas.pydata.org/?q1=a&q2=b',
+ None: 'pydata.org',
+ },
+ ]
+ df = DataFrame(data, columns=['foo', 'bar', None],
+ index=range(len(data)))
+
+ result_no_links = df.to_html()
+ result_with_links = df.to_html(urls_as_links=True)
+ expected_no_links = """\
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>foo</th>
+ <th>bar</th>
+ <th>None</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ <td>http://pandas.pydata.org/</td>
+ <td>pydata.org</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>0</td>
+ <td>http://pandas.pydata.org/?q1=a&q2=b</td>
+ <td>pydata.org</td>
+ </tr>
+ </tbody>
+</table>"""
+ expected_with_links = """\
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>foo</th>
+ <th>bar</th>
+ <th>None</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ <td><a href="http://pandas.pydata.org/">http://pandas.pydata.org/</a></td>
+ <td>pydata.org</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>0</td>
+ <td><a href="http://pandas.pydata.org/?q1=a&q2=b">http://pandas.pydata.org/?q1=a&q2=b</a></td>
+ <td>pydata.org</td>
+ </tr>
+ </tbody>
+</table>"""
+ self.assertEqual(result_with_links, expected_with_links)
+ self.assertEqual(result_no_links, expected_no_links)
+
def test_to_html_multiindex_sparsify(self):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
| New urls_as_links boolean parameter that will output URLs as href HTML
links. ref #2679
Thanks to @tdas14 for the initial code.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10294 | 2015-06-06T01:34:57Z | 2015-11-02T12:09:03Z | null | 2022-10-13T00:16:35Z |
BUG: TimedeltaIndex slicing may reset freq | diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt
index 49f143e158abf..91acd3d670e9f 100644
--- a/doc/source/whatsnew/v0.16.2.txt
+++ b/doc/source/whatsnew/v0.16.2.txt
@@ -80,7 +80,7 @@ Bug Fixes
- Bug in ``Series.align`` resets ``name`` when ``fill_value`` is specified (:issue:`10067`)
- Bug in ``SparseSeries.abs`` resets ``name`` (:issue:`10241`)
-
+- Bug in ``TimedeltaIndex`` slicing may reset freq (:issue:`10292`)
- Bug in GroupBy.get_group raises ValueError when group key contains NaT (:issue:`6992`)
- Bug in ``SparseSeries`` constructor ignores input data name (:issue:`10258`)
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 88b4117d4807c..71ff0f6c9c56c 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -69,6 +69,35 @@ def __contains__(self, key):
except (KeyError, TypeError, ValueError):
return False
+ def __getitem__(self, key):
+ getitem = self._data.__getitem__
+ if np.isscalar(key):
+ val = getitem(key)
+ return self._box_func(val)
+ else:
+ if com.is_bool_indexer(key):
+ key = np.asarray(key)
+ if key.all():
+ key = slice(0, None, None)
+ else:
+ key = lib.maybe_booleans_to_slice(key.view(np.uint8))
+
+ attribs = self._get_attributes_dict()
+
+ freq = None
+ if isinstance(key, slice):
+ if self.freq is not None and key.step is not None:
+ freq = key.step * self.freq
+ else:
+ freq = self.freq
+ attribs['freq'] = freq
+
+ result = getitem(key)
+ if result.ndim > 1:
+ return result
+
+ return self._simple_new(result, **attribs)
+
@property
def freqstr(self):
""" return the frequency object as a string if its set, otherwise None """
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 745c536914e47..35400b3588d3b 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1349,32 +1349,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
else:
raise
- def __getitem__(self, key):
- getitem = self._data.__getitem__
- if np.isscalar(key):
- val = getitem(key)
- return Timestamp(val, offset=self.offset, tz=self.tz)
- else:
- if com.is_bool_indexer(key):
- key = np.asarray(key)
- if key.all():
- key = slice(0,None,None)
- else:
- key = lib.maybe_booleans_to_slice(key.view(np.uint8))
-
- new_offset = None
- if isinstance(key, slice):
- if self.offset is not None and key.step is not None:
- new_offset = key.step * self.offset
- else:
- new_offset = self.offset
-
- result = getitem(key)
- if result.ndim > 1:
- return result
-
- return self._simple_new(result, self.name, new_offset, self.tz)
-
# alias to offset
def _get_freq(self):
return self.offset
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index de68dd763d68c..f1871e78e21a1 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -244,7 +244,7 @@ def _generate(cls, start, end, periods, name, offset, closed=None):
@property
def _box_func(self):
- return lambda x: Timedelta(x,unit='ns')
+ return lambda x: Timedelta(x, unit='ns')
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
@@ -747,25 +747,6 @@ def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# try to find a the dates
return (lhs_mask & rhs_mask).nonzero()[0]
- def __getitem__(self, key):
- getitem = self._data.__getitem__
- if np.isscalar(key):
- val = getitem(key)
- return Timedelta(val)
- else:
- if com.is_bool_indexer(key):
- key = np.asarray(key)
- if key.all():
- key = slice(0,None,None)
- else:
- key = lib.maybe_booleans_to_slice(key.view(np.uint8))
-
- result = getitem(key)
- if result.ndim > 1:
- return result
-
- return self._simple_new(result, self.name)
-
def searchsorted(self, key, side='left'):
if isinstance(key, (np.ndarray, Index)):
key = np.array(key, dtype=_TD_DTYPE, copy=False)
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 55482401a20f4..fc432d5236f62 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -297,6 +297,38 @@ def test_nonunique_contains(self):
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
+ def test_getitem(self):
+ idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
+ idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
+
+ for idx in [idx1, idx2]:
+ result = idx[0]
+ self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
+
+ result = idx[0:5]
+ expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
+ tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[0:10:2]
+ expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
+ tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[-20:-5:3]
+ expected = pd.date_range('2011-01-12', '2011-01-25', freq='3D',
+ tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[4::-1]
+ expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
+ '2011-01-02', '2011-01-01'],
+ freq='-1D', tz=idx.tz, name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
class TestTimedeltaIndexOps(Ops):
@@ -742,6 +774,33 @@ def test_unknown_attribute(self):
self.assertNotIn('foo',ts.__dict__.keys())
self.assertRaises(AttributeError,lambda : ts.foo)
+ def test_getitem(self):
+ idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
+
+ for idx in [idx1]:
+ result = idx[0]
+ self.assertEqual(result, pd.Timedelta('1 day'))
+
+ result = idx[0:5]
+ expected = pd.timedelta_range('1 day', '5 day', freq='D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[0:10:2]
+ expected = pd.timedelta_range('1 day', '9 day', freq='2D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[-20:-5:3]
+ expected = pd.timedelta_range('12 day', '25 day', freq='3D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
+
+ result = idx[4::-1]
+ expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'],
+ freq='-1D', name='idx')
+ self.assert_index_equal(result, expected)
+ self.assertEqual(result.freq, expected.freq)
class TestPeriodIndexOps(Ops):
| `DatetimeIndex` preserves the freq after slicing.
```
di = pd.date_range('2001', '2005', freq='D')
di[1:5:2]
# DatetimeIndex(['2001-01-02', '2001-01-04'], dtype='datetime64[ns]', freq='2D', tz=None)
```
But `TimedeltaIndex` doesn't.
```
tdi = pd.timedelta_range('1day', '5day', freq='D')
tdi[1:5:2]
# TimedeltaIndex(['2 days', '4 days'], dtype='timedelta64[ns]', freq=None)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10292 | 2015-06-05T23:31:04Z | 2015-06-07T23:02:54Z | 2015-06-07T23:02:54Z | 2015-06-08T13:20:31Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.