title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
TST: Moving testing method outside of TestCase | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index ef9d7d1566ec2..01f5a4dfb2e30 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -94,9 +94,7 @@ def assert_numpy_array_equal(self, np_array, assert_equal):
If the expected array includes `np.nan` use `assert_numpy_array_equivalent(...)`.
"""
- if np.array_equal(np_array, assert_equal):
- return
- raise AssertionError('{0} is not equal to {1}.'.format(np_array, assert_equal))
+ return assert_numpy_array_equal(np_array, assert_equal)
def round_trip_pickle(self, obj, path=None):
if path is None:
@@ -115,9 +113,8 @@ def assert_numpy_array_equivalent(self, np_array, assert_equal, strict_nan=False
similar to `assert_numpy_array_equal()`. If the expected array includes `np.nan` use this
function.
"""
- if array_equivalent(np_array, assert_equal, strict_nan=strict_nan):
- return
- raise AssertionError('{0} is not equivalent to {1}.'.format(np_array, assert_equal))
+ return assert_numpy_array_equivalent(np_array, assert_equal, strict_nan)
+
def assertIs(self, first, second, msg=''):
"""Checks that 'first' is 'second'"""
@@ -586,6 +583,34 @@ def isiterable(obj):
def is_sorted(seq):
return assert_almost_equal(seq, np.sort(np.array(seq)))
+def assert_numpy_array_equal(np_array, assert_equal):
+ """Checks that 'np_array' is equal to 'assert_equal'
+
+ Note that the expected array should not contain `np.nan`! Two numpy arrays are equal if all
+ elements are equal, which is not possible if `np.nan` is such an element!
+
+ If the expected array includes `np.nan` use `assert_numpy_array_equivalent(...)`.
+ """
+ if np.array_equal(np_array, assert_equal):
+ return
+ raise AssertionError('{0} is not equal to {1}.'.format(np_array, assert_equal))
+
+
+def assert_numpy_array_equivalent(np_array, assert_equal, strict_nan=False):
+ """Checks that 'np_array' is equivalent to 'assert_equal'
+
+ Two numpy arrays are equivalent if the arrays have equal non-NaN elements, and
+ `np.nan` in corresponding locations.
+
+ If the the expected array does not contain `np.nan` `assert_numpy_array_equivalent` is the
+ similar to `assert_numpy_array_equal()`. If the expected array includes `np.nan` use this
+ function.
+ """
+ if array_equivalent(np_array, assert_equal, strict_nan=strict_nan):
+ return True
+ raise AssertionError('{0} is not equivalent to {1}.'.format(np_array, assert_equal))
+
+
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type=False,
| Make assert_numpy_array_equal available without subclassing TestCase; see #8023
| https://api.github.com/repos/pandas-dev/pandas/pulls/8104 | 2014-08-23T23:37:38Z | 2014-12-01T00:57:09Z | null | 2014-12-01T00:57:26Z |
BUG: pivot_table raises KeyError with nameless index and columns | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 4a39dd73da7d0..7f8a8be356e59 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -628,7 +628,7 @@ Bug Fixes
-
+- Bug in ``pivot_table`` performed with nameless ``index`` and ``columns`` raises ``KeyError`` (:issue:`8103`)
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 83df908d8033f..61150f0aeacd0 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -116,7 +116,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
table = agged
if table.index.nlevels > 1:
- to_unstack = [agged.index.names[i]
+ to_unstack = [agged.index.names[i] or i
for i in range(len(index), len(keys))]
table = agged.unstack(to_unstack)
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index 7e52c8c333dbf..eded5f26f6521 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -527,6 +527,50 @@ def test_pivot_datetime_tz(self):
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
+ def test_pivot_dtaccessor(self):
+ # GH 8103
+ dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00',
+ '2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00']
+ dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00', '2013-01-01 15:00:00',
+ '2013-02-01 15:00:00', '2013-02-01 15:00:00', '2013-02-01 15:00:00']
+ df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
+ 'dt1': dates1, 'dt2': dates2,
+ 'value1': np.arange(6,dtype='int64'), 'value2': [1, 2] * 3})
+ df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
+ df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
+
+ result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
+ values='value1')
+
+ exp_idx = Index(['a', 'b'], name='label')
+ expected = DataFrame({7: [0, 3], 8: [1, 4], 9:[2, 5]},
+ index=exp_idx, columns=[7, 8, 9])
+ tm.assert_frame_equal(result, expected)
+
+ result = pivot_table(df, index=df['dt2'].dt.month, columns=df['dt1'].dt.hour,
+ values='value1')
+
+ expected = DataFrame({7: [0, 3], 8: [1, 4], 9:[2, 5]},
+ index=[1, 2], columns=[7, 8, 9])
+ tm.assert_frame_equal(result, expected)
+
+ result = pivot_table(df, index=df['dt2'].dt.year,
+ columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
+ values='value1')
+
+ exp_col = MultiIndex.from_arrays([[7, 7, 8, 8, 9, 9], [1, 2] * 3])
+ expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]]),
+ index=[2013], columns=exp_col)
+ tm.assert_frame_equal(result, expected)
+
+ result = pivot_table(df, index=np.array(['X', 'X', 'X', 'X', 'Y', 'Y']),
+ columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
+ values='value1')
+ expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
+ [np.nan, np.nan, np.nan, 4, np.nan, 5]]),
+ index=['X', 'Y'], columns=exp_col)
+ tm.assert_frame_equal(result, expected)
+
class TestCrosstab(tm.TestCase):
| `pivot_table` raises `KeyError` when args passed to `index` and `columns` don't have `name` attribute. The fix looks especially useful when pivotting by `dt` accessor.
```
df = pd.DataFrame({'dt1': [datetime.datetime(2011, 1, 1), datetime.datetime(2011, 2, 1),
datetime.datetime(2011, 3, 1), datetime.datetime(2011, 4, 1),
datetime.datetime(2011, 5, 1), datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1), datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1)],
'col1': 'A A A B B B C C C'.split(),
'val1': [1, 2, 3, 4, 5, 6, 7, 8, 9]})
pd.pivot_table(df, index=df['dt1'].dt.month, columns=df['dt1'].dt.year, values='val1'))
# KeyError: 'Level None not found'
```
### After fix
```
pd.pivot_table(df, index=df['dt1'].dt.month, columns=df['dt1'].dt.year, values='val1'))
# 2011 2012
#1 1 6
#2 2 7
#3 3 8
#4 4 9
#5 5 NaN
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/8103 | 2014-08-23T12:22:07Z | 2014-08-29T18:58:35Z | 2014-08-29T18:58:35Z | 2014-08-30T01:19:10Z |
BENCH: add benchmarks for SQL | diff --git a/vb_suite/io_sql.py b/vb_suite/io_sql.py
new file mode 100644
index 0000000000000..696f66ec3137c
--- /dev/null
+++ b/vb_suite/io_sql.py
@@ -0,0 +1,126 @@
+from vbench.api import Benchmark
+from datetime import datetime
+
+common_setup = """from pandas_vb_common import *
+import sqlite3
+import sqlalchemy
+from sqlalchemy import create_engine
+
+engine = create_engine('sqlite:///:memory:')
+con = sqlite3.connect(':memory:')
+"""
+
+sdate = datetime(2014, 6, 1)
+
+
+#-------------------------------------------------------------------------------
+# to_sql
+
+setup = common_setup + """
+index = [rands(10) for _ in xrange(10000)]
+df = DataFrame({'float1' : randn(10000),
+ 'float2' : randn(10000),
+ 'string1' : ['foo'] * 10000,
+ 'bool1' : [True] * 10000,
+ 'int1' : np.random.randint(0, 100000, size=10000)},
+ index=index)
+"""
+
+sql_write_sqlalchemy = Benchmark("df.to_sql('test1', engine, if_exists='replace')",
+ setup, start_date=sdate)
+
+sql_write_fallback = Benchmark("df.to_sql('test1', con, if_exists='replace')",
+ setup, start_date=sdate)
+
+
+#-------------------------------------------------------------------------------
+# read_sql
+
+setup = common_setup + """
+index = [rands(10) for _ in xrange(10000)]
+df = DataFrame({'float1' : randn(10000),
+ 'float2' : randn(10000),
+ 'string1' : ['foo'] * 10000,
+ 'bool1' : [True] * 10000,
+ 'int1' : np.random.randint(0, 100000, size=10000)},
+ index=index)
+df.to_sql('test2', engine, if_exists='replace')
+df.to_sql('test2', con, if_exists='replace')
+"""
+
+sql_read_query_sqlalchemy = Benchmark("read_sql_query('SELECT * FROM test2', engine)",
+ setup, start_date=sdate)
+
+sql_read_query_fallback = Benchmark("read_sql_query('SELECT * FROM test2', con)",
+ setup, start_date=sdate)
+
+sql_read_table_sqlalchemy = Benchmark("read_sql_table('test2', engine)",
+ setup, start_date=sdate)
+
+
+#-------------------------------------------------------------------------------
+# type specific write
+
+setup = common_setup + """
+df = DataFrame({'float' : randn(10000),
+ 'string' : ['foo'] * 10000,
+ 'bool' : [True] * 10000,
+ 'datetime' : date_range('2000-01-01', periods=10000, freq='s')})
+df.loc[1000:3000, 'float'] = np.nan
+"""
+
+sql_float_write_sqlalchemy = \
+ Benchmark("df[['float']].to_sql('test_float', engine, if_exists='replace')",
+ setup, start_date=sdate)
+
+sql_float_write_fallback = \
+ Benchmark("df[['float']].to_sql('test_float', con, if_exists='replace')",
+ setup, start_date=sdate)
+
+sql_string_write_sqlalchemy = \
+ Benchmark("df[['string']].to_sql('test_string', engine, if_exists='replace')",
+ setup, start_date=sdate)
+
+sql_string_write_fallback = \
+ Benchmark("df[['string']].to_sql('test_string', con, if_exists='replace')",
+ setup, start_date=sdate)
+
+sql_datetime_write_sqlalchemy = \
+ Benchmark("df[['datetime']].to_sql('test_datetime', engine, if_exists='replace')",
+ setup, start_date=sdate)
+
+#sql_datetime_write_fallback = \
+# Benchmark("df[['datetime']].to_sql('test_datetime', con, if_exists='replace')",
+# setup3, start_date=sdate)
+
+#-------------------------------------------------------------------------------
+# type specific read
+
+setup = common_setup + """
+df = DataFrame({'float' : randn(10000),
+ 'datetime' : date_range('2000-01-01', periods=10000, freq='s')})
+df['datetime_string'] = df['datetime'].map(str)
+
+df.to_sql('test_type', engine, if_exists='replace')
+df[['float', 'datetime_string']].to_sql('test_type', con, if_exists='replace')
+"""
+
+sql_float_read_query_sqlalchemy = \
+ Benchmark("read_sql_query('SELECT float FROM test_type', engine)",
+ setup, start_date=sdate)
+
+sql_float_read_table_sqlalchemy = \
+ Benchmark("read_sql_table('test_type', engine, columns=['float'])",
+ setup, start_date=sdate)
+
+sql_float_read_query_fallback = \
+ Benchmark("read_sql_query('SELECT float FROM test_type', con)",
+ setup, start_date=sdate)
+
+sql_datetime_read_as_native_sqlalchemy = \
+ Benchmark("read_sql_table('test_type', engine, columns=['datetime'])",
+ setup, start_date=sdate)
+
+sql_datetime_read_and_parse_sqlalchemy = \
+ Benchmark("read_sql_table('test_type', engine, columns=['datetime_string'], parse_dates=['datetime_string'])",
+ setup, start_date=sdate)
diff --git a/vb_suite/packers.py b/vb_suite/packers.py
index 403adbf289e1f..8d3d833ed9704 100644
--- a/vb_suite/packers.py
+++ b/vb_suite/packers.py
@@ -101,6 +101,27 @@ def remove(f):
packers_write_hdf_table = Benchmark("df2.to_hdf(f,'df',table=True)", setup, cleanup="remove(f)", start_date=start_date)
+#----------------------------------------------------------------------
+# sql
+
+setup = common_setup + """
+import sqlite3
+from sqlalchemy import create_engine
+engine = create_engine('sqlite:///:memory:')
+
+df2.to_sql('table', engine, if_exists='replace')
+"""
+
+packers_read_sql= Benchmark("pd.read_sql_table('table', engine)", setup, start_date=start_date)
+
+setup = common_setup + """
+import sqlite3
+from sqlalchemy import create_engine
+engine = create_engine('sqlite:///:memory:')
+"""
+
+packers_write_sql = Benchmark("df2.to_sql('table', engine, if_exists='replace')", setup, start_date=start_date)
+
#----------------------------------------------------------------------
# json
diff --git a/vb_suite/suite.py b/vb_suite/suite.py
index be9aa03801641..a16d183ae62e2 100644
--- a/vb_suite/suite.py
+++ b/vb_suite/suite.py
@@ -12,6 +12,7 @@
'index_object',
'indexing',
'io_bench',
+ 'io_sql',
'inference',
'hdfstore_bench',
'join_merge',
| xref #6701, #6416
WIP for now: intitial benchmarks for sql functions
- Where should the benches be placed? Because there is `io_bench.py`, `packers.py` and `parsers_vb.py` which all contain io related benchmarks. Or should I create a new file?
- The first tests are a generic mixed-type frame writing/reading, both with sqlalchemy (sqlite) and sqlite fallback
- I am going to add seperate benches for different types (float, string, datetime)
- other important things to test?
First run with `./test_perf.sh -b master -t HEAD -r sql` gives:
```
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
sql_write_fallback | 218.5094 | 219.5977 | 0.9950 |
sql_write_sqlalchemy | 347.1383 | 347.9416 | 0.9977 |
sql_read_query_fallback | 28.5973 | 28.6067 | 0.9997 |
sql_read_table_sqlalchemy | 39.0046 | 38.9366 | 1.0017 |
sql_read_query_fallback | 35.9294 | 35.4296 | 1.0141 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [3bcc9c2] : BENCH: add benchmarks for SQL
Base [86ecb99] : BUG: fix iat and at for Float64Index
```
You can see that the sqlalchemy version is somewhat slower than the fallback version.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8102 | 2014-08-23T11:07:57Z | 2014-09-09T08:17:36Z | 2014-09-09T08:17:36Z | 2014-09-09T08:17:37Z |
Added warning about attribute access on 'reserved' words | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 04aa07a49ba8a..a8f3f5f26cd9e 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -228,11 +228,17 @@ new column.
.. warning::
- You can use this access only if the index element is a valid python identifier, e.g. ``s.1`` is not allowed.
- see `here for an explanation of valid identifiers
+ See `here for an explanation of valid identifiers
<http://docs.python.org/2.7/reference/lexical_analysis.html#identifiers>`__.
- The attribute will not be available if it conflicts with an existing method name, e.g. ``s.min`` is not allowed.
+ - Similarly, the attribute will not be available if it conflicts with any of the following list: ``index``,
+ ``major_axis``, ``minor_axis``, ``items``, ``labels``.
+
+ - In any of these cases, standard indexing will still work, e.g. ``s['1']``, ``s['min']``, and ``s['index']`` will
+ access the corresponding element or column.
+
- The ``Series/Panel`` accesses are available starting in 0.13.0.
If you are using the IPython environment, you may also use tab-completion to
| closes https://github.com/pydata/pandas/issues/8082
| https://api.github.com/repos/pandas-dev/pandas/pulls/8100 | 2014-08-23T03:00:19Z | 2014-09-13T22:47:00Z | null | 2014-09-13T22:47:12Z |
TST: Fix timezone test to avoit dateutil issue | diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 2cd8539d27dd3..61fc3652fb8a4 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -138,7 +138,7 @@ def test_constructor_with_stringoffset(self):
def test_repr(self):
dates = ['2014-03-07', '2014-01-01 09:00', '2014-01-01 00:00:00.000000001']
- timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
+ timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/America/Los_Angeles']
freqs = ['D', 'M', 'S', 'N']
for date in dates:
| Closes #7993.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8099 | 2014-08-23T02:43:18Z | 2014-08-29T20:37:06Z | 2014-08-29T20:37:06Z | 2014-08-30T01:18:38Z |
TST: Fix boxplot test for python3 | diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 6435f8e741f96..1d51256e751b6 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -2626,7 +2626,7 @@ class TestDataFrameGroupByPlots(TestPlotBase):
def test_boxplot(self):
grouped = self.hist_df.groupby(by='gender')
axes = _check_plot_works(grouped.boxplot, return_type='axes')
- self._check_axes_shape(axes.values(), axes_num=2, layout=(1, 2))
+ self._check_axes_shape(list(axes.values()), axes_num=2, layout=(1, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
@@ -2638,7 +2638,7 @@ def test_boxplot(self):
grouped = df.groupby(level=1)
axes = _check_plot_works(grouped.boxplot, return_type='axes')
- self._check_axes_shape(axes.values(), axes_num=10, layout=(4, 3))
+ self._check_axes_shape(list(axes.values()), axes_num=10, layout=(4, 3))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
@@ -2646,7 +2646,7 @@ def test_boxplot(self):
grouped = df.unstack(level=1).groupby(level=0, axis=1)
axes = _check_plot_works(grouped.boxplot, return_type='axes')
- self._check_axes_shape(axes.values(), axes_num=3, layout=(2, 2))
+ self._check_axes_shape(list(axes.values()), axes_num=3, layout=(2, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
@@ -2823,14 +2823,14 @@ def test_grouped_box_multiple_axes(self):
fig, axes = self.plt.subplots(2, 3)
returned = df.boxplot(column=['height', 'weight', 'category'], by='gender',
return_type='axes', ax=axes[0])
- returned = np.array(returned.values())
+ returned = np.array(list(returned.values()))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assert_numpy_array_equal(returned, axes[0])
self.assertIs(returned[0].figure, fig)
# draw on second row
returned = df.groupby('classroom').boxplot(column=['height', 'weight', 'category'],
return_type='axes', ax=axes[1])
- returned = np.array(returned.values())
+ returned = np.array(list(returned.values()))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assert_numpy_array_equal(returned, axes[1])
self.assertIs(returned[0].figure, fig)
| Closes #8091.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8098 | 2014-08-23T02:40:35Z | 2014-08-25T05:47:44Z | 2014-08-25T05:47:44Z | 2014-08-30T01:18:56Z |
BUG: fix iat and at for Float64Index | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index ac475d637f9cf..4a39dd73da7d0 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -634,7 +634,8 @@ Bug Fixes
-
+- Bug in ``Float64Index`` where ``iat`` and ``at`` were not testing and were
+ failing (:issue:`8092`).
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 4bfeb86cd84c0..505b557fc0d85 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2309,7 +2309,8 @@ def get_value(self, series, key):
k = _values_from_object(key)
loc = self.get_loc(k)
- new_values = series.values[loc]
+ new_values = _values_from_object(series)[loc]
+
if np.isscalar(new_values) or new_values is None:
return new_values
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 967f437fc5ca1..daeef9b78b037 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -3818,6 +3818,13 @@ def test_float_index_non_scalar_assignment(self):
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df,df2)
+ def test_float_index_at_iat(self):
+ s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
+ for el, item in s.iteritems():
+ self.assertEqual(s.at[el], item)
+ for i in range(len(s)):
+ self.assertEqual(s.iat[i], i + 1)
+
class TestSeriesNoneCoercion(tm.TestCase):
EXPECTED_RESULTS = [
| closes #8092
| https://api.github.com/repos/pandas-dev/pandas/pulls/8094 | 2014-08-22T14:02:19Z | 2014-08-22T15:04:25Z | 2014-08-22T15:04:25Z | 2014-08-22T15:04:27Z |
ENH: add support for datetime.date/time in to_sql (GH6932) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index ecfd7b5ada055..13931c3c104be 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -427,6 +427,7 @@ Enhancements
~~~~~~~~~~~~
- Added support for a ``chunksize`` parameter to ``to_sql`` function. This allows DataFrame to be written in chunks and avoid packet-size overflow errors (:issue:`8062`)
+- Added support for writing ``datetime.date`` and ``datetime.time`` object columns with ``to_sql`` (:issue:`6932`).
- Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 914ade45adaa1..40e103a8604a4 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -11,6 +11,7 @@
import re
import numpy as np
+import pandas.lib as lib
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame, Series
@@ -684,13 +685,14 @@ def _get_column_names_and_types(self, dtype_mapper):
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
- self.frame.index.get_level_values(i).dtype)
+ self.frame.index.get_level_values(i))
column_names_and_types.append((idx_label, idx_type))
- column_names_and_types += zip(
- list(map(str, self.frame.columns)),
- map(dtype_mapper, self.frame.dtypes)
- )
+ column_names_and_types += [
+ (str(self.frame.columns[i]),
+ dtype_mapper(self.frame.iloc[:,i]))
+ for i in range(len(self.frame.columns))
+ ]
return column_names_and_types
def _create_table_statement(self):
@@ -756,30 +758,33 @@ def _harmonize_columns(self, parse_dates=None):
except KeyError:
pass # this column not in results
- def _sqlalchemy_type(self, arr_or_dtype):
+ def _sqlalchemy_type(self, col):
from sqlalchemy.types import (BigInteger, Float, Text, Boolean,
- DateTime, Date, Interval)
+ DateTime, Date, Time, Interval)
- if arr_or_dtype is date:
- return Date
- if com.is_datetime64_dtype(arr_or_dtype):
+ if com.is_datetime64_dtype(col):
try:
- tz = arr_or_dtype.tzinfo
+ tz = col.tzinfo
return DateTime(timezone=True)
except:
return DateTime
- if com.is_timedelta64_dtype(arr_or_dtype):
+ if com.is_timedelta64_dtype(col):
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning)
return BigInteger
- elif com.is_float_dtype(arr_or_dtype):
+ elif com.is_float_dtype(col):
return Float
- elif com.is_integer_dtype(arr_or_dtype):
+ elif com.is_integer_dtype(col):
# TODO: Refine integer size.
return BigInteger
- elif com.is_bool_dtype(arr_or_dtype):
+ elif com.is_bool_dtype(col):
return Boolean
+ inferred = lib.infer_dtype(com._ensure_object(col))
+ if inferred == 'date':
+ return Date
+ if inferred == 'time':
+ return Time
return Text
def _numpy_type(self, sqltype):
@@ -908,7 +913,11 @@ def _create_sql_schema(self, frame, table_name):
},
'date': {
'mysql': 'DATE',
- 'sqlite': 'TIMESTAMP',
+ 'sqlite': 'DATE',
+ },
+ 'time': {
+ 'mysql': 'TIME',
+ 'sqlite': 'TIME',
},
'bool': {
'mysql': 'BOOLEAN',
@@ -1014,8 +1023,8 @@ def _create_table_statement(self):
create_statement = template % {'name': self.name, 'columns': columns}
return create_statement
- def _sql_type_name(self, dtype):
- pytype = dtype.type
+ def _sql_type_name(self, col):
+ pytype = col.dtype.type
pytype_name = "text"
if issubclass(pytype, np.floating):
pytype_name = "float"
@@ -1029,10 +1038,14 @@ def _sql_type_name(self, dtype):
elif issubclass(pytype, np.datetime64) or pytype is datetime:
# Caution: np.datetime64 is also a subclass of np.number.
pytype_name = "datetime"
- elif pytype is datetime.date:
- pytype_name = "date"
elif issubclass(pytype, np.bool_):
pytype_name = "bool"
+ elif issubclass(pytype, np.object):
+ pytype = lib.infer_dtype(com._ensure_object(col))
+ if pytype == "date":
+ pytype_name = "date"
+ elif pytype == "time":
+ pytype_name = "time"
return _SQL_TYPES[pytype_name][self.pd_sql.flavor]
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 68f170759b666..0d55f4c1dbcd8 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -26,7 +26,7 @@
import warnings
import numpy as np
-from datetime import datetime
+from datetime import datetime, date, time
from pandas import DataFrame, Series, Index, MultiIndex, isnull
from pandas import date_range, to_datetime, to_timedelta
@@ -35,6 +35,7 @@
from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
+from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
@@ -976,6 +977,21 @@ def test_datetime_NaT(self):
else:
tm.assert_frame_equal(result, df)
+ def test_datetime_date(self):
+ # test support for datetime.date
+ df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
+ df.to_sql('test_date', self.conn, index=False)
+ res = read_sql_table('test_date', self.conn)
+ # comes back as datetime64
+ tm.assert_series_equal(res['a'], to_datetime(df['a']))
+
+ def test_datetime_time(self):
+ # test support for datetime.time
+ df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
+ df.to_sql('test_time', self.conn, index=False)
+ res = read_sql_table('test_time', self.conn)
+ tm.assert_frame_equal(res, df)
+
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2**25 + 1,dtype=np.int32)
@@ -1269,6 +1285,21 @@ def test_roundtrip(self):
def test_execute_sql(self):
self._execute_sql()
+ def test_datetime_date(self):
+ # test support for datetime.date
+ df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
+ df.to_sql('test_date', self.conn, index=False, flavor=self.flavor)
+ res = read_sql_query('SELECT * FROM test_date', self.conn)
+ if self.flavor == 'sqlite':
+ # comes back as strings
+ tm.assert_frame_equal(res, df.astype(str))
+ elif self.flavor == 'mysql':
+ tm.assert_frame_equal(res, df)
+
+ def test_datetime_time(self):
+ # test support for datetime.time
+ raise nose.SkipTest("datetime.time not supported for sqlite fallback")
+
class TestMySQLLegacy(TestSQLiteLegacy):
"""
| Closes #6932
Support for writing `datetime.date` and `datetime.time` object columns with `to_sql`.
Works nicely for the sqlalchemy mode, for sqlite fallback it writes OK for `datetime.date`, but comes back as object strings (this is also the case for datetime)
Remaining question:
- when reading the data back in `TIME` columns are converted back to `datetime.time`, but `DATE` columns are converted to `datetime64` and not `datetime.date`. Is this OK? Or better try to convert to `datetime.date`?
- `datetime.time` does not work with sqlite fallback for some reason (`datetime.date` does)
| https://api.github.com/repos/pandas-dev/pandas/pulls/8090 | 2014-08-22T09:41:46Z | 2014-08-28T07:03:43Z | 2014-08-28T07:03:43Z | 2014-08-28T07:06:21Z |
CLN: PEP8 cleanup of holiday.py | diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index ea85f35cd4ca2..3b3542b760d6f 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -129,10 +129,10 @@ class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
- days_of_week:
+ days_of_week:
provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday
Monday=0,..,Sunday=6
-
+
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
@@ -148,13 +148,13 @@ class from pandas.tseries.offsets
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
- self.name = name
- self.year = year
- self.month = month
- self.day = day
- self.offset = offset
+ self.name = name
+ self.year = year
+ self.month = month
+ self.day = day
+ self.offset = offset
self.start_date = start_date
- self.end_date = end_date
+ self.end_date = end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
@@ -200,14 +200,14 @@ def dates(self, start_date, end_date, return_name=False):
end_date = self.end_date
start_date = Timestamp(start_date)
- end_date = Timestamp(end_date)
+ end_date = Timestamp(end_date)
year_offset = DateOffset(years=1)
base_date = Timestamp(datetime(start_date.year, self.month, self.day))
dates = DatetimeIndex(start=base_date, end=end_date, freq=year_offset)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
- holiday_dates = list(filter(lambda x: x is not None and
+ holiday_dates = list(filter(lambda x: x is not None and
x.dayofweek in self.days_of_week,
holiday_dates))
else:
@@ -235,9 +235,9 @@ def _apply_rule(self, dates):
if self.offset is not None:
if not isinstance(self.offset, list):
- offsets = [self.offset]
+ offsets = [self.offset]
else:
- offsets = self.offset
+ offsets = self.offset
for offset in offsets:
dates = list(map(lambda d: d + offset, dates))
return dates
@@ -275,7 +275,7 @@ class AbstractHolidayCalendar(object):
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
- end_date = Timestamp(datetime(2030, 12, 31))
+ end_date = Timestamp(datetime(2030, 12, 31))
_holiday_cache = None
def __init__(self, name=None, rules=None):
@@ -315,7 +315,7 @@ def holidays(self, start=None, end=None, return_name=False):
DatetimeIndex of holidays
"""
if self.rules is None:
- raise Exception('Holiday Calendar %s does not have any '\
+ raise Exception('Holiday Calendar %s does not have any '
'rules specified' % self.name)
if start is None:
@@ -325,7 +325,7 @@ def holidays(self, start=None, end=None, return_name=False):
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
- end = Timestamp(end)
+ end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we get them again
@@ -359,7 +359,7 @@ def _cache(self, values):
@staticmethod
def merge_class(base, other):
"""
- Merge holiday calendars together. The base calendar
+ Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
@@ -384,7 +384,7 @@ def merge_class(base, other):
if not isinstance(base, list):
base = [base]
- base_holidays = dict([ (holiday.name,holiday) for holiday in base ])
+ base_holidays = dict([(holiday.name, holiday) for holiday in base])
other_holidays.update(base_holidays)
return list(other_holidays.values())
@@ -401,30 +401,29 @@ def merge(self, other, inplace=False):
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
"""
- holidays = self.merge_class(self, other)
+ holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
-USMemorialDay = Holiday('MemorialDay', month=5, day=24,
- offset=DateOffset(weekday=MO(1)))
-USLaborDay = Holiday('Labor Day', month=9, day=1,
- offset=DateOffset(weekday=MO(1)))
-USColumbusDay = Holiday('Columbus Day', month=10, day=1,
- offset=DateOffset(weekday=MO(2)))
+USMemorialDay = Holiday('MemorialDay', month=5, day=24,
+ offset=DateOffset(weekday=MO(1)))
+USLaborDay = Holiday('Labor Day', month=9, day=1,
+ offset=DateOffset(weekday=MO(1)))
+USColumbusDay = Holiday('Columbus Day', month=10, day=1,
+ offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.', month=1, day=1,
offset=DateOffset(weekday=MO(3)))
-USPresidentsDay = Holiday('President''s Day', month=2, day=1,
- offset=DateOffset(weekday=MO(3)))
+USPresidentsDay = Holiday('President''s Day', month=2, day=1,
+ offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
-
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified
| https://api.github.com/repos/pandas-dev/pandas/pulls/8085 | 2014-08-21T00:26:22Z | 2014-08-22T12:21:56Z | 2014-08-22T12:21:56Z | 2015-04-25T23:33:55Z | |
BUG: When creating table, db indexes should be created from DataFrame indexes | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 09c59710e9b0c..c960a73bb0f88 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -566,17 +566,17 @@ def __init__(self, name, pandas_sql_engine, frame=None, index=True,
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
- self.table = self._create_table_statement()
+ self.table = self._create_table_setup()
self.create()
elif if_exists == 'append':
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
- self.table = self._create_table_statement()
+ self.table = self._create_table_setup()
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(if_exists))
else:
- self.table = self._create_table_statement()
+ self.table = self._create_table_setup()
self.create()
else:
# no data provided, read-only mode
@@ -703,23 +703,25 @@ def _get_column_names_and_types(self, dtype_mapper):
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index.get_level_values(i))
- column_names_and_types.append((idx_label, idx_type))
+ column_names_and_types.append((idx_label, idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]),
- dtype_mapper(self.frame.iloc[:,i]))
+ dtype_mapper(self.frame.iloc[:,i]),
+ False)
for i in range(len(self.frame.columns))
]
+
return column_names_and_types
- def _create_table_statement(self):
+ def _create_table_setup(self):
from sqlalchemy import Table, Column
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
- columns = [Column(name, typ)
- for name, typ in column_names_and_types]
+ columns = [Column(name, typ, index=is_index)
+ for name, typ, is_index in column_names_and_types]
return Table(self.name, self.pd_sql.meta, *columns, schema=self.schema)
@@ -979,10 +981,12 @@ class PandasSQLTableLegacy(PandasSQLTable):
Instead of a table variable just use the Create Table
statement"""
def sql_schema(self):
- return str(self.table)
+ return str(";\n".join(self.table))
def create(self):
- self.pd_sql.execute(self.table)
+ with self.pd_sql.con:
+ for stmt in self.table:
+ self.pd_sql.execute(stmt)
def insert_statement(self):
names = list(map(str, self.frame.columns))
@@ -1026,14 +1030,17 @@ def insert(self, chunksize=None):
cur.executemany(ins, data_list)
cur.close()
- def _create_table_statement(self):
- "Return a CREATE TABLE statement to suit the contents of a DataFrame."
+ def _create_table_setup(self):
+ """Return a list of SQL statement that create a table reflecting the
+ structure of a DataFrame. The first entry will be a CREATE TABLE
+ statement while the rest will be CREATE INDEX statements
+ """
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
pat = re.compile('\s+')
- column_names = [col_name for col_name, _ in column_names_and_types]
+ column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING)
@@ -1044,13 +1051,21 @@ def _create_table_statement(self):
col_template = br_l + '%s' + br_r + ' %s'
- columns = ',\n '.join(col_template %
- x for x in column_names_and_types)
+ columns = ',\n '.join(col_template % (cname, ctype)
+ for cname, ctype, _ in column_names_and_types)
template = """CREATE TABLE %(name)s (
%(columns)s
)"""
- create_statement = template % {'name': self.name, 'columns': columns}
- return create_statement
+ create_stmts = [template % {'name': self.name, 'columns': columns}, ]
+
+ ix_tpl = "CREATE INDEX ix_{tbl}_{col} ON {tbl} ({br_l}{col}{br_r})"
+ for cname, _, is_index in column_names_and_types:
+ if not is_index:
+ continue
+ create_stmts.append(ix_tpl.format(tbl=self.name, col=cname,
+ br_l=br_l, br_r=br_r))
+
+ return create_stmts
def _sql_type_name(self, col):
pytype = col.dtype.type
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 0108335c94249..3ad9669abb883 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -199,7 +199,7 @@ def _load_test2_data(self):
E=['1990-11-22', '1991-10-26', '1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
- self.test_frame3 = df
+ self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
@@ -324,6 +324,13 @@ def _execute_sql(self):
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
+ def _to_sql_save_index(self):
+ df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
+ columns=['A','B','C'], index=['A'])
+ self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
+ ix_cols = self._get_index_columns('test_to_sql_saves_index')
+ self.assertEqual(ix_cols, [['A',],])
+
#------------------------------------------------------------------------------
#--- Testing the public API
@@ -694,6 +701,13 @@ def test_warning_case_insensitive_table_name(self):
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for writing a table")
+ def _get_index_columns(self, tbl_name):
+ from sqlalchemy.engine import reflection
+ insp = reflection.Inspector.from_engine(self.conn)
+ ixs = insp.get_indexes('test_index_saved')
+ ixs = [i['column_names'] for i in ixs]
+ return ixs
+
class TestSQLLegacyApi(_TestSQLApi):
"""
@@ -1074,6 +1088,16 @@ def test_nan_string(self):
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
+ def _get_index_columns(self, tbl_name):
+ from sqlalchemy.engine import reflection
+ insp = reflection.Inspector.from_engine(self.conn)
+ ixs = insp.get_indexes(tbl_name)
+ ixs = [i['column_names'] for i in ixs]
+ return ixs
+
+ def test_to_sql_save_index(self):
+ self._to_sql_save_index()
+
class TestSQLiteAlchemy(_TestSQLAlchemy):
"""
@@ -1368,6 +1392,20 @@ def test_datetime_time(self):
# test support for datetime.time
raise nose.SkipTest("datetime.time not supported for sqlite fallback")
+ def _get_index_columns(self, tbl_name):
+ ixs = sql.read_sql_query(
+ "SELECT * FROM sqlite_master WHERE type = 'index' " +
+ "AND tbl_name = '%s'" % tbl_name, self.conn)
+ ix_cols = []
+ for ix_name in ixs.name:
+ ix_info = sql.read_sql_query(
+ "PRAGMA index_info(%s)" % ix_name, self.conn)
+ ix_cols.append(ix_info.name.tolist())
+ return ix_cols
+
+ def test_to_sql_save_index(self):
+ self._to_sql_save_index()
+
class TestMySQLLegacy(TestSQLiteLegacy):
"""
@@ -1424,6 +1462,19 @@ def test_a_deprecation(self):
sql.has_table('test_frame1', self.conn, flavor='mysql'),
'Table not written to DB')
+ def _get_index_columns(self, tbl_name):
+ ixs = sql.read_sql_query(
+ "SHOW INDEX IN %s" % tbl_name, self.conn)
+ ix_cols = {}
+ for ix_name, ix_col in zip(ixs.Key_name, ixs.Column_name):
+ if ix_name not in ix_cols:
+ ix_cols[ix_name] = []
+ ix_cols[ix_name].append(ix_col)
+ return list(ix_cols.values())
+
+ def test_to_sql_save_index(self):
+ self._to_sql_save_index()
+
#------------------------------------------------------------------------------
#--- Old tests from 0.13.1 (before refactor using sqlalchemy)
| Unfortunately my PR #8022 introduced a bug wherein database indexes are not created from DataFrame indexes. This pull request fixes it, also introduces this feature for the legacy interface, and adds tests to make sure indexes are created as appropriate.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8083 | 2014-08-20T20:23:36Z | 2014-09-11T06:33:17Z | 2014-09-11T06:33:17Z | 2014-09-14T10:42:50Z |
PERF: StataWriter is slow | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index d608304511a08..6c58e751a6bcc 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -482,6 +482,8 @@ Performance
- Performance improvements in ``Period`` creation (and ``PeriodIndex`` setitem) (:issue:`5155`)
- Improvements in Series.transform for significant performance gains (revised) (:issue:`6496`)
- Performance improvements in ``StataReader`` when reading large files (:issue:`8040`, :issue:`8073`)
+- Performance improvements in ``StataWriter`` when writing large files (:issue:`8079`)
+
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 0cf57d3035db5..246465153c611 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -19,11 +19,12 @@
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import datetime
-from pandas import compat, to_timedelta, to_datetime
+from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip
+import pandas.core.common as com
from pandas.io.common import get_filepath_or_buffer
-from pandas.lib import max_len_string_array, is_string_array
+from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
def read_stata(filepath_or_buffer, convert_dates=True,
@@ -63,88 +64,6 @@ def read_stata(filepath_or_buffer, convert_dates=True,
stata_epoch = datetime.datetime(1960, 1, 1)
-def _stata_elapsed_date_to_datetime(date, fmt):
- """
- Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
-
- Parameters
- ----------
- date : int
- The Stata Internal Format date to convert to datetime according to fmt
- fmt : str
- The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
-
- Examples
- --------
- >>> _stata_elapsed_date_to_datetime(52, "%tw")
- datetime.datetime(1961, 1, 1, 0, 0)
-
- Notes
- -----
- datetime/c - tc
- milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
- datetime/C - tC - NOT IMPLEMENTED
- milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
- date - td
- days since 01jan1960 (01jan1960 = 0)
- weekly date - tw
- weeks since 1960w1
- This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
- The datetime value is the start of the week in terms of days in the
- year, not ISO calendar weeks.
- monthly date - tm
- months since 1960m1
- quarterly date - tq
- quarters since 1960q1
- half-yearly date - th
- half-years since 1960h1 yearly
- date - ty
- years since 0000
-
- If you don't have pandas with datetime support, then you can't do
- milliseconds accurately.
- """
- #NOTE: we could run into overflow / loss of precision situations here
- # casting to int, but I'm not sure what to do. datetime won't deal with
- # numpy types and numpy datetime isn't mature enough / we can't rely on
- # pandas version > 0.7.1
- #TODO: IIRC relative delta doesn't play well with np.datetime?
- #TODO: When pandas supports more than datetime64[ns], this should be improved to use correct range, e.g. datetime[Y] for yearly
- if np.isnan(date):
- return NaT
- date = int(date)
- if fmt in ["%tc", "tc"]:
- from dateutil.relativedelta import relativedelta
- return stata_epoch + relativedelta(microseconds=date * 1000)
- elif fmt in ["%tC", "tC"]:
- from warnings import warn
- warn("Encountered %tC format. Leaving in Stata Internal Format.")
- return date
- elif fmt in ["%td", "td", "%d", "d"]:
- return stata_epoch + datetime.timedelta(int(date))
- elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
- year = datetime.datetime(stata_epoch.year + date // 52, 1, 1)
- day_delta = (date % 52) * 7
- return year + datetime.timedelta(int(day_delta))
- elif fmt in ["%tm", "tm"]:
- year = stata_epoch.year + date // 12
- month_delta = (date % 12) + 1
- return datetime.datetime(year, month_delta, 1)
- elif fmt in ["%tq", "tq"]:
- year = stata_epoch.year + date // 4
- month_delta = (date % 4) * 3 + 1
- return datetime.datetime(year, month_delta, 1)
- elif fmt in ["%th", "th"]:
- year = stata_epoch.year + date // 2
- month_delta = (date % 2) * 6 + 1
- return datetime.datetime(year, month_delta, 1)
- elif fmt in ["%ty", "ty"]:
- if date > 0:
- return datetime.datetime(date, 1, 1)
- else: # don't do negative years bc can't mix dtypes in column
- raise ValueError("Year 0 and before not implemented")
- else:
- raise ValueError("Date fmt %s not understood" % fmt)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
@@ -153,7 +72,7 @@ def _stata_elapsed_date_to_datetime_vec(dates, fmt):
Parameters
----------
- dates : array-like
+ dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
@@ -166,8 +85,11 @@ def _stata_elapsed_date_to_datetime_vec(dates, fmt):
Examples
--------
- >>> _stata_elapsed_date_to_datetime(52, "%tw")
- datetime.datetime(1961, 1, 1, 0, 0)
+ >>> import pandas as pd
+ >>> dates = pd.Series([52])
+ >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
+ 0 1961-01-01
+ dtype: datetime64[ns]
Notes
-----
@@ -288,7 +210,6 @@ def convert_delta_safe(base, deltas, unit):
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
- # TODO: Check about negative years, here, and raise or warn if needed
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
@@ -299,49 +220,103 @@ def convert_delta_safe(base, deltas, unit):
conv_dates[bad_locs] = NaT
return conv_dates
-def _datetime_to_stata_elapsed(date, fmt):
+
+def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
- date : datetime.datetime
- The date to convert to the Stata Internal Format given by fmt
+ dates : Series
+ Series or array containing datetime.datetime or datetime64[ns] to
+ convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
- if not isinstance(date, datetime.datetime):
- raise ValueError("date should be datetime.datetime format")
- stata_epoch = datetime.datetime(1960, 1, 1)
- # Handle NaTs
- if date is NaT:
- # Missing value for dates ('.'), assumed always double
- # TODO: Should be moved so a const somewhere, and consolidated
- return struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
+ index = dates.index
+ NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
+ US_PER_DAY = NS_PER_DAY / 1000
+
+ def parse_dates_safe(dates, delta=False, year=False, days=False):
+ d = {}
+ if com.is_datetime64_dtype(dates.values):
+ if delta:
+ delta = dates - stata_epoch
+ d['delta'] = delta.values.astype(
+ np.int64) // 1000 # microseconds
+ if days or year:
+ dates = DatetimeIndex(dates)
+ d['year'], d['month'] = dates.year, dates.month
+ if days:
+ days = (dates.astype(np.int64) -
+ to_datetime(d['year'], format='%Y').astype(np.int64))
+ d['days'] = days // NS_PER_DAY
+
+ elif infer_dtype(dates) == 'datetime':
+ if delta:
+ delta = dates.values - stata_epoch
+ f = lambda x: \
+ US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
+ v = np.vectorize(f)
+ d['delta'] = v(delta)
+ if year:
+ year_month = dates.apply(lambda x: 100 * x.year + x.month)
+ d['year'] = year_month.values // 100
+ d['month'] = (year_month.values - d['year'] * 100)
+ if days:
+ f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
+ v = np.vectorize(f)
+ d['days'] = v(dates)
+ else:
+ raise ValueError('Columns containing dates must contain either '
+ 'datetime64, datetime.datetime or null values.')
+
+ return DataFrame(d, index=index)
+
+ bad_loc = isnull(dates)
+ index = dates.index
+ if bad_loc.any():
+ dates = Series(dates)
+ if com.is_datetime64_dtype(dates):
+ dates[bad_loc] = to_datetime(stata_epoch)
+ else:
+ dates[bad_loc] = stata_epoch
+
if fmt in ["%tc", "tc"]:
- delta = date - stata_epoch
- return (delta.days * 86400000 + delta.seconds*1000 +
- delta.microseconds/1000)
+ d = parse_dates_safe(dates, delta=True)
+ conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
- return date
+ conv_dates = dates
elif fmt in ["%td", "td"]:
- return (date - stata_epoch).days
+ d = parse_dates_safe(dates, delta=True)
+ conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
- return (52*(date.year-stata_epoch.year) +
- (date - datetime.datetime(date.year, 1, 1)).days / 7)
+ d = parse_dates_safe(dates, year=True, days=True)
+ conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
- return (12 * (date.year - stata_epoch.year) + date.month - 1)
+ d = parse_dates_safe(dates, year=True)
+ conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
- return 4*(date.year-stata_epoch.year) + int((date.month - 1)/3)
+ d = parse_dates_safe(dates, year=True)
+ conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
- return 2 * (date.year - stata_epoch.year) + int(date.month > 6)
+ d = parse_dates_safe(dates, year=True)
+ conv_dates = 2 * (d.year - stata_epoch.year) + \
+ (d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
- return date.year
+ d = parse_dates_safe(dates, year=True)
+ conv_dates = d.year
else:
raise ValueError("fmt %s not understood" % fmt)
+ conv_dates = Series(conv_dates, dtype=np.float64)
+ missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
+ conv_dates[bad_loc] = missing_value
+
+ return Series(conv_dates, index=index)
+
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer) characters.
@@ -417,7 +392,7 @@ def _cast_to_stata_types(data):
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
- if data[col].max() >= 2 * 53:
+ if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
@@ -1254,9 +1229,8 @@ def _dtype_to_default_stata_fmt(dtype, column):
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
- string -> "%DDs" where DD is the length of the string
- object -> "%DDs" where DD is the length of the string, if a string, or 244
- for anything that cannot be converted to a string.
+ object -> "%DDs" where DD is the length of the string. If not a string,
+ raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
@@ -1264,19 +1238,13 @@ def _dtype_to_default_stata_fmt(dtype, column):
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
- #TODO: expand this to handle a default datetime format?
- if dtype.type == np.string_:
- if max_len_string_array(column.values) > 244:
- raise ValueError(excessive_string_length_error % column.name)
-
- return "%" + str(dtype.itemsize) + "s"
- elif dtype.type == np.object_:
- try:
- # Try to use optimal size if available
- itemsize = max_len_string_array(column.values)
- except:
- # Default size
- itemsize = 244
+ # TODO: expand this to handle a default datetime format?
+ if dtype.type == np.object_:
+ inferred_dtype = infer_dtype(column.dropna())
+ if not (inferred_dtype in ('string', 'unicode')
+ or len(column) == 0):
+ raise ValueError('Writing general object arrays is not supported')
+ itemsize = max_len_string_array(column.values)
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
@@ -1328,12 +1296,15 @@ class StataWriter(StataParser):
Examples
--------
+ >>> import pandas as pd
+ >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
-
- >>> writer = StataWriter('./date_data_file.dta', date, {2 : 'tw'})
+ >>> from datetime import datetime
+ >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
+ >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
@@ -1502,11 +1473,8 @@ def write_file(self):
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
- if self._convert_dates is None:
- self._write_data_nodates()
- else:
- self._write_data_dates()
- #self._write_value_labels()
+ self._prepare_data()
+ self._write_data()
self._file.close()
def _write_header(self, data_label=None, time_stamp=None):
@@ -1573,59 +1541,46 @@ def _write_variable_labels(self, labels=None):
for i in range(nvar):
self._write(_pad_bytes("", 81))
- def _write_data_nodates(self):
- data = self.datarows
- byteorder = self._byteorder
- TYPE_MAP = self.TYPE_MAP
+ def _prepare_data(self):
+ data = self.data.copy()
typlist = self.typlist
- for row in data:
- #row = row.squeeze().tolist() # needed for structured arrays
- for i, var in enumerate(row):
- typ = ord(typlist[i])
- if typ <= 244: # we've got a string
- if var is None or var == np.nan:
- var = _pad_bytes('', typ)
- if len(var) < typ:
- var = _pad_bytes(var, typ)
- if compat.PY3:
- self._write(var)
- else:
- self._write(var.encode(self._encoding))
- else:
- try:
- self._file.write(struct.pack(byteorder + TYPE_MAP[typ],
- var))
- except struct.error:
- # have to be strict about type pack won't do any
- # kind of casting
- self._file.write(struct.pack(byteorder+TYPE_MAP[typ],
- self.type_converters[typ](var)))
-
- def _write_data_dates(self):
convert_dates = self._convert_dates
- data = self.datarows
- byteorder = self._byteorder
- TYPE_MAP = self.TYPE_MAP
- MISSING_VALUES = self.MISSING_VALUES
- typlist = self.typlist
- for row in data:
- #row = row.squeeze().tolist() # needed for structured arrays
- for i, var in enumerate(row):
- typ = ord(typlist[i])
- #NOTE: If anyone finds this terribly slow, there is
- # a vectorized way to convert dates, see genfromdta for going
- # from int to datetime and reverse it. will copy data though
+ # 1. Convert dates
+ if self._convert_dates is not None:
+ for i, col in enumerate(data):
if i in convert_dates:
- var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
- if typ <= 244: # we've got a string
- if len(var) < typ:
- var = _pad_bytes(var, typ)
- if compat.PY3:
- self._write(var)
- else:
- self._write(var.encode(self._encoding))
- else:
- self._file.write(struct.pack(byteorder+TYPE_MAP[typ], var))
+ data[col] = _datetime_to_stata_elapsed_vec(data[col],
+ self.fmtlist[i])
+
+ # 2. Convert bad string data to '' and pad to correct length
+ dtype = []
+ data_cols = []
+ has_strings = False
+ for i, col in enumerate(data):
+ typ = ord(typlist[i])
+ if typ <= 244:
+ has_strings = True
+ data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
+ stype = 'S%d' % typ
+ dtype.append(('c'+str(i), stype))
+ string = data[col].str.encode(self._encoding)
+ data_cols.append(string.values.astype(stype))
+ else:
+ dtype.append(('c'+str(i), data[col].dtype))
+ data_cols.append(data[col].values)
+ dtype = np.dtype(dtype)
+
+ # 3. Convert to record array
+
+ # data.to_records(index=False, convert_datetime64=False)
+ if has_strings:
+ self.data = np.fromiter(zip(*data_cols), dtype=dtype)
+ else:
+ self.data = data.to_records(index=False)
+
+ def _write_data(self):
+ data = self.data
+ data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
diff --git a/pandas/io/tests/data/stata9_115.dta b/pandas/io/tests/data/stata9_115.dta
index 6c3b6ab4dc686..5ad6cd6a2c8ff 100644
Binary files a/pandas/io/tests/data/stata9_115.dta and b/pandas/io/tests/data/stata9_115.dta differ
diff --git a/pandas/io/tests/data/stata9_117.dta b/pandas/io/tests/data/stata9_117.dta
index 6c3b6ab4dc686..5ad6cd6a2c8ff 100644
Binary files a/pandas/io/tests/data/stata9_117.dta and b/pandas/io/tests/data/stata9_117.dta differ
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 54c1dd20029ee..c458688b3d2d2 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -646,14 +646,14 @@ def test_missing_value_conversion(self):
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
- yr = [1960, 2000, 9999, 100]
- mo = [1, 1, 12, 1]
- dd = [1, 1, 31, 1]
- hr = [0, 0, 23, 0]
- mm = [0, 0, 59, 0]
- ss = [0, 0, 59, 0]
+ yr = [1960, 2000, 9999, 100, 2262, 1677]
+ mo = [1, 1, 12, 1, 4, 9]
+ dd = [1, 1, 31, 1, 22, 23]
+ hr = [0, 0, 23, 0, 0, 0]
+ mm = [0, 0, 59, 0, 0, 0]
+ ss = [0, 0, 59, 0, 0, 0]
expected = []
- for i in range(4):
+ for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
@@ -672,6 +672,11 @@ def test_big_dates(self):
expected[2][3] = datetime(9999,12,1)
expected[2][4] = datetime(9999,10,1)
expected[2][5] = datetime(9999,7,1)
+ expected[4][2] = datetime(2262,4,16)
+ expected[4][3] = expected[4][4] = datetime(2262,4,1)
+ expected[4][5] = expected[4][6] = datetime(2262,1,1)
+ expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677,10,1)
+ expected[5][5] = expected[5][6] = datetime(1678,1,1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
@@ -679,7 +684,17 @@ def test_big_dates(self):
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
- assert True
+
+ date_conversion = dict((c, c[-2:]) for c in columns)
+ #{c : c[-2:] for c in columns}
+ with tm.ensure_clean() as path:
+ expected.index.name = 'index'
+ expected.to_stata(path, date_conversion)
+ written_and_read_again = self.read_dta(path)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'),
+ expected)
+
+
if __name__ == '__main__':
| StataWriter wrote data using scalar operations. This has been replaced using
numpy's internal binary writer (tofile).
Changes needed to improve performance include:
- Vectorized pandas date to Stata date conversion
- Vectorized null padding
- Conversion to record array
When data contain strings, the old writing paths are still used.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8079 | 2014-08-20T15:27:40Z | 2014-08-21T14:26:34Z | 2014-08-21T14:26:34Z | 2014-08-22T15:42:17Z |
ENH: add support dtype='category' in Series constructor | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 6c58e751a6bcc..b987104ac2408 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -120,7 +120,7 @@ API changes
3 9
4 NaN
dtype: float64
-
+
New behavior (note final value is ``7 = sum([3, 4, NaN])``):
.. ipython:: python
@@ -346,7 +346,7 @@ Categoricals in Series/DataFrame
:class:`~pandas.Categorical` can now be included in `Series` and `DataFrames` and gained new
methods to manipulate. Thanks to Jan Schultz for much of this API/implementation. (:issue:`3943`, :issue:`5313`, :issue:`5314`,
-:issue:`7444`, :issue:`7839`, :issue:`7848`, :issue:`7864`, :issue:`7914`, :issue:`7768`, :issue:`8006`, :issue:`3678`).
+:issue:`7444`, :issue:`7839`, :issue:`7848`, :issue:`7864`, :issue:`7914`, :issue:`7768`, :issue:`8006`, :issue:`3678`, :issue:`8075`, :issue:`8076`).
For full docs, see the :ref:`Categorical introduction <categorical>` and the
:ref:`API documentation <api.categorical>`.
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 853feb27d1b21..ec1de70e18b4c 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -743,12 +743,14 @@ def fillna(self, fill_value=None, method=None, limit=None, **kwargs):
name=self.name, fastpath=True)
def take_nd(self, indexer, allow_fill=True, fill_value=None):
- """ Take the values by the indexer, fill with the fill_value. """
- if allow_fill and fill_value is None:
- fill_value = np.nan
+ """ Take the codes by the indexer, fill with the fill_value. """
+
+ # filling must always be None/nan here
+ # but is passed thru internally
+ assert isnull(fill_value)
- values = com.take_1d(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
- result = Categorical(values=values, levels=self.levels, ordered=self.ordered,
+ codes = com.take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
+ result = Categorical(codes, levels=self.levels, ordered=self.ordered,
name=self.name, fastpath=True)
return result
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 0274a0f1b3b03..e3a0cf14cfbc1 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2326,19 +2326,31 @@ def is_number(obj):
return isinstance(obj, (numbers.Number, np.number))
+def _coerce_to_dtype(dtype):
+ """ coerce a string / np.dtype to a dtype """
+ if is_categorical_dtype(dtype):
+ dtype = CategoricalDtype()
+ else:
+ dtype = np.dtype(dtype)
+ return dtype
+
def _get_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
- if isinstance(arr_or_dtype, type):
+ elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
+ elif isinstance(arr_or_dtype, CategoricalDtype):
+ return CategoricalDtype()
return arr_or_dtype.dtype
def _get_dtype_type(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
- if isinstance(arr_or_dtype, type):
+ elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype).type
+ elif isinstance(arr_or_dtype, CategoricalDtype):
+ return CategoricalDtypeType
return arr_or_dtype.dtype.type
@@ -2488,7 +2500,7 @@ def _astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
- dtype = np.dtype(dtype)
+ dtype = _coerce_to_dtype(dtype)
if is_datetime64_dtype(arr):
if dtype == object:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5064545404fb0..ee5016386af4c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -105,7 +105,7 @@ def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
- dtype = np.dtype(dtype)
+ dtype = com._coerce_to_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 68f5b4d36392f..a0bbb2c713e56 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -19,7 +19,7 @@
is_list_like, _values_from_object,
_possibly_cast_to_datetime, _possibly_castable,
_possibly_convert_platform, _try_sort,
- ABCSparseArray, _maybe_match_name,
+ ABCSparseArray, _maybe_match_name, _coerce_to_dtype,
_ensure_object, SettingWithCopyError)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
_ensure_index)
@@ -2434,7 +2434,7 @@ def _sanitize_array(data, index, dtype=None, copy=False,
""" sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified """
if dtype is not None:
- dtype = np.dtype(dtype)
+ dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
@@ -2455,9 +2455,11 @@ def _try_cast(arr, take_fast_path):
arr = _possibly_cast_to_datetime(arr, dtype)
subarr = pa.array(arr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
- if dtype is not None and raise_cast_failure:
+ if com.is_categorical_dtype(dtype):
+ subarr = Categorical(arr)
+ elif dtype is not None and raise_cast_failure:
raise
- else: # pragma: no cover
+ else:
subarr = pa.array(arr, dtype=object, copy=copy)
return subarr
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index fcfee8cf9b1ba..7bc2eeb97d47a 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -840,13 +840,58 @@ def test_creation_astype(self):
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
-
df = pd.DataFrame({"cats":['a', 'b', 'b', 'a', 'a', 'd'], "vals":[1,2,3,4,5,6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
+ def test_construction_series(self):
+
+ l = [1,2,3,1]
+ exp = Series(l).astype('category')
+ res = Series(l,dtype='category')
+ tm.assert_series_equal(res, exp)
+
+ l = ["a","b","c","a"]
+ exp = Series(l).astype('category')
+ res = Series(l,dtype='category')
+ tm.assert_series_equal(res, exp)
+
+ # insert into frame with different index
+ # GH 8076
+ index = pd.date_range('20000101', periods=3)
+ expected = Series(Categorical(values=[np.nan,np.nan,np.nan],levels=['a', 'b', 'c']))
+ expected.index = index
+
+ expected = DataFrame({'x': expected})
+ df = DataFrame({'x': Series(['a', 'b', 'c'],dtype='category')}, index=index)
+ tm.assert_frame_equal(df, expected)
+
+ def test_reindex(self):
+
+ index = pd.date_range('20000101', periods=3)
+
+ # reindexing to an invalid Categorical
+ s = Series(['a', 'b', 'c'],dtype='category')
+ result = s.reindex(index)
+ expected = Series(Categorical(values=[np.nan,np.nan,np.nan],levels=['a', 'b', 'c']))
+ expected.index = index
+ tm.assert_series_equal(result, expected)
+
+ # partial reindexing
+ expected = Series(Categorical(values=['b','c'],levels=['a', 'b', 'c']))
+ expected.index = [1,2]
+ result = s.reindex([1,2])
+ tm.assert_series_equal(result, expected)
+
+ expected = Series(Categorical(values=['c',np.nan],levels=['a', 'b', 'c']))
+ expected.index = [2,3]
+ result = s.reindex([2,3])
+ tm.assert_series_equal(result, expected)
+
+
+
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either the series or the
| xref #8074
closes #8076
```
In [1]: Series([1,1,2,2,3,4,5],dtype='category')
Out[1]:
0 1
1 1
2 2
3 2
4 3
5 4
6 5
dtype: category
Levels (5, int64): [1 < 2 < 3 < 4 < 5]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/8075 | 2014-08-20T00:13:51Z | 2014-08-21T21:33:10Z | 2014-08-21T21:33:10Z | 2014-08-21T21:33:10Z |
PERF: StataReader is slow | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 1db189fcc74e3..d608304511a08 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -481,7 +481,7 @@ Performance
- Performance improvements in ``DatetimeIndex.__iter__`` to allow faster iteration (:issue:`7683`)
- Performance improvements in ``Period`` creation (and ``PeriodIndex`` setitem) (:issue:`5155`)
- Improvements in Series.transform for significant performance gains (revised) (:issue:`6496`)
-- Performance improvements in ``StataReader`` when reading large files (:issue:`8040`)
+- Performance improvements in ``StataReader`` when reading large files (:issue:`8040`, :issue:`8073`)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index c9a3104eec3f0..0cf57d3035db5 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -13,18 +13,18 @@
import sys
import struct
+from dateutil.relativedelta import relativedelta
from pandas.core.base import StringMixin
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import datetime
-from pandas import compat
+from pandas import compat, to_timedelta, to_datetime
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip
-from pandas import isnull
from pandas.io.common import get_filepath_or_buffer
from pandas.lib import max_len_string_array, is_string_array
-from pandas.tslib import NaT
+from pandas.tslib import NaT, Timestamp
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
@@ -62,6 +62,7 @@ def read_stata(filepath_or_buffer, convert_dates=True,
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
+stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime(date, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
@@ -111,9 +112,7 @@ def _stata_elapsed_date_to_datetime(date, fmt):
#TODO: When pandas supports more than datetime64[ns], this should be improved to use correct range, e.g. datetime[Y] for yearly
if np.isnan(date):
return NaT
-
date = int(date)
- stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
from dateutil.relativedelta import relativedelta
return stata_epoch + relativedelta(microseconds=date * 1000)
@@ -148,6 +147,158 @@ def _stata_elapsed_date_to_datetime(date, fmt):
raise ValueError("Date fmt %s not understood" % fmt)
+def _stata_elapsed_date_to_datetime_vec(dates, fmt):
+ """
+ Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
+
+ Parameters
+ ----------
+ dates : array-like
+ The Stata Internal Format date to convert to datetime according to fmt
+ fmt : str
+ The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
+ Returns
+
+ Returns
+ -------
+ converted : Series
+ The converted dates
+
+ Examples
+ --------
+ >>> _stata_elapsed_date_to_datetime(52, "%tw")
+ datetime.datetime(1961, 1, 1, 0, 0)
+
+ Notes
+ -----
+ datetime/c - tc
+ milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
+ datetime/C - tC - NOT IMPLEMENTED
+ milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
+ date - td
+ days since 01jan1960 (01jan1960 = 0)
+ weekly date - tw
+ weeks since 1960w1
+ This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
+ The datetime value is the start of the week in terms of days in the
+ year, not ISO calendar weeks.
+ monthly date - tm
+ months since 1960m1
+ quarterly date - tq
+ quarters since 1960q1
+ half-yearly date - th
+ half-years since 1960h1 yearly
+ date - ty
+ years since 0000
+
+ If you don't have pandas with datetime support, then you can't do
+ milliseconds accurately.
+ """
+ MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
+ MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
+ MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
+ MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
+ MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
+
+ def convert_year_month_safe(year, month):
+ """
+ Convert year and month to datetimes, using pandas vectorized versions
+ when the date range falls within the range supported by pandas. Other
+ wise it falls back to a slower but more robust method using datetime.
+ """
+ if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
+ return to_datetime(100 * year + month, format='%Y%m')
+ else:
+ return Series(
+ [datetime.datetime(y, m, 1) for y, m in zip(year, month)])
+
+ def convert_year_days_safe(year, days):
+ """
+ Converts year (e.g. 1999) and days since the start of the year to a
+ datetime or datetime64 Series
+ """
+ if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
+ return to_datetime(year, format='%Y') + to_timedelta(days, unit='d')
+ else:
+ value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d)) for
+ y, d in zip(year, days)]
+ return Series(value)
+
+ def convert_delta_safe(base, deltas, unit):
+ """
+ Convert base dates and deltas to datetimes, using pandas vectorized
+ versions if the deltas satisfy restrictions required to be expressed
+ as dates in pandas.
+ """
+ if unit == 'd':
+ if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
+ values = [base + relativedelta(days=int(d)) for d in deltas]
+ return Series(values)
+ elif unit == 'ms':
+ if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
+ values = [base + relativedelta(microseconds=(int(d) * 1000)) for
+ d in deltas]
+ return Series(values)
+ else:
+ raise ValueError('format not understood')
+
+ base = to_datetime(base)
+ deltas = to_timedelta(deltas, unit=unit)
+ return base + deltas
+
+ # TODO: If/when pandas supports more than datetime64[ns], this should be improved to use correct range, e.g. datetime[Y] for yearly
+ bad_locs = np.isnan(dates)
+ has_bad_values = False
+ if bad_locs.any():
+ has_bad_values = True
+ data_col = Series(dates)
+ data_col[bad_locs] = 1.0 # Replace with NaT
+ dates = dates.astype(np.int64)
+
+ if fmt in ["%tc", "tc"]: # Delta ms relative to base
+ base = stata_epoch
+ ms = dates
+ conv_dates = convert_delta_safe(base, ms, 'ms')
+ elif fmt in ["%tC", "tC"]:
+ from warnings import warn
+
+ warn("Encountered %tC format. Leaving in Stata Internal Format.")
+ conv_dates = Series(dates, dtype=np.object)
+ if has_bad_values:
+ conv_dates[bad_locs] = np.nan
+ return conv_dates
+ elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
+ base = stata_epoch
+ days = dates
+ conv_dates = convert_delta_safe(base, days, 'd')
+ elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
+ year = stata_epoch.year + dates // 52
+ days = (dates % 52) * 7
+ conv_dates = convert_year_days_safe(year, days)
+ elif fmt in ["%tm", "tm"]: # Delta months relative to base
+ year = stata_epoch.year + dates // 12
+ month = (dates % 12) + 1
+ conv_dates = convert_year_month_safe(year, month)
+ elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
+ year = stata_epoch.year + dates // 4
+ month = (dates % 4) * 3 + 1
+ conv_dates = convert_year_month_safe(year, month)
+ elif fmt in ["%th", "th"]: # Delta half-years relative to base
+ year = stata_epoch.year + dates // 2
+ month = (dates % 2) * 6 + 1
+ conv_dates = convert_year_month_safe(year, month)
+ elif fmt in ["%ty", "ty"]: # Years -- not delta
+ # TODO: Check about negative years, here, and raise or warn if needed
+ year = dates
+ month = np.ones_like(dates)
+ conv_dates = convert_year_month_safe(year, month)
+ else:
+ raise ValueError("Date fmt %s not understood" % fmt)
+
+ if has_bad_values: # Restore NaT for bad values
+ conv_dates[bad_locs] = NaT
+ return conv_dates
+
def _datetime_to_stata_elapsed(date, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
@@ -477,6 +628,14 @@ def __init__(self, encoding):
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
+ self.NUMPY_TYPE_MAP = \
+ {
+ 'b': 'i1',
+ 'h': 'i2',
+ 'l': 'i4',
+ 'f': 'f4',
+ 'd': 'f8'
+ }
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
@@ -759,15 +918,6 @@ def _calcsize(self, fmt):
return (type(fmt) is int and fmt
or struct.calcsize(self.byteorder + fmt))
- def _col_size(self, k=None):
- if k is None:
- return self.col_sizes
- else:
- return self.col_sizes[k]
-
- def _unpack(self, fmt, byt):
- return struct.unpack(self.byteorder + fmt, byt)[0]
-
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None: # have bytes not strings,
# so must decode
@@ -784,55 +934,6 @@ def _null_terminate(self, s):
except:
return s
- def _next(self):
- typlist = self.typlist
- if self.has_string_data:
- data = [None] * self.nvar
- for i in range(len(data)):
- if type(typlist[i]) is int:
- data[i] = self._null_terminate(
- self.path_or_buf.read(typlist[i])
- )
- else:
- data[i] = self._unpack(
- typlist[i], self.path_or_buf.read(self._col_size(i))
- )
- return data
- else:
- return lmap(
- lambda i: self._unpack(typlist[i],
- self.path_or_buf.read(
- self._col_size(i)
- )),
- range(self.nvar)
- )
-
-
- def _dataset(self):
- """
- Returns a Python generator object for iterating over the dataset.
-
-
- Parameters
- ----------
-
- Returns
- -------
- Generator object for iterating over the dataset. Yields each row of
- observations as a list by default.
-
- Notes
- -----
- If missing_values is True during instantiation of StataReader then
- observations with _StataMissingValue(s) are not filtered and should
- be handled by your applcation.
- """
-
- self.path_or_buf.seek(self.data_location)
-
- for i in range(self.nobs):
- yield self._next()
-
def _read_value_labels(self):
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
@@ -932,27 +1033,32 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None,
if self.format_version >= 117:
self._read_strls()
- stata_dta = self._dataset()
-
- data = []
- for rownum, line in enumerate(stata_dta):
- # doesn't handle missing value objects, just casts
- # None will only work without missing value object.
- for i, val in enumerate(line):
- #NOTE: This will only be scalar types because missing strings
- # are empty not None in Stata
- if val is None:
- line[i] = np.nan
- data.append(tuple(line))
+ # Read data
+ count = self.nobs
+ dtype = [] # Convert struct data types to numpy data type
+ for i, typ in enumerate(self.typlist):
+ if typ in self.NUMPY_TYPE_MAP:
+ dtype.append(('s' + str(i), self.NUMPY_TYPE_MAP[typ]))
+ else:
+ dtype.append(('s' + str(i), 'S' + str(typ)))
+ dtype = np.dtype(dtype)
+ read_len = count * dtype.itemsize
+ self.path_or_buf.seek(self.data_location)
+ data = np.frombuffer(self.path_or_buf.read(read_len),dtype=dtype,count=count)
+ self._data_read = True
if convert_categoricals:
self._read_value_labels()
- # TODO: Refactor to use a dictionary constructor and the correct dtype from the start?
if len(data)==0:
data = DataFrame(columns=self.varlist, index=index)
else:
- data = DataFrame(data, columns=self.varlist, index=index)
+ data = DataFrame.from_records(data, index=index)
+ data.columns = self.varlist
+
+ for col, typ in zip(data, self.typlist):
+ if type(typ) is int:
+ data[col] = data[col].apply(self._null_terminate, convert_dtype=True,)
cols_ = np.where(self.dtyplist)[0]
@@ -1010,8 +1116,7 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
- data[col] = data[col].apply(_stata_elapsed_date_to_datetime,
- args=(self.fmtlist[i],))
+ data[col] = _stata_elapsed_date_to_datetime_vec(data[col], self.fmtlist[i])
if convert_categoricals:
cols = np.where(
diff --git a/pandas/io/tests/data/stata9_115.dta b/pandas/io/tests/data/stata9_115.dta
new file mode 100644
index 0000000000000..6c3b6ab4dc686
Binary files /dev/null and b/pandas/io/tests/data/stata9_115.dta differ
diff --git a/pandas/io/tests/data/stata9_117.dta b/pandas/io/tests/data/stata9_117.dta
new file mode 100644
index 0000000000000..6c3b6ab4dc686
Binary files /dev/null and b/pandas/io/tests/data/stata9_117.dta differ
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 9d630bf83ced7..54c1dd20029ee 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -18,6 +18,7 @@
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
+from pandas.tslib import NaT
from pandas.util.misc import is_little_endian
from pandas import compat
@@ -77,6 +78,10 @@ def setUp(self):
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
+ self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
+ self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
+
+
def read_dta(self, file):
return read_stata(file, convert_dates=True)
@@ -640,6 +645,43 @@ def test_missing_value_conversion(self):
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
+ def test_big_dates(self):
+ yr = [1960, 2000, 9999, 100]
+ mo = [1, 1, 12, 1]
+ dd = [1, 1, 31, 1]
+ hr = [0, 0, 23, 0]
+ mm = [0, 0, 59, 0]
+ ss = [0, 0, 59, 0]
+ expected = []
+ for i in range(4):
+ row = []
+ for j in range(7):
+ if j == 0:
+ row.append(
+ datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
+ elif j == 6:
+ row.append(datetime(yr[i], 1, 1))
+ else:
+ row.append(datetime(yr[i], mo[i], dd[i]))
+ expected.append(row)
+ expected.append([NaT] * 7)
+ columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
+ 'date_th', 'date_ty']
+ # Fixes for weekly, quarterly,half,year
+ expected[2][2] = datetime(9999,12,24)
+ expected[2][3] = datetime(9999,12,1)
+ expected[2][4] = datetime(9999,10,1)
+ expected[2][5] = datetime(9999,7,1)
+
+ expected = DataFrame(expected, columns=columns, dtype=np.object)
+
+ parsed_115 = read_stata(self.dta18_115)
+ parsed_117 = read_stata(self.dta18_117)
+ tm.assert_frame_equal(expected, parsed_115)
+ tm.assert_frame_equal(expected, parsed_117)
+ assert True
+
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/vb_suite/packers.py b/vb_suite/packers.py
index cb933746bef83..403adbf289e1f 100644
--- a/vb_suite/packers.py
+++ b/vb_suite/packers.py
@@ -133,13 +133,13 @@ def remove(f):
packers_write_stata = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date)
setup = common_setup + """
-df['int8_'] = [randint(-127,100) for _ in range(N)]
-df['int16_'] = [randint(-127,100) for _ in range(N)]
-df['int32_'] = [randint(-127,100) for _ in range(N)]
+df['int8_'] = [randint(np.iinfo(np.int8).min, np.iinfo(np.int8).max - 27) for _ in range(N)]
+df['int16_'] = [randint(np.iinfo(np.int16).min, np.iinfo(np.int16).max - 27) for _ in range(N)]
+df['int32_'] = [randint(np.iinfo(np.int32).min, np.iinfo(np.int32).max - 27) for _ in range(N)]
df['float32_'] = np.array(randn(N), dtype=np.float32)
df.to_stata(f, {'index': 'tc'})
"""
-packers_read_stata_with_int = Benchmark("pd.read_stata(f)", setup, start_date=start_date)
+packers_read_stata_with_validation = Benchmark("pd.read_stata(f)", setup, start_date=start_date)
-packers_write_stata_with_int = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date)
+packers_write_stata_with_validation = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date)
| StataReader does not make use of vectorized operations.
To improve performance, the following changes have been made:
- Use numpy.frombuffer to real the stored data in a single operation
- Vectorize date conversion
- Removal of unreachable private functions
| https://api.github.com/repos/pandas-dev/pandas/pulls/8073 | 2014-08-19T15:49:29Z | 2014-08-20T10:52:34Z | 2014-08-20T10:52:34Z | 2014-08-22T15:42:25Z |
BUG: Bug in Timestamp comparisons with == and dtype of int64 (GH8058) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 3839d475b29ad..509d1c4a9b327 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -465,7 +465,7 @@ Bug Fixes
- Bug in ``combine_first`` with ``PeriodIndex`` data raises ``TypeError`` (:issue:`3367`)
- Bug in multi-index slicing with missing indexers (:issue:`7866`)
- Regression in multi-index indexing with a non-scalar type object (:issue:`7914`)
-
+- Bug in Timestamp comparisons with ``==`` and dtype of int64 (:issue:`8058`)
- Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`)
- Bug in pickle deserialization that failed for pre-0.14.1 containers with dup items trying to avoid ambiguity
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 3da97074a93fd..63db28ca53cf1 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -3407,6 +3407,35 @@ def test_comparison(self):
self.assertTrue(other > val)
self.assertTrue(other >= val)
+ def test_compare_invalid(self):
+
+ # GH 8058
+ val = Timestamp('20130101 12:01:02')
+ self.assertFalse(val == 'foo')
+ self.assertFalse(val == 10.0)
+ self.assertFalse(val == 1)
+ self.assertFalse(val == long(1))
+ self.assertFalse(val == [])
+ self.assertFalse(val == {'foo' : 1})
+ self.assertFalse(val == np.float64(1))
+ self.assertFalse(val == np.int64(1))
+
+ self.assertTrue(val != 'foo')
+ self.assertTrue(val != 10.0)
+ self.assertTrue(val != 1)
+ self.assertTrue(val != long(1))
+ self.assertTrue(val != [])
+ self.assertTrue(val != {'foo' : 1})
+ self.assertTrue(val != np.float64(1))
+ self.assertTrue(val != np.int64(1))
+
+ # ops testing
+ df = DataFrame(randn(5,2))
+ a = df[0]
+ b = Series(randn(5))
+ b.name = Timestamp('2000-01-01')
+ tm.assert_series_equal(a / b, 1 / (b / a))
+
def test_cant_compare_tz_naive_w_aware(self):
tm._skip_if_no_pytz()
# #1404
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 5a2352508d42f..36c40f8ca39af 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -710,6 +710,12 @@ cdef class _Timestamp(datetime):
if isinstance(other, np.datetime64):
other = Timestamp(other)
else:
+ if op == Py_EQ:
+ return False
+ elif op == Py_NE:
+ return True
+
+ # only allow ==, != ops
raise TypeError('Cannot compare type %r with type %r' %
(type(self).__name__,
type(other).__name__))
| closes #8058
| https://api.github.com/repos/pandas-dev/pandas/pulls/8070 | 2014-08-19T14:16:29Z | 2014-08-19T15:12:53Z | 2014-08-19T15:12:53Z | 2014-08-19T15:12:53Z |
DOC: Add scatter to visualization.rst | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 076870eff1761..1d2e996934f76 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -126,7 +126,7 @@ These include:
* :ref:`'hist' <visualization.hist>` for histogram
* :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots
* :ref:`'area' <visualization.area_plot>` for area plots
-* :ref:`'scatter' <visualization.scatter_matrix>` for scatter plots
+* :ref:`'scatter' <visualization.scatter>` for scatter plots
* :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots
* :ref:`'pie' <visualization.pie>` for pie plots
@@ -427,6 +427,52 @@ To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5
@savefig area_plot_unstacked.png
df.plot(kind='area', stacked=False);
+.. _visualization.scatter:
+
+Scatter Plot
+~~~~~~~~~~~~
+
+.. versionadded:: 0.13
+
+You can create scatter plots with ``DataFrame.plot`` by passing ``kind='scatter'``.
+Scatter plot requires numeric columns for x and y axis.
+These can be specified by ``x`` and ``y`` keywords each.
+
+.. ipython:: python
+ :suppress:
+
+ np.random.seed(123456)
+ plt.figure()
+
+.. ipython:: python
+
+ df = DataFrame(rand(50, 4), columns=['a', 'b', 'c', 'd'])
+
+ @savefig scatter_plot.png
+ df.plot(kind='scatter', x='a', y='b');
+
+To plot multiple column groups in a single axes, repeat ``plot`` method specifying target ``ax``.
+It is recommended to specify ``color`` and ``label`` keywords to distinguish each groups.
+
+.. ipython:: python
+
+ ax = df.plot(kind='scatter', x='a', y='b',
+ color='DarkBlue', label='Group 1');
+ @savefig scatter_plot_repeated.png
+ df.plot(kind='scatter', x='c', y='d',
+ color='DarkGreen', label='Group 2', ax=ax);
+
+You can pass other keywords supported by matplotlib ``scatter``.
+Below example shows a bubble chart using a dataframe column values as bubble size.
+
+.. ipython:: python
+
+ @savefig scatter_plot_bubble.png
+ df.plot(kind='scatter', x='a', y='b', s=df['c']*200);
+
+See the :meth:`scatter <matplotlib.axes.Axes.scatter>` method and the
+`matplotlib scatter documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`__ for more.
+
.. _visualization.hexbin:
Hexagonal Bin Plot
| Current doc has no explanation, and incorrectly linked to `scatter_matrix`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8069 | 2014-08-19T14:10:00Z | 2014-08-19T14:18:39Z | 2014-08-19T14:18:39Z | 2014-08-22T21:31:29Z |
DOC: Fix Release note 8019, 8039 | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 85f620fcd4b99..71d168e1b575d 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -394,7 +394,7 @@ Enhancements
-- Bug in ``DataFrame.shift`` where empty columns would throw ``ZeroDivisionError`` on numpy 1.7 (:issue:`8019`)
+
@@ -543,7 +543,7 @@ Bug Fixes
-
+- Bug in ``DataFrame.shift`` where empty columns would throw ``ZeroDivisionError`` on numpy 1.7 (:issue:`8019`)
@@ -555,7 +555,7 @@ Bug Fixes
- Bug in ``read_html`` where ``bytes`` objects were not tested for in
``_read`` (:issue:`7927`).
-- Bug in ``DataFrame.stack()`` when one of the column levels was a datelike (:issue: `8039`)
+- Bug in ``DataFrame.stack()`` when one of the column levels was a datelike (:issue:`8039`)
| Fix release note #8019, #8039
| https://api.github.com/repos/pandas-dev/pandas/pulls/8068 | 2014-08-19T13:32:17Z | 2014-08-19T13:36:12Z | 2014-08-19T13:36:12Z | 2014-08-22T21:31:16Z |
typo: "cubhelix" -> "cubehelix" | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 40b5d7c1599c1..076870eff1761 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1079,7 +1079,7 @@ colors are selected based on an even spacing determined by the number of columns
in the DataFrame. There is no consideration made for background color, so some
colormaps will produce lines that are not easily visible.
-To use the cubhelix colormap, we can simply pass ``'cubehelix'`` to ``colormap=``
+To use the cubehelix colormap, we can simply pass ``'cubehelix'`` to ``colormap=``
.. ipython:: python
:suppress:
| https://api.github.com/repos/pandas-dev/pandas/pulls/8067 | 2014-08-19T13:07:33Z | 2014-08-19T13:21:21Z | 2014-08-19T13:21:21Z | 2014-08-19T13:21:25Z | |
Fix bdist_wheel. Add Tag information to WHEEL dist info. | diff --git a/setup.py b/setup.py
index f57349c048a62..a7793f3300dfe 100755
--- a/setup.py
+++ b/setup.py
@@ -392,6 +392,20 @@ def run(self):
'build': build,
'sdist': CheckSDist}
+try:
+ from wheel.bdist_wheel import bdist_wheel
+
+ class BdistWheel(bdist_wheel):
+ def get_tag(self):
+ tag = bdist_wheel.get_tag(self)
+ repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64'
+ if tag[2] == 'macosx_10_6_intel':
+ tag = (tag[0], tag[1], repl)
+ return tag
+ cmdclass['bdist_wheel'] = BdistWheel
+except ImportError:
+ pass
+
if cython:
suffix = '.pyx'
cmdclass['build_ext'] = CheckingBuildExt
| The wheel build with https://github.com/MacPython/pandas-wheels
```
# file: pandas-0.14.1.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.24.0)
Root-Is-Purelib: false
Tag: cp27-none-macosx_10_6_intel
```
Although it was renamed to `macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64`, the real
tag didn't change.
This patch would fix the `Tag` like what I did in [mistune](https://github.com/lepture/mistune)
```
# file: mistune-0.4.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.24.0)
Root-Is-Purelib: false
Tag: cp27-none-macosx_10_6_intel
Tag: cp27-none-macosx_10_9_intel
Tag: cp27-none-macosx_10_9_x86_64
```
Related: http://lepture.com/en/2014/python-on-a-hard-wheel
| https://api.github.com/repos/pandas-dev/pandas/pulls/8066 | 2014-08-19T08:55:56Z | 2014-09-14T14:13:25Z | 2014-09-14T14:13:25Z | 2014-09-14T15:19:01Z |
Adding a warning when dropping NA values for panel.to_frame #7879 | diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 1e6ed56386f63..e27dc0851e4a2 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -8,6 +8,7 @@
from pandas import compat
import sys
import numpy as np
+import warnings
from pandas.core.common import (PandasError, _try_sort, _default_index,
_infer_dtype_from_scalar, notnull)
from pandas.core.categorical import Categorical
@@ -38,6 +39,9 @@
"of\n %s" %
_shared_doc_kwargs['axes_single_arg'])
+# added to allow repetition of warnings
+warnings.simplefilter('always', RuntimeWarning)
+
def _ensure_like_indices(time, panels):
"""
@@ -835,7 +839,7 @@ def groupby(self, function, axis='major'):
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
- def to_frame(self, filter_observations=True):
+ def to_frame(self, filter_observations=False):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
@@ -858,6 +862,8 @@ def to_frame(self, filter_observations=True):
mask = com.notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
+ if not np.all(selector):
+ warnings.warn("NaN values found, empty values will be dropped", RuntimeWarning)
else:
# size = N * K
selector = slice(None, None)
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index 105f661f08b10..4e5a87876fefb 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -1410,7 +1410,7 @@ def _check(frame):
dense_frame = frame.to_dense()
wp = Panel.from_dict({'foo': frame})
- from_dense_lp = wp.to_frame()
+ from_dense_lp = wp.to_frame(filter_observations=True)
from_sparse_lp = spf.stack_sparse_frame(frame)
@@ -1629,8 +1629,8 @@ def test_to_dense(self):
def test_to_frame(self):
def _compare_with_dense(panel):
- slp = panel.to_frame()
- dlp = panel.to_dense().to_frame()
+ slp = panel.to_frame(filter_observations=True)
+ dlp = panel.to_dense().to_frame(filter_observations=True)
self.assert_numpy_array_equal(slp.values, dlp.values)
self.assertTrue(slp.index.equals(dlp.index))
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index fb1f1c1693fdd..3e9f5b513566d 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1,11 +1,11 @@
# pylint: disable=W0612,E1101
from datetime import datetime
+import warnings
import operator
import nose
import numpy as np
-
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.panel import Panel
@@ -440,7 +440,7 @@ def test_delitem_and_pop(self):
def test_setitem(self):
# LongPanel with one item
- lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
+ lp = self.panel.filter(['ItemA', 'ItemB']).to_frame(filter_observations=True)
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
@@ -1436,12 +1436,12 @@ def test_transpose_copy(self):
def test_to_frame(self):
# filtered
- filtered = self.panel.to_frame()
- expected = self.panel.to_frame().dropna(how='any')
+ filtered = self.panel.to_frame(filter_observations=True)
+ expected = self.panel.to_frame(filter_observations=True).dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
- unfiltered = self.panel.to_frame(filter_observations=False)
+ unfiltered = self.panel.to_frame()
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
@@ -1492,11 +1492,11 @@ def test_to_frame_multi_major(self):
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3, 'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b', 1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
- result = wp.to_frame()
+ result = wp.to_frame(filter_observations=True)
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
- result = wp.to_frame()
+ result = wp.to_frame(filter_observations=True)
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'),
@@ -1511,7 +1511,7 @@ def test_to_frame_multi_major(self):
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
- result = wp.to_frame()
+ result = wp.to_frame(filter_observations=True)
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
@@ -1542,7 +1542,7 @@ def test_to_frame_multi_major_minor(self):
['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'], [-1, -1],
[-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6], [-7, -7],
[-8, -8]]
- result = wp.to_frame()
+ result = wp.to_frame(filter_observations=True)
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
@@ -1550,12 +1550,33 @@ def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
- result = wp.to_frame()
+ result = wp.to_frame(filter_observations=True)
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
+ def test_to_frame_na_drop_warnings(self):
+ df1 = DataFrame(np.random.randn(2, 3), columns=['A', 'B', 'C'],
+ index=['foo', 'bar'])
+ df2 = DataFrame(np.random.randn(2, 3), columns=['A', 'B', 'C'],
+ index=['foo', 'bar'])
+ dict_without_dropped_vals = {'df1': df1, 'df2': df2}
+ ## A panel without dropped vals shouldn't throw warnings
+ with tm.assert_produces_warning(False):
+ Panel(dict_without_dropped_vals).to_frame()
+ ## A panel with dropped vals should throw a Runtime warning if \
+ # filter_observations is True
+ df2_with_na_vals = DataFrame(np.random.randn(2, 3), columns=['A', 'B', 'C'],
+ index=['foo', 'bar'])
+ df2_with_na_vals.loc['foo', 'B'] = np.nan
+ dict_with_dropped_vals = {'df1': df1, 'df2_dropped': df2_with_na_vals}
+ with tm.assert_produces_warning(False):
+ Panel(dict_with_dropped_vals).to_frame()
+ ##if filter_observations is False, a warning shouldn't be throws
+ with tm.assert_produces_warning(RuntimeWarning):
+ Panel(dict_with_dropped_vals).to_frame(filter_observations=True)
+
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
@@ -2079,14 +2100,14 @@ def setUp(self):
panel = tm.makePanel()
tm.add_nans(panel)
- self.panel = panel.to_frame()
- self.unfiltered_panel = panel.to_frame(filter_observations=False)
+ self.panel = panel.to_frame(filter_observations=True)
+ self.unfiltered_panel = panel.to_frame()
def test_ops_differently_indexed(self):
# trying to set non-identically indexed panel
wp = self.panel.to_panel()
wp2 = wp.reindex(major=wp.major_axis[:-1])
- lp2 = wp2.to_frame()
+ lp2 = wp2.to_frame(filter_observations=True)
result = self.panel + lp2
assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
@@ -2197,7 +2218,7 @@ def test_truncate(self):
wp2 = wp.reindex(major=new_index)
- lp2 = wp2.to_frame()
+ lp2 = wp2.to_frame(filter_observations=True)
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
| closes: https://github.com/pydata/pandas/issues/7879
I've added a warning when the <code>NaN</code> values are dropped when calling <code>pd.Panel(dict).to_frame()</code> with the param <code>filter_observations</code> true
I've supplied a test case to be used against the example provided in the issue tracker
| https://api.github.com/repos/pandas-dev/pandas/pulls/8063 | 2014-08-18T21:09:16Z | 2015-01-25T23:40:01Z | null | 2015-01-25T23:40:01Z |
Added chunksize argument to to_sql | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 6e5d254d27b7f..e249585c10784 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3267,6 +3267,12 @@ the database using :func:`~pandas.DataFrame.to_sql`.
data.to_sql('data', engine)
+With some databases, writing large DataFrames can result in errors due to packet size limitations being exceeded. This can be avoided by setting the ``chunksize`` parameter when calling ``to_sql``. For example, the following writes ``data`` to the database in batches of 1000 rows at a time:
+
+.. ipython:: python
+
+ data.to_sql('data', engine, chunksize=1000)
+
.. note::
Due to the limited support for timedelta's in the different database
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index b987104ac2408..b13d055143794 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -425,6 +425,9 @@ Known Issues
Enhancements
~~~~~~~~~~~~
+
+- Added support for a ``chunksize`` parameter to ``to_sql`` function. This allows DataFrame to be written in chunks and avoid packet-size overflow errors (:issue:`8062`)
+
- Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`)
- Added ``layout`` keyword to ``DataFrame.plot`` (:issue:`6667`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ee5016386af4c..d56095b6300a4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -916,7 +916,7 @@ def to_msgpack(self, path_or_buf=None, **kwargs):
return packers.to_msgpack(path_or_buf, self, **kwargs)
def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True,
- index_label=None):
+ index_label=None, chunksize=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -942,12 +942,15 @@ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True,
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
+ chunksize : int, default None
+ If not None, then rows will be written in batches of this size at a
+ time. If None, all rows will be written at once.
"""
from pandas.io import sql
sql.to_sql(
self, name, con, flavor=flavor, if_exists=if_exists, index=index,
- index_label=index_label)
+ index_label=index_label, chunksize=chunksize)
def to_pickle(self, path):
"""
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index cb234f825a51e..914ade45adaa1 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -432,7 +432,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
- index_label=None):
+ index_label=None, chunksize=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -459,6 +459,9 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
+ chunksize : int, default None
+ If not None, then rows will be written in batches of this size at a
+ time. If None, all rows will be written at once.
"""
if if_exists not in ('fail', 'replace', 'append'):
@@ -472,7 +475,7 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
raise NotImplementedError
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
- index_label=index_label)
+ index_label=index_label, chunksize=chunksize)
def has_table(table_name, con, flavor='sqlite'):
@@ -597,18 +600,30 @@ def insert_data(self):
return temp
- def insert(self):
+ def insert(self, chunksize=None):
+
ins = self.insert_statement()
- data_list = []
temp = self.insert_data()
keys = list(map(str, temp.columns))
- for t in temp.itertuples():
- data = dict((k, self.maybe_asscalar(v))
- for k, v in zip(keys, t[1:]))
- data_list.append(data)
-
- self.pd_sql.execute(ins, data_list)
+ nrows = len(temp)
+ if chunksize is None:
+ chunksize = nrows
+ chunks = int(nrows / chunksize) + 1
+
+ con = self.pd_sql.engine.connect()
+ with con.begin() as trans:
+ for i in range(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, nrows)
+ if start_i >= end_i:
+ break
+ data_list = []
+ for t in temp.iloc[start_i:end_i].itertuples():
+ data = dict((k, self.maybe_asscalar(v))
+ for k, v in zip(keys, t[1:]))
+ data_list.append(data)
+ con.execute(ins, data_list)
def read(self, coerce_float=True, parse_dates=None, columns=None):
@@ -843,11 +858,11 @@ def read_sql(self, sql, index_col=None, coerce_float=True,
return data_frame
def to_sql(self, frame, name, if_exists='fail', index=True,
- index_label=None):
+ index_label=None, chunksize=None):
table = PandasSQLTable(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label)
- table.insert()
+ table.insert(chunksize)
@property
def tables(self):
@@ -948,19 +963,30 @@ def insert_statement(self):
self.name, col_names, wildcards)
return insert_statement
- def insert(self):
+ def insert(self, chunksize=None):
+
ins = self.insert_statement()
temp = self.insert_data()
- data_list = []
-
- for t in temp.itertuples():
- data = tuple((self.maybe_asscalar(v) for v in t[1:]))
- data_list.append(data)
- cur = self.pd_sql.con.cursor()
- cur.executemany(ins, data_list)
- cur.close()
- self.pd_sql.con.commit()
+ nrows = len(temp)
+ if chunksize is None:
+ chunksize = nrows
+ chunks = int(nrows / chunksize) + 1
+
+ with self.pd_sql.con:
+ for i in range(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, nrows)
+ if start_i >= end_i:
+ break
+ data_list = []
+ for t in temp.iloc[start_i:end_i].itertuples():
+ data = tuple((self.maybe_asscalar(v) for v in t[1:]))
+ data_list.append(data)
+
+ cur = self.pd_sql.con.cursor()
+ cur.executemany(ins, data_list)
+ cur.close()
def _create_table_statement(self):
"Return a CREATE TABLE statement to suit the contents of a DataFrame."
@@ -1069,7 +1095,7 @@ def _fetchall_as_list(self, cur):
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
- index_label=None):
+ index_label=None, chunksize=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -1087,7 +1113,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
table = PandasSQLTableLegacy(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label)
- table.insert()
+ table.insert(chunksize)
def has_table(self, name):
flavor_map = {
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 6a0130e515d59..68f170759b666 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -455,6 +455,14 @@ def test_roundtrip(self):
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
+ def test_roundtrip_chunksize(self):
+ sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
+ index=False, flavor='sqlite', chunksize=2)
+ result = sql.read_sql_query(
+ 'SELECT * FROM test_frame_roundtrip',
+ con=self.conn)
+ tm.assert_frame_equal(result, self.test_frame1)
+
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
| Large dataframes may fail to write to a database in one go due to packet size errors . This add a `chunksize` parameter which will write the dataframe to the database in batches. See https://github.com/pydata/pandas/issues/7347 .
Notice that there is quite a bit of code duplication between the SQLAlchemy and legacy APIs. This could be factored out if desired at the cost of a few private methods.
Closes #7347
| https://api.github.com/repos/pandas-dev/pandas/pulls/8062 | 2014-08-18T20:37:40Z | 2014-08-22T07:32:12Z | 2014-08-22T07:32:12Z | 2014-09-15T21:49:48Z |
ENH: improve datetime string parsing with CustomBusinessDay | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index cd37f4000e5a2..808be75ee857d 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -603,7 +603,7 @@ def _to_dt64(dt, dtype='datetime64'):
i8 = tslib.pydt_to_i8(dt)
dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
dt = Timestamp(dt)
- dt = np.datetime64(dt)
+ dt = np.datetime64(to_datetime(dt))
if dt.dtype.name != dtype:
dt = dt.astype(dtype)
return dt
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index f6f91760e8ad8..9ae1c3f4a627d 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -815,6 +815,14 @@ def test_holidays(self):
rs = dt + tday
self.assertEqual(rs, xp)
+ # test ability to do date parsing
+ holidays = ['5/13/2014', '20140514', '2014/05/15']
+ tday = CDay(holidays=holidays)
+ dt = datetime(2014, 5, 12)
+ xp = datetime(2014, 5, 16)
+ rs = dt + tday
+ self.assertEqual(rs, xp)
+
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
| Currently CustomBusinessDay requires a strict format for holiday definition
```
CustomBusinessDay(holidays=['2014-05-12'])
```
but the following would raise a ValueError:
```
CustomBusinessDay(holidays=['5/12/2014'])
```
this PR makes the date parsing easier and adds a unit test
| https://api.github.com/repos/pandas-dev/pandas/pulls/8061 | 2014-08-18T18:32:43Z | 2015-04-08T15:21:33Z | null | 2015-04-08T17:19:04Z |
BUG: rolling_count() and expanding_*() with zero-length args; rolling/expanding_apply with min_periods=0 | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index d608304511a08..69d8632677b67 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -104,9 +104,9 @@ API changes
:func:`rolling_std`, :func:`rolling_var`, :func:`rolling_skew`, :func:`rolling_kurt`, and :func:`rolling_quantile`,
:func:`rolling_cov`, :func:`rolling_corr`, :func:`rolling_corr_pairwise`,
:func:`rolling_window`, and :func:`rolling_apply` with ``center=True`` previously would return a result of the same
- structure as the input ``arg`` with ``NaN``s in the final ``(window-1)/2`` entries.
+ structure as the input ``arg`` with ``NaN`` in the final ``(window-1)/2`` entries.
Now the final ``(window-1)/2`` entries of the result are calculated as if the input ``arg`` were followed
- by ``(window-1)/2`` ``NaN``s. (:issue:`7925`)
+ by ``(window-1)/2`` ``NaN`` values. (:issue:`7925`)
Prior behavior (note final value is ``NaN``):
@@ -556,8 +556,8 @@ Bug Fixes
returning results with columns sorted by name and producing an error for non-unique columns;
now handles non-unique columns and returns columns in original order
(except for the case of two DataFrames with ``pairwise=False``, where behavior is unchanged) (:issue:`7542`)
-
-
+- Bug in :func:`rolling_count` and ``expanding_*`` functions unnecessarily producing error message for zero-length data (:issue:`8056`)
+- Bug in :func:`rolling_apply` and :func:`expanding_apply`` interpreting ``min_periods=0`` as ``min_periods=1 (:issue:`8080`)
- Bug in ``DataFrame.plot`` and ``Series.plot`` may ignore ``rot`` and ``fontsize`` keywords (:issue:`7844`)
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 1c1d32e1d2a20..c0f0590c22a25 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -712,17 +712,15 @@ def rank_2d_generic(object in_arr, axis=0, ties_method='average',
#
# -
-def _check_minp(win, minp, N):
+def _check_minp(win, minp, N, floor=1):
if minp > win:
raise ValueError('min_periods (%d) must be <= window (%d)'
% (minp, win))
elif minp > N:
minp = N + 1
- elif minp == 0:
- minp = 1
elif minp < 0:
raise ValueError('min_periods must be >= 0')
- return minp
+ return max(minp, floor)
# original C implementation by N. Devillard.
# This code in public domain.
@@ -1766,7 +1764,7 @@ def roll_generic(ndarray[float64_t, cast=True] input, int win,
if n == 0:
return input
- minp = _check_minp(win, minp, n)
+ minp = _check_minp(win, minp, n, floor=0)
output = np.empty(n, dtype=float)
counts = roll_sum(np.isfinite(input).astype(float), win, minp)
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 74545a08d45b6..a2c7cc30e4798 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -206,7 +206,7 @@ def rolling_count(arg, window, freq=None, center=False, how=None):
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
- result = rolling_sum(converted, window, min_periods=1,
+ result = rolling_sum(converted, window, min_periods=0,
center=center) # already converted
# putmask here?
@@ -280,7 +280,8 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False):
elif isinstance(arg1, DataFrame):
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
- result.columns = frame_template.columns[result.columns]
+ if len(result.columns) > 0:
+ result.columns = frame_template.columns[result.columns]
return result
results = {}
@@ -314,8 +315,10 @@ def dataframe_from_int_dict(data, frame_template):
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]))
p = Panel.from_dict(results).swapaxes('items', 'major')
- p.major_axis = arg1.columns[p.major_axis]
- p.minor_axis = arg2.columns[p.minor_axis]
+ if len(p.major_axis) > 0:
+ p.major_axis = arg1.columns[p.major_axis]
+ if len(p.minor_axis) > 0:
+ p.minor_axis = arg2.columns[p.minor_axis]
return p
else:
raise ValueError("'pairwise' is not True/False")
@@ -372,17 +375,22 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
y : type of input
"""
arg = _conv_timerule(arg, freq, how)
- offset = int((window - 1) / 2.) if center else 0
- additional_nans = np.array([np.NaN] * offset)
- calc = lambda x: func(np.concatenate((x, additional_nans)) if center else x,
- window, minp=minp, args=args, kwargs=kwargs,
- **kwds)
+
return_hook, values = _process_data_structure(arg)
- # actually calculate the moment. Faster way to do this?
- if values.ndim > 1:
- result = np.apply_along_axis(calc, axis, values)
+
+ if values.size == 0:
+ result = values.copy()
else:
- result = calc(values)
+ # actually calculate the moment. Faster way to do this?
+ offset = int((window - 1) / 2.) if center else 0
+ additional_nans = np.array([np.NaN] * offset)
+ calc = lambda x: func(np.concatenate((x, additional_nans)) if center else x,
+ window, minp=minp, args=args, kwargs=kwargs,
+ **kwds)
+ if values.ndim > 1:
+ result = np.apply_along_axis(calc, axis, values)
+ else:
+ result = calc(values)
if center:
result = _center_window(result, window, axis)
@@ -817,11 +825,14 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None,
arg = _conv_timerule(arg, freq, how)
return_hook, values = _process_data_structure(arg)
- offset = int((len(window) - 1) / 2.) if center else 0
- additional_nans = np.array([np.NaN] * offset)
- f = lambda x: algos.roll_window(np.concatenate((x, additional_nans)) if center else x,
- window, minp, avg=mean)
- result = np.apply_along_axis(f, axis, values)
+ if values.size == 0:
+ result = values.copy()
+ else:
+ offset = int((len(window) - 1) / 2.) if center else 0
+ additional_nans = np.array([np.NaN] * offset)
+ f = lambda x: algos.roll_window(np.concatenate((x, additional_nans)) if center else x,
+ window, minp, avg=mean)
+ result = np.apply_along_axis(f, axis, values)
if center:
result = _center_window(result, len(window), axis)
@@ -856,7 +867,7 @@ def _expanding_func(func, desc, check_minp=_use_window):
@Appender(_doc_template)
@wraps(func)
def f(arg, min_periods=1, freq=None, **kwargs):
- window = len(arg)
+ window = max(len(arg), min_periods) if min_periods else len(arg)
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 359868262a681..2c2a19660f266 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -244,6 +244,12 @@ def roll_mean(x, window, min_periods=None, freq=None, center=False):
center=center)
self._check_moment_func(roll_mean, np.mean)
+ # GH 8080
+ s = Series([None, None, None])
+ result = mom.rolling_apply(s, 2, lambda x: len(x), min_periods=0)
+ expected = Series([1., 2., 2.])
+ assert_series_equal(result, expected)
+
def test_rolling_apply_out_of_bounds(self):
# #1850
arr = np.arange(4)
@@ -814,6 +820,12 @@ def expanding_mean(x, min_periods=1, freq=None):
freq=freq)
self._check_expanding(expanding_mean, np.mean)
+ # GH 8080
+ s = Series([None, None, None])
+ result = mom.expanding_apply(s, lambda x: len(x), min_periods=0)
+ expected = Series([1., 2., 3.])
+ assert_series_equal(result, expected)
+
def test_expanding_apply_args_kwargs(self):
def mean_w_arg(x, const):
return np.mean(x) + const
@@ -989,6 +1001,77 @@ def test_rolling_functions_window_non_shrinkage(self):
df_result_panel = f(df)
assert_panel_equal(df_result_panel, df_expected_panel)
+ def test_moment_functions_zero_length(self):
+ # GH 8056
+ s = Series()
+ s_expected = s
+ df1 = DataFrame()
+ df1_expected = df1
+ df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns, minor_axis=df1.columns)
+ df2 = DataFrame(columns=['a'])
+ df2_expected = df2
+ df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns, minor_axis=df2.columns)
+
+ functions = [lambda x: mom.expanding_count(x),
+ lambda x: mom.expanding_cov(x, x, pairwise=False, min_periods=5),
+ lambda x: mom.expanding_corr(x, x, pairwise=False, min_periods=5),
+ lambda x: mom.expanding_max(x, min_periods=5),
+ lambda x: mom.expanding_min(x, min_periods=5),
+ lambda x: mom.expanding_sum(x, min_periods=5),
+ lambda x: mom.expanding_mean(x, min_periods=5),
+ lambda x: mom.expanding_std(x, min_periods=5),
+ lambda x: mom.expanding_var(x, min_periods=5),
+ lambda x: mom.expanding_skew(x, min_periods=5),
+ lambda x: mom.expanding_kurt(x, min_periods=5),
+ lambda x: mom.expanding_quantile(x, quantile=0.5, min_periods=5),
+ lambda x: mom.expanding_median(x, min_periods=5),
+ lambda x: mom.expanding_apply(x, func=sum, min_periods=5),
+ lambda x: mom.rolling_count(x, window=10),
+ lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
+ lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
+ lambda x: mom.rolling_max(x, window=10, min_periods=5),
+ lambda x: mom.rolling_min(x, window=10, min_periods=5),
+ lambda x: mom.rolling_sum(x, window=10, min_periods=5),
+ lambda x: mom.rolling_mean(x, window=10, min_periods=5),
+ lambda x: mom.rolling_std(x, window=10, min_periods=5),
+ lambda x: mom.rolling_var(x, window=10, min_periods=5),
+ lambda x: mom.rolling_skew(x, window=10, min_periods=5),
+ lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
+ lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
+ lambda x: mom.rolling_median(x, window=10, min_periods=5),
+ lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
+ lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
+ ]
+ for f in functions:
+ try:
+ s_result = f(s)
+ assert_series_equal(s_result, s_expected)
+
+ df1_result = f(df1)
+ assert_frame_equal(df1_result, df1_expected)
+
+ df2_result = f(df2)
+ assert_frame_equal(df2_result, df2_expected)
+ except (ImportError):
+
+ # scipy needed for rolling_window
+ continue
+
+ functions = [lambda x: mom.expanding_cov(x, x, pairwise=True, min_periods=5),
+ lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
+ lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
+ lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
+ # rolling_corr_pairwise is depracated, so the following line should be deleted
+ # when rolling_corr_pairwise is removed.
+ lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
+ ]
+ for f in functions:
+ df1_result_panel = f(df1)
+ assert_panel_equal(df1_result_panel, df1_expected_panel)
+
+ df2_result_panel = f(df2)
+ assert_panel_equal(df2_result_panel, df2_expected_panel)
+
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,5], [3, 2], [3,9]], columns=['A','B'])
| Closes https://github.com/pydata/pandas/issues/8056
Closes https://github.com/pydata/pandas/issues/8080
| https://api.github.com/repos/pandas-dev/pandas/pulls/8059 | 2014-08-18T17:03:26Z | 2014-08-28T17:34:49Z | 2014-08-28T17:34:49Z | 2014-09-10T00:11:40Z |
TST: Fix tseries.converter test for MPL1.4 | diff --git a/pandas/tseries/tests/test_converter.py b/pandas/tseries/tests/test_converter.py
index a1b873e1c0bea..d3287a01cd1da 100644
--- a/pandas/tseries/tests/test_converter.py
+++ b/pandas/tseries/tests/test_converter.py
@@ -74,7 +74,7 @@ def test_dateindex_conversion(self):
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k = 10, freq = freq)
rs = self.dtc.convert(dateindex, None, None)
- xp = converter.dates.date2num(dateindex)
+ xp = converter.dates.date2num(dateindex._mpl_repr())
np_assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
| Closes #7233.
Other tests have no problem with MPL1.4.dev:
- tests\test_graphics.py
- tseries\tests\test_plotting.py
| https://api.github.com/repos/pandas-dev/pandas/pulls/8054 | 2014-08-18T12:46:26Z | 2014-08-19T12:50:59Z | 2014-08-19T12:50:59Z | 2014-08-19T13:34:02Z |
TST: catch invalid options data parsing | diff --git a/pandas/io/data.py b/pandas/io/data.py
index c40b91ffa91c9..d15522a33d4b1 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -735,16 +735,20 @@ def _get_option_data(self, month, year, expiry, name):
raise RemoteDataError("Table location {0} invalid, {1} tables"
" found".format(table_loc, ntables))
- option_data = _parse_options_data(tables[table_loc])
- option_data['Type'] = name[:-1]
- option_data = self._process_data(option_data, name[:-1])
+ try:
+ option_data = _parse_options_data(tables[table_loc])
+ option_data['Type'] = name[:-1]
+ option_data = self._process_data(option_data, name[:-1])
+
+ if month == CUR_MONTH and year == CUR_YEAR:
+ setattr(self, name, option_data)
- if month == CUR_MONTH and year == CUR_YEAR:
+ name += m1 + str(year)[-2:]
setattr(self, name, option_data)
+ return option_data
- name += m1 + str(year)[-2:]
- setattr(self, name, option_data)
- return option_data
+ except (Exception) as e:
+ raise RemoteDataError("Cannot retrieve Table data {0}".format(str(e)))
def get_call_data(self, month=None, year=None, expiry=None):
"""
| xref #8052
| https://api.github.com/repos/pandas-dev/pandas/pulls/8053 | 2014-08-18T12:45:15Z | 2014-08-18T13:05:59Z | 2014-08-18T13:05:59Z | 2014-08-18T13:05:59Z |
BUG: Bug in DataFrameGroupby.transform when transforming with a passed non-sorted key (GH8046) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 2e3841e8a00c3..0a857adbe84e8 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -465,7 +465,7 @@ Bug Fixes
when matching block and manager items, when there's only one block there's no ambiguity (:issue:`7794`)
- Bug in HDFStore iteration when passing a where (:issue:`8014`)
-
+- Bug in DataFrameGroupby.transform when transforming with a passed non-sorted key (:issue:`8046`)
- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index ce57a9c03d570..eaaf85a1f5f84 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -475,6 +475,24 @@ def _set_selection_from_grouper(self):
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
+ def _set_result_index_ordered(self, result):
+ # set the result index on the passed values object
+ # return the new object
+ # related 8046
+
+ # the values/counts are repeated according to the group index
+ indices = self.indices
+
+ # shortcut of we have an already ordered grouper
+
+ if not Index(self.grouper.group_info[0]).is_monotonic:
+ index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
+ result.index = index
+ result = result.sort_index()
+
+ result.index = self.obj.index
+ return result
+
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
@@ -2087,7 +2105,6 @@ def _convert_grouper(axis, grouper):
else:
return grouper
-
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
@@ -2319,18 +2336,7 @@ def _transform_fast(self, func):
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
- # the values/counts are repeated according to the group index
- indices = self.indices
-
- # shortcut of we have an already ordered grouper
- if Index(self.grouper.group_info[0]).is_monotonic:
- result = Series(values, index=self.obj.index)
- else:
- index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
- result = Series(values, index=index).sort_index()
- result.index = self.obj.index
-
- return result
+ return self._set_result_index_ordered(Series(values))
def filter(self, func, dropna=True, *args, **kwargs):
"""
@@ -2842,8 +2848,7 @@ def _transform_general(self, func, *args, **kwargs):
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
- concatenated.sort_index(inplace=True)
- return concatenated
+ return self._set_result_index_ordered(concatenated)
def transform(self, func, *args, **kwargs):
"""
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 6f39750de9d9b..5d087a1ae0810 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -796,6 +796,26 @@ def test_transform(self):
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
+ # GH 8046
+ # make sure that we preserve the input order
+
+ df = DataFrame(np.arange(6,dtype='int64').reshape(3,2), columns=["a","b"], index=[0,2,1])
+ key = [0,0,1]
+ expected = df.sort_index().groupby(key).transform(lambda x: x-x.mean()).groupby(key).mean()
+ result = df.groupby(key).transform(lambda x: x-x.mean()).groupby(key).mean()
+ assert_frame_equal(result, expected)
+
+ def demean(arr):
+ return arr - arr.mean()
+
+ people = DataFrame(np.random.randn(5, 5),
+ columns=['a', 'b', 'c', 'd', 'e'],
+ index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
+ key = ['one', 'two', 'one', 'two', 'one']
+ result = people.groupby(key).transform(demean).groupby(key).mean()
+ expected = people.groupby(key).apply(demean).groupby(key).mean()
+ assert_frame_equal(result, expected)
+
def test_transform_fast(self):
df = DataFrame( { 'id' : np.arange( 100000 ) / 3,
@@ -2924,7 +2944,7 @@ def __call__(self, x):
lambda x: sum(x),
lambda x: x.sum(),
partial(sum), fn_class()]
-
+
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
| closes #8046
| https://api.github.com/repos/pandas-dev/pandas/pulls/8049 | 2014-08-17T22:03:29Z | 2014-08-18T15:26:26Z | 2014-08-18T15:26:26Z | 2014-09-30T21:35:39Z |
Update README to use pydata instead of pystatsmodels google group | diff --git a/README.md b/README.md
index 6a645dc64123d..778984aa4cf52 100644
--- a/README.md
+++ b/README.md
@@ -218,7 +218,6 @@ has been under active development since then.
Since pandas development is related to a number of other scientific
Python projects, questions are welcome on the scipy-user mailing
list. Specialized discussions or design issues should take place on
-the pystatsmodels mailing list / Google group, where
-``scikits.statsmodels`` and other libraries will also be discussed:
+the PyData mailing list / Google group:
-http://groups.google.com/group/pystatsmodels
+https://groups.google.com/forum/#!forum/pydata
| I'm pretty sure PyData is the main mailing list for pandas now, e.g., as seen on the website:
http://pandas.pydata.org/community.html
| https://api.github.com/repos/pandas-dev/pandas/pulls/8047 | 2014-08-17T09:01:28Z | 2014-08-17T09:09:15Z | 2014-08-17T09:09:15Z | 2014-08-17T09:32:27Z |
PERF: StataReader is slow due to excessive lookups for missing vales | diff --git a/doc/source/io.rst b/doc/source/io.rst
index baf684056e169..6e5d254d27b7f 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3558,6 +3558,13 @@ read and used to create a ``Categorical`` variable from them. Value labels can
also be retrieved by the function ``variable_labels``, which requires data to be
called before (see ``pandas.io.stata.StataReader``).
+The parameter ``convert_missing`` indicates whether missing value
+representations in Stata should be preserved. If ``False`` (the default),
+missing values are represented as ``np.nan``. If ``True``, missing values are
+represented using ``StataMissingValue`` objects, and columns containing missing
+values will have ``dtype`` set to ``object``.
+
+
The StataReader supports .dta Formats 104, 105, 108, 113-115 and 117.
Alternatively, the function :func:`~pandas.io.stata.read_stata` can be used
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 40a95ab103b0b..85f620fcd4b99 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -144,6 +144,11 @@ API changes
strings must contain 244 or fewer characters. Attempting to write Stata
dta files with strings longer than 244 characters raises a ``ValueError``. (:issue:`7858`)
+- ``read_stata`` and ``StataReader`` can import missing data information into a
+ ``DataFrame`` by setting the argument ``convert_missing`` to ``True``. When
+ using this options, missing values are returned as ``StataMissingValue``
+ objects and columns containing missing values have ``object`` data type. (:issue:`8045`)
+
- ``Index.isin`` now supports a ``level`` argument to specify which index level
to use for membership tests (:issue:`7892`, :issue:`7890`)
@@ -414,6 +419,7 @@ Performance
- Performance improvements in ``DatetimeIndex.__iter__`` to allow faster iteration (:issue:`7683`)
- Performance improvements in ``Period`` creation (and ``PeriodIndex`` setitem) (:issue:`5155`)
- Improvements in Series.transform for significant performance gains (revised) (:issue:`6496`)
+- Performance improvements in ``StataReader`` when reading large files (:issue:`8040`)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 5b5ce3e59e16e..c9a3104eec3f0 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -9,7 +9,6 @@
You can find more information on http://presbrey.mit.edu/PyDTA and
http://statsmodels.sourceforge.net/devel/
"""
-# TODO: Fix this module so it can use cross-compatible zip, map, and range
import numpy as np
import sys
@@ -20,14 +19,16 @@
from pandas.core.categorical import Categorical
import datetime
from pandas import compat
-from pandas.compat import long, lrange, lmap, lzip, text_type, string_types
+from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
+ zip
from pandas import isnull
from pandas.io.common import get_filepath_or_buffer
from pandas.lib import max_len_string_array, is_string_array
from pandas.tslib import NaT
def read_stata(filepath_or_buffer, convert_dates=True,
- convert_categoricals=True, encoding=None, index=None):
+ convert_categoricals=True, encoding=None, index=None,
+ convert_missing=False):
"""
Read Stata file into DataFrame
@@ -44,10 +45,19 @@ def read_stata(filepath_or_buffer, convert_dates=True,
support unicode. None defaults to cp1252.
index : identifier of index column
identifier of column that should be used as index of the DataFrame
+ convert_missing : boolean, defaults to False
+ Flag indicating whether to convert missing values to their Stata
+ representations. If False, missing values are replaced with nans.
+ If True, columns containing missing values are returned with
+ object data types and missing values are represented by
+ StataMissingValue objects.
"""
reader = StataReader(filepath_or_buffer, encoding)
- return reader.data(convert_dates, convert_categoricals, index)
+ return reader.data(convert_dates,
+ convert_categoricals,
+ index,
+ convert_missing)
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
@@ -291,35 +301,76 @@ class StataMissingValue(StringMixin):
Parameters
-----------
- offset
- value
+ value : int8, int16, int32, float32 or float64
+ The Stata missing value code
Attributes
----------
- string
- value
+ string : string
+ String representation of the Stata missing value
+ value : int8, int16, int32, float32 or float64
+ The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
+
+ Integer missing values make the code '.', '.a', ..., '.z' to the ranges
+ 101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
+ 2147483647 (for int32). Missing values for floating point data types are
+ more complex but the pattern is simple to discern from the following table.
+
+ np.float32 missing values (float in Stata)
+ 0000007f .
+ 0008007f .a
+ 0010007f .b
+ ...
+ 00c0007f .x
+ 00c8007f .y
+ 00d0007f .z
+
+ np.float64 missing values (double in Stata)
+ 000000000000e07f .
+ 000000000001e07f .a
+ 000000000002e07f .b
+ ...
+ 000000000018e07f .x
+ 000000000019e07f .y
+ 00000000001ae07f .z
"""
- # TODO: Needs test
- def __init__(self, offset, value):
+
+ # Construct a dictionary of missing values
+ MISSING_VALUES = {}
+ bases = (101, 32741, 2147483621)
+ for b in bases:
+ MISSING_VALUES[b] = '.'
+ for i in range(1, 27):
+ MISSING_VALUES[i + b] = '.' + chr(96 + i)
+
+ base = b'\x00\x00\x00\x7f'
+ increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
+ for i in range(27):
+ value = struct.unpack('<f', base)[0]
+ MISSING_VALUES[value] = '.'
+ if i > 0:
+ MISSING_VALUES[value] += chr(96 + i)
+ int_value = struct.unpack('<i', struct.pack('<f', value))[0] + increment
+ base = struct.pack('<i', int_value)
+
+ base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
+ increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
+ for i in range(27):
+ value = struct.unpack('<d', base)[0]
+ MISSING_VALUES[value] = '.'
+ if i > 0:
+ MISSING_VALUES[value] += chr(96 + i)
+ int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
+ base = struct.pack('q', int_value)
+
+ def __init__(self, value):
self._value = value
- value_type = type(value)
- if value_type in int:
- loc = value - offset
- elif value_type in (float, np.float32, np.float64):
- if value <= np.finfo(np.float32).max: # float32
- conv_str, byte_loc, scale = '<f', 1, 8
- else:
- conv_str, byte_loc, scale = '<d', 5, 1
- value_bytes = struct.pack(conv_str, value)
- loc = (struct.unpack('<b', value_bytes[byte_loc])[0] / scale) + 0
- else:
- # Should never be hit
- loc = 0
- self._str = loc is 0 and '.' or ('.' + chr(loc + 96))
+ self._str = self.MISSING_VALUES[value]
+
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
@@ -333,6 +384,10 @@ def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__)
+ and self.string == other.string and self.value == other.value)
+
class StataParser(object):
_default_encoding = 'cp1252'
@@ -711,15 +766,7 @@ def _col_size(self, k=None):
return self.col_sizes[k]
def _unpack(self, fmt, byt):
- d = struct.unpack(self.byteorder + fmt, byt)[0]
- if fmt[-1] in self.VALID_RANGE:
- nmin, nmax = self.VALID_RANGE[fmt[-1]]
- if d < nmin or d > nmax:
- if self._missing_values:
- return StataMissingValue(nmax, d)
- else:
- return None
- return d
+ return struct.unpack(self.byteorder + fmt, byt)[0]
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None: # have bytes not strings,
@@ -752,16 +799,15 @@ def _next(self):
)
return data
else:
- return list(
- map(
+ return lmap(
lambda i: self._unpack(typlist[i],
self.path_or_buf.read(
self._col_size(i)
)),
range(self.nvar)
- )
)
+
def _dataset(self):
"""
Returns a Python generator object for iterating over the dataset.
@@ -853,7 +899,8 @@ def _read_strls(self):
self.GSO[v_o] = self.path_or_buf.read(length-1)
self.path_or_buf.read(1) # zero-termination
- def data(self, convert_dates=True, convert_categoricals=True, index=None):
+ def data(self, convert_dates=True, convert_categoricals=True, index=None,
+ convert_missing=False):
"""
Reads observations from Stata file, converting them into a dataframe
@@ -866,11 +913,18 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None):
variables
index : identifier of index column
identifier of column that should be used as index of the DataFrame
+ convert_missing : boolean, defaults to False
+ Flag indicating whether to convert missing values to their Stata
+ representation. If False, missing values are replaced with
+ nans. If True, columns containing missing values are returned with
+ object data types and missing values are represented by
+ StataMissingValue objects.
Returns
-------
y : DataFrame instance
"""
+ self._missing_values = convert_missing
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
@@ -894,18 +948,62 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None):
if convert_categoricals:
self._read_value_labels()
+ # TODO: Refactor to use a dictionary constructor and the correct dtype from the start?
if len(data)==0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame(data, columns=self.varlist, index=index)
cols_ = np.where(self.dtyplist)[0]
+
+ # Convert columns (if needed) to match input type
+ index = data.index
+ requires_type_conversion = False
+ data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
- if data[col].dtype is not np.dtype(object):
- data[col] = Series(data[col], data[col].index,
- self.dtyplist[i])
+ dtype = data[col].dtype
+ if (dtype != np.dtype(object)) and (dtype != self.dtyplist[i]):
+ requires_type_conversion = True
+ data_formatted.append((col, Series(data[col], index, self.dtyplist[i])))
+ else:
+ data_formatted.append((col, data[col]))
+ if requires_type_conversion:
+ data = DataFrame.from_items(data_formatted)
+ del data_formatted
+
+ # Check for missing values, and replace if found
+ for i, colname in enumerate(data):
+ fmt = self.typlist[i]
+ if fmt not in self.VALID_RANGE:
+ continue
+
+ nmin, nmax = self.VALID_RANGE[fmt]
+ series = data[colname]
+ missing = np.logical_or(series < nmin, series > nmax)
+
+ if not missing.any():
+ continue
+
+ if self._missing_values: # Replacement follows Stata notation
+ missing_loc = np.argwhere(missing)
+ umissing, umissing_loc = np.unique(series[missing],
+ return_inverse=True)
+ replacement = Series(series, dtype=np.object)
+ for i, um in enumerate(umissing):
+ missing_value = StataMissingValue(um)
+
+ loc = missing_loc[umissing_loc == i]
+ replacement.iloc[loc] = missing_value
+ else: # All replacements are identical
+ dtype = series.dtype
+ if dtype not in (np.float32, np.float64):
+ dtype = np.float64
+ replacement = Series(series, dtype=dtype)
+ replacement[missing] = np.nan
+
+ data[colname] = replacement
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
diff --git a/pandas/io/tests/data/stata8_113.dta b/pandas/io/tests/data/stata8_113.dta
new file mode 100644
index 0000000000000..9b0831746025e
Binary files /dev/null and b/pandas/io/tests/data/stata8_113.dta differ
diff --git a/pandas/io/tests/data/stata8_115.dta b/pandas/io/tests/data/stata8_115.dta
new file mode 100644
index 0000000000000..bb78368b3462b
Binary files /dev/null and b/pandas/io/tests/data/stata8_115.dta differ
diff --git a/pandas/io/tests/data/stata8_117.dta b/pandas/io/tests/data/stata8_117.dta
new file mode 100644
index 0000000000000..fcfa7abd7b0d9
Binary files /dev/null and b/pandas/io/tests/data/stata8_117.dta differ
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 459a1fe6c0e89..9d630bf83ced7 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -5,16 +5,18 @@
import os
import warnings
import nose
+import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
+from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
- PossiblePrecisionLoss)
+ PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.util.misc import is_little_endian
from pandas import compat
@@ -71,6 +73,10 @@ def setUp(self):
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
+ self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
+ self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
+ self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
+
def read_dta(self, file):
return read_stata(file, convert_dates=True)
@@ -589,6 +595,50 @@ def test_excessively_long_string(self):
with tm.ensure_clean() as path:
original.to_stata(path)
+ def test_missing_value_generator(self):
+ types = ('b','h','l')
+ df = DataFrame([[0.0]],columns=['float_'])
+ with tm.ensure_clean() as path:
+ df.to_stata(path)
+ valid_range = StataReader(path).VALID_RANGE
+ expected_values = ['.' + chr(97 + i) for i in range(26)]
+ expected_values.insert(0, '.')
+ for t in types:
+ offset = valid_range[t][1]
+ for i in range(0,27):
+ val = StataMissingValue(offset+1+i)
+ self.assertTrue(val.string == expected_values[i])
+
+ # Test extremes for floats
+ val = StataMissingValue(struct.unpack('<f',b'\x00\x00\x00\x7f')[0])
+ self.assertTrue(val.string == '.')
+ val = StataMissingValue(struct.unpack('<f',b'\x00\xd0\x00\x7f')[0])
+ self.assertTrue(val.string == '.z')
+
+ # Test extremes for floats
+ val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
+ self.assertTrue(val.string == '.')
+ val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
+ self.assertTrue(val.string == '.z')
+
+ def test_missing_value_conversion(self):
+ columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
+ smv = StataMissingValue(101)
+ keys = [key for key in iterkeys(smv.MISSING_VALUES)]
+ keys.sort()
+ data = []
+ for i in range(27):
+ row = [StataMissingValue(keys[i+(j*27)]) for j in range(5)]
+ data.append(row)
+ expected = DataFrame(data,columns=columns)
+
+ parsed_113 = read_stata(self.dta17_113, convert_missing=True)
+ parsed_115 = read_stata(self.dta17_115, convert_missing=True)
+ parsed_117 = read_stata(self.dta17_117, convert_missing=True)
+
+ tm.assert_frame_equal(expected, parsed_113)
+ tm.assert_frame_equal(expected, parsed_115)
+ tm.assert_frame_equal(expected, parsed_117)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/vb_suite/packers.py b/vb_suite/packers.py
index 40227b3c9bc48..cb933746bef83 100644
--- a/vb_suite/packers.py
+++ b/vb_suite/packers.py
@@ -121,3 +121,25 @@ def remove(f):
packers_write_json_date_index = Benchmark("df.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date)
setup = setup + setup_int_index
packers_write_json = Benchmark("df.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date)
+
+#----------------------------------------------------------------------
+# stata
+
+setup = common_setup + """
+df.to_stata(f, {'index': 'tc'})
+"""
+packers_read_stata = Benchmark("pd.read_stata(f)", setup, start_date=start_date)
+
+packers_write_stata = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date)
+
+setup = common_setup + """
+df['int8_'] = [randint(-127,100) for _ in range(N)]
+df['int16_'] = [randint(-127,100) for _ in range(N)]
+df['int32_'] = [randint(-127,100) for _ in range(N)]
+df['float32_'] = np.array(randn(N), dtype=np.float32)
+df.to_stata(f, {'index': 'tc'})
+"""
+
+packers_read_stata_with_int = Benchmark("pd.read_stata(f)", setup, start_date=start_date)
+
+packers_write_stata_with_int = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date)
| Previous versions of StataReader did not correctly check for missing values.
This was fixed in a previous PR, but these checks had previously been
implemented on a value-by-value basis. This has now been changed to a
vectorized version that is orders of magnitude faster.
Additionally, a benchmark was added to monitor performance issues in the
future.
Closes #8040
| https://api.github.com/repos/pandas-dev/pandas/pulls/8045 | 2014-08-16T22:08:26Z | 2014-08-18T20:55:52Z | 2014-08-18T20:55:52Z | 2014-08-22T15:42:41Z |
Added parameter float_precision to CSV parser | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 273cbd5daae7d..b467e6243399a 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -176,7 +176,12 @@ They can take a number of arguments:
- ``mangle_dupe_cols``: boolean, default True, then duplicate columns will be specified
as 'X.0'...'X.N', rather than 'X'...'X'
- ``tupleize_cols``: boolean, default False, if False, convert a list of tuples
- to a multi-index of columns, otherwise, leave the column index as a list of tuples
+ to a multi-index of columns, otherwise, leave the column index as a list of
+ tuples
+ - ``float_precision`` : string, default None. Specifies which converter the C
+ engine should use for floating-point values. The options are None for the
+ ordinary converter, 'high' for the high-precision converter, and
+ 'round_trip' for the round-trip converter.
.. ipython:: python
:suppress:
@@ -512,6 +517,23 @@ data columns:
specify `index_col` as a column label rather then as an index on the resulting frame.
+Specifying method for floating-point conversion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The parameter ``float_precision`` can be specified in order to use
+a specific floating-point converter during parsing with the C engine.
+The options are the ordinary converter, the high-precision converter, and
+the round-trip converter (which is guaranteed to round-trip values after
+writing to a file). For example:
+
+.. ipython:: python
+
+ val = '0.3066101993807095471566981359501369297504425048828125'
+ data = 'a,b,c\n1,2,{0}'.format(val)
+ abs(pd.read_csv(StringIO(data), engine='c', float_precision=None)['c'][0] - float(val))
+ abs(pd.read_csv(StringIO(data), engine='c', float_precision='high')['c'][0] - float(val))
+ abs(pd.read_csv(StringIO(data), engine='c', float_precision='round_trip')['c'][0] - float(val))
+
+
Date Parsing Functions
~~~~~~~~~~~~~~~~~~~~~~
Finally, the parser allows you can specify a custom ``date_parser`` function to
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 74cffa7859a1d..6eaeccf89059d 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -631,6 +631,8 @@ Enhancements
- Added support for ``c``, ``colormap`` and ``colorbar`` arguments for
``DataFrame.plot`` with ``kind='scatter'`` (:issue:`7780`)
+- ``read_csv`` now has a keyword parameter ``float_precision`` which specifies which floating-point
+ converter the C engine should use during parsing, see :ref:`_io` (:issue:`8002`, :issue:`8044`)
- ``PeriodIndex`` supports ``resolution`` as the same as ``DatetimeIndex`` (:issue:`7708`)
- ``pandas.tseries.holiday`` has added support for additional holidays and ways to observe holidays (:issue:`7070`)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 22f076d3aabca..e0243964c78ae 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -303,7 +303,8 @@ def _read(filepath_or_buffer, kwds):
'error_bad_lines': True,
'warn_bad_lines': True,
'dtype': None,
- 'decimal': b'.'
+ 'decimal': b'.',
+ 'float_precision': None
}
_fwf_defaults = {
@@ -369,6 +370,7 @@ def parser_f(filepath_or_buffer,
date_parser=None,
memory_map=False,
+ float_precision=None,
nrows=None,
iterator=False,
chunksize=None,
@@ -437,6 +439,7 @@ def parser_f(filepath_or_buffer,
encoding=encoding,
squeeze=squeeze,
memory_map=memory_map,
+ float_precision=float_precision,
na_filter=na_filter,
compact_ints=compact_ints,
@@ -1264,6 +1267,11 @@ def TextParser(*args, **kwds):
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
+ float_precision : string, default None
+ Specifies which converter the C engine should use for floating-point
+ values. The options are None for the ordinary converter,
+ 'high' for the high-precision converter, and 'round_trip' for the
+ round-trip converter.
"""
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index f2b9a9447e8fb..a381e1802d29c 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2523,6 +2523,12 @@ def test_verbose_import(self):
finally:
sys.stdout = sys.__stdout__
+ def test_float_precision_specified(self):
+ # Should raise an error if float_precision (C parser option) is specified
+ with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
+ "is not supported with the 'python' engine"):
+ self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
+
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest("won't work in Python 3 {0}".format(sys.version_info))
@@ -3088,6 +3094,25 @@ def test_compact_ints(self):
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
+ def test_precise_conversion(self):
+ # GH #8002
+ from decimal import Decimal
+ normal_errors = []
+ precise_errors = []
+ for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
+ text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
+ normal_val = float(self.read_csv(StringIO(text))['a'][0])
+ precise_val = float(self.read_csv(StringIO(text), float_precision='high')['a'][0])
+ roundtrip_val = float(self.read_csv(StringIO(text), float_precision='round_trip')['a'][0])
+ actual_val = Decimal(text[2:])
+ def error(val):
+ return abs(Decimal('{0:.100}'.format(val)) - actual_val)
+ normal_errors.append(error(normal_val))
+ precise_errors.append(error(precise_val))
+ self.assertEqual(roundtrip_val, float(text[2:])) # round-trip should match float()
+ self.assertTrue(sum(precise_errors) < sum(normal_errors))
+ self.assertTrue(max(precise_errors) < max(normal_errors))
+
def test_pass_dtype(self):
data = """\
one,two
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 199d4ab44abfa..5905fada0cbfb 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -62,6 +62,9 @@ cdef extern from "headers/stdint.h":
cdef extern from "headers/portable.h":
pass
+cdef extern from "errno.h":
+ int errno
+
try:
basestring
except NameError:
@@ -155,6 +158,7 @@ cdef extern from "parser/tokenizer.h":
void *skipset
int skip_footer
+ double (*converter)(const char *, char **, char, char, char, int)
# error handling
char *warn_msg
@@ -189,8 +193,13 @@ cdef extern from "parser/tokenizer.h":
int64_t int_max, int *error, char tsep)
uint64_t str_to_uint64(char *p_item, uint64_t uint_max, int *error)
- inline int to_double(char *item, double *p_value,
- char sci, char decimal, char thousands)
+ double xstrtod(const char *p, char **q, char decimal, char sci,
+ char tsep, int skip_trailing)
+ double precise_xstrtod(const char *p, char **q, char decimal, char sci,
+ char tsep, int skip_trailing)
+ double round_trip(const char *p, char **q, char decimal, char sci,
+ char tsep, int skip_trailing)
+
inline int to_complex(char *item, double *p_real,
double *p_imag, char sci, char decimal)
inline int to_longlong(char *item, long long *p_value)
@@ -315,7 +324,8 @@ cdef class TextReader:
skip_footer=0,
verbose=False,
mangle_dupe_cols=True,
- tupleize_cols=False):
+ tupleize_cols=False,
+ float_precision=None):
self.parser = parser_new()
self.parser.chunksize = tokenize_chunksize
@@ -415,6 +425,11 @@ cdef class TextReader:
self.verbose = verbose
self.low_memory = low_memory
+ self.parser.converter = xstrtod
+ if float_precision == 'high':
+ self.parser.converter = precise_xstrtod
+ elif float_precision == 'round_trip':
+ self.parser.converter = round_trip
# encoding
if encoding is not None:
@@ -1018,7 +1033,7 @@ cdef class TextReader:
elif dtype[1] == 'f':
result, na_count = _try_double(self.parser, i, start, end,
- na_filter, na_hashset, na_flist)
+ na_filter, na_hashset, na_flist)
if dtype[1:] != 'f8':
result = result.astype(dtype)
@@ -1415,12 +1430,14 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
size_t i, lines
coliter_t it
char *word
+ char *p_end
double *data
double NA = na_values[np.float64]
ndarray result
khiter_t k
bint use_na_flist = len(na_flist) > 0
+ global errno
lines = line_end - line_start
result = np.empty(lines, dtype=np.float64)
data = <double *> result.data
@@ -1436,8 +1453,9 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
na_count += 1
data[0] = NA
else:
- error = to_double(word, data, parser.sci, parser.decimal, parser.thousands)
- if error != 1:
+ data[0] = parser.converter(word, &p_end, parser.decimal, parser.sci,
+ parser.thousands, 1)
+ if errno != 0 or p_end[0] or p_end == word:
if strcasecmp(word, cinf) == 0:
data[0] = INF
elif strcasecmp(word, cneginf) == 0:
@@ -1452,8 +1470,9 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
else:
for i in range(lines):
word = COLITER_NEXT(it)
- error = to_double(word, data, parser.sci, parser.decimal, parser.thousands)
- if error != 1:
+ data[0] = parser.converter(word, &p_end, parser.decimal, parser.sci,
+ parser.thousands, 1)
+ if errno != 0 or p_end[0] or p_end == word:
if strcasecmp(word, cinf) == 0:
data[0] = INF
elif strcasecmp(word, cneginf) == 0:
diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c
index b30706f85894b..79d854dd07674 100644
--- a/pandas/src/parser/tokenizer.c
+++ b/pandas/src/parser/tokenizer.c
@@ -1689,10 +1689,6 @@ void test_count_lines(char *fname) {
-// forward declaration
-static double xstrtod(const char *p, char **q, char decimal, char sci, char tsep, int skip_trailing);
-
-
P_INLINE void lowercase(char *p) {
for ( ; *p; ++p) *p = tolower(*p);
}
@@ -1702,32 +1698,6 @@ P_INLINE void uppercase(char *p) {
}
-/*
- * `item` must be the nul-terminated string that is to be
- * converted to a double.
- *
- * To be successful, to_double() must use *all* the characters
- * in `item`. E.g. "1.q25" will fail. Leading and trailing
- * spaces are allowed.
- *
- * `sci` is the scientific notation exponent character, usually
- * either 'E' or 'D'. Case is ignored.
- *
- * `decimal` is the decimal point character, usually either
- * '.' or ','.
- *
- */
-
-int to_double(char *item, double *p_value, char sci, char decimal, char tsep)
-{
- char *p_end;
-
- *p_value = xstrtod(item, &p_end, decimal, sci, tsep, TRUE);
-
- return (errno == 0) && (!*p_end);
-}
-
-
int P_INLINE to_complex(char *item, double *p_real, double *p_imag, char sci, char decimal)
{
char *p_end;
@@ -1917,7 +1887,7 @@ int main(int argc, char *argv[])
// * Add tsep argument for thousands separator
//
-static double xstrtod(const char *str, char **endptr, char decimal,
+double xstrtod(const char *str, char **endptr, char decimal,
char sci, char tsep, int skip_trailing)
{
double number;
@@ -2048,6 +2018,171 @@ static double xstrtod(const char *str, char **endptr, char decimal,
return number;
}
+double precise_xstrtod(const char *str, char **endptr, char decimal,
+ char sci, char tsep, int skip_trailing)
+{
+ double number;
+ int exponent;
+ int negative;
+ char *p = (char *) str;
+ int num_digits;
+ int num_decimals;
+ int max_digits = 17;
+ int n;
+ // Cache powers of 10 in memory
+ static double e[] = {1., 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10,
+ 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20,
+ 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29, 1e30,
+ 1e31, 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38, 1e39, 1e40,
+ 1e41, 1e42, 1e43, 1e44, 1e45, 1e46, 1e47, 1e48, 1e49, 1e50,
+ 1e51, 1e52, 1e53, 1e54, 1e55, 1e56, 1e57, 1e58, 1e59, 1e60,
+ 1e61, 1e62, 1e63, 1e64, 1e65, 1e66, 1e67, 1e68, 1e69, 1e70,
+ 1e71, 1e72, 1e73, 1e74, 1e75, 1e76, 1e77, 1e78, 1e79, 1e80,
+ 1e81, 1e82, 1e83, 1e84, 1e85, 1e86, 1e87, 1e88, 1e89, 1e90,
+ 1e91, 1e92, 1e93, 1e94, 1e95, 1e96, 1e97, 1e98, 1e99, 1e100,
+ 1e101, 1e102, 1e103, 1e104, 1e105, 1e106, 1e107, 1e108, 1e109, 1e110,
+ 1e111, 1e112, 1e113, 1e114, 1e115, 1e116, 1e117, 1e118, 1e119, 1e120,
+ 1e121, 1e122, 1e123, 1e124, 1e125, 1e126, 1e127, 1e128, 1e129, 1e130,
+ 1e131, 1e132, 1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, 1e140,
+ 1e141, 1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, 1e150,
+ 1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159, 1e160,
+ 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 1e168, 1e169, 1e170,
+ 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177, 1e178, 1e179, 1e180,
+ 1e181, 1e182, 1e183, 1e184, 1e185, 1e186, 1e187, 1e188, 1e189, 1e190,
+ 1e191, 1e192, 1e193, 1e194, 1e195, 1e196, 1e197, 1e198, 1e199, 1e200,
+ 1e201, 1e202, 1e203, 1e204, 1e205, 1e206, 1e207, 1e208, 1e209, 1e210,
+ 1e211, 1e212, 1e213, 1e214, 1e215, 1e216, 1e217, 1e218, 1e219, 1e220,
+ 1e221, 1e222, 1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, 1e230,
+ 1e231, 1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, 1e240,
+ 1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249, 1e250,
+ 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258, 1e259, 1e260,
+ 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267, 1e268, 1e269, 1e270,
+ 1e271, 1e272, 1e273, 1e274, 1e275, 1e276, 1e277, 1e278, 1e279, 1e280,
+ 1e281, 1e282, 1e283, 1e284, 1e285, 1e286, 1e287, 1e288, 1e289, 1e290,
+ 1e291, 1e292, 1e293, 1e294, 1e295, 1e296, 1e297, 1e298, 1e299, 1e300,
+ 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308};
+ errno = 0;
+
+ // Skip leading whitespace
+ while (isspace(*p)) p++;
+
+ // Handle optional sign
+ negative = 0;
+ switch (*p)
+ {
+ case '-': negative = 1; // Fall through to increment position
+ case '+': p++;
+ }
+
+ number = 0.;
+ exponent = 0;
+ num_digits = 0;
+ num_decimals = 0;
+
+ // Process string of digits
+ while (isdigit(*p))
+ {
+ if (num_digits < max_digits)
+ {
+ number = number * 10. + (*p - '0');
+ num_digits++;
+ }
+ else
+ ++exponent;
+
+ p++;
+ p += (tsep != '\0' & *p == tsep);
+ }
+
+ // Process decimal part
+ if (*p == decimal)
+ {
+ p++;
+
+ while (num_digits < max_digits && isdigit(*p))
+ {
+ number = number * 10. + (*p - '0');
+ p++;
+ num_digits++;
+ num_decimals++;
+ }
+
+ if (num_digits >= max_digits) // consume extra decimal digits
+ while (isdigit(*p))
+ ++p;
+
+ exponent -= num_decimals;
+ }
+
+ if (num_digits == 0)
+ {
+ errno = ERANGE;
+ return 0.0;
+ }
+
+ // Correct for sign
+ if (negative) number = -number;
+
+ // Process an exponent string
+ if (toupper(*p) == toupper(sci))
+ {
+ // Handle optional sign
+ negative = 0;
+ switch (*++p)
+ {
+ case '-': negative = 1; // Fall through to increment pos
+ case '+': p++;
+ }
+
+ // Process string of digits
+ n = 0;
+ while (isdigit(*p))
+ {
+ n = n * 10 + (*p - '0');
+ p++;
+ }
+
+ if (negative)
+ exponent -= n;
+ else
+ exponent += n;
+ }
+
+ if (exponent > 308)
+ {
+ errno = ERANGE;
+ return HUGE_VAL;
+ }
+ else if (exponent > 0)
+ number *= e[exponent];
+ else if (exponent < -308) // subnormal
+ {
+ if (exponent < -616) // prevent invalid array access
+ number = 0.;
+ number /= e[-308 - exponent];
+ number /= e[308];
+ }
+ else
+ number /= e[-exponent];
+
+ if (number == HUGE_VAL || number == -HUGE_VAL)
+ errno = ERANGE;
+
+ if (skip_trailing) {
+ // Skip trailing whitespace
+ while (isspace(*p)) p++;
+ }
+
+ if (endptr) *endptr = p;
+ return number;
+}
+
+double round_trip(const char *p, char **q, char decimal, char sci,
+ char tsep, int skip_trailing)
+{
+ return strtod(p, q);
+}
+
/*
float strtof(const char *str, char **endptr)
{
diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h
index 6af63c07f1104..62e890f60e43d 100644
--- a/pandas/src/parser/tokenizer.h
+++ b/pandas/src/parser/tokenizer.h
@@ -202,6 +202,7 @@ typedef struct parser_t {
void *skipset;
int skip_footer;
+ double (*converter)(const char *, char **, char, char, char, int);
// error handling
char *warn_msg;
@@ -257,7 +258,9 @@ int64_t str_to_int64(const char *p_item, int64_t int_min,
int64_t int_max, int *error, char tsep);
uint64_t str_to_uint64(const char *p_item, uint64_t uint_max, int *error);
-int P_INLINE to_double(char *item, double *p_value, char sci, char decimal, char tsep);
+double xstrtod(const char *p, char **q, char decimal, char sci, char tsep, int skip_trailing);
+double precise_xstrtod(const char *p, char **q, char decimal, char sci, char tsep, int skip_trailing);
+double round_trip(const char *p, char **q, char decimal, char sci, char tsep, int skip_trailing);
int P_INLINE to_complex(char *item, double *p_real, double *p_imag, char sci, char decimal);
int P_INLINE to_longlong(char *item, long long *p_value);
int P_INLINE to_longlong_thousands(char *item, long long *p_value, char tsep);
diff --git a/vb_suite/parser_vb.py b/vb_suite/parser_vb.py
index 50d37f37708e7..96da3fac2de5e 100644
--- a/vb_suite/parser_vb.py
+++ b/vb_suite/parser_vb.py
@@ -79,3 +79,22 @@
cmd = "read_table(StringIO(data), sep=',', header=None, parse_dates=[1])"
sdate = datetime(2012, 5, 7)
read_table_multiple_date_baseline = Benchmark(cmd, setup, start_date=sdate)
+
+setup = common_setup + """
+from cStringIO import StringIO
+data = '''\
+0.1213700904466425978256438611,0.0525708283766902484401839501,0.4174092731488769913994474336
+0.4096341697147408700274695547,0.1587830198973579909349496119,0.1292545832485494372576795285
+0.8323255650024565799327547210,0.9694902427379478160318626578,0.6295047811546814475747169126
+0.4679375305798131323697930383,0.2963942381834381301075609371,0.5268936082160610157032465394
+0.6685382761849776311890991564,0.6721207066140679753374342908,0.6519975277021627935170045020
+'''
+data = data * 200
+"""
+cmd = "read_csv(StringIO(data), sep=',', header=None, float_precision=None)"
+sdate = datetime(2014, 8, 20)
+read_csv_default_converter = Benchmark(cmd, setup, start_date=sdate)
+cmd = "read_csv(StringIO(data), sep=',', header=None, float_precision='high')"
+read_csv_precise_converter = Benchmark(cmd, setup, start_date=sdate)
+cmd = "read_csv(StringIO(data), sep=',', header=None, float_precision='round_trip')"
+read_csv_roundtrip_converter = Benchmark(cmd, setup, start_date=sdate)
| closes #8002.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8044 | 2014-08-16T14:29:32Z | 2014-09-19T16:07:47Z | 2014-09-19T16:07:47Z | 2014-09-22T16:08:17Z |
BUG: stack with datetimes | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 0223a11d8a011..548f23df532e2 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -548,8 +548,7 @@ Bug Fixes
- Bug in ``read_html`` where ``bytes`` objects were not tested for in
``_read`` (:issue:`7927`).
-
-
+- Bug in ``DataFrame.stack()`` when one of the column levels was a datelike (:issue: `8039`)
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index b014ede6e65a8..f2817e04819bb 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -601,7 +601,7 @@ def _stack_multi_columns(frame, level=-1, dropna=True):
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[
- lev.values.take(lab) for lev, lab in
+ lev.take(lab) for lev, lab in
zip(this.columns.levels[:-1], this.columns.labels[:-1])
]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index cf845a18092af..d1e6a2bf59303 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11857,6 +11857,17 @@ def test_unstack_non_unique_index_names(self):
with tm.assertRaises(ValueError):
df.T.stack('c1')
+ def test_stack_datetime_column_multiIndex(self):
+ # GH 8039
+ t = datetime(2014, 1, 1)
+ df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
+ result = df.stack()
+
+ eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
+ ecols = MultiIndex.from_tuples([(t, 'A')])
+ expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
+ assert_frame_equal(result, expected)
+
def test_repr_with_mi_nat(self):
df = DataFrame({'X': [1, 2]},
index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])
| closes https://github.com/pydata/pandas/issues/8039
The only change I made was to select from the MultiIndex itself, rather than the underlying `.values` attribute. The test set passed but I want to look at this a bit more closely to make sure everything is ok.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8043 | 2014-08-15T22:17:05Z | 2014-08-18T13:14:00Z | 2014-08-18T13:14:00Z | 2016-11-03T12:38:05Z |
Fix testing multiindex dtypes in assert_frame_equal and assert_series_equal | diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 298fa73c69064..642e50c37874d 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -6,9 +6,10 @@
import nose
import numpy as np
import sys
-from pandas import Series
+from pandas import Series, DataFrame
from pandas.util.testing import (
- assert_almost_equal, assertRaisesRegexp, raise_with_traceback, assert_series_equal,
+ assert_almost_equal, assertRaisesRegexp, raise_with_traceback,
+ assert_series_equal, assert_frame_equal,
RNGContext
)
@@ -174,6 +175,46 @@ def test_less_precise(self):
self.assertRaises(AssertionError, assert_series_equal, s1, s2)
self.assertRaises(AssertionError, assert_series_equal, s1, s2, True)
+ def test_index_dtype(self):
+ df1 = DataFrame.from_records(
+ {'a':[1,2],'c':['l1','l2']}, index=['a'])
+ df2 = DataFrame.from_records(
+ {'a':[1.0,2.0],'c':['l1','l2']}, index=['a'])
+ self._assert_not_equal(df1.c, df2.c, check_index_type=True)
+
+ def test_multiindex_dtype(self):
+ df1 = DataFrame.from_records(
+ {'a':[1,2],'b':[2.1,1.5],'c':['l1','l2']}, index=['a','b'])
+ df2 = DataFrame.from_records(
+ {'a':[1.0,2.0],'b':[2.1,1.5],'c':['l1','l2']}, index=['a','b'])
+ self._assert_not_equal(df1.c, df2.c, check_index_type=True)
+
+
+class TestAssertFrameEqual(unittest.TestCase):
+ _multiprocess_can_split_ = True
+
+ def _assert_equal(self, x, y, **kwargs):
+ assert_frame_equal(x,y,**kwargs)
+ assert_frame_equal(y,x,**kwargs)
+
+ def _assert_not_equal(self, a, b, **kwargs):
+ self.assertRaises(AssertionError, assert_frame_equal, a, b, **kwargs)
+ self.assertRaises(AssertionError, assert_frame_equal, b, a, **kwargs)
+
+ def test_index_dtype(self):
+ df1 = DataFrame.from_records(
+ {'a':[1,2],'c':['l1','l2']}, index=['a'])
+ df2 = DataFrame.from_records(
+ {'a':[1.0,2.0],'c':['l1','l2']}, index=['a'])
+ self._assert_not_equal(df1, df2, check_index_type=True)
+
+ def test_multiindex_dtype(self):
+ df1 = DataFrame.from_records(
+ {'a':[1,2],'b':[2.1,1.5],'c':['l1','l2']}, index=['a','b'])
+ df2 = DataFrame.from_records(
+ {'a':[1.0,2.0],'b':[2.1,1.5],'c':['l1','l2']}, index=['a','b'])
+ self._assert_not_equal(df1, df2, check_index_type=True)
+
class TestRNGContext(unittest.TestCase):
def test_RNGContext(self):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index c6ddfd20cec7c..a59994970009f 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -608,9 +608,12 @@ def assert_series_equal(left, right, check_dtype=True,
else:
assert_index_equal(left.index, right.index)
if check_index_type:
- assert_isinstance(left.index, type(right.index))
- assert_attr_equal('dtype', left.index, right.index)
- assert_attr_equal('inferred_type', left.index, right.index)
+ for level in range(left.index.nlevels):
+ lindex = left.index.get_level_values(level)
+ rindex = right.index.get_level_values(level)
+ assert_isinstance(lindex, type(rindex))
+ assert_attr_equal('dtype', lindex, rindex)
+ assert_attr_equal('inferred_type', lindex, rindex)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
@@ -657,9 +660,12 @@ def assert_frame_equal(left, right, check_dtype=True,
check_exact=check_exact)
if check_index_type:
- assert_isinstance(left.index, type(right.index))
- assert_attr_equal('dtype', left.index, right.index)
- assert_attr_equal('inferred_type', left.index, right.index)
+ for level in range(left.index.nlevels):
+ lindex = left.index.get_level_values(level)
+ rindex = right.index.get_level_values(level)
+ assert_isinstance(lindex, type(rindex))
+ assert_attr_equal('dtype', lindex, rindex)
+ assert_attr_equal('inferred_type', lindex, rindex)
if check_column_type:
assert_isinstance(left.columns, type(right.columns))
assert_attr_equal('dtype', left.columns, right.columns)
| When using `assert_frame_equal` or `assert_series_equal` in the presence of a multiindex, the dtype of the index as a whole is `object`. Hence, there is no checking that the dtypes of the individual levels match. We now iterate through the levels.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8042 | 2014-08-15T18:51:02Z | 2014-08-15T19:22:28Z | 2014-08-15T19:22:28Z | 2014-08-15T19:24:07Z |
BUG: Don't call np.roll on empty arrays. | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 4bd55b2172013..b3ee0980635e3 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -389,7 +389,7 @@ Enhancements
-
+- Bug in ``DataFrame.shift`` where empty columns would throw ``ZeroDivisionError`` on numpy 1.7 (:issue:`8019`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f3b8a54034d56..58f98b4ee21e6 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -817,7 +817,10 @@ def shift(self, periods, axis=0):
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
- new_values = np.roll(new_values, periods, axis=axis)
+
+ if np.prod(new_values.shape):
+ new_values = np.roll(new_values, periods, axis=axis)
+
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index d1e6a2bf59303..b4e548dd5b964 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9610,6 +9610,13 @@ def test_shift_bool(self):
columns=['high', 'low'])
assert_frame_equal(rs, xp)
+ def test_shift_empty(self):
+ # Regression test for #8019
+ df = DataFrame({'foo': []})
+ rs = df.shift(-1)
+
+ assert_frame_equal(df, rs)
+
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
| On some versions of numpy (such as 1.7, but not 1.8), this throws a
ZeroDivisionError.
Fixes #8019.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8041 | 2014-08-15T17:16:33Z | 2014-08-18T15:38:09Z | 2014-08-18T15:38:09Z | 2014-08-18T18:20:52Z |
DOC/CLN: Cleanups plotting.py | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 9d03b7b38bea7..8513fb7807084 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -257,6 +257,8 @@ API changes
- Added support for numpy 1.8+ data types (bool_, int_, float_, string_) for conversion to R dataframe (:issue:`8400`)
+- ``DataFrame.plot`` and ``Series.plot`` keywords are now have consistent orders (:issue:`8037`)
+
.. _whatsnew_0150.dt:
.dt accessor
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 1cc5e2a99148b..410494d3b0062 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -2784,6 +2784,9 @@ def test_pie_df(self):
ax = _check_plot_works(df.plot, kind='pie', y='Y')
self._check_text_labels(ax.texts, df.index)
+ ax = _check_plot_works(df.plot, kind='pie', y=2)
+ self._check_text_labels(ax.texts, df.index)
+
axes = _check_plot_works(df.plot, kind='pie', subplots=True)
self.assertEqual(len(axes), len(df.columns))
for ax in axes:
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 0b1a0ceb8da60..0ad712d8508ef 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -852,16 +852,13 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
self._validate_color_args()
def _validate_color_args(self):
- from pandas import DataFrame
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
- if ('color' in self.kwds and
- (isinstance(self.data, Series) or
- isinstance(self.data, DataFrame) and len(self.data.columns) == 1)):
+ if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
@@ -1264,7 +1261,6 @@ def _get_style(self, i, col_name):
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
- from pandas.core.frame import DataFrame
if num_colors is None:
num_colors = self.nseries
@@ -1709,7 +1705,7 @@ def _get_plot_function(self):
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
- def plotf(ax, x, y, style=None, column_num=0, **kwds):
+ def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
@@ -1764,8 +1760,8 @@ def __init__(self, data, **kwargs):
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
- self.bottom = kwargs.pop('bottom', None)
- self.left = kwargs.pop('left', None)
+ self.bottom = kwargs.pop('bottom', 0)
+ self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log',False)
MPLPlot.__init__(self, data, **kwargs)
@@ -1796,13 +1792,11 @@ def _args_adjust(self):
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
- if self.bottom is not None:
- start = start + self.bottom
+ start = start + self.bottom
return ax.bar(x, y, w, bottom=start,log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
- if self.left is not None:
- start = start + self.left
+ start = start + self.left
return ax.barh(x, y, w, left=start, **kwds)
else:
raise NotImplementedError
@@ -2243,58 +2237,145 @@ def result(self):
'area': AreaPlot, 'pie': PiePlot}
-def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
- sharey=False, use_index=True, figsize=None, grid=None,
- legend=True, rot=None, ax=None, style=None, title=None,
- xlim=None, ylim=None, logx=False, logy=False, xticks=None,
- yticks=None, kind='line', sort_columns=False, fontsize=None,
- secondary_y=False, layout=None, **kwds):
+def _plot(data, x=None, y=None, subplots=False,
+ ax=None, kind='line', **kwds):
+ kind = _get_standard_kind(kind.lower().strip())
+ if kind in _all_kinds:
+ klass = _plot_klass[kind]
+ else:
+ raise ValueError('Invalid chart type given %s' % kind)
- """
- Make line, bar, or scatter plots of DataFrame series with the index on the x-axis
- using matplotlib / pylab.
+ from pandas import DataFrame
+ if kind in _dataframe_kinds:
+ if isinstance(data, DataFrame):
+ plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
+ kind=kind, **kwds)
+ else:
+ raise ValueError('Invalid chart type given %s' % kind)
- Parameters
- ----------
- frame : DataFrame
- x : label or position, default None
+ elif kind in _series_kinds:
+ if isinstance(data, DataFrame):
+ if y is None and subplots is False:
+ msg = "{0} requires either y column or 'subplots=True'"
+ raise ValueError(msg.format(kind))
+ elif y is not None:
+ if com.is_integer(y) and not data.columns.holds_integer():
+ y = data.columns[y]
+ data = data[y] # converted to series actually
+ data.index.name = y
+ plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
+ else:
+ if isinstance(data, DataFrame):
+ if x is not None:
+ if com.is_integer(x) and not data.columns.holds_integer():
+ x = data.columns[x]
+ data = data.set_index(x)
+
+ if y is not None:
+ if com.is_integer(y) and not data.columns.holds_integer():
+ y = data.columns[y]
+ label = x if x is not None else data.index.name
+ label = kwds.pop('label', label)
+ series = data[y]
+ series.index.name = label
+
+ for kw in ['xerr', 'yerr']:
+ if (kw in kwds) and \
+ (isinstance(kwds[kw], string_types) or com.is_integer(kwds[kw])):
+ try:
+ kwds[kw] = data[kwds[kw]]
+ except (IndexError, KeyError, TypeError):
+ pass
+ data = series
+ plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
+
+ plot_obj.generate()
+ plot_obj.draw()
+ return plot_obj.result
+
+
+df_kind = """- 'scatter' : scatter plot
+ - 'hexbin' : hexbin plot"""
+series_kind = ""
+
+df_coord = """x : label or position, default None
y : label or position, default None
- Allows plotting of one column versus another
- yerr : DataFrame (with matching labels), Series, list-type (tuple, list,
- ndarray), or str of column name containing y error values
- xerr : similar functionality as yerr, but for x error values
+ Allows plotting of one column versus another"""
+series_coord = ""
+
+df_unique = """stacked : boolean, default False in line and bar plots, and True in area plot.
+ If True, create stacked plot.
+ sort_columns : boolean, default False
+ Sort column names to determine plot ordering
+ secondary_y : boolean or sequence, default False
+ Whether to plot on the secondary y-axis
+ If a list/tuple, which columns to plot on secondary y-axis
+"""
+series_unique = """label : label argument to provide to plot
+ secondary_y : boolean or sequence of ints, default False
+ If True then y-axis will be on the right"""
+
+df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
- Make separate subplots for each time series
+ Make separate subplots for each column
sharex : boolean, default True
In case subplots=True, share x axis
sharey : boolean, default False
In case subplots=True, share y axis
+ layout : tuple (optional)
+ (rows, columns) for the layout of subplots"""
+series_ax = """ax : matplotlib axes object
+ If not passed, uses gca()"""
+
+df_note = """- If `kind`='bar' or 'barh', you can specify relative alignments
+ for bar plot layout by `position` keyword.
+ From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
+ - If `kind`='hexbin', you can control the size of the bins with the
+ `gridsize` argument. By default, a histogram of the counts around each
+ `(x, y)` point is computed. You can specify alternative aggregations
+ by passing values to the `C` and `reduce_C_function` arguments.
+ `C` specifies the value at each `(x, y)` point and `reduce_C_function`
+ is a function of one argument that reduces all the values in a bin to
+ a single number (e.g. `mean`, `max`, `sum`, `std`)."""
+series_note = ""
+
+_shared_doc_df_kwargs = dict(klass='DataFrame', klass_kind=df_kind,
+ klass_coord=df_coord, klass_ax=df_ax,
+ klass_unique=df_unique, klass_note=df_note)
+_shared_doc_series_kwargs = dict(klass='Series', klass_kind=series_kind,
+ klass_coord=series_coord, klass_ax=series_ax,
+ klass_unique=series_unique, klass_note=series_note)
+
+_shared_docs['plot'] = """
+ Make plots of %(klass)s using matplotlib / pylab.
+
+ Parameters
+ ----------
+ data : %(klass)s
+ %(klass_coord)s
+ kind : str
+ - 'line' : line plot (default)
+ - 'bar' : vertical bar plot
+ - 'barh' : horizontal bar plot
+ - 'hist' : histogram
+ - 'box' : boxplot
+ - 'kde' : Kernel Density Estimation plot
+ - 'density' : same as 'kde'
+ - 'area' : area plot
+ - 'pie' : pie plot
+ %(klass_kind)s
+ %(klass_ax)s
+ figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
- stacked : boolean, default False
- If True, create stacked bar plot. Only valid for DataFrame input
- sort_columns: boolean, default False
- Sort column names to determine plot ordering
title : string
Title to use for the plot
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
-
- ax : matplotlib axis object, default None
style : list or dict
matplotlib line style per column
- kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'box', 'scatter', 'hexbin'}
- line : line plot
- bar : vertical bar plot
- barh : horizontal bar plot
- hist : histogram
- kde/density : Kernel Density Estimation plot
- area : area plot
- box : box plot
- scatter : scatter plot
- hexbin : hexbin plot
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
@@ -2309,12 +2390,8 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks
- secondary_y : boolean or sequence, default False
- Whether to plot on the secondary y-axis
- If a list/tuple, which columns to plot on secondary y-axis
- mark_right: boolean, default True
- When using a secondary_y axis, should the legend label the axis of
- the various columns automatically
+ fontsize : int, default None
+ Font size for ticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
@@ -2329,12 +2406,19 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
+ yerr : DataFrame, Series, array-like, dict and str
+ See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail.
+ xerr : same types as yerr.
+ %(klass_unique)s
+ mark_right : boolean, default True
+ When using a secondary_y axis, automatically mark the column
+ labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
- ax_or_axes : matplotlib.AxesSubplot or list of them
+ axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
@@ -2349,178 +2433,64 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
If `kind`='scatter' and the argument `c` is the name of a dataframe column,
the values of that column are used to color each point.
+ - See matplotlib documentation online for more on this subject
+ %(klass_note)s
"""
- kind = _get_standard_kind(kind.lower().strip())
- if kind in _all_kinds:
- klass = _plot_klass[kind]
- else:
- raise ValueError('Invalid chart type given %s' % kind)
-
- if kind in _dataframe_kinds:
- plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots,
- rot=rot,legend=legend, ax=ax, style=style,
- fontsize=fontsize, use_index=use_index, sharex=sharex,
- sharey=sharey, xticks=xticks, yticks=yticks,
- xlim=xlim, ylim=ylim, title=title, grid=grid,
- figsize=figsize, logx=logx, logy=logy,
- sort_columns=sort_columns, secondary_y=secondary_y,
- layout=layout, **kwds)
- elif kind in _series_kinds:
- if y is None and subplots is False:
- msg = "{0} requires either y column or 'subplots=True'"
- raise ValueError(msg.format(kind))
- elif y is not None:
- if com.is_integer(y) and not frame.columns.holds_integer():
- y = frame.columns[y]
- frame = frame[y] # converted to series actually
- frame.index.name = y
-
- plot_obj = klass(frame, kind=kind, subplots=subplots,
- rot=rot,legend=legend, ax=ax, style=style,
- fontsize=fontsize, use_index=use_index, sharex=sharex,
- sharey=sharey, xticks=xticks, yticks=yticks,
- xlim=xlim, ylim=ylim, title=title, grid=grid,
- figsize=figsize, layout=layout,
- sort_columns=sort_columns, **kwds)
- else:
- if x is not None:
- if com.is_integer(x) and not frame.columns.holds_integer():
- x = frame.columns[x]
- frame = frame.set_index(x)
-
- if y is not None:
- if com.is_integer(y) and not frame.columns.holds_integer():
- y = frame.columns[y]
- label = x if x is not None else frame.index.name
- label = kwds.pop('label', label)
- ser = frame[y]
- ser.index.name = label
-
- for kw in ['xerr', 'yerr']:
- if (kw in kwds) and \
- (isinstance(kwds[kw], string_types) or com.is_integer(kwds[kw])):
- try:
- kwds[kw] = frame[kwds[kw]]
- except (IndexError, KeyError, TypeError):
- pass
-
- return plot_series(ser, label=label, kind=kind,
- use_index=use_index,
- rot=rot, xticks=xticks, yticks=yticks,
- xlim=xlim, ylim=ylim, ax=ax, style=style,
- grid=grid, logx=logx, logy=logy,
- secondary_y=secondary_y, title=title,
- figsize=figsize, fontsize=fontsize, **kwds)
-
- else:
- plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot,
- legend=legend, ax=ax, style=style, fontsize=fontsize,
- use_index=use_index, sharex=sharex, sharey=sharey,
- xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
- title=title, grid=grid, figsize=figsize, logx=logx,
- logy=logy, sort_columns=sort_columns,
- secondary_y=secondary_y, layout=layout, **kwds)
-
- plot_obj.generate()
- plot_obj.draw()
- return plot_obj.result
-
-
-def plot_series(series, label=None, kind='line', use_index=True, rot=None,
+@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
+def plot_frame(data, x=None, y=None, kind='line', ax=None, # Dataframe unique
+ subplots=False, sharex=True, sharey=False, layout=None, # Dataframe unique
+ figsize=None, use_index=True, title=None, grid=None, legend=True,
+ style=None, logx=False, logy=False, loglog=False,
+ xticks=None, yticks=None, xlim=None, ylim=None,
+ rot=None, fontsize=None, colormap=None, table=False,
+ yerr=None, xerr=None,
+ secondary_y=False, sort_columns=False, # Dataframe unique
+ **kwds):
+ return _plot(data, kind=kind, x=x, y=y, ax=ax,
+ subplots=subplots, sharex=sharex, sharey=sharey, layout=layout,
+ figsize=figsize, use_index=use_index, title=title,
+ grid=grid, legend=legend,
+ style=style, logx=logx, logy=logy, loglog=loglog,
+ xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
+ rot=rot, fontsize=fontsize, colormap=colormap, table=table,
+ yerr=yerr, xerr=xerr,
+ secondary_y=secondary_y, sort_columns=sort_columns,
+ **kwds)
+
+
+@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
+def plot_series(data, kind='line', ax=None, # Series unique
+ figsize=None, use_index=True, title=None, grid=None, legend=True,
+ style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
- ax=None, style=None, grid=None, legend=False, logx=False,
- logy=False, secondary_y=False, **kwds):
- """
- Plot the input series with the index on the x-axis using matplotlib
-
- Parameters
- ----------
- label : label argument to provide to plot
- kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'box'}
- line : line plot
- bar : vertical bar plot
- barh : horizontal bar plot
- hist : histogram
- kde/density : Kernel Density Estimation plot
- area : area plot
- box : box plot
- use_index : boolean, default True
- Plot index as axis tick labels
- rot : int, default None
- Rotation for tick labels
- xticks : sequence
- Values to use for the xticks
- yticks : sequence
- Values to use for the yticks
- xlim : 2-tuple/list
- ylim : 2-tuple/list
- ax : matplotlib axis object
- If not passed, uses gca()
- style : string, default matplotlib default
- matplotlib line style to use
- grid : matplotlib grid
- legend: matplotlib legend
- logx : boolean, default False
- Use log scaling on x axis
- logy : boolean, default False
- Use log scaling on y axis
- loglog : boolean, default False
- Use log scaling on both x and y axes
- secondary_y : boolean or sequence of ints, default False
- If True then y-axis will be on the right
- figsize : a tuple (width, height) in inches
- position : float
- Specify relative alignments for bar plot layout.
- From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
- table : boolean, Series or DataFrame, default False
- If True, draw a table using the data in the Series and the data will
- be transposed to meet matplotlib's default layout.
- If a Series or DataFrame is passed, use passed data to draw a table.
- kwds : keywords
- Options to pass to matplotlib plotting method
-
- Notes
- -----
- See matplotlib documentation online for more on this subject
- """
-
- kind = _get_standard_kind(kind.lower().strip())
- if kind in _common_kinds or kind in _series_kinds:
- klass = _plot_klass[kind]
- else:
- raise ValueError('Invalid chart type given %s' % kind)
+ rot=None, fontsize=None, colormap=None, table=False,
+ yerr=None, xerr=None,
+ label=None, secondary_y=False, # Series unique
+ **kwds):
+ import matplotlib.pyplot as plt
"""
- If no axis is specified, we check whether there are existing figures.
- If so, we get the current axis and check whether yaxis ticks are on the
- right. Ticks for the plot of the series will be on the right unless
- there is at least one axis with ticks on the left.
-
- If we do not check for whether there are existing figures, _gca() will
- create a figure with the default figsize, causing the figsize= parameter to
+ If no axes is specified, check whether there are existing figures
+ If there is no existing figures, _gca() will
+ create a figure with the default figsize, causing the figsize=parameter to
be ignored.
"""
- import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = getattr(ax, 'left_ax', ax)
-
# is there harm in this?
if label is None:
- label = series.name
-
- plot_obj = klass(series, kind=kind, rot=rot, logx=logx, logy=logy,
- ax=ax, use_index=use_index, style=style,
- xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
- legend=legend, grid=grid, label=label,
- secondary_y=secondary_y, **kwds)
-
- plot_obj.generate()
- plot_obj.draw()
-
- # plot_obj.ax is None if we created the first figure
- return plot_obj.result
+ label = data.name
+ return _plot(data, kind=kind, ax=ax,
+ figsize=figsize, use_index=use_index, title=title,
+ grid=grid, legend=legend,
+ style=style, logx=logx, logy=logy, loglog=loglog,
+ xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
+ rot=rot, fontsize=fontsize, colormap=colormap, table=table,
+ yerr=yerr, xerr=xerr,
+ label=label, secondary_y=secondary_y,
+ **kwds)
_shared_docs['boxplot'] = """
| - Some refactoring which should not affects to output.
- Made `DataFrame.plot` and `Series.plot` should have consistent docstring except differences.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8037 | 2014-08-15T13:54:51Z | 2014-10-04T15:09:20Z | 2014-10-04T15:09:20Z | 2014-11-22T09:25:46Z |
API: Timetamp.tz_localize and tz_convert raises TypeError rathar | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index a6be3a1ed3d0e..2e3841e8a00c3 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -172,7 +172,7 @@ API changes
- ``DataFrame.tz_localize`` and ``DataFrame.tz_convert`` now accepts an optional ``level`` argument
for localizing a specific level of a MultiIndex (:issue:`7846`)
-
+- ``Timestamp.tz_localize`` and ``Timestamp.tz_convert`` now raise ``TypeError`` in error cases, rather than ``Exception`` (:issue:`8025`)
- ``Timestamp.__repr__`` displays ``dateutil.tz.tzoffset`` info (:issue:`7907`)
- ``merge``, ``DataFrame.merge``, and ``ordered_merge`` now return the same type
as the ``left`` argument. (:issue:`7737`)
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 57dc5f4404621..2cd8539d27dd3 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -233,6 +233,15 @@ def test_tz(self):
self.assertEqual(conv.nanosecond, 5)
self.assertEqual(conv.hour, 19)
+ # GH 8025
+ with tm.assertRaisesRegexp(TypeError, 'Cannot localize tz-aware Timestamp, use '
+ 'tz_convert for conversions'):
+ Timestamp('2011-01-01' ,tz='US/Eastern').tz_localize('Asia/Tokyo')
+
+ with tm.assertRaisesRegexp(TypeError, 'Cannot convert tz-naive Timestamp, use '
+ 'tz_localize to localize'):
+ Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
+
def test_tz_localize_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00',
@@ -241,7 +250,7 @@ def test_tz_localize_roundtrip(self):
localized = ts.tz_localize(tz)
self.assertEqual(localized, Timestamp(t, tz=tz))
- with tm.assertRaises(Exception):
+ with tm.assertRaises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 3bdd422d9fc06..5a2352508d42f 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -388,7 +388,7 @@ class Timestamp(_Timestamp):
value = tz_convert_single(self.value, 'UTC', self.tz)
return Timestamp(value, tz=None)
else:
- raise Exception('Cannot localize tz-aware Timestamp, use '
+ raise TypeError('Cannot localize tz-aware Timestamp, use '
'tz_convert for conversions')
def tz_convert(self, tz):
@@ -408,7 +408,7 @@ class Timestamp(_Timestamp):
"""
if self.tzinfo is None:
# tz naive, use tz_localize
- raise Exception('Cannot convert tz-naive Timestamp, use '
+ raise TypeError('Cannot convert tz-naive Timestamp, use '
'tz_localize to localize')
else:
# Same UTC timestamp, different time zone
| Closes #8025.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8036 | 2014-08-15T13:49:49Z | 2014-08-16T16:56:02Z | 2014-08-16T16:56:02Z | 2014-08-19T13:33:55Z |
DOC: Fixing 'chunksize' parameter name typo io.rst | diff --git a/doc/source/io.rst b/doc/source/io.rst
index f4065d736a674..baf684056e169 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2722,7 +2722,7 @@ The default is 50,000 rows returned in a chunk.
.. code-block:: python
- for df in read_hdf('store.h5','df', chunsize=3):
+ for df in read_hdf('store.h5','df', chunksize=3):
print(df)
Note, that the chunksize keyword applies to the **source** rows. So if you
| Fixing an error in the 'chunksize' parameter name for the io doc. With the typo (parameter called 'chunsize'), the sample code from the documentation fails silently / confusingly - returns an iterator over individual dataframe elements rather than dataframe chunks.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8035 | 2014-08-15T13:19:00Z | 2014-08-15T13:20:18Z | 2014-08-15T13:20:18Z | 2014-08-15T13:20:50Z |
DOC: suppress warning + fix reshape example | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 8ec61496c538a..43b11fa0a60bc 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1476,6 +1476,7 @@ You can control the action of a chained assignment via the option ``mode.chained
which can take the values ``['raise','warn',None]``, where showing a warning is the default.
.. ipython:: python
+ :okwarning:
dfb = DataFrame({'a' : ['one', 'one', 'two',
'three', 'two', 'one', 'six'],
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 92a35d0276e22..8d718bacd262b 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -151,6 +151,15 @@ unstacks the **last level**:
stacked.unstack(1)
stacked.unstack(0)
+.. _reshaping.unstack_by_name:
+
+If the indexes have names, you can use the level names instead of specifying
+the level numbers:
+
+.. ipython:: python
+
+ stacked.unstack('second')
+
Notice that the ``stack`` and ``unstack`` methods implicitly sort the index
levels involved. Hence a call to ``stack`` and then ``unstack``, or viceversa,
will result in a **sorted** copy of the original DataFrame or Series:
@@ -165,15 +174,6 @@ will result in a **sorted** copy of the original DataFrame or Series:
while the above code will raise a ``TypeError`` if the call to ``sort`` is
removed.
-.. _reshaping.unstack_by_name:
-
-If the indexes have names, you can use the level names instead of specifying
-the level numbers:
-
-.. ipython:: python
-
- stacked.unstack('second')
-
.. _reshaping.stack_multiple:
Multiple Levels
@@ -218,6 +218,8 @@ calling ``sortlevel``, of course). Here is a more complex example:
columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'),
('B', 'cat'), ('A', 'dog')],
names=['exp', 'animal'])
+ index = MultiIndex.from_product([('bar', 'baz', 'foo', 'qux'), ('one', 'two')],
+ names=['first', 'second'])
df = DataFrame(randn(8, 4), index=index, columns=columns)
df2 = df.ix[[0, 1, 2, 4, 5, 7]]
df2
| https://github.com/pydata/pandas/commit/bc610104f96699ff73ce507e79b8be070f126454 defined new `index`, but the previous was still used in later example, and suppress SettingWIthCopyWarning from https://github.com/pydata/pandas/commit/70a17da4003af0b1882deb5bcb28e188335fb9dd
| https://api.github.com/repos/pandas-dev/pandas/pulls/8034 | 2014-08-15T08:45:45Z | 2014-08-15T09:05:08Z | 2014-08-15T09:05:08Z | 2014-08-15T09:05:09Z |
BUG: fix py3 read_html bytes input | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 54acb278d2100..27c00103f80b1 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -533,7 +533,11 @@ Bug Fixes
+- Bug in installation where ``html_encoding/*.html`` wasn't installed and
+ therefore some tests were not running correctly (:issue:`7927`).
+- Bug in ``read_html`` where ``bytes`` objects were not tested for in
+ ``_read`` (:issue:`7927`).
diff --git a/pandas/io/html.py b/pandas/io/html.py
index d9c980b5e88db..1fe86201a8db0 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -15,8 +15,8 @@
from pandas.io.common import _is_url, urlopen, parse_url
from pandas.io.parsers import TextParser
-from pandas.compat import (lrange, lmap, u, string_types, iteritems, text_type,
- raise_with_traceback)
+from pandas.compat import (lrange, lmap, u, string_types, iteritems,
+ raise_with_traceback, binary_type)
from pandas.core import common as com
from pandas import Series
@@ -51,6 +51,9 @@
_RE_WHITESPACE = re.compile(r'[\r\n]+|\s{2,}')
+char_types = string_types + (binary_type,)
+
+
def _remove_whitespace(s, regex=_RE_WHITESPACE):
"""Replace extra whitespace inside of a string with a single space.
@@ -114,13 +117,13 @@ def _read(obj):
text = url.read()
elif hasattr(obj, 'read'):
text = obj.read()
- elif isinstance(obj, string_types):
+ elif isinstance(obj, char_types):
text = obj
try:
if os.path.isfile(text):
with open(text, 'rb') as f:
return f.read()
- except TypeError:
+ except (TypeError, ValueError):
pass
else:
raise TypeError("Cannot read object of type %r" % type(obj).__name__)
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index ecfc4c87d585d..834140e3de746 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -622,6 +622,7 @@ def read_string(self, f, encoding):
return self.read_html(fobj.read(), encoding=encoding, index_col=0)
def test_encode(self):
+ assert self.files, 'no files read from the data folder'
for f in self.files:
_, encoding = _lang_enc(f)
from_string = self.read_string(f, encoding).pop()
diff --git a/setup.py b/setup.py
index f93ade98c26cf..f57349c048a62 100755
--- a/setup.py
+++ b/setup.py
@@ -583,6 +583,7 @@ def pxd(name):
'tests/data/*.xlsm',
'tests/data/*.table',
'tests/data/*.html',
+ 'tests/data/html_encoding/*.html',
'tests/test_json/data/*.json'],
'pandas.tools': ['tests/*.csv'],
'pandas.tests': ['data/*.pickle',
| closes #7927
| https://api.github.com/repos/pandas-dev/pandas/pulls/8030 | 2014-08-14T17:46:50Z | 2014-08-14T19:41:22Z | 2014-08-14T19:41:22Z | 2014-08-14T19:41:24Z |
BUG: fix HDFStore iterator to handle a where properly (GH8014) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 0223a11d8a011..a367147203e3f 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -463,7 +463,7 @@ Bug Fixes
- Bug in pickle deserialization that failed for pre-0.14.1 containers with dup items trying to avoid ambiguity
when matching block and manager items, when there's only one block there's no ambiguity (:issue:`7794`)
-
+- Bug in HDFStore iteration when passing a where (:issue:`8014`)
- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5150729ed6f79..07e9abeaadbb4 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -662,21 +662,18 @@ def select(self, key, where=None, start=None, stop=None, columns=None,
s = self._create_storer(group)
s.infer_axes()
- # what we are actually going to do for a chunk
- def func(_start, _stop):
- return s.read(where=where, start=_start, stop=_stop,
+ # function to call on iteration
+ def func(_start, _stop, _where):
+ return s.read(start=_start, stop=_stop,
+ where=_where,
columns=columns, **kwargs)
- if iterator or chunksize is not None:
- if not s.is_table:
- raise TypeError(
- "can only use an iterator or chunksize on a table")
- return TableIterator(self, func, nrows=s.nrows, start=start,
- stop=stop, chunksize=chunksize,
- auto_close=auto_close)
+ # create the iterator
+ it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start,
+ stop=stop, iterator=iterator, chunksize=chunksize,
+ auto_close=auto_close)
- return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop,
- auto_close=auto_close).get_values()
+ return it.get_result()
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
@@ -779,26 +776,22 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None,
# axis is the concentation axes
axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
- def func(_start, _stop):
- if where is not None:
- c = s.read_coordinates(where=where, start=_start, stop=_stop, **kwargs)
- else:
- c = None
+ def func(_start, _stop, _where):
- objs = [t.read(where=c, start=_start, stop=_stop,
- columns=columns, **kwargs) for t in tbls]
+ # retrieve the objs, _where is always passed as a set of coordinates here
+ objs = [t.read(where=_where, columns=columns, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False).consolidate()
- if iterator or chunksize is not None:
- return TableIterator(self, func, nrows=nrows, start=start,
- stop=stop, chunksize=chunksize,
- auto_close=auto_close)
+ # create the iterator
+ it = TableIterator(self, s, func, where=where, nrows=nrows, start=start,
+ stop=stop, iterator=iterator, chunksize=chunksize,
+ auto_close=auto_close)
+
+ return it.get_result(coordinates=True)
- return TableIterator(self, func, nrows=nrows, start=start, stop=stop,
- auto_close=auto_close).get_values()
def put(self, key, value, format=None, append=False, **kwargs):
"""
@@ -1293,20 +1286,25 @@ class TableIterator(object):
----------
store : the reference store
- func : the function to get results
+ s : the refered storer
+ func : the function to execute the query
+ where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
- chunksize : the passed chunking valeu (default is 50000)
+ iterator : boolean, whether to use the default iterator
+ chunksize : the passed chunking value (default is 50000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
kwargs : the passed kwargs
"""
- def __init__(self, store, func, nrows, start=None, stop=None,
- chunksize=None, auto_close=False):
+ def __init__(self, store, s, func, where, nrows, start=None, stop=None,
+ iterator=False, chunksize=None, auto_close=False):
self.store = store
- self.func = func
+ self.s = s
+ self.func = func
+ self.where = where
self.nrows = nrows or 0
self.start = start or 0
@@ -1314,23 +1312,29 @@ def __init__(self, store, func, nrows, start=None, stop=None,
stop = self.nrows
self.stop = min(self.nrows, stop)
- if chunksize is None:
- chunksize = 100000
+ self.coordinates = None
+ if iterator or chunksize is not None:
+ if chunksize is None:
+ chunksize = 100000
+ self.chunksize = int(chunksize)
+ else:
+ self.chunksize = None
- self.chunksize = chunksize
self.auto_close = auto_close
def __iter__(self):
+
+ # iterate
current = self.start
while current < self.stop:
- stop = current + self.chunksize
- v = self.func(current, stop)
- current = stop
- if v is None:
+ stop = min(current + self.chunksize, self.stop)
+ value = self.func(None, None, self.coordinates[current:stop])
+ current = stop
+ if value is None or not len(value):
continue
- yield v
+ yield value
self.close()
@@ -1338,12 +1342,29 @@ def close(self):
if self.auto_close:
self.store.close()
- def get_values(self):
- results = self.func(self.start, self.stop)
+ def get_result(self, coordinates=False):
+
+ # return the actual iterator
+ if self.chunksize is not None:
+ if not self.s.is_table:
+ raise TypeError(
+ "can only use an iterator or chunksize on a table")
+
+ self.coordinates = self.s.read_coordinates(where=self.where)
+
+ return self
+
+ # if specified read via coordinates (necessary for multiple selections
+ if coordinates:
+ where = self.s.read_coordinates(where=self.where)
+ else:
+ where = self.where
+
+ # directly return the result
+ results = self.func(self.start, self.stop, where)
self.close()
return results
-
class IndexCol(StringMixin):
""" an index column description class
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 9cdecd16755c7..c1419ef2d023e 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -3264,21 +3264,16 @@ def test_select_iterator(self):
expected = store.select('df')
- results = []
- for s in store.select('df',iterator=True):
- results.append(s)
+ results = [ s for s in store.select('df',iterator=True) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
- results = []
- for s in store.select('df',chunksize=100):
- results.append(s)
+
+ results = [ s for s in store.select('df',chunksize=100) ]
self.assertEqual(len(results), 5)
result = concat(results)
tm.assert_frame_equal(expected, result)
- results = []
- for s in store.select('df',chunksize=150):
- results.append(s)
+ results = [ s for s in store.select('df',chunksize=150) ]
result = concat(results)
tm.assert_frame_equal(result, expected)
@@ -3294,12 +3289,10 @@ def test_select_iterator(self):
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df',format='table')
- results = []
- for x in read_hdf(path,'df',chunksize=100):
- results.append(x)
+ results = [ s for s in read_hdf(path,'df',chunksize=100) ]
+ result = concat(results)
self.assertEqual(len(results), 5)
- result = concat(results)
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path,'df'))
@@ -3318,10 +3311,8 @@ def test_select_iterator(self):
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
- results = []
- for s in store.select_as_multiple(
- ['df1', 'df2'], selector='df1', chunksize=150):
- results.append(s)
+ results = [ s for s in store.select_as_multiple(
+ ['df1', 'df2'], selector='df1', chunksize=150) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
@@ -3335,6 +3326,185 @@ def test_select_iterator(self):
#result = concat(results)
#tm.assert_frame_equal(expected, result)
+ def test_select_iterator_complete_8014(self):
+
+ # GH 8014
+ # using iterator and where clause
+ chunksize=1e4
+
+ # no iterator
+ with ensure_clean_store(self.path) as store:
+
+ expected = tm.makeTimeDataFrame(100064, 'S')
+ _maybe_remove(store, 'df')
+ store.append('df',expected)
+
+ beg_dt = expected.index[0]
+ end_dt = expected.index[-1]
+
+ # select w/o iteration and no where clause works
+ result = store.select('df')
+ tm.assert_frame_equal(expected, result)
+
+ # select w/o iterator and where clause, single term, begin
+ # of range, works
+ where = "index >= '%s'" % beg_dt
+ result = store.select('df',where=where)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/o iterator and where clause, single term, end
+ # of range, works
+ where = "index <= '%s'" % end_dt
+ result = store.select('df',where=where)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/o iterator and where clause, inclusive range,
+ # works
+ where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
+ result = store.select('df',where=where)
+ tm.assert_frame_equal(expected, result)
+
+ # with iterator, full range
+ with ensure_clean_store(self.path) as store:
+
+ expected = tm.makeTimeDataFrame(100064, 'S')
+ _maybe_remove(store, 'df')
+ store.append('df',expected)
+
+ beg_dt = expected.index[0]
+ end_dt = expected.index[-1]
+
+ # select w/iterator and no where clause works
+ results = [ s for s in store.select('df',chunksize=chunksize) ]
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/iterator and where clause, single term, begin of range
+ where = "index >= '%s'" % beg_dt
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/iterator and where clause, single term, end of range
+ where = "index <= '%s'" % end_dt
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/iterator and where clause, inclusive range
+ where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ def test_select_iterator_non_complete_8014(self):
+
+ # GH 8014
+ # using iterator and where clause
+ chunksize=1e4
+
+ # with iterator, non complete range
+ with ensure_clean_store(self.path) as store:
+
+ expected = tm.makeTimeDataFrame(100064, 'S')
+ _maybe_remove(store, 'df')
+ store.append('df',expected)
+
+ beg_dt = expected.index[1]
+ end_dt = expected.index[-2]
+
+ # select w/iterator and where clause, single term, begin of range
+ where = "index >= '%s'" % beg_dt
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ result = concat(results)
+ rexpected = expected[expected.index >= beg_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, single term, end of range
+ where = "index <= '%s'" % end_dt
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ result = concat(results)
+ rexpected = expected[expected.index <= end_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, inclusive range
+ where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ result = concat(results)
+ rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
+ tm.assert_frame_equal(rexpected, result)
+
+ # with iterator, empty where
+ with ensure_clean_store(self.path) as store:
+
+ expected = tm.makeTimeDataFrame(100064, 'S')
+ _maybe_remove(store, 'df')
+ store.append('df',expected)
+
+ end_dt = expected.index[-1]
+
+ # select w/iterator and where clause, single term, begin of range
+ where = "index > '%s'" % end_dt
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ self.assertEqual(0, len(results))
+
+ def test_select_iterator_many_empty_frames(self):
+
+ # GH 8014
+ # using iterator and where clause can return many empty
+ # frames.
+ chunksize=int(1e4)
+
+ # with iterator, range limited to the first chunk
+ with ensure_clean_store(self.path) as store:
+
+ expected = tm.makeTimeDataFrame(100000, 'S')
+ _maybe_remove(store, 'df')
+ store.append('df',expected)
+
+ beg_dt = expected.index[0]
+ end_dt = expected.index[chunksize-1]
+
+ # select w/iterator and where clause, single term, begin of range
+ where = "index >= '%s'" % beg_dt
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+ result = concat(results)
+ rexpected = expected[expected.index >= beg_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, single term, end of range
+ where = "index <= '%s'" % end_dt
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+
+ tm.assert_equal(1, len(results))
+ result = concat(results)
+ rexpected = expected[expected.index <= end_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, inclusive range
+ where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+
+ # should be 1, is 10
+ tm.assert_equal(1, len(results))
+ result = concat(results)
+ rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause which selects
+ # *nothing*.
+ #
+ # To be consistent with Python idiom I suggest this should
+ # return [] e.g. `for e in []: print True` never prints
+ # True.
+
+ where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
+ results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
+
+ # should be []
+ tm.assert_equal(0, len(results))
+
+
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
| closes #8014
| https://api.github.com/repos/pandas-dev/pandas/pulls/8029 | 2014-08-14T14:33:58Z | 2014-08-16T00:39:45Z | 2014-08-16T00:39:45Z | 2014-08-16T00:39:45Z |
BUG: Area plot legend has incorrect color | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index c6e784ac93e92..066aef1f791eb 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -472,7 +472,7 @@ Bug Fixes
times were returned when crossing DST boundaries (:issue:`7835`, :issue:`7901`).
-
+- Bug in area plot draws legend with incorrect ``alpha`` when ``stacked=True`` (:issue:`8027`)
- ``Period`` and ``PeriodIndex`` addition/subtraction with ``np.timedelta64`` results in incorrect internal representations (:issue:`7740`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index b3a92263370e8..c38518a4c50be 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -1718,7 +1718,7 @@ def test_hist_df_coord(self):
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
- np.array([8, 8, 8, 8, 8])),
+ np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
@@ -1726,7 +1726,7 @@ def test_hist_df_coord(self):
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
- np.array([8, 3, 8, 8, 8, 8])),
+ np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
@@ -2157,20 +2157,38 @@ def test_area_colors(self):
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
+
+ handles, labels = ax.get_legend_handles_labels()
+ # legend is stored as Line2D, thus check linecolors
+ self._check_colors(handles, linecolors=custom_colors)
+ for h in handles:
+ self.assertTrue(h.get_alpha() is None)
tm.close()
ax = df.plot(kind='area', colormap='jet')
- rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
- self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+ jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
+ self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
- self._check_colors(poly, facecolors=rgba_colors)
+ self._check_colors(poly, facecolors=jet_colors)
+
+ handles, labels = ax.get_legend_handles_labels()
+ self._check_colors(handles, linecolors=jet_colors)
+ for h in handles:
+ self.assertTrue(h.get_alpha() is None)
tm.close()
- ax = df.plot(kind='area', colormap=cm.jet)
- rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
- self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+ # When stacked=True, alpha is set to 0.5
+ ax = df.plot(kind='area', colormap=cm.jet, stacked=False)
+ self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
- self._check_colors(poly, facecolors=rgba_colors)
+ jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
+ self._check_colors(poly, facecolors=jet_with_alpha)
+
+ handles, labels = ax.get_legend_handles_labels()
+ # Line2D can't have alpha in its linecolor
+ self._check_colors(handles, linecolors=jet_colors)
+ for h in handles:
+ self.assertEqual(h.get_alpha(), 0.5)
@slow
def test_hist_colors(self):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 7d0eaea5b36d6..954a88bc9d1ad 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1686,7 +1686,7 @@ def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
- alpha = self.kwds.get('alpha', 0.5)
+ alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
| Derived from #7636. Area plot sets incorrect `alpha` values to legend when `stacked=True`.
#### NG: Each areas are drawn with `alpha=1.0`, but legend has `alpha=0.5`

#### After fix
```
df = pd.DataFrame(np.random.rand(20, 5), columns=['A', 'B', 'C', 'D', 'E'])
df.plot(kind='area')
```

```
# When alpha is specified
df.plot(kind='area', alpha=0.2)
```

| https://api.github.com/repos/pandas-dev/pandas/pulls/8027 | 2014-08-14T11:54:01Z | 2014-08-15T12:50:09Z | 2014-08-15T12:50:09Z | 2014-10-02T12:18:35Z |
BUG: related to GH5080, get_indexer choking on boolean type promotion (GH8024) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index c6e784ac93e92..7842b80cec46f 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -217,7 +217,7 @@ Internal Refactoring
In 0.15.0 ``Index`` has internally been refactored to no longer sub-class ``ndarray``
but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This change allows very easy sub-classing and creation of new index types. This should be
-a transparent change with only very limited API implications (:issue:`5080`, :issue:`7439`, :issue:`7796`)
+a transparent change with only very limited API implications (:issue:`5080`, :issue:`7439`, :issue:`7796`, :issue:`8024`)
- you may need to unpickle pandas version < 0.15.0 pickles using ``pd.read_pickle`` rather than ``pickle.load``. See :ref:`pickle docs <io.pickle>`
- when plotting with a ``PeriodIndex``. The ``matplotlib`` internal axes will now be arrays of ``Period`` rather than a ``PeriodIndex``. (this is similar to how a ``DatetimeIndex`` passes arrays of ``datetimes`` now)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index a58a3331f9759..c8836dae4ad7c 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1506,6 +1506,9 @@ def _possibly_promote(self, other):
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
+ elif self.inferred_type == 'boolean':
+ if self.dtype != 'object':
+ return self.astype('object'), other.astype('object')
return self, other
def groupby(self, to_groupby):
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 24282fdc280af..022a8b543ce32 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4968,6 +4968,13 @@ def test_map(self):
result = self.ts.map(lambda x: x * 2)
self.assert_numpy_array_equal(result, self.ts * 2)
+ def test_map_compat(self):
+ # related GH 8024
+ s = Series([True,True,False],index=[1,2,3])
+ result = s.map({ True : 'foo', False : 'bar' })
+ expected = Series(['foo','foo','bar'],index=[1,2,3])
+ assert_series_equal(result,expected)
+
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
| closes #8024
| https://api.github.com/repos/pandas-dev/pandas/pulls/8026 | 2014-08-14T11:50:31Z | 2014-08-14T13:08:19Z | 2014-08-14T13:08:19Z | 2014-08-14T13:08:19Z |
Fixed incorrect datatype conversion on multi-indexes | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 0223a11d8a011..49084e28f21da 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -446,6 +446,7 @@ There are no experimental changes in 0.15.0
Bug Fixes
~~~~~~~~~
+- Bug in multiindexes dtypes getting mixed up when DataFrame is saved to SQL table (:issue:`8021`)
- Bug in Series 0-division with a float and integer operand dtypes (:issue:`7785`)
- Bug in ``Series.astype("unicode")`` not calling ``unicode`` on the values correctly (:issue:`7758`)
- Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 1ee5c55c0ae06..cb234f825a51e 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -664,20 +664,28 @@ def _index_name(self, index, index_label):
else:
return None
+ def _get_column_names_and_types(self, dtype_mapper):
+ column_names_and_types = []
+ if self.index is not None:
+ for i, idx_label in enumerate(self.index):
+ idx_type = dtype_mapper(
+ self.frame.index.get_level_values(i).dtype)
+ column_names_and_types.append((idx_label, idx_type))
+
+ column_names_and_types += zip(
+ list(map(str, self.frame.columns)),
+ map(dtype_mapper, self.frame.dtypes)
+ )
+ return column_names_and_types
+
def _create_table_statement(self):
from sqlalchemy import Table, Column
- columns = list(map(str, self.frame.columns))
- column_types = map(self._sqlalchemy_type, self.frame.dtypes)
+ column_names_and_types = \
+ self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ)
- for name, typ in zip(columns, column_types)]
-
- if self.index is not None:
- for i, idx_label in enumerate(self.index[::-1]):
- idx_type = self._sqlalchemy_type(
- self.frame.index.get_level_values(i))
- columns.insert(0, Column(idx_label, idx_type, index=True))
+ for name, typ in column_names_and_types]
return Table(self.name, self.pd_sql.meta, *columns)
@@ -957,16 +965,13 @@ def insert(self):
def _create_table_statement(self):
"Return a CREATE TABLE statement to suit the contents of a DataFrame."
- columns = list(map(str, self.frame.columns))
+ column_names_and_types = \
+ self._get_column_names_and_types(self._sql_type_name)
+
pat = re.compile('\s+')
- if any(map(pat.search, columns)):
+ column_names = [col_name for col_name, _ in column_names_and_types]
+ if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING)
- column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes]
-
- if self.index is not None:
- for i, idx_label in enumerate(self.index[::-1]):
- columns.insert(0, idx_label)
- column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype))
flv = self.pd_sql.flavor
@@ -976,7 +981,7 @@ def _create_table_statement(self):
col_template = br_l + '%s' + br_r + ' %s'
columns = ',\n '.join(col_template %
- x for x in zip(columns, column_types))
+ x for x in column_names_and_types)
template = """CREATE TABLE %(name)s (
%(columns)s
)"""
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 4d7eb2d04af21..6a0130e515d59 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -581,6 +581,15 @@ def test_to_sql_index_label_multiindex(self):
'test_index_label', self.conn, if_exists='replace',
index_label='C')
+ def test_multiindex_roundtrip(self):
+ df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
+ columns=['A','B','C'], index=['A','B'])
+
+ df.to_sql('test_multiindex_roundtrip', self.conn)
+ result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
+ self.conn, index_col=['A','B'])
+ tm.assert_frame_equal(df, result, check_index_type=True)
+
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
@@ -641,9 +650,7 @@ def test_read_sql_delegate(self):
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
- tm.assert_frame_equal(iris_frame1, iris_frame2,
- "read_sql and read_sql_query have not the same"
- " result with a query")
+ tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
@@ -697,9 +704,7 @@ def test_sql_open_close(self):
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
- tm.assert_frame_equal(iris_frame1, iris_frame2,
- "read_sql and read_sql_query have not the same"
- " result with a query")
+ tm.assert_frame_equal(iris_frame1, iris_frame2)
self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
| When creating a new table from a `pandas` dataframe with a multi-index, the code would iterate over the index columns backwards but use their types as if counting forwards. This meant that the created table had incorrect indexes for multi-indexes.
This code factors out a function to pull column names and column types for both SQLAlchemy and legacy table objects. This code now iterates over the indices properly.
And additional test has been written. All test pass on my local machine.
Closes https://github.com/pydata/pandas/issues/8021 .
| https://api.github.com/repos/pandas-dev/pandas/pulls/8022 | 2014-08-14T08:32:13Z | 2014-08-15T22:40:58Z | 2014-08-15T22:40:58Z | 2014-08-20T20:23:36Z |
ENH: Groupby.plot enhancement | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 2eaf143a3e0b8..ca54715a3bac6 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -265,50 +265,7 @@ You can pass other keywords supported by matplotlib ``hist``. For example, horiz
See the :meth:`hist <matplotlib.axes.Axes.hist>` method and the
`matplotlib hist documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more.
-
-The existing interface ``DataFrame.hist`` to plot histogram still can be used.
-
-.. ipython:: python
-
- plt.figure();
-
- @savefig hist_plot_ex.png
- df['A'].diff().hist()
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
-
-:meth:`DataFrame.hist` plots the histograms of the columns on multiple
-subplots:
-
-.. ipython:: python
-
- plt.figure()
-
- @savefig frame_hist_ex.png
- df.diff().hist(color='k', alpha=0.5, bins=50)
-
-
-.. versionadded:: 0.10.0
-
-The ``by`` keyword can be specified to plot grouped histograms:
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
- plt.figure()
- np.random.seed(123456)
-
-.. ipython:: python
-
- data = pd.Series(np.random.randn(1000))
-
- @savefig grouped_hist.png
- data.hist(by=np.random.randint(0, 4, 1000), figsize=(6, 4))
-
+.. note:: The existing interface ``DataFrame.hist`` to plot histogram still can be used.
.. _visualization.box:
@@ -377,69 +334,7 @@ For example, horizontal and custom-positioned boxplot can be drawn by
See the :meth:`boxplot <matplotlib.axes.Axes.boxplot>` method and the
`matplotlib boxplot documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot>`__ for more.
-
-The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
- np.random.seed(123456)
-
-.. ipython:: python
- :okwarning:
-
- df = pd.DataFrame(np.random.rand(10,5))
- plt.figure();
-
- @savefig box_plot_ex.png
- bp = df.boxplot()
-
-You can create a stratified boxplot using the ``by`` keyword argument to create
-groupings. For instance,
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
- np.random.seed(123456)
-
-.. ipython:: python
- :okwarning:
-
- df = pd.DataFrame(np.random.rand(10,2), columns=['Col1', 'Col2'] )
- df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B'])
-
- plt.figure();
-
- @savefig box_plot_ex2.png
- bp = df.boxplot(by='X')
-
-You can also pass a subset of columns to plot, as well as group by multiple
-columns:
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
- np.random.seed(123456)
-
-.. ipython:: python
- :okwarning:
-
- df = pd.DataFrame(np.random.rand(10,3), columns=['Col1', 'Col2', 'Col3'])
- df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B'])
- df['Y'] = pd.Series(['A','B','A','B','A','B','A','B','A','B'])
-
- plt.figure();
-
- @savefig box_plot_ex3.png
- bp = df.boxplot(column=['Col1','Col2'], by=['X','Y'])
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
+.. note:: The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
.. _visualization.box.return:
@@ -455,45 +350,8 @@ When ``subplots=False`` / ``by`` is ``None``:
* if ``return_type`` is ``'both'`` a namedtuple containging the :class:`matplotlib Axes <matplotlib.axes.Axes>`
and :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned
-When ``subplots=True`` / ``by`` is some column of the DataFrame:
-
-* A dict of ``return_type`` is returned, where the keys are the columns
- of the DataFrame. The plot has a facet for each column of
- the DataFrame, with a separate box for each value of ``by``.
-
-Finally, when calling boxplot on a :class:`Groupby` object, a dict of ``return_type``
-is returned, where the keys are the same as the Groupby object. The plot has a
-facet for each key, with each facet containing a box for each column of the
-DataFrame.
-
-.. ipython:: python
- :okwarning:
-
- np.random.seed(1234)
- df_box = pd.DataFrame(np.random.randn(50, 2))
- df_box['g'] = np.random.choice(['A', 'B'], size=50)
- df_box.loc[df_box['g'] == 'B', 1] += 3
-
- @savefig boxplot_groupby.png
- bp = df_box.boxplot(by='g')
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
-
-Compare to:
-
-.. ipython:: python
- :okwarning:
-
- @savefig groupby_boxplot_vis.png
- bp = df_box.groupby('g').boxplot()
-
-.. ipython:: python
- :suppress:
-
- plt.close('all')
+When ``subplots=True``, a dict of ``return_type`` is returned, where the keys
+are the columns of the DataFrame.
.. _visualization.area_plot:
@@ -806,6 +664,142 @@ explicit about how missing values are handled, consider using
:meth:`~pandas.DataFrame.fillna` or :meth:`~pandas.DataFrame.dropna`
before plotting.
+.. _visualization.groupby:
+
+Plotting with Grouped Data
+--------------------------
+
+.. versionadded:: 0.17
+
+You can plot grouped data easily by using ``GroupBy.plot`` method. It draws
+each column as line categorized by groups.
+
+.. ipython:: python
+
+ dfg = pd.DataFrame(np.random.rand(45, 4), columns=['A', 'B', 'C', 'D'])
+ dfg['by'] = ['Group 0', 'Group 1', 'Group 2'] * 15
+ grouped = dfg.groupby(by='by')
+
+ @savefig dfgropuby_line.png
+ grouped.plot();
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+``SeriesGroupBy`` also supports plotting. It outputs each group in a single axes
+by default. It supports ``line``, ``bar``, ``barh``, ``hist``, ``kde``,
+``area``, ``box`` and ``pie`` charts.
+
+.. ipython:: python
+
+ @savefig sgropuby_bar.png
+ grouped['A'].plot(kind='bar');
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+.. ipython:: python
+
+ @savefig sgropuby_kde.png
+ grouped['A'].plot(kind='kde');
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+Specify ``subplots=True`` to output in separate axes.
+
+.. ipython:: python
+
+ @savefig sgropuby_box_subplots.png
+ grouped['A'].plot(kind='box', subplots=True);
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+``layout`` keyword allows to specify the lauyout.
+
+.. ipython:: python
+
+ @savefig sgropuby_pie_subplots.png
+ grouped['A'].plot(kind='pie', subplots=True, legend=False, layout=(2, 2));
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+``DataFrameGroupBy.plot`` supports ``line``, ``bar``, ``barh``, ``hist``,
+``kde``, ``area``, ``box``, ``scatter`` and ``hexbin`` plots.
+Except ``scatter``, plots are outputs as subplots.
+
+Following example shows stacked bar chart categorized by group.
+Note that you can pass keywords which is supported in normal plots.
+
+.. ipython:: python
+
+ @savefig dfgropuby_bar.png
+ grouped.plot(kind='bar', stacked=True);
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+If you want to subplot by column, specify ``axis=1`` keyword.
+
+.. ipython:: python
+
+ @savefig dfgropuby_bar_axis1.png
+ grouped.plot(kind='bar', axis=1);
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+Scatter plot can be drawn in a single axes specifying ``subplots=False``.
+Each group is colorized by separated colors.
+
+.. note:: Hexbin cannot be plotted in a single axes.
+
+.. ipython:: python
+
+ @savefig dfgropuby_scatter.png
+ grouped.plot(kind='scatter', x='A', y='B', subplots=False);
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+Otherwise, it is drawn as subplots.
+
+.. ipython:: python
+
+ @savefig dfgropuby_scatter_subplots.png
+ grouped.plot(kind='scatter', x='A', y='B', layout=(2, 2));
+
+.. ipython:: python
+ :suppress:
+
+ plt.close('all')
+
+.. note:: Prior to 0.17, ``GroupBy.plot`` results in each group to be plotted
+ on separate figures. To output the same result, you can do:
+
+.. code-block:: python
+
+ for name, group in grouped:
+ group.plot()
+
.. _visualization.tools:
Plotting Tools
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 7100f78cb3c7a..7b2d9ba72559e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -40,6 +40,7 @@ Highlights include:
- Development support for benchmarking with the `Air Speed Velocity library <https://github.com/spacetelescope/asv/>`_ (:issue:`8316`)
- Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>`
- Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>`
+- GroupBy plot enhancement, see :ref:`here <whatsnew_0170.groupbyplot>` (:issue:`8018`)
Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating.
@@ -205,6 +206,24 @@ The support math functions are `sin`, `cos`, `exp`, `log`, `expm1`, `log1p`,
These functions map to the intrinsics for the NumExpr engine. For Python
engine, they are mapped to NumPy calls.
+.. _whatsnew_0170.groupbyplot:
+
+Plotting with Grouped Data
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``GroupBy.plot`` now can output grouped plot in a single figure,
+supporting the same kinds as ``DataFrame`` and ``Series``.
+
+.. ipython:: python
+
+ dfg = pd.DataFrame(np.random.rand(45, 4), columns=['A', 'B', 'C', 'D'])
+ dfg['by'] = ['Group 0', 'Group 1', 'Group 2'] * 15
+ grouped = dfg.groupby(by='by')
+
+ grouped.plot();
+
+To see the output and its detail, refer to :ref:`here <visualization.groupby>`.
+
.. _whatsnew_0170.enhancements.other:
Other enhancements
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 0293fc655742e..cb09824c3ac8f 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -3475,8 +3475,10 @@ def count(self):
return self._wrap_agged_blocks(data.items, list(blk))
-from pandas.tools.plotting import boxplot_frame_groupby
-DataFrameGroupBy.boxplot = boxplot_frame_groupby
+import pandas.tools.plotting as plotting
+DataFrameGroupBy.boxplot = plotting.boxplot_frame_groupby
+SeriesGroupBy.plot = plotting.plot_grouped_series
+DataFrameGroupBy.plot = plotting.plot_grouped_frame
class PanelGroupBy(NDFrameGroupBy):
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index d1f1f2196558a..4d5963a05f6f4 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -1769,6 +1769,7 @@ def test_line_lim(self):
self.assertFalse(hasattr(ax, 'right_ax'))
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
+ self.assertTrue(hasattr(ax, 'left_ax'))
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
@@ -1793,7 +1794,6 @@ def test_area_lim(self):
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
-
default_colors = plt.rcParams.get('axes.color_cycle')
df = DataFrame(randn(5, 5))
@@ -2986,7 +2986,6 @@ def test_hexbin_cmap(self):
@slow
def test_no_color_bar(self):
df = self.hexbin_df
-
ax = df.plot(kind='hexbin', x='A', y='B', colorbar=None)
self.assertIs(ax.collections[0].colorbar, None)
@@ -3552,6 +3551,11 @@ def test_invalid_colormap(self):
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
+ def setUp(self):
+ self.df = DataFrame(np.random.rand(30, 5), columns=['A', 'B', 'C', 'D', 'E'])
+ self.df['by'] = ['Group {0}'.format(i) for i in [0]*10 + [1]*10 + [2]*10]
+ self.grouped = self.df.groupby(by='by')
+
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
@@ -3586,6 +3590,378 @@ def test_hist_single_row(self):
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
+ def test_line_groupby(self):
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+
+ grouped = self.grouped
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ ax = _check_plot_works(sgb.plot, color=['r', 'g', 'b'])
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.get_lines(), linecolors=['r', 'g', 'b'])
+ self._check_text_labels(ax.title, 'A')
+ tm.close()
+
+ axes = _check_plot_works(sgb.plot, subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ tm.close()
+
+ # DataFrameGroupBy
+ axes = _check_plot_works(grouped.plot, subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, title in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=['A', 'B', 'C', 'D', 'E'])
+ self._check_colors(ax.get_lines(), linecolors=default_colors[:5])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ axes = _check_plot_works(grouped.plot, subplots=True, axis=1)
+ self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
+ for ax, title in zip(axes, ['A', 'B', 'C', 'D', 'E']):
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.get_lines(), linecolors=default_colors[:3])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ def test_area_groupby(self):
+ from matplotlib.collections import PolyCollection
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+
+ grouped = self.grouped
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ ax = _check_plot_works(sgb.plot, kind='area', color=['r', 'g', 'b'])
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.get_lines(), linecolors=['r', 'g', 'b'])
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
+ self._check_colors(poly, facecolors=['r', 'g', 'b'])
+ self._check_text_labels(ax.title, 'A')
+ tm.close()
+
+ axes = _check_plot_works(sgb.plot, kind='area', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ tm.close()
+
+ # DataFrameGroupBy
+ axes = _check_plot_works(grouped.plot, kind='area', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, title in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=['A', 'B', 'C', 'D', 'E'])
+ self._check_colors(ax.get_lines(), linecolors=default_colors[:5])
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
+ self._check_colors(poly, facecolors=default_colors[:5])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ axes = _check_plot_works(grouped.plot, kind='area', subplots=True, axis=1)
+ self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
+ for ax, title in zip(axes, ['A', 'B', 'C', 'D', 'E']):
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.get_lines(), linecolors=default_colors[:3])
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
+ self._check_colors(poly, facecolors=default_colors[:3])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ def test_line_groupby_layout(self):
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+
+ grouped = self.grouped
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ axes = sgb.plot(subplots=True, layout=(2, 2))
+ self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+
+ axes = self._flatten_visible(axes)
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ tm.close()
+
+ # DataFrameGroupBy
+ axes = grouped.plot(subplots=True, layout=(3, 2))
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 2))
+
+ axes = self._flatten_visible(axes)
+ for ax, title in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=['A', 'B', 'C', 'D', 'E'])
+ self._check_colors(ax.get_lines(), linecolors=default_colors[:5])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ def test_bar_groupby(self):
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+
+ grouped = self.grouped
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ ax = sgb.plot(kind='bar', color=['r', 'g', 'b'])
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.patches[::30], facecolors=['r', 'g', 'b'])
+ self._check_text_labels(ax.title, 'A')
+ tm.close()
+
+ axes = sgb.plot(kind='bar', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ self._check_colors([axes[0].patches[0]], facecolors=default_colors[0])
+ self._check_colors([axes[1].patches[0]], facecolors=default_colors[0])
+ self._check_colors([axes[2].patches[0]], facecolors=default_colors[0])
+ tm.close()
+
+ # DataFrameGroupBy
+ axes = grouped.plot(kind='bar', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, title in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=['A', 'B', 'C', 'D', 'E'])
+ self._check_text_labels(ax.title, title)
+ self._check_colors(axes[0].patches[::10], facecolors=default_colors[:5])
+ self._check_colors(axes[1].patches[::10], facecolors=default_colors[:5])
+ self._check_colors(axes[2].patches[::10], facecolors=default_colors[:5])
+ tm.close()
+
+ axes = grouped.plot(kind='bar', subplots=True, axis=1)
+ self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
+ for ax, title in zip(axes, ['A', 'B', 'C', 'D', 'E']):
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.patches[::30], facecolors=['b', 'g', 'r'])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ def test_hist_groupby(self):
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+
+ grouped = self.grouped
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ ax = sgb.plot(kind='hist', color=['r', 'g', 'b'])
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.patches[::10], facecolors=['r', 'g', 'b'])
+ self._check_text_labels(ax.title, 'A')
+ tm.close()
+
+ axes = sgb.plot(kind='hist', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ self._check_colors([axes[0].patches[0]], facecolors=default_colors[0])
+ self._check_colors([axes[1].patches[0]], facecolors=default_colors[1])
+ self._check_colors([axes[2].patches[0]], facecolors=default_colors[2])
+ tm.close()
+
+ # DataFrameGroupBy
+ axes = grouped.plot(kind='hist', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, title in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=['A', 'B', 'C', 'D', 'E'])
+ self._check_text_labels(ax.title, title)
+ self._check_colors(axes[0].patches[::10], facecolors=default_colors[:5])
+ self._check_colors(axes[1].patches[::10], facecolors=default_colors[:5])
+ self._check_colors(axes[2].patches[::10], facecolors=default_colors[:5])
+ tm.close()
+
+ axes = grouped.plot(kind='hist', subplots=True, axis=1)
+ self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
+ for ax, title in zip(axes, ['A', 'B', 'C', 'D', 'E']):
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.patches[::10], facecolors=['b', 'g', 'r'])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ def test_kde_groupby(self):
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+
+ grouped = self.grouped
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ ax = sgb.plot(kind='kde', color=['r', 'g', 'b'])
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.lines, linecolors=['r', 'g', 'b'])
+ self._check_text_labels(ax.title, 'A')
+ tm.close()
+
+ axes = sgb.plot(kind='kde', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ self._check_colors(axes[0].lines, linecolors=default_colors[0])
+ self._check_colors(axes[1].lines, linecolors=default_colors[1])
+ self._check_colors(axes[2].lines, linecolors=default_colors[2])
+ tm.close()
+
+ # DataFrameGroupBy
+ axes = grouped.plot(kind='kde', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ for ax, title in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=['A', 'B', 'C', 'D', 'E'])
+ self._check_text_labels(ax.title, title)
+ self._check_colors(axes[0].lines, linecolors=default_colors[:5])
+ self._check_colors(axes[1].lines, linecolors=default_colors[:5])
+ self._check_colors(axes[2].lines, linecolors=default_colors[:5])
+ tm.close()
+
+ axes = grouped.plot(kind='kde', subplots=True, axis=1)
+ self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
+ for ax, title in zip(axes, ['A', 'B', 'C', 'D', 'E']):
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_colors(ax.lines, linecolors=['b', 'g', 'r'])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ def test_box_groupby(self):
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+
+ grouped = self.grouped
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ ax = sgb.plot(kind='box', color=['r', 'g', 'b'])
+ self.assertIsNone(ax.get_legend())
+ self._check_text_labels(ax.title, 'A')
+ tm.close()
+
+ axes = sgb.plot(kind='box', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self.assertIsNone(ax.get_legend())
+ self._check_text_labels(ax.title, group)
+ tm.close()
+
+ # DataFrameGroupBy
+ axes = grouped.plot(kind='box', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ for ax, title in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self.assertIsNone(ax.get_legend())
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ axes = grouped.plot(kind='box', subplots=True, axis=1)
+ self._check_axes_shape(axes, axes_num=5, layout=(1, 5))
+ for ax, title in zip(axes, ['A', 'B', 'C', 'D', 'E']):
+ self.assertIsNone(ax.get_legend())
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ def test_pie_groupby(self):
+ df = DataFrame({'by': ['Group 0', 'Group 1', 'Group 2'] * 3,
+ 'A': [2, 3, 4, 1, 2, 3, 2, 1, 3]},
+ index=list('abcdefghi'))
+ grouped = df.groupby('by')
+
+ # SeriesGroupBy
+ sgb = grouped['A']
+ msg = "To plot SeriesGroupBy, specify 'suplots=True'"
+ with tm.assertRaisesRegexp(ValueError, msg):
+ sgb.plot(kind='pie', subplots=False)
+
+ axes = sgb.plot(kind='pie', colors=['r', 'g', 'b'], subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ for ax, labels, title in zip(axes, ['adg', 'beh', 'cfi'],
+ ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=list(labels))
+ self._check_colors(ax.patches, facecolors=['r', 'g', 'b'])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ import matplotlib.pyplot as plt
+ default_colors = plt.rcParams.get('axes.color_cycle')
+ axes = sgb.plot(kind='pie', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ for ax, labels, title in zip(axes, ['adg', 'beh', 'cfi'],
+ ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=list(labels))
+ self._check_colors(ax.patches, facecolors=default_colors[:3])
+ self._check_text_labels(ax.title, title)
+ tm.close()
+
+ # DataFrameGroupBy
+ msg = "plot kind pie cannot be used for DataFrameGroupBy"
+ with tm.assertRaisesRegexp(ValueError, msg):
+ grouped.plot(kind='pie', subplots=False)
+
+ def test_scatter_groupby(self):
+ grouped = self.grouped
+
+ ax = _check_plot_works(grouped.plot, kind='scatter', x='A', y='B', subplots=False)
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_text_labels(ax.title, '')
+ tm.close()
+
+ # use title
+ ax = _check_plot_works(grouped.plot, kind='scatter', x='A', y='B',
+ subplots=False, title='xx')
+ self._check_legend_labels(ax, labels=['Group 0', 'Group 1', 'Group 2'])
+ self._check_text_labels(ax.title, 'xx')
+ tm.close()
+
+ axes = _check_plot_works(grouped.plot, kind='scatter', x='A', y='B', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ tm.close()
+
+ def test_scatter_groupby_layout(self):
+ grouped = self.grouped
+ axes = grouped.plot(kind='scatter', x='A', y='B',
+ subplots=True, layout=(2, 2))
+ self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+
+ axes = self._flatten_visible(axes)
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_legend_labels(ax, labels=[group])
+ self._check_text_labels(ax.title, group)
+ tm.close()
+
+ def test_hexbin_groupby(self):
+ grouped = self.grouped
+
+ msg = "To plot DataFrameGroupBy, specify 'suplots=True'"
+ with tm.assertRaisesRegexp(ValueError, msg):
+ grouped.plot(kind='hexbin', x='A', y='B', subplots=False)
+
+ axes = _check_plot_works(grouped.plot, kind='hexbin', x='A', y='B', subplots=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ for ax, group in zip(axes, ['Group 0', 'Group 1', 'Group 2']):
+ self._check_text_labels(ax.title, group)
+ tm.close()
+
+ @slow
+ def test_groupby_errorbar(self):
+ grouped = self.grouped
+
+ msg = 'Error bars are not supported in groupby plots'
+ with tm.assertRaisesRegexp(NotImplementedError, msg):
+ grouped.plot(xerr=1)
+
+ msg = 'Error bars are not supported in groupby plots'
+ with tm.assertRaisesRegexp(NotImplementedError, msg):
+ grouped.plot(yerr=1)
+
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
diff --git a/pandas/tests/test_graphics_others.py b/pandas/tests/test_graphics_others.py
index 641180c8010c0..be8a2af3f2a4b 100644
--- a/pandas/tests/test_graphics_others.py
+++ b/pandas/tests/test_graphics_others.py
@@ -590,12 +590,6 @@ def test_grouped_plot_fignums(self):
gender = tm.choice(['male', 'female'], size=n)
df = DataFrame({'height': height, 'weight': weight, 'gender': gender})
gb = df.groupby('gender')
-
- res = gb.plot()
- self.assertEqual(len(self.plt.get_fignums()), 2)
- self.assertEqual(len(res), 2)
- tm.close()
-
res = gb.boxplot(return_type='axes')
self.assertEqual(len(self.plt.get_fignums()), 1)
self.assertEqual(len(res), 2)
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 9eab385a7a2a5..b0b9f494c3c0f 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -782,7 +782,8 @@ def _kind(self):
_layout_type = 'vertical'
_default_rot = 0
- orientation = None
+ _fillna = None
+ orientation = 'vertical'
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
@@ -796,10 +797,11 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
- table=False, layout=None, **kwds):
+ table=False, layout=None, axis=0, **kwds):
self.data = data
self.by = by
+ self.axis = axis
self.kind = kind
@@ -843,8 +845,6 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
self.grid = grid
self.legend = legend
- self.legend_handles = []
- self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
@@ -875,9 +875,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
self.colormap = colormap
self.table = table
-
self.kwds = kwds
-
self._validate_color_args()
def _validate_color_args(self):
@@ -909,44 +907,163 @@ def _validate_color_args(self):
" use one or the other or pass 'style' "
"without a color symbol")
- def _iter_data(self, data=None, keep_index=False, fillna=None):
- if data is None:
+ def _map_axes_to_data(self):
+ """
+ Iterate over target axes and corresponding data to be plotted on the axes
+
+ Returns:
+ -----------
+ ax: Matplotlib axis object
+ name: str
+ name to be used for axes title
+ data: Series, DataFrame or SeriesGroupBy
+ data to be drawn on axes
+ """
+ from pandas.core.frame import DataFrame
+ from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
+
+ def _to_frame(data):
+ """Convert SeriesGroupBy to DataFrame"""
+ groups = {}
+ for name, group in data:
+ groups[name] = group
+ return DataFrame(groups)
+
+ data = self.data
+
+ if isinstance(data, Series):
+ label = self.label
+ if label is None and data.name is None:
+ label = 'None'
+ data = data.to_frame(name=label)
+ yield self.axes[0], label, data
+ elif isinstance(data, DataFrame):
+ data = self._compute_plot_data(data)
+ if self.subplots:
+ for i, (name, column) in enumerate(data.iteritems()):
+ # return DataFrame
+ yield self.axes[i], name, column.to_frame()
+ else:
+ yield self.axes[0], None, data
+ elif isinstance(data, SeriesGroupBy):
+ if self.subplots:
+ for i, (name, group) in enumerate(data):
+ # overwrite column name with group name
+ group.name = name
+ # return DataFrame
+ yield self.axes[i], name, group.to_frame()
+ else:
+ yield self.axes[0], data.obj.name, _to_frame(data)
+
+ elif isinstance(data, DataFrameGroupBy):
+ if self.subplots:
+ if self.axis == 0:
+ for i, (name, group) in enumerate(data):
+ yield self.axes[i], name, group
+ elif self.axis == 1:
+ data.obj = self._compute_plot_data(data.obj)
+ for i, col in enumerate(self.columns):
+ yield self.axes[i], col, _to_frame(data[col])
+ else:
+ raise ValueError("To plot DataFrameGroupBy, specify 'suplots=True'")
+ else: # pragma no cover
+ raise NotImplementedError(type(data))
+
+ @cache_readonly
+ def columns(self):
+ from pandas.core.frame import DataFrame
+ from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
+
+ if isinstance(self.data, Series):
+ data = self.data.to_frame()
+ elif isinstance(self.data, SeriesGroupBy):
+ data = self.data.obj.to_frame()
+ elif isinstance(self.data, DataFrame):
data = self.data
- if fillna is not None:
- data = data.fillna(fillna)
+ elif isinstance(self.data, DataFrameGroupBy):
+ data = self.data.obj
+ else:
+ return None
+
+ data = self._compute_plot_data(data)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
+ return columns
- for col, values in data.iteritems():
- if keep_index is True:
- yield col, values
+ @property
+ def ndim(self):
+ """
+ Return a tuple of (number of subplots, number of unique series)
+ """
+ from pandas.core.frame import DataFrame
+ from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
+ if isinstance(self.data, Series):
+ ndim = (1, 1)
+ elif isinstance(self.data, DataFrame):
+ columns = len(self.columns)
+ ndim = (1, columns)
+ elif isinstance(self.data, SeriesGroupBy):
+ ngroups = self.data.ngroups
+ ndim = (1, ngroups)
+ elif isinstance(self.data, DataFrameGroupBy):
+ columns = len(self.columns)
+ ngroups = self.data.ngroups
+ if self.axis == 0:
+ ndim = (ngroups, columns)
+ elif self.axis == 1:
+ ndim = (columns, ngroups)
else:
- yield col, values.values
+ msg = 'In {0}, axis must be 0 (subplot by group) or 1 (subplot by index)'
+ raise ValueError(msg.format(self.kind))
+ else:
+ raise NotImplementedError
+ return ndim
+
+ @property
+ def nplots(self):
+ """
+ Number of required subplots
+ """
+ nplots = self.ndim[0]
+ if self.subplots and nplots == 1:
+ # subplot by series
+ nplots = self.ndim[1]
+ if nplots > 1 and not self.subplots:
+ raise ValueError("Input data requires {0} subplots, specify 'subplots=True'")
+ return nplots
@property
def nseries(self):
- if self.data.ndim == 1:
- return 1
- else:
- return self.data.shape[1]
+ """
+ Number of total unique series (lines, bars, etc) in all the axes
+ Used to define the number of unique colors / styles in a plot
+ """
+ return self.ndim[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
- self._compute_plot_data()
self._setup_subplots()
- self._make_plot()
- self._add_table()
- self._make_legend()
- for ax in self.axes:
- self._post_plot_logic_common(ax, self.data)
- self._post_plot_logic(ax, self.data)
+ for i, (ax, title, data) in enumerate(self._map_axes_to_data()):
+ data = self._compute_plot_data(data)
+ self._make_plot(ax, data, axes_num=i, title=title)
+
+ if title is not None:
+ ax.set_title(title)
+
+ self._add_table(ax, data)
+ self._make_legend(ax, data)
+ # post process for each axes
+ self._post_plot_logic_common(ax, data)
+ self._post_plot_logic(ax, data)
+
+ # post process for figure
self._adorn_subplots()
def _args_adjust(self):
@@ -956,33 +1073,37 @@ def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
- len(ax.containers) != 0)
-
- def _maybe_right_yaxis(self, ax, axes_num):
- if not self.on_right(axes_num):
- # secondary axes may be passed via ax kw
- return self._get_ax_layer(ax)
-
- if hasattr(ax, 'right_ax'):
- # if it has right_ax proparty, ``ax`` must be left axes
- return ax.right_ax
- elif hasattr(ax, 'left_ax'):
- # if it has left_ax proparty, ``ax`` must be right axes
- return ax
- else:
- # otherwise, create twin axes
- orig_ax, new_ax = ax, ax.twinx()
- new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
+ len(ax.containers) != 0 or
+ len(ax.patches) != 0)
- orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
+ def _get_ax_by_label(self, ax, label=None):
+ if not self.on_right(label):
+ # secondary axes may be passed as axes
+ ax = self._get_ax_layer(ax)
+ else:
+ # do not use _get_ax_layer here; create the twinx axes if they don't exist
+ if hasattr(ax, 'right_ax'):
+ # if it has a right_ax property, ``ax`` must be the left axes
+ ax = ax.right_ax
+ elif hasattr(ax, 'left_ax'):
+ # if it has a left_ax property, ``ax`` must be the right axes
+ pass
+ # otherwise, create twin axes
+ else:
+ new_ax = ax.twinx()
+ new_ax._get_lines.color_cycle = ax._get_lines.color_cycle
+ ax.right_ax = new_ax
+ new_ax.left_ax = ax
+ if not self._has_plotted_object(ax): # no data on left y
+ ax.get_yaxis().set_visible(False)
+ ax = new_ax
- if not self._has_plotted_object(orig_ax): # no data on left y
- orig_ax.get_yaxis().set_visible(False)
- return new_ax
+ ax.get_yaxis().set_visible(True)
+ return ax
def _setup_subplots(self):
if self.subplots:
- fig, axes = _subplots(naxes=self.nseries,
+ fig, axes = _subplots(naxes=self.nplots,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
@@ -1013,6 +1134,10 @@ def result(self):
Return result axes
"""
if self.subplots:
+ for i in range(self.nplots):
+ ax = self.axes[i]
+ if not self._has_plotted_object(ax):
+ self.axes[i] = self._get_ax_layer(ax, primary=False)
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
@@ -1021,46 +1146,54 @@ def result(self):
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
+
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
- def _compute_plot_data(self):
- data = self.data
-
- if isinstance(data, Series):
- label = self.label
- if label is None and data.name is None:
- label = 'None'
- data = data.to_frame(name=label)
-
- numeric_data = data.convert_objects(datetime=True)._get_numeric_data()
+ def _compute_plot_data(self, data):
+ """Filter non-numeric values, raise TypeError when no numerics"""
+ from pandas.core.frame import DataFrame
+ assert isinstance(data, DataFrame), type(data)
+ data = data.convert_objects(datetime=True)._get_numeric_data()
try:
- is_empty = numeric_data.empty
+ is_empty = data.empty
except AttributeError:
- is_empty = not len(numeric_data)
-
+ is_empty = not len(data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
- 'plot'.format(numeric_data.__class__.__name__))
+ 'plot'.format(data.__class__.__name__))
+ if self._fillna is not None:
+ data = data.fillna(value=self._fillna)
+ return data
+
+ def _make_plot(self, ax, data, axes_num=0, title=None):
+ raise NotImplementedError
- self.data = numeric_data
+ def _iter_data(self, data, keep_index=False):
+ if self.sort_columns:
+ columns = com._try_sort(data.columns)
+ else:
+ columns = data.columns
- def _make_plot(self):
- raise AbstractMethodError(self)
+ for col in columns:
+ if keep_index is True:
+ yield col, data[col]
+ else:
+ yield col, data[col].values
- def _add_table(self):
+ def _add_table(self, ax, data):
if self.table is False:
return
elif self.table is True:
- data = self.data.transpose()
+ data = data
else:
data = self.table
- ax = self._get_ax(0)
+ ax = self._get_ax_layer(ax)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
@@ -1068,7 +1201,7 @@ def _post_plot_logic_common(self, ax, data):
labels = [com.pprint_thing(key) for key in data.index]
labels = dict(zip(range(len(data.index)), labels))
- if self.orientation == 'vertical' or self.orientation is None:
+ if self.orientation == 'vertical':
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
@@ -1130,66 +1263,70 @@ def _apply_axis_properties(self, axis, rot=None, fontsize=None):
@property
def legend_title(self):
- if not isinstance(self.data.columns, MultiIndex):
- name = self.data.columns.name
+ if not isinstance(self.columns, MultiIndex):
+ name = self.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
- stringified = map(com.pprint_thing,
- self.data.columns.names)
+ stringified = map(com.pprint_thing, self.columns.names)
return ','.join(stringified)
- def _add_legend_handle(self, handle, label, index=None):
- if not label is None:
- if self.mark_right and index is not None:
- if self.on_right(index):
- label = label + ' (right)'
- self.legend_handles.append(handle)
- self.legend_labels.append(label)
-
- def _make_legend(self):
- ax, leg = self._get_ax_legend(self.axes[0])
-
- handles = []
- labels = []
- title = ''
-
- if not self.subplots:
- if not leg is None:
- title = leg.get_title().get_text()
- handles = leg.legendHandles
- labels = [x.get_text() for x in leg.get_texts()]
+ def _add_legend_handle(self, ax, handle, label):
+ # always attach legend data to left ax
+ ax = self._get_ax_layer(ax)
+ if not hasattr(ax, '_pd_legend_handles'):
+ ax._pd_legend_handles = []
+ ax._pd_legend_labels = []
- if self.legend:
- if self.legend == 'reverse':
- self.legend_handles = reversed(self.legend_handles)
- self.legend_labels = reversed(self.legend_labels)
-
- handles += self.legend_handles
- labels += self.legend_labels
- if not self.legend_title is None:
- title = self.legend_title
+ if not label is None:
+ if self.mark_right and self.on_right(label):
+ label = label + ' (right)'
+ ax._pd_legend_handles.append(handle)
+ ax._pd_legend_labels.append(label)
- if len(handles) > 0:
- ax.legend(handles, labels, loc='best', title=title)
+ def _make_legend(self, ax, data):
+ if not ax.get_visible():
+ return
- elif self.subplots and self.legend:
- for ax in self.axes:
- if ax.get_visible():
- ax.legend(loc='best')
+ legax, leg = self._get_ax_legend(ax)
+ if not leg is None:
+ title = leg.get_title().get_text()
+ handles = leg.legendHandles
+ labels = [x.get_text() for x in leg.get_texts()]
+ else:
+ handles = []
+ labels = []
+ title = ''
+
+ if self.legend:
+ ax = self._get_ax_layer(ax)
+ if self.legend == 'reverse':
+ ax._pd_legend_handles = reversed(ax._pd_legend_handles)
+ ax._pd_legend_labels = reversed(ax._pd_legend_labels)
+
+ handles += ax._pd_legend_handles
+ labels += ax._pd_legend_labels
+ if not self.legend_title is None:
+ title = self.legend_title
+ if len(handles) > 0:
+ legax.legend(handles, labels, loc='best', title=title)
+ # remove axes properties of drawn legends
+ ax._pd_legend_handles = []
+ ax._pd_legend_labels = []
def _get_ax_legend(self, ax):
- leg = ax.get_legend()
- other_ax = (getattr(ax, 'left_ax', None) or
- getattr(ax, 'right_ax', None))
- other_leg = None
- if other_ax is not None:
- other_leg = other_ax.get_legend()
- if leg is None and other_leg is not None:
- leg = other_leg
- ax = other_ax
- return ax, leg
+ left_ax = self._get_ax_layer(ax)
+ right_ax = self._get_ax_layer(ax, primary=False)
+
+ axes = [left_ax, right_ax]
+ for ax in axes:
+ # search existing legend
+ leg = ax.get_legend()
+ if leg is not None:
+ return ax, leg
+ # if no legend, draw on left axes
+ return left_ax, None
@cache_readonly
def plt(self):
@@ -1198,15 +1335,15 @@ def plt(self):
_need_to_set_index = False
- def _get_xticks(self, convert_period=False):
- index = self.data.index
+ def _get_xticks(self, data, convert_period=False):
+ index = data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
- self.data = self.data.reindex(index=index.sort_values())
- x = self.data.index.to_timestamp()._mpl_repr()
+ data = data.reindex(index=index.sort_values())
+ x = data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
@@ -1216,15 +1353,14 @@ def _get_xticks(self, convert_period=False):
"""
x = index._mpl_repr()
elif is_datetype:
- self.data = self.data.sort_index()
- x = self.data.index._mpl_repr()
+ data = data.sort_index()
+ x = data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
-
- return x
+ return data, x
@classmethod
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
@@ -1246,18 +1382,17 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
args = (x, y)
return ax.plot(*args, **kwds)
- def _get_index_name(self):
- if isinstance(self.data.index, MultiIndex):
- name = self.data.index.names
+ def _get_index_name(self, data):
+ if isinstance(data.index, MultiIndex):
+ name = data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
- name = self.data.index.name
+ name = data.index.name
if name is not None:
name = com.pprint_thing(name)
-
return name
@classmethod
@@ -1268,25 +1403,14 @@ def _get_ax_layer(cls, ax, primary=True):
else:
return getattr(ax, 'right_ax', ax)
- def _get_ax(self, i):
- # get the twinx ax if appropriate
- if self.subplots:
- ax = self.axes[i]
- ax = self._maybe_right_yaxis(ax, i)
- self.axes[i] = ax
- else:
- ax = self.axes[0]
- ax = self._maybe_right_yaxis(ax, i)
-
- ax.get_yaxis().set_visible(True)
- return ax
-
- def on_right(self, i):
+ def on_right(self, label):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
- return self.data.columns[i] in self.secondary_y
+ if label is None:
+ return False
+ return label in self.secondary_y
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
@@ -1337,6 +1461,11 @@ def _parse_errorbars(self, label, err):
return None
from pandas import DataFrame, Series
+ from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
+
+ if isinstance(self.data, (DataFrameGroupBy, SeriesGroupBy)):
+ # there seems to be no good way to map error bars to each group...
+ raise NotImplementedError('Error bars are not supported in groupby plots')
def match_labels(data, e):
e = e.reindex_axis(data.index)
@@ -1344,7 +1473,6 @@ def match_labels(data, e):
# key-matched DataFrame
if isinstance(err, DataFrame):
-
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
@@ -1436,24 +1564,94 @@ class PlanePlot(MPLPlot):
Abstract class for plotting on plane, currently scatter and hexbin.
"""
- _layout_type = 'single'
+ _kind = 'plane'
+ _layout_type = 'horizontal'
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError(self._kind + ' requires and x and y column')
- if com.is_integer(x) and not self.data.columns.holds_integer():
- x = self.data.columns[x]
- if com.is_integer(y) and not self.data.columns.holds_integer():
- y = self.data.columns[y]
+
+ if com.is_integer(x) and not self.columns.holds_integer():
+ x = self.columns[x]
+ if com.is_integer(y) and not self.columns.holds_integer():
+ y = self.columns[y]
+
self.x = x
self.y = y
@property
- def nseries(self):
- return 1
+ def ndim(self):
+ """
+ Return a tuple of (number of subplots, number of unique series)
+ """
+ from pandas.core.frame import DataFrame
+ from pandas.core.groupby import DataFrameGroupBy
+ if isinstance(self.data, DataFrame):
+ ndim = (1, 1)
+ elif isinstance(self.data, DataFrameGroupBy):
+ ngroups = self.data.ngroups
+ if self.subplots:
+ if self.axis == 0:
+ ndim = (ngroups, ngroups)
+ else:
+ msg = 'In {0}, axis must be 0 (subplot by group)'
+ raise ValueError(msg.format(self.kind))
+ else:
+ ndim = (1, ngroups)
+ else: # pragma no cover
+ raise NotImplementedError
+ return ndim
+
+ def _map_axes_to_data(self):
+ """
+ Iterate over target axes and corresponding data to be plotted on the axes
+
+ Returns:
+ -----------
+ ax: Matplotlib axis object
+ name: str
+ name to be used for axes title
+ data: Series, DataFrame or SeriesGroupBy
+ data to be drawn on axes
+ """
+ from pandas.core.frame import DataFrame
+ from pandas.core.groupby import DataFrameGroupBy
+
+ data = self.data
+ if isinstance(data, DataFrame):
+ label = getattr(self, 'label', None)
+ yield self.axes[0], label, data
+ elif isinstance(data, DataFrameGroupBy):
+ if self.subplots:
+ if self.axis == 0:
+ for i, (name, group) in enumerate(data):
+ group = self._compute_plot_data(group)
+ yield self.axes[i], name, group
+ else:
+ msg = 'In {0}, axis must be 0 (subplot by group)'
+ raise ValueError(msg.format(self.kind))
+ else:
+ for i, (name, group) in enumerate(data):
+ group = self._compute_plot_data(group)
+ yield self.axes[0], name, group
+ else: # pragma no cover
+ raise NotImplementedError
+
+ def _get_colors(self, num_colors=None):
+ if num_colors is None:
+ num_colors = self.nseries
+ return _get_standard_colors(num_colors=num_colors,
+ colormap=self.colormap,
+ color=self.c)
def _post_plot_logic(self, ax, data):
+ if not self.subplots:
+ # in subplots, use each group name as title
+ if self.title is None:
+ ax.set_title('')
+ else:
+ ax.set_title(self.title)
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
@@ -1463,19 +1661,17 @@ class ScatterPlot(PlanePlot):
_kind = 'scatter'
def __init__(self, data, x, y, c=None, **kwargs):
- super(ScatterPlot, self).__init__(data, x, y, **kwargs)
- if com.is_integer(c) and not self.data.columns.holds_integer():
- c = self.data.columns[c]
+ PlanePlot.__init__(self, data, x, y, **kwargs)
+ if com.is_integer(c) and not self.columns.holds_integer():
+ c = self.columns[c]
self.c = c
- def _make_plot(self):
+ def _make_plot(self, ax, data, axes_num=0, title=None):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
- x, y, c, data = self.x, self.y, self.c, self.data
- ax = self.axes[0]
-
- c_is_column = com.is_hashable(c) and c in self.data.columns
+ x, y, c = self.x, self.y, self.c
+ c_is_column = com.is_hashable(c) and c in data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
@@ -1484,19 +1680,26 @@ def _make_plot(self):
cmap = self.colormap or 'Greys'
cmap = self.plt.cm.get_cmap(cmap)
- if c is None:
- c_values = self.plt.rcParams['patch.facecolor']
- elif c_is_column:
+ if c_is_column:
c_values = self.data[c].values
+ elif self.nseries > 1 or self.c is None:
+ colors = self._get_colors(num_colors=self.nseries)
+ c_values = colors[axes_num % len(colors)]
else:
- c_values = c
+ # when nseries == 1 and a color has been passed, use it as-is,
+ # because the passed color may be a list of float values
+ c_values = self.c
- if self.legend and hasattr(self, 'label'):
- label = self.label
- else:
- label = None
+ if hasattr(self, 'label'):
+ if com.is_list_like(self.label):
+ title = self.label[axes_num]
+ else:
+ if self.nseries == 1:
+ title = self.label
+
+ # title will be a group label of scatters
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
- label=label, cmap=cmap, **self.kwds)
+ label=title, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
@@ -1504,8 +1707,8 @@ def _make_plot(self):
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
- if label is not None:
- self._add_legend_handle(scatter, label)
+ if title is not None:
+ self._add_legend_handle(ax, scatter, label=title)
else:
self.legend = False
@@ -1514,6 +1717,8 @@ def _make_plot(self):
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
+ if 'color' in self.kwds:
+ err_kwds['color'] = self.kwds['color']
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
@@ -1526,13 +1731,19 @@ def __init__(self, data, x, y, C=None, **kwargs):
C = self.data.columns[C]
self.C = C
- def _make_plot(self):
- x, y, data, C = self.x, self.y, self.data, self.C
- ax = self.axes[0]
+ from pandas.core.groupby import DataFrameGroupBy
+ if isinstance(data, DataFrameGroupBy) and not self.subplots:
+ raise ValueError("To plot DataFrameGroupBy, specify 'suplots=True'")
+
+ def _make_plot(self, ax, data, axes_num=0, title=None):
+ x, y, C = self.x, self.y, self.C
+
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = self.plt.cm.get_cmap(cmap)
- cb = self.kwds.pop('colorbar', True)
+
+ kwds = self.kwds.copy()
+ cb = kwds.pop('colorbar', True)
if C is None:
c_values = None
@@ -1540,12 +1751,12 @@ def _make_plot(self):
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
- **self.kwds)
+ **kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
- def _make_legend(self):
+ def _make_legend(self, ax, data):
pass
@@ -1556,41 +1767,48 @@ class LinePlot(MPLPlot):
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
- if self.stacked:
- self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
- def _is_ts_plot(self):
- # this is slightly deceptive
- return not self.x_compat and self.use_index and self._use_dynamic_x()
+ @property
+ def _fillna(self):
+ if self.stacked:
+ return 0
+ else:
+ return None
- def _use_dynamic_x(self):
+ def _is_ts_plot(self, ax, data):
+ # this is slightly deceptive
from pandas.tseries.plotting import _use_dynamic_x
- return _use_dynamic_x(self._get_ax(0), self.data)
+ return (not self.x_compat and self.use_index and
+ _use_dynamic_x(ax, data))
- def _make_plot(self):
- if self._is_ts_plot():
+ def _make_plot(self, ax, data, axes_num=0, title=None):
+ if self._is_ts_plot(ax, data):
from pandas.tseries.plotting import _maybe_convert_index
- data = _maybe_convert_index(self._get_ax(0), self.data)
-
+ data = _maybe_convert_index(ax, data)
x = data.index # dummy, not used
plotf = self._ts_plot
- it = self._iter_data(data=data, keep_index=True)
+ it = self._iter_data(data, keep_index=True)
else:
- x = self._get_xticks(convert_period=True)
+ data, x = self._get_xticks(data, convert_period=True)
plotf = self._plot
- it = self._iter_data()
+ it = self._iter_data(data)
stacking_id = self._get_stacking_id()
is_errorbar = any(e is not None for e in self.errors.values())
colors = self._get_colors()
+
for i, (label, y) in enumerate(it):
- ax = self._get_ax(i)
+ ax = self._get_ax_by_label(ax, label=label)
kwds = self.kwds.copy()
- style, kwds = self._apply_style_colors(colors, kwds, i, label)
+
+ if len(data.shape) > 1 and data.shape[1] == 1:
+ style, kwds = self._apply_style_colors(colors, kwds, axes_num, label)
+ else:
+ style, kwds = self._apply_style_colors(colors, kwds, i, label)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
@@ -1602,7 +1820,7 @@ def _make_plot(self):
stacking_id=stacking_id,
is_errorbar=is_errorbar,
**kwds)
- self._add_legend_handle(newlines[0], label, index=i)
+ self._add_legend_handle(ax, newlines[0], label=label)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
@@ -1681,12 +1899,13 @@ def _update_stacker(cls, ax, stacking_id, values):
ax._stacker_neg_prior[stacking_id] += values
def _post_plot_logic(self, ax, data):
- condition = (not self._use_dynamic_x()
+ from pandas.tseries.plotting import _use_dynamic_x
+ condition = (not _use_dynamic_x(ax, data)
and data.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
- index_name = self._get_index_name()
+ index_name = self._get_index_name(data)
if condition:
# irregular TS rotated 30 deg. by default
@@ -1701,12 +1920,11 @@ def _post_plot_logic(self, ax, data):
class AreaPlot(LinePlot):
_kind = 'area'
+ _fillna = 0
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
- data = data.fillna(value=0)
- LinePlot.__init__(self, data, **kwargs)
-
+ super(AreaPlot, self).__init__(data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
@@ -1742,17 +1960,16 @@ def _plot(cls, ax, x, y, style=None, column_num=None,
cls._update_stacker(ax, stacking_id, y)
return lines
- def _add_legend_handle(self, handle, label, index=None):
+ def _add_legend_handle(self, ax, handle, label):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
- LinePlot._add_legend_handle(self, handle, label, index=index)
+ super(AreaPlot, self)._add_legend_handle(ax, handle, label=label)
def _post_plot_logic(self, ax, data):
- LinePlot._post_plot_logic(self, ax, data)
-
+ super(AreaPlot, self)._post_plot_logic(ax, data)
if self.ylim is None:
if (data >= 0).all().all():
ax.set_ylim(0, None)
@@ -1764,12 +1981,12 @@ class BarPlot(MPLPlot):
_kind = 'bar'
_default_rot = 90
orientation = 'vertical'
+ _fillna = 0
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
- self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
@@ -1792,8 +2009,6 @@ def __init__(self, data, **kwargs):
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
- self.ax_pos = self.tick_pos - self.tickoffset
-
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
@@ -1808,17 +2023,20 @@ def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
def _start_base(self):
return self.bottom
- def _make_plot(self):
+ def _make_plot(self, ax, data, axes_num=0, title=None):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
- pos_prior = neg_prior = np.zeros(len(self.data))
+ pos_prior = neg_prior = np.zeros(len(data))
K = self.nseries
- for i, (label, y) in enumerate(self._iter_data(fillna=0)):
- ax = self._get_ax(i)
+ tick_pos = np.arange(len(data))
+ ax_pos = tick_pos - self.tickoffset
+
+ for i, (label, y) in enumerate(self._iter_data(data)):
+ ax = self._get_ax_by_label(ax, label=label)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
@@ -1838,41 +2056,46 @@ def _make_plot(self):
if self.subplots:
w = self.bar_width / 2
- rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
+ rect = self._plot(ax, ax_pos + w, y, self.bar_width,
start=start, label=label, log=self.log, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior) + self._start_base
w = self.bar_width / 2
- rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
+ rect = self._plot(ax, ax_pos + w, y, self.bar_width,
start=start, label=label, log=self.log, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
- rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w,
+ rect = self._plot(ax, ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, log=self.log, **kwds)
- self._add_legend_handle(rect, label, index=i)
+ self._add_legend_handle(ax, rect, label=label)
def _post_plot_logic(self, ax, data):
if self.use_index:
str_index = [com.pprint_thing(key) for key in data.index]
else:
- str_index = [com.pprint_thing(key) for key in range(data.shape[0])]
- name = self._get_index_name()
+ str_index = [com.pprint_thing(key) for key in range(len(data))]
+
+ name = self._get_index_name(data)
+
+ tick_pos = np.arange(len(data))
+ ax_pos = tick_pos - self.tickoffset
- s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
- e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
+ s_edge = ax_pos[0] - 0.25 + self.lim_offset
+ e_edge = ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
- self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
+ self._decorate_ticks(ax, name, str_index, tick_pos, s_edge, e_edge)
- def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
+ def _decorate_ticks(self, ax, axis_label, ticklabels, tick_pos,
+ start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
- ax.set_xticks(self.tick_pos)
+ ax.set_xticks(tick_pos)
ax.set_xticklabels(ticklabels)
- if name is not None and self.use_index:
- ax.set_xlabel(name)
+ if axis_label is not None and self.use_index:
+ ax.set_xlabel(axis_label)
class BarhPlot(BarPlot):
@@ -1888,13 +2111,14 @@ def _start_base(self):
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.barh(x, y, w, left=start, log=log, **kwds)
- def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
+ def _decorate_ticks(self, ax, axis_label, ticklabels, tick_pos,
+ start_edge, end_edge):
# horizontal bars
ax.set_ylim((start_edge, end_edge))
- ax.set_yticks(self.tick_pos)
+ ax.set_yticks(tick_pos)
ax.set_yticklabels(ticklabels)
- if name is not None and self.use_index:
- ax.set_ylabel(name)
+ if axis_label is not None and self.use_index:
+ ax.set_ylabel(axis_label)
class HistPlot(LinePlot):
@@ -1909,14 +2133,20 @@ def __init__(self, data, bins=10, bottom=0, **kwargs):
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
- values = (self.data.convert_objects(datetime=True)
- ._get_numeric_data())
+ from pandas.core.groupby import SeriesGroupBy, DataFrameGroupBy
+ if isinstance(self.data, (SeriesGroupBy, DataFrameGroupBy)):
+ data = self.data.obj
+ else:
+ data = self.data
+ data = data.convert_objects(datetime=True)
+ values = data._get_numeric_data().values
values = np.ravel(values)
values = values[~com.isnull(values)]
+ range = self.kwds.get('range', None)
+ weights = self.kwds.get('weights', None)
hist, self.bins = np.histogram(values, bins=self.bins,
- range=self.kwds.get('range', None),
- weights=self.kwds.get('weights', None))
+ range=range, weights=weights)
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
@@ -1926,7 +2156,6 @@ def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,
stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
- y = y[~com.isnull(y)]
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds['label'])
@@ -1935,26 +2164,32 @@ def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,
cls._update_stacker(ax, stacking_id, n)
return patches
- def _make_plot(self):
+ def _make_plot(self, ax, data, axes_num=0, title=None):
colors = self._get_colors()
stacking_id = self._get_stacking_id()
- for i, (label, y) in enumerate(self._iter_data()):
- ax = self._get_ax(i)
-
+ for i, (label, y) in enumerate(self._iter_data(data)):
+ ax = self._get_ax_by_label(ax, label=label)
kwds = self.kwds.copy()
label = com.pprint_thing(label)
kwds['label'] = label
- style, kwds = self._apply_style_colors(colors, kwds, i, label)
+ if data.shape[1] == 1:
+ # temp until GH 9894
+ style, kwds = self._apply_style_colors(colors, kwds, axes_num, label)
+ else:
+ style, kwds = self._apply_style_colors(colors, kwds, i, label)
+
if style is not None:
kwds['style'] = style
+ # remove NaN values here, before the kde ind is calculated
+ y = remove_na(y)
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i,
stacking_id=stacking_id, **kwds)
- self._add_legend_handle(artists[0], label, index=i)
+ self._add_legend_handle(ax, artists[0], label)
def _make_plot_keywords(self, kwds, y):
"""merge BoxPlot/KdePlot properties to passed kwds"""
@@ -2004,8 +2239,6 @@ def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
- y = remove_na(y)
-
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=bw_method)
else:
@@ -2031,12 +2264,17 @@ def _post_plot_logic(self, ax, data):
class PiePlot(MPLPlot):
_kind = 'pie'
_layout_type = 'horizontal'
+ _fillna = 0
+
+ def __init__(self, data, **kwargs):
+ MPLPlot.__init__(self, data, **kwargs)
- def __init__(self, data, kind=None, **kwargs):
- data = data.fillna(value=0)
- if (data < 0).any().any():
- raise ValueError("{0} doesn't allow negative values".format(kind))
- MPLPlot.__init__(self, data, kind=kind, **kwargs)
+ from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
+ if isinstance(data, SeriesGroupBy) and not self.subplots:
+ raise ValueError("To plot SeriesGroupBy, specify 'suplots=True'")
+ if isinstance(data, DataFrameGroupBy):
+ msg = "plot kind {0} cannot be used for DataFrameGroupBy".format(self._kind)
+ raise ValueError(msg)
def _args_adjust(self):
self.grid = False
@@ -2044,28 +2282,37 @@ def _args_adjust(self):
self.logx = False
self.loglog = False
+ @property
+ def ndim(self):
+ """
+ Return a tuple of (number of subplots, number of unique series)
+ """
+ ndim = super(PiePlot, self).ndim
+ # subplots by series
+ return ndim[1], ndim[1]
+
def _validate_color_args(self):
pass
- def _make_plot(self):
+ def _make_plot(self, ax, data, axes_num=0, title=None):
colors = self._get_colors(num_colors=len(self.data), color_kwds='colors')
- self.kwds.setdefault('colors', colors)
+ self.kwds.setdefault('colors', self._get_colors(num_colors=len(data),
+ color_kwds='colors'))
+
+ def blank_labeler(label, value):
+ return '' if value == 0 else label
- for i, (label, y) in enumerate(self._iter_data()):
- ax = self._get_ax(i)
+ for i, (label, y) in enumerate(self._iter_data(data)):
+ if (y < 0).any():
+ raise ValueError("{0} doesn't allow negative values".format(self.kind))
+ ax = self._get_ax_by_label(ax, label=label)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
- def blank_labeler(label, value):
- if value == 0:
- return ''
- else:
- return label
-
- idx = [com.pprint_thing(v) for v in self.data.index]
+ idx = [com.pprint_thing(v) for v in data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
@@ -2089,14 +2336,18 @@ def blank_labeler(label, value):
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
- for p, l in zip(patches, leglabels):
- self._add_legend_handle(p, l)
+ if blabels is None:
+ for p, l in zip(patches, leglabels):
+ self._add_legend_handle(ax, p, label=l)
+ else:
+ for p, l in zip(patches, leglabels):
+ if l in blabels:
+ self._add_legend_handle(ax, p, label=l)
class BoxPlot(LinePlot):
_kind = 'box'
_layout_type = 'horizontal'
-
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
@@ -2109,6 +2360,8 @@ def __init__(self, data, return_type=None, **kwargs):
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
+ self._return_obj = compat.OrderedDict()
+
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots shows last column label
@@ -2187,36 +2440,28 @@ def maybe_color_bp(self, bp):
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
- def _make_plot(self):
- if self.subplots:
- self._return_obj = compat.OrderedDict()
+ def _make_plot(self, ax, data, axes_num=0, title=None):
+ # Boxplot doesn't use _iter_data, thus called explicitly
+ data = self._compute_plot_data(data)
- for i, (label, y) in enumerate(self._iter_data()):
- ax = self._get_ax(i)
- kwds = self.kwds.copy()
+ y = data.values.T
+ ax = self._get_ax_by_label(ax, 0)
+ kwds = self.kwds.copy()
- ret, bp = self._plot(ax, y, column_num=i,
- return_type=self.return_type, **kwds)
- self.maybe_color_bp(bp)
- self._return_obj[label] = ret
+ ret, bp = self._plot(ax, y, column_num=0,
+ return_type=self.return_type,**kwds)
+ self.maybe_color_bp(bp)
- label = [com.pprint_thing(label)]
- self._set_ticklabels(ax, label)
+ if self.subplots:
+ self._return_obj[title] = ret
else:
- y = self.data.values.T
- ax = self._get_ax(0)
- kwds = self.kwds.copy()
-
- ret, bp = self._plot(ax, y, column_num=0,
- return_type=self.return_type, **kwds)
- self.maybe_color_bp(bp)
self._return_obj = ret
- labels = [l for l, y in self._iter_data()]
- labels = [com.pprint_thing(l) for l in labels]
- if not self.use_index:
- labels = [com.pprint_thing(key) for key in range(len(labels))]
- self._set_ticklabels(ax, labels)
+ labels = [l for l, y in self._iter_data(data)]
+ labels = [com.pprint_thing(l) for l in labels]
+ if not self.use_index:
+ labels = [com.pprint_thing(key) for key in range(len(labels))]
+ self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
@@ -2224,7 +2469,7 @@ def _set_ticklabels(self, ax, labels):
else:
ax.set_yticklabels(labels)
- def _make_legend(self):
+ def _make_legend(self, ax, data):
pass
def _post_plot_logic(self, ax, data):
@@ -2275,7 +2520,7 @@ def _plot(data, x=None, y=None, subplots=False,
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
- raise ValueError("plot kind %r can only be used for data frames"
+ raise ValueError("plot kind %r can only be used for DataFrame"
% kind)
elif kind in _series_kinds:
@@ -2512,6 +2757,68 @@ def plot_series(data, kind='line', ax=None, # Series unique
label=label, secondary_y=secondary_y,
**kwds)
+@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
+def plot_grouped_series(grouped=None, subplots=False, sharex=True,
+ sharey=False, use_index=True, figsize=None, grid=None,
+ legend=True, rot=None, ax=None, style=None, title=None,
+ xlim=None, ylim=None, logx=False, logy=False, xticks=None,
+ yticks=None, kind='line', sort_columns=False, fontsize=None,
+ secondary_y=False, transpose=False, **kwds):
+
+ kind = _get_standard_kind(kind.lower().strip())
+ if kind in _all_kinds:
+ klass = _plot_klass[kind]
+ else:
+ raise ValueError('Invalid chart type given %s' % kind)
+
+ plot_obj = klass(grouped, kind=kind, subplots=subplots, rot=rot,
+ legend=legend, ax=ax, style=style, fontsize=fontsize,
+ use_index=use_index, sharex=sharex, sharey=sharey,
+ xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
+ title=title, grid=grid, figsize=figsize, logx=logx,
+ logy=logy, sort_columns=sort_columns,
+ secondary_y=secondary_y, **kwds)
+
+ plot_obj.generate()
+ plot_obj.draw()
+ return plot_obj.result
+
+
+@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
+def plot_grouped_frame(grouped=None, x=None, y=None, subplots=True, sharex=True,
+ sharey=False, use_index=True, figsize=None, grid=None,
+ legend=True, rot=None, ax=None, style=None, title=None,
+ xlim=None, ylim=None, logx=False, logy=False, xticks=None,
+ yticks=None, kind='line', sort_columns=False, fontsize=None,
+ secondary_y=False, transpose=False, **kwds):
+
+ kind = _get_standard_kind(kind.lower().strip())
+ if kind in _all_kinds:
+ klass = _plot_klass[kind]
+ else:
+ raise ValueError('Invalid chart type given %s' % kind)
+
+ if kind in _dataframe_kinds:
+ plot_obj = klass(grouped, x=x, y=y, kind=kind, subplots=subplots,
+ rot=rot,legend=legend, ax=ax, style=style,
+ fontsize=fontsize, use_index=use_index, sharex=sharex,
+ sharey=sharey, xticks=xticks, yticks=yticks,
+ xlim=xlim, ylim=ylim, title=title, grid=grid,
+ figsize=figsize, logx=logx, logy=logy,
+ sort_columns=sort_columns, secondary_y=secondary_y,
+ **kwds)
+ else:
+ plot_obj = klass(grouped, kind=kind, subplots=subplots, rot=rot,
+ legend=legend, ax=ax, style=style, fontsize=fontsize,
+ use_index=use_index, sharex=sharex, sharey=sharey,
+ xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
+ title=title, grid=grid, figsize=figsize, logx=logx,
+ logy=logy, sort_columns=sort_columns,
+ secondary_y=secondary_y, **kwds)
+
+ plot_obj.generate()
+ plot_obj.draw()
+ return plot_obj.result
_shared_docs['boxplot'] = """
Make a box plot from DataFrame column optionally grouped by some columns or
| Allow to support `DataFrameGroupBy.plot` and `SeriesGroupBy.plot` covering current `DataFrame.hist(by=XXX)` and `DataFrame.boxplot(by=XXX)` usecases. Implementation is incomplete but covering most of usecases / behaviours.
- [x] fix `FIXME`s
- [x] Split to `plot_seriesgroupby` and `plot_dataramegroupby` specifying default values.
- [x] Refactoring `plot_frame` and `plot_series`, modify `docstring`. (#8037)
- [x] Make line subplot to accept multiple colors (#9894)
- [x] secondary_y
- [x] error bars (raise `NotImplementedError`)
- [x] split some cleanups required in prior to this PR (#10717)
- [x] doc
- [x] tests
## Following gist contains all patterns of outputs.
- https://gist.github.com/sinhrks/ad805b6689140930d99a
## Rendered doc

| https://api.github.com/repos/pandas-dev/pandas/pulls/8018 | 2014-08-13T15:39:45Z | 2017-03-20T13:49:20Z | null | 2017-03-20T13:55:29Z |
fix issue #8000 - interpolation extrapolates over trailing missing values | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 48fb75f59ac34..07b0b8da55786 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1543,7 +1543,8 @@ def _interp_limit(invalid, limit):
inds = inds[firstIndex:]
result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
- yvalues[firstIndex:][valid])
+ yvalues[firstIndex:][valid],
+ np.nan, np.nan)
if limit:
result[violate_limit] = np.nan
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 001d6f489e934..63d91a430b4c7 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -638,6 +638,12 @@ def test_interp_datetime64(self):
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.], index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
+
+ def test_interp_trailing_nan(self):
+ s = Series([np.nan, 1, np.nan, 3, np.nan])
+ result = s.interpolate()
+ expected = Series([np.nan, 1, 2, 3, np.nan])
+ assert_series_equal(result, expected)
def test_describe(self):
_ = self.series.describe()
| This pull request is in response to [issue #8000](https://github.com/pydata/pandas/issues/8000).
Changes to core/common.py add np.nan as the default value for missing values to the left and right non-missing values during interpolation. This prevents DataFrame.interpolate() from extrapolating the last non-missing value over all trailing missing values (the default).
Changes to tests/test_generic.py add test coverage to the above change. A passing test is where an interpolated series with a trailing missing value maintains that trailing missing value after interpolation.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8013 | 2014-08-13T02:04:24Z | 2015-07-28T21:56:26Z | null | 2022-10-13T00:16:06Z |
WIP: generalize categorical to N-dimensions | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index cb6f200b259db..4ed0faef398ee 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -101,7 +101,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1):
Parameters
----------
- values : ndarray (1-d)
+ values : ndarray
Sequence
sort : boolean, default False
Sort by values
@@ -129,7 +129,8 @@ def factorize(values, sort=False, order=None, na_sentinel=-1):
table = hash_klass(len(vals))
uniques = vec_klass()
- labels = table.get_labels(vals, uniques, 0, na_sentinel)
+ labels = table.get_labels(
+ vals.ravel(), uniques, 0, na_sentinel).reshape(vals.shape)
labels = com._ensure_platform_int(labels)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index d049a6d64aac3..04d395e35c4f8 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -269,10 +269,23 @@ def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False,
self.levels = levels
self.name = name
+ def _replace_codes(self, codes):
+ """
+ Returns a new Categorical with replaced codes but the same levels and
+ metadata
+
+ If codes is a scalar, just return that level.
+ """
+ codes = np.asarray(codes)
+ if np.isscalar(codes):
+ return self.levels[codes]
+ else:
+ return Categorical(codes, levels=self.levels, ordered=self.ordered,
+ name=self.name, fastpath=True)
+
def copy(self):
""" Copy constructor. """
- return Categorical(values=self._codes.copy(),levels=self.levels,
- name=self.name, ordered=self.ordered, fastpath=True)
+ return self._replace_codes(self._codes.copy())
@classmethod
def from_array(cls, data):
@@ -431,8 +444,19 @@ def shape(self):
-------
shape : tuple
"""
+ return self._codes.shape
- return tuple([len(self._codes)])
+ @property
+ def size(self):
+ """Size of the Categorical.
+
+ For internal compatibility with numpy arrays.
+
+ Returns
+ -------
+ size : int
+ """
+ return self._codes.size
def __array__(self, dtype=None):
""" The numpy array interface.
@@ -442,11 +466,12 @@ def __array__(self, dtype=None):
values : numpy array
A numpy array of the same dtype as categorical.levels.dtype
"""
- return com.take_1d(self.levels.values, self._codes)
+ return com.take_1d(
+ self.levels.values, self._codes.ravel()).reshape(self.shape)
@property
def T(self):
- return self
+ return self._replace_codes(self._codes.T)
def get_values(self):
""" Return the values.
@@ -558,7 +583,7 @@ def ravel(self, order='C'):
-------
raveled : numpy array
"""
- return np.array(self)
+ return np.array(self._replace_codes(self._codes.ravel(order=order)))
def view(self):
"""Return a view of myself.
@@ -628,9 +653,8 @@ def take_nd(self, indexer, allow_fill=True, fill_value=None):
if allow_fill and fill_value is None:
fill_value = np.nan
- values = com.take_1d(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
- result = Categorical(values=values, levels=self.levels, ordered=self.ordered,
- name=self.name, fastpath=True)
+ codes = com.take_1d(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
+ result = self._replace_codes(codes)
return result
take = take_nd
@@ -646,8 +670,7 @@ def _slice(self, slicer):
slicer = slicer[1]
_codes = self._codes[slicer]
- return Categorical(values=_codes,levels=self.levels, ordered=self.ordered,
- name=self.name, fastpath=True)
+ return self._replace_codes(_codes)
def __len__(self):
return len(self._codes)
@@ -738,15 +761,11 @@ def __unicode__(self):
def __getitem__(self, key):
""" Return an item. """
- if isinstance(key, (int, np.integer)):
- i = self._codes[key]
- if i == -1:
- return np.nan
- else:
- return self.levels[i]
- else:
- return Categorical(values=self._codes[key], levels=self.levels,
- ordered=self.ordered, fastpath=True)
+ return self._replace_codes(self._codes[key])
+ # if np.isscalar(codes):
+ # return self.levels[codes]
+ # else:
+ # return self._replace_codes(codes)
def __setitem__(self, key, value):
""" Item assignment.
@@ -760,40 +779,22 @@ def __setitem__(self, key, value):
"""
- # require identical level set
if isinstance(value, Categorical):
+ # require identical level set
if not value.levels.equals(self.levels):
raise ValueError("cannot set a Categorical with another, without identical levels")
-
- rvalue = value if com.is_list_like(value) else [value]
- to_add = Index(rvalue)-self.levels
- if len(to_add):
- raise ValueError("cannot setitem on a Categorical with a new level,"
- " set the levels first")
-
- # set by position
- if isinstance(key, (int, np.integer)):
- pass
-
- # tuple of indexers
- elif isinstance(key, tuple):
-
- # only allow 1 dimensional slicing, but can
- # in a 2-d case be passd (slice(None),....)
- if len(key) == 2:
- if not _is_null_slice(key[0]):
- raise AssertionError("invalid slicing for a 1-ndim categorical")
- key = key[1]
- elif len(key) == 1:
- key = key[0]
- else:
- raise AssertionError("invalid slicing for a 1-ndim categorical")
+ # we can safely assign codes directly
+ self._codes[key] = value.codes
else:
- key = self._codes[key]
-
- lindexer = self.levels.get_indexer(rvalue)
- self._codes[key] = lindexer
+ value = np.asarray(value)
+ flat_value = value.ravel()
+ to_add = Index(flat_value) - self.levels
+ if len(to_add):
+ raise ValueError("cannot setitem on a Categorical with a new level,"
+ " set the levels first")
+ lindexer = self.levels.get_indexer(flat_value)
+ self._codes[key] = lindexer.reshape(value.shape)
#### reduction ops ####
def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
@@ -871,9 +872,8 @@ def mode(self):
import pandas.hashtable as htable
good = self._codes != -1
- result = Categorical(sorted(htable.mode_int64(com._ensure_int64(self._codes[good]))),
- levels=self.levels,ordered=self.ordered, name=self.name,
- fastpath=True)
+ result = self._replace_codes(
+ sorted(htable.mode_int64(com._ensure_int64(self._codes[good]))))
return result
def unique(self):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f649baeb16278..4bbdc789922e3 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1649,18 +1649,6 @@ def _validate_merge(self, blocks):
return True
- def to_native_types(self, slicer=None, na_rep='', **kwargs):
- """ convert to our native types format, slicing if desired """
-
- values = self.values
- if slicer is not None:
- # Categorical is always one dimension
- values = values[slicer]
- values = np.array(values, dtype=object)
- mask = isnull(values)
- values[mask] = na_rep
- # Blocks.to_native_type returns list of lists, but we are always only a list
- return [values.tolist()]
class DatetimeBlock(Block):
__slots__ = ()
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 6353ad53a88ef..c11f4ae38f242 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -95,6 +95,11 @@ def test_mixed(self):
self.assert_numpy_array_equal(labels, np.array([ 2, 2, -1, 3, 0, 1],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([3.14, np.inf, 'A', 'B'], dtype=object))
+ def test_multidimensional(self):
+ labels, uniques = algos.factorize([['a', 'b'], ['a', 'c']])
+ self.assert_numpy_array_equal(labels, np.array([[0, 1], [0, 2]], dtype=np.int64))
+ self.assert_numpy_array_equal(uniques, np.array(['a', 'b', 'c'], dtype=object))
+
def test_datelike(self):
# M8
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 0aa7f2b67c7c6..d933bdf0ae454 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -222,7 +222,8 @@ def test_print(self):
self.assertEqual(actual, expected)
def test_big_print(self):
- factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat', fastpath=True)
+ factor = Categorical(np.array([0,1,2,0,1,2]*100), ['a', 'b', 'c'],
+ name='cat', fastpath=True)
expected = [" a", " b", " c", " a", " b", " c", " a", " b", " c",
" a", " b", " c", " a", "...", " c", " a", " b", " c",
" a", " b", " c", " a", " b", " c", " a", " b", " c",
@@ -496,6 +497,26 @@ def test_slicing_directly(self):
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.levels, expected.levels)
+ def test_ndimensional_values(self):
+ exp_arr = np.array([['a', 'b'], ['c', 'b']], dtype=object)
+ cat = Categorical(exp_arr)
+
+ self.assertEqual(cat.shape, (2, 2))
+ self.assert_numpy_array_equal(cat.__array__(), exp_arr)
+ self.assert_numpy_array_equal(cat.T, exp_arr.T)
+ self.assert_numpy_array_equal(cat.ravel(), exp_arr.ravel())
+
+ # test indexing
+ self.assertEqual(cat[0, 0], 'a')
+ self.assert_numpy_array_equal(cat[0], exp_arr[0])
+ self.assert_numpy_array_equal(cat[:, :2], exp_arr)
+ self.assert_numpy_array_equal(cat[[0, 1], [0, 1]], np.diag(exp_arr))
+ self.assert_numpy_array_equal(cat[0, :], ['a', 'b'])
+ self.assert_numpy_array_equal(cat[0, [0, 1]], ['a', 'b'])
+
+ # TODO: repr, __setitem__, take, min, max, order, describe, _cat_compare_op
+
+
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
| https://api.github.com/repos/pandas-dev/pandas/pulls/8012 | 2014-08-13T00:59:30Z | 2014-08-13T00:59:49Z | null | 2014-08-21T19:56:41Z | |
fix issue #8000 | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 48fb75f59ac34..07b0b8da55786 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1543,7 +1543,8 @@ def _interp_limit(invalid, limit):
inds = inds[firstIndex:]
result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
- yvalues[firstIndex:][valid])
+ yvalues[firstIndex:][valid],
+ np.nan, np.nan)
if limit:
result[violate_limit] = np.nan
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 5e91adbe1a2fa..d909420e107e7 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -389,6 +389,12 @@ def test_groupby():
for k, v in grouped:
assert v == expected[k]
+
+
+def test_interpolate_linear():
+ a = Series([np.nan, 1, np.nan, 3, np.nan]))
+ b = a.interpolate()
+ assert(b[4] == np.nan)
def test_is_list_like():
| This pull request is in response to [issue #8000](https://github.com/pydata/pandas/issues/8000).
Changes to core/common.py add np.nan as the default value for missing values to the left and right non-missing values during interpolation. This prevents DataFrame.interpolate() from extrapolating the last non-missing value over all trailing missing values (the default).
Changes to tests/test_common.py add test coverage to the above change. A passing test is where an interpolated series with a trailing missing value maintains that trailing missing value after interpolation.
| https://api.github.com/repos/pandas-dev/pandas/pulls/8010 | 2014-08-12T23:31:03Z | 2014-08-13T02:04:55Z | null | 2014-08-13T13:57:02Z |
Add sharex and sharey to grouped boxplot | diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 7d0eaea5b36d6..d7661e2eb4ca7 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -2685,7 +2685,7 @@ def plot_group(group, ax):
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
- layout=None, **kwds):
+ layout=None, sharex=False, sharey=False, **kwds):
"""
Make box plots from DataFrameGroupBy data.
@@ -2703,6 +2703,8 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
+ sharex: boolean, default False
+ sharey: boolean, default False
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
@@ -2732,7 +2734,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
naxes = len(grouped)
nrows, ncols = _get_layout(naxes, layout=layout)
fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, squeeze=False,
- ax=ax, sharex=False, sharey=True, figsize=figsize)
+ ax=ax, sharex=sharex, sharey=sharey, figsize=figsize)
axes = _flatten(axes)
ret = compat.OrderedDict()
| sharex and sharey should be settable and by default False in a grouped boxplot. E.g.
```
n [97]: df = DataFrame(np.random.rand(10,2), columns=['Col1', 'Col2'] )
In [98]: df['X'] =['A','A','A','A','A','B','B','B','B','B']
In [99]: df.iloc[0:5, :2] += 10000
In [100]: df.groupby('X').boxplot() # this is unreadable
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/8008 | 2014-08-12T21:59:27Z | 2015-05-09T16:20:05Z | null | 2022-10-13T00:16:06Z |
CLN/DOC/TST: Categorical fixups (GH7768) | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 985f112979a7e..6b1bfdf7b241d 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -66,7 +66,8 @@ Creating a ``DataFrame`` by passing a dict of objects that can be converted to s
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
- 'E' : 'foo' })
+ 'E' : pd.Categorical(["test","train","test","train"]),
+ 'F' : 'foo' })
df2
Having specific :ref:`dtypes <basics.dtypes>`
@@ -635,6 +636,32 @@ the quarter end:
ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
ts.head()
+Categoricals
+------------
+
+Since version 0.15, pandas can include categorical data in a ``DataFrame``. For full docs, see the
+:ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>` .
+
+.. ipython:: python
+
+ df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
+
+ # convert the raw grades to a categorical
+ df["grade"] = pd.Categorical(df["raw_grade"])
+
+ # Alternative: df["grade"] = df["raw_grade"].astype("category")
+ df["grade"]
+
+ # Rename the levels
+ df["grade"].cat.levels = ["very good", "good", "very bad"]
+
+ # Reorder the levels and simultaneously add the missing levels
+ df["grade"].cat.reorder_levels(["very bad", "bad", "medium", "good", "very good"])
+ df["grade"]
+ df.sort("grade")
+ df.groupby("grade").size()
+
+
Plotting
--------
diff --git a/doc/source/api.rst b/doc/source/api.rst
index feb4da700354d..017739adbc8b1 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -521,51 +521,33 @@ Categorical
.. currentmodule:: pandas.core.categorical
If the Series is of dtype ``category``, ``Series.cat`` can be used to access the the underlying
-``Categorical``. This data type is similar to the otherwise underlying numpy array
-and has the following usable methods and properties (all available as
-``Series.cat.<method_or_property>``).
-
+``Categorical``. This accessor is similar to the ``Series.dt`` or ``Series.str``and has the
+following usable methods and properties (all available as ``Series.cat.<method_or_property>``).
.. autosummary::
:toctree: generated/
- Categorical
- Categorical.from_codes
Categorical.levels
Categorical.ordered
Categorical.reorder_levels
Categorical.remove_unused_levels
- Categorical.min
- Categorical.max
- Categorical.mode
- Categorical.describe
-``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts
-the Categorical back to a numpy array, so levels and order information is not preserved!
+The following methods are considered API when using ``Categorical`` directly:
.. autosummary::
:toctree: generated/
- Categorical.__array__
+ Categorical
+ Categorical.from_codes
+ Categorical.codes
-To create compatibility with `pandas.Series` and `numpy` arrays, the following (non-API) methods
-are also introduced.
+``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts
+the Categorical back to a numpy array, so levels and order information is not preserved!
.. autosummary::
:toctree: generated/
- Categorical.from_array
- Categorical.get_values
- Categorical.copy
- Categorical.dtype
- Categorical.ndim
- Categorical.sort
- Categorical.equals
- Categorical.unique
- Categorical.order
- Categorical.argsort
- Categorical.fillna
-
+ Categorical.__array__
Plotting
~~~~~~~~
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index c08351eb87a79..6ed1a7982a64b 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -90,6 +90,7 @@ By using some special functions:
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)
df.head(10)
+See :ref:`documentation <reshaping.tile.cut>` for :func:`~pandas.cut`.
`Categoricals` have a specific ``category`` :ref:`dtype <basics.dtypes>`:
@@ -331,6 +332,57 @@ Operations
The following operations are possible with categorical data:
+Comparing `Categoricals` with other objects is possible in two cases:
+
+ * comparing a `Categorical` to another `Categorical`, when `level` and `ordered` is the same or
+ * comparing a `Categorical` to a scalar.
+
+All other comparisons will raise a TypeError.
+
+.. ipython:: python
+
+ cat = pd.Series(pd.Categorical([1,2,3], levels=[3,2,1]))
+ cat_base = pd.Series(pd.Categorical([2,2,2], levels=[3,2,1]))
+ cat_base2 = pd.Series(pd.Categorical([2,2,2]))
+
+ cat
+ cat_base
+ cat_base2
+
+Comparing to a categorical with the same levels and ordering or to a scalar works:
+
+.. ipython:: python
+
+ cat > cat_base
+ cat > 2
+
+This doesn't work because the levels are not the same:
+
+.. ipython:: python
+
+ try:
+ cat > cat_base2
+ except TypeError as e:
+ print("TypeError: " + str(e))
+
+.. note::
+
+ Comparisons with `Series`, `np.array` or a `Categorical` with different levels or ordering
+ will raise an `TypeError` because custom level ordering would result in two valid results:
+ one with taking in account the ordering and one without. If you want to compare a `Categorical`
+ with such a type, you need to be explicit and convert the `Categorical` to values:
+
+.. ipython:: python
+
+ base = np.array([1,2,3])
+
+ try:
+ cat > base
+ except TypeError as e:
+ print("TypeError: " + str(e))
+
+ np.asarray(cat) > base
+
Getting the minimum and maximum, if the categorical is ordered:
.. ipython:: python
@@ -489,34 +541,38 @@ but the levels of these `Categoricals` need to be the same:
.. ipython:: python
- cat = pd.Categorical(["a","b"], levels=["a","b"])
- vals = [1,2]
- df = pd.DataFrame({"cats":cat, "vals":vals})
- res = pd.concat([df,df])
- res
- res.dtypes
+ cat = pd.Categorical(["a","b"], levels=["a","b"])
+ vals = [1,2]
+ df = pd.DataFrame({"cats":cat, "vals":vals})
+ res = pd.concat([df,df])
+ res
+ res.dtypes
- df_different = df.copy()
- df_different["cats"].cat.levels = ["a","b","c"]
+In this case the levels are not the same and so an error is raised:
- try:
- pd.concat([df,df])
- except ValueError as e:
- print("ValueError: " + str(e))
+.. ipython:: python
+
+ df_different = df.copy()
+ df_different["cats"].cat.levels = ["a","b","c"]
+ try:
+ pd.concat([df,df_different])
+ except ValueError as e:
+ print("ValueError: " + str(e))
The same applies to ``df.append(df)``.
Getting Data In/Out
-------------------
-Writing data (`Series`, `Frames`) to a HDF store that contains a ``category`` dtype will currently raise ``NotImplementedError``.
+Writing data (`Series`, `Frames`) to a HDF store that contains a ``category`` dtype will currently
+raise ``NotImplementedError``.
Writing to a CSV file will convert the data, effectively removing any information about the
`Categorical` (levels and ordering). So if you read back the CSV file you have to convert the
relevant columns back to `category` and assign the right levels and level ordering.
.. ipython:: python
- :suppress:
+ :suppress:
from pandas.compat import StringIO
@@ -548,7 +604,7 @@ default not included in computations. See the :ref:`Missing Data section
<missing_data>`
There are two ways a `np.nan` can be represented in `Categorical`: either the value is not
-available or `np.nan` is a valid level.
+available ("missing value") or `np.nan` is a valid level.
.. ipython:: python
@@ -560,9 +616,25 @@ available or `np.nan` is a valid level.
s2.cat.levels = [1,2,np.nan]
s2
# three levels, np.nan included
- # Note: as int arrays can't hold NaN the levels were converted to float
+ # Note: as int arrays can't hold NaN the levels were converted to object
s2.cat.levels
+.. note::
+ Missing value methods like ``isnull`` and ``fillna`` will take both missing values as well as
+ `np.nan` levels into account:
+
+.. ipython:: python
+
+ c = pd.Categorical(["a","b",np.nan])
+ c.levels = ["a","b",np.nan]
+ # will be inserted as a NA level:
+ c[0] = np.nan
+ s = pd.Series(c)
+ s
+ pd.isnull(s)
+ s.fillna("a")
+
+
Gotchas
-------
@@ -579,7 +651,7 @@ object and not as a low level `numpy` array dtype. This leads to some problems.
try:
np.dtype("category")
except TypeError as e:
- print("TypeError: " + str(e))
+ print("TypeError: " + str(e))
dtype = pd.Categorical(["a"]).dtype
try:
@@ -587,7 +659,10 @@ object and not as a low level `numpy` array dtype. This leads to some problems.
except TypeError as e:
print("TypeError: " + str(e))
- # dtype comparisons work:
+Dtype comparisons work:
+
+.. ipython:: python
+
dtype == np.str_
np.str_ == dtype
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 8d718bacd262b..bcdb9ada15bb3 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -505,3 +505,10 @@ handling of NaN:
pd.factorize(x, sort=True)
np.unique(x, return_inverse=True)[::-1]
+
+.. note::
+ If you just want to handle one column as a categorical variable (like R's factor),
+ you can use ``df["cat_col"] = pd.Categorical(df["col"])`` or
+ ``df["cat_col"] = df["col"].astype("category")``. For full docs on :class:`~pandas.Categorical`,
+ see the :ref:`Categorical introduction <categorical>` and the
+ :ref:`API documentation <api.categorical>`. This feature was introduced in version 0.15.
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 0223a11d8a011..1186c6e4ade39 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -283,9 +283,10 @@ Categoricals in Series/DataFrame
:class:`~pandas.Categorical` can now be included in `Series` and `DataFrames` and gained new
methods to manipulate. Thanks to Jan Schultz for much of this API/implementation. (:issue:`3943`, :issue:`5313`, :issue:`5314`,
-:issue:`7444`, :issue:`7839`, :issue:`7848`, :issue:`7864`, :issue:`7914`).
+:issue:`7444`, :issue:`7839`, :issue:`7848`, :issue:`7864`, :issue:`7914`, :issue:`7768`, :issue:`8006`, :issue:`3678`).
-For full docs, see the :ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`.
+For full docs, see the :ref:`Categorical introduction <categorical>` and the
+:ref:`API documentation <api.categorical>`.
.. ipython:: python
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index c9674aea4a715..28cbdc0d5634e 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -2,12 +2,13 @@
import numpy as np
from warnings import warn
+import types
from pandas import compat
from pandas.compat import u
-from pandas.core.algorithms import factorize, unique
-from pandas.core.base import PandasObject
+from pandas.core.algorithms import factorize
+from pandas.core.base import PandasObject, PandasDelegate
from pandas.core.index import Index, _ensure_index
from pandas.core.indexing import _is_null_slice
from pandas.tseries.period import PeriodIndex
@@ -18,16 +19,36 @@
def _cat_compare_op(op):
def f(self, other):
- if isinstance(other, (Categorical, np.ndarray)):
- values = np.asarray(self)
- f = getattr(values, op)
- return f(np.asarray(other))
- else:
+ # On python2, you can usually compare any type to any type, and Categoricals can be
+ # seen as a custom type, but having different results depending on whether the levels are
+ # the same or not is kind of insane, so be a bit stricter here and use the python3 idea
+ # of comparing only things of equal type.
+ if not self.ordered:
+ if op in ['__lt__', '__gt__','__le__','__ge__']:
+ raise TypeError("Unordered Categoricals can only compare equality or not")
+ if isinstance(other, Categorical):
+ # Two Categoricals can only be compared if the levels are the same
+ if (len(self.levels) != len(other.levels)) or not ((self.levels == other.levels).all()):
+ raise TypeError("Categoricals can only be compared if 'levels' are the same")
+ if not (self.ordered == other.ordered):
+ raise TypeError("Categoricals can only be compared if 'ordered' is the same")
+ na_mask = (self._codes == -1) | (other._codes == -1)
+ f = getattr(self._codes, op)
+ ret = f(other._codes)
+ if na_mask.any():
+ # In other Series, this leads to False, so do that here too
+ ret[na_mask] = False
+ return ret
+ elif np.isscalar(other):
if other in self.levels:
i = self.levels.get_loc(other)
return getattr(self._codes, op)(i)
else:
return np.repeat(False, len(self))
+ else:
+ msg = "Cannot compare a Categorical for op {op} with type {typ}. If you want to \n" \
+ "compare values, use 'np.asarray(cat) <op> other'."
+ raise TypeError(msg.format(op=op,typ=type(other)))
f.__name__ = op
@@ -109,9 +130,9 @@ class Categorical(PandasObject):
Attributes
----------
- levels : ndarray
+ levels : Index
The levels of this categorical
- codes : Index
+ codes : ndarray
The codes (integer positions, which point to the levels) of this categorical, read only
ordered : boolean
Whether or not this Categorical is ordered
@@ -171,6 +192,9 @@ class Categorical(PandasObject):
Categorical.max
"""
+ # For comparisons, so that numpy uses our implementation of the compare ops, which raise
+ __array_priority__ = 1000
+
def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False, compat=False):
if fastpath:
@@ -206,10 +230,15 @@ def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False,
# which is fine, but since factorize does this correctly no need here
# this is an issue because _sanitize_array also coerces np.nan to a string
# under certain versions of numpy as well
- inferred = com._possibly_infer_to_datetimelike(values)
- if not isinstance(inferred, np.ndarray):
+ values = com._possibly_infer_to_datetimelike(values)
+ if not isinstance(values, np.ndarray):
+ values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
- values = _sanitize_array(values, None)
+ # On a list with NaNs, int values will be converted to float. Use "object" dtype
+ # to prevent this. In the end objects will be cast to int/... in the level
+ # assignment step.
+ dtype = 'object' if com.isnull(values).any() else None
+ values = _sanitize_array(values, None, dtype=dtype)
if levels is None:
try:
@@ -277,7 +306,7 @@ def from_array(cls, data):
return Categorical(data)
@classmethod
- def from_codes(cls, codes, levels, ordered=True, name=None):
+ def from_codes(cls, codes, levels, ordered=False, name=None):
"""
Make a Categorical type from codes and levels arrays.
@@ -294,7 +323,7 @@ def from_codes(cls, codes, levels, ordered=True, name=None):
The levels for the categorical. Items need to be unique.
ordered : boolean, optional
Whether or not this categorical is treated as a ordered categorical. If not given,
- the resulting categorical will be ordered.
+ the resulting categorical will be unordered.
name : str, optional
Name for the Categorical variable.
"""
@@ -333,12 +362,34 @@ def _set_codes(self, codes):
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
+ def _get_labels(self):
+ """ Get the level labels (deprecated).
+
+ Deprecated, use .codes!
+ """
+ import warnings
+ warnings.warn("'labels' is deprecated. Use 'codes' instead", FutureWarning)
+ return self.codes
+
+ labels = property(fget=_get_labels, fset=_set_codes)
+
_levels = None
@classmethod
def _validate_levels(cls, levels):
"""" Validates that we have good levels """
- levels = _ensure_index(levels)
+ if not isinstance(levels, Index):
+ dtype = None
+ if not hasattr(levels, "dtype"):
+ levels = _convert_to_list_like(levels)
+ # on levels with NaNs, int values would be converted to float. Use "object" dtype
+ # to prevent this.
+ if com.isnull(levels).any():
+ without_na = np.array([x for x in levels if com.notnull(x)])
+ with_na = np.array(levels)
+ if with_na.dtype != without_na.dtype:
+ dtype = "object"
+ levels = Index(levels, dtype=dtype)
if not levels.is_unique:
raise ValueError('Categorical levels must be unique')
return levels
@@ -429,14 +480,61 @@ def __array__(self, dtype=None):
Returns
-------
values : numpy array
- A numpy array of the same dtype as categorical.levels.dtype
+ A numpy array of either the specified dtype or, if dtype==None (default), the same
+ dtype as categorical.levels.dtype
"""
- return com.take_1d(self.levels.values, self._codes)
+ ret = com.take_1d(self.levels.values, self._codes)
+ if dtype and dtype != self.levels.dtype:
+ return np.asarray(ret, dtype)
+ return ret
@property
def T(self):
return self
+ def isnull(self):
+ """
+ Detect missing values
+
+ Both missing values (-1 in .codes) and NA as a level are detected.
+
+ Returns
+ -------
+ a boolean array of whether my values are null
+
+ See also
+ --------
+ pandas.isnull : pandas version
+ Categorical.notnull : boolean inverse of Categorical.isnull
+ """
+
+ ret = self._codes == -1
+
+ # String/object and float levels can hold np.nan
+ if self.levels.dtype.kind in ['S', 'O', 'f']:
+ if np.nan in self.levels:
+ nan_pos = np.where(com.isnull(self.levels))
+ # we only have one NA in levels
+ ret = np.logical_or(ret , self._codes == nan_pos[0])
+ return ret
+
+ def notnull(self):
+ """
+ Reverse of isnull
+
+ Both missing values (-1 in .codes) and NA as a level are detected as null.
+
+ Returns
+ -------
+ a boolean array of whether my values are not null
+
+ See also
+ --------
+ pandas.notnull : pandas version
+ Categorical.isnull : boolean inverse of Categorical.notnull
+ """
+ return ~self.isnull()
+
def get_values(self):
""" Return the values.
@@ -503,10 +601,27 @@ def order(self, inplace=False, ascending=True, na_position='last', **kwargs):
if na_position not in ['last','first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
- codes = np.sort(self._codes.copy())
+ codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
+ # NaN handling
+ na_mask = (codes==-1)
+ if na_mask.any():
+ n_nans = len(codes[na_mask])
+ if na_position=="first" and not ascending:
+ # in this case sort to the front
+ new_codes = codes.copy()
+ new_codes[0:n_nans] = -1
+ new_codes[n_nans:] = codes[~na_mask]
+ codes = new_codes
+ elif na_position=="last" and not ascending:
+ # ... and to the end
+ new_codes = codes.copy()
+ pos = len(codes)-n_nans
+ new_codes[0:pos] = codes[~na_mask]
+ new_codes[pos:] = -1
+ codes = new_codes
if inplace:
self._codes = codes
return
@@ -595,6 +710,15 @@ def fillna(self, fill_value=None, method=None, limit=None, **kwargs):
values = self._codes
+ # Make sure that we also get NA in levels
+ if self.levels.dtype.kind in ['S', 'O', 'f']:
+ if np.nan in self.levels:
+ values = values.copy()
+ nan_pos = np.where(com.isnull(self.levels))
+ # we only have one NA in levels
+ values[values == nan_pos[0]] = -1
+
+
# pad / bfill
if method is not None:
@@ -608,9 +732,9 @@ def fillna(self, fill_value=None, method=None, limit=None, **kwargs):
if not com.isnull(fill_value) and fill_value not in self.levels:
raise ValueError("fill value must be in levels")
- mask = self._codes==-1
+ mask = values==-1
if mask.any():
- values = self._codes.copy()
+ values = values.copy()
values[mask] = self.levels.get_loc(fill_value)
return Categorical(values, levels=self.levels, ordered=self.ordered,
@@ -760,7 +884,8 @@ def __setitem__(self, key, value):
rvalue = value if com.is_list_like(value) else [value]
to_add = Index(rvalue)-self.levels
- if len(to_add):
+ # no assignments of values not in levels, but it's always ok to set something to np.nan
+ if len(to_add) and not com.isnull(to_add).all():
raise ValueError("cannot setitem on a Categorical with a new level,"
" set the levels first")
@@ -768,9 +893,8 @@ def __setitem__(self, key, value):
if isinstance(key, (int, np.integer)):
pass
- # tuple of indexers
+ # tuple of indexers (dataframe)
elif isinstance(key, tuple):
-
# only allow 1 dimensional slicing, but can
# in a 2-d case be passd (slice(None),....)
if len(key) == 2:
@@ -782,10 +906,28 @@ def __setitem__(self, key, value):
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
+ # slicing in Series or Categorical
+ elif isinstance(key, slice):
+ pass
+
+ # Array of True/False in Series or Categorical
else:
- key = self._codes[key]
+ # There is a bug in numpy, which does not accept a Series as an indexer
+ # https://github.com/pydata/pandas/issues/6168
+ # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
+ # FIXME: remove when numpy 1.9 is the lowest numpy version pandas accepts...
+ key = np.asarray(key)
lindexer = self.levels.get_indexer(rvalue)
+
+ # FIXME: the following can be removed after https://github.com/pydata/pandas/issues/7820
+ # is fixed.
+ # float levels do currently return -1 for np.nan, even if np.nan is included in the index
+ # "repair" this here
+ if com.isnull(rvalue).any() and com.isnull(self.levels).any():
+ nan_pos = np.where(com.isnull(self.levels))
+ lindexer[lindexer == -1] = nan_pos
+
self._codes[key] = lindexer
#### reduction ops ####
@@ -916,16 +1058,67 @@ def describe(self):
'values' : self._codes }
).groupby('codes').count()
- counts.index = self.levels.take(counts.index)
- counts = counts.reindex(self.levels)
freqs = counts / float(counts.sum())
from pandas.tools.merge import concat
result = concat([counts,freqs],axis=1)
- result.index.name = 'levels'
result.columns = ['counts','freqs']
+
+ # fill in the real levels
+ check = result.index == -1
+ if check.any():
+ # Sort -1 (=NaN) to the last position
+ index = np.arange(0, len(self.levels)+1)
+ index[-1] = -1
+ result = result.reindex(index)
+ # build new index
+ levels = np.arange(0,len(self.levels)+1 ,dtype=object)
+ levels[:-1] = self.levels
+ levels[-1] = np.nan
+ result.index = levels.take(result.index)
+ else:
+ result.index = self.levels.take(result.index)
+ result = result.reindex(self.levels)
+ result.index.name = 'levels'
+
return result
+##### The Series.cat accessor #####
+
+class CategoricalProperties(PandasDelegate):
+ """
+ Accessor object for categorical properties of the Series values.
+
+ Examples
+ --------
+ >>> s.cat.levels
+ >>> s.cat.levels = list('abc')
+ >>> s.cat.reorder_levels(list('cab'))
+
+ Allows accessing to specific getter and access methods
+ """
+
+ def __init__(self, values, index):
+ self.categorical = values
+ self.index = index
+
+ def _delegate_property_get(self, name):
+ return getattr(self.categorical, name)
+
+ def _delegate_property_set(self, name, new_values):
+ return setattr(self.categorical, name, new_values)
+
+ def _delegate_method(self, name, *args, **kwargs):
+ method = getattr(self.categorical, name)
+ return method(*args, **kwargs)
+
+CategoricalProperties._add_delegate_accessors(delegate=Categorical,
+ accessors=["levels", "ordered"],
+ typ='property')
+CategoricalProperties._add_delegate_accessors(delegate=Categorical,
+ accessors=["reorder_levels", "remove_unused_levels"],
+ typ='method')
+
##### utility routines #####
def _get_codes_for_values(values, levels):
@@ -942,3 +1135,17 @@ def _get_codes_for_values(values, levels):
t.map_locations(com._values_from_object(levels))
return com._ensure_platform_int(t.lookup(values))
+def _convert_to_list_like(list_like):
+ if hasattr(list_like, "dtype"):
+ return list_like
+ if isinstance(list_like, list):
+ return list_like
+ if (com._is_sequence(list_like) or isinstance(list_like, tuple)
+ or isinstance(list_like, types.GeneratorType)):
+ return list(list_like)
+ elif np.isscalar(list_like):
+ return [list_like]
+ else:
+ # is this reached?
+ return [list_like]
+
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 48fb75f59ac34..1fc0cd4101cf9 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -276,15 +276,22 @@ def _isnull_ndarraylike(obj):
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
- # Working around NumPy ticket 1542
- shape = values.shape
-
- if dtype.kind in ('S', 'U'):
- result = np.zeros(values.shape, dtype=bool)
+ if is_categorical_dtype(values):
+ from pandas import Categorical
+ if not isinstance(values, Categorical):
+ values = values.values
+ result = values.isnull()
else:
- result = np.empty(shape, dtype=bool)
- vec = lib.isnullobj(values.ravel())
- result[...] = vec.reshape(shape)
+
+ # Working around NumPy ticket 1542
+ shape = values.shape
+
+ if dtype.kind in ('S', 'U'):
+ result = np.zeros(values.shape, dtype=bool)
+ else:
+ result = np.empty(shape, dtype=bool)
+ vec = lib.isnullobj(values.ravel())
+ result[...] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
@@ -299,7 +306,6 @@ def _isnull_ndarraylike(obj):
return result
-
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
@@ -2440,7 +2446,7 @@ def _get_callable_name(obj):
# instead of the empty string in this case to allow
# distinguishing between no name and a name of ''
return None
-
+
_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,
compat.text_type)))
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 8f749d07296a7..0539d803a42a4 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -177,7 +177,7 @@ def _get_footer(self):
# level infos are added to the end and in a new line, like it is done for Categoricals
# Only added when we request a name
if self.name and com.is_categorical_dtype(self.series.dtype):
- level_info = self.series.cat._repr_level_info()
+ level_info = self.series.values._repr_level_info()
if footer:
footer += "\n"
footer += level_info
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f3b8a54034d56..44ca001b65296 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1628,6 +1628,27 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
return self.make_block_same_class(new_values, new_mgr_locs)
+ def putmask(self, mask, new, align=True, inplace=False):
+ """ putmask the data to the block; it is possible that we may create a
+ new dtype of block
+
+ return the resulting block(s)
+
+ Parameters
+ ----------
+ mask : the condition to respect
+ new : a ndarray/object
+ align : boolean, perform alignment on other/cond, default is True
+ inplace : perform inplace modification, default is False
+
+ Returns
+ -------
+ a new block(s), the result of the putmask
+ """
+ new_values = self.values if inplace else self.values.copy()
+ new_values[mask] = new
+ return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
+
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 16e6e40802a95..fc51511ff3970 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -522,6 +522,10 @@ def _comp_method_SERIES(op, name, str_rep, masker=False):
code duplication.
"""
def na_op(x, y):
+ if com.is_categorical_dtype(x) != com.is_categorical_dtype(y):
+ msg = "Cannot compare a Categorical for op {op} with type {typ}. If you want to \n" \
+ "compare values, use 'series <op> np.asarray(cat)'."
+ raise TypeError(msg.format(op=op,typ=type(y)))
if x.dtype == np.object_:
if isinstance(y, list):
y = lib.list_to_object_array(y)
@@ -553,11 +557,16 @@ def wrapper(self, other):
index=self.index, name=name)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
- elif isinstance(other, (pa.Array, pd.Series, pd.Index)):
+ elif isinstance(other, (pa.Array, pd.Index)):
if len(self) != len(other):
raise ValueError('Lengths must match to compare')
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
+ elif isinstance(other, pd.Categorical):
+ if not com.is_categorical_dtype(self):
+ msg = "Cannot compare a Categorical for op {op} with Series of dtype {typ}.\n"\
+ "If you want to compare values, use 'series <op> np.asarray(other)'."
+ raise TypeError(msg.format(op=op,typ=self.dtype))
else:
mask = isnull(self)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2f0e651bfc5b1..68f5b4d36392f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -906,7 +906,7 @@ def _repr_footer(self):
# Categorical
if com.is_categorical_dtype(self.dtype):
- level_info = self.cat._repr_level_info()
+ level_info = self.values._repr_level_info()
return u('%sLength: %d, dtype: %s\n%s') % (namestr,
len(self),
str(self.dtype.name),
@@ -2390,11 +2390,12 @@ def dt(self):
#------------------------------------------------------------------------------
# Categorical methods
- @property
+ @cache_readonly
def cat(self):
+ from pandas.core.categorical import CategoricalProperties
if not com.is_categorical_dtype(self.dtype):
raise TypeError("Can only use .cat accessor with a 'category' dtype")
- return self.values
+ return CategoricalProperties(self.values, self.index)
Series._setup_axes(['index'], info_axis=0, stat_axis=0,
aliases={'rows': 0})
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index d07adeadb640c..fcfee8cf9b1ba 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -3,6 +3,7 @@
from datetime import datetime
from pandas.compat import range, lrange, u
import re
+from distutils.version import LooseVersion
import numpy as np
import pandas as pd
@@ -70,6 +71,18 @@ def test_constructor(self):
c2 = Categorical(exp_arr, levels=["c","b","a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
+ # levels must be unique
+ def f():
+ Categorical([1,2], [1,2,2])
+ self.assertRaises(ValueError, f)
+ def f():
+ Categorical(["a","b"], ["a","b","b"])
+ self.assertRaises(ValueError, f)
+ def f():
+ Categorical([1,2], [1,2,np.nan, np.nan])
+ self.assertRaises(ValueError, f)
+
+
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
@@ -110,6 +123,79 @@ def test_constructor(self):
cat = pd.Categorical([1,2,3,np.nan], levels=[1,2,3])
self.assertTrue(com.is_integer_dtype(cat.levels))
+ # https://github.com/pydata/pandas/issues/3678
+ cat = pd.Categorical([np.nan,1, 2, 3])
+ self.assertTrue(com.is_integer_dtype(cat.levels))
+
+ # this should result in floats
+ cat = pd.Categorical([np.nan, 1, 2., 3 ])
+ self.assertTrue(com.is_float_dtype(cat.levels))
+
+ cat = pd.Categorical([np.nan, 1., 2., 3. ])
+ self.assertTrue(com.is_float_dtype(cat.levels))
+
+ # preserve int as far as possible by converting to object if NaN is in levels
+ cat = pd.Categorical([np.nan, 1, 2, 3], levels=[np.nan, 1, 2, 3])
+ self.assertTrue(com.is_object_dtype(cat.levels))
+ # This doesn't work -> this would probably need some kind of "remember the original type"
+ # feature to try to cast the array interface result to...
+ #vals = np.asarray(cat[cat.notnull()])
+ #self.assertTrue(com.is_integer_dtype(vals))
+ cat = pd.Categorical([np.nan,"a", "b", "c"], levels=[np.nan,"a", "b", "c"])
+ self.assertTrue(com.is_object_dtype(cat.levels))
+ # but don't do it for floats
+ cat = pd.Categorical([np.nan, 1., 2., 3.], levels=[np.nan, 1., 2., 3.])
+ self.assertTrue(com.is_float_dtype(cat.levels))
+
+
+ # corner cases
+ cat = pd.Categorical([1])
+ self.assertTrue(len(cat.levels) == 1)
+ self.assertTrue(cat.levels[0] == 1)
+ self.assertTrue(len(cat.codes) == 1)
+ self.assertTrue(cat.codes[0] == 0)
+
+ cat = pd.Categorical(["a"])
+ self.assertTrue(len(cat.levels) == 1)
+ self.assertTrue(cat.levels[0] == "a")
+ self.assertTrue(len(cat.codes) == 1)
+ self.assertTrue(cat.codes[0] == 0)
+
+ # Scalars should be converted to lists
+ cat = pd.Categorical(1)
+ self.assertTrue(len(cat.levels) == 1)
+ self.assertTrue(cat.levels[0] == 1)
+ self.assertTrue(len(cat.codes) == 1)
+ self.assertTrue(cat.codes[0] == 0)
+
+ cat = pd.Categorical([1], levels=1)
+ self.assertTrue(len(cat.levels) == 1)
+ self.assertTrue(cat.levels[0] == 1)
+ self.assertTrue(len(cat.codes) == 1)
+ self.assertTrue(cat.codes[0] == 0)
+
+ def test_constructor_with_generator(self):
+ # This was raising an Error in isnull(single_val).any() because isnull returned a scalar
+ # for a generator
+ from pandas.compat import range as xrange
+
+ exp = Categorical([0,1,2])
+ cat = Categorical((x for x in [0,1,2]))
+ self.assertTrue(cat.equals(exp))
+ cat = Categorical(xrange(3))
+ self.assertTrue(cat.equals(exp))
+
+ # This uses xrange internally
+ from pandas.core.index import MultiIndex
+ MultiIndex.from_product([range(5), ['a', 'b', 'c']])
+
+ # check that levels accept generators and sequences
+ cat = pd.Categorical([0,1,2], levels=(x for x in [0,1,2]))
+ self.assertTrue(cat.equals(exp))
+ cat = pd.Categorical([0,1,2], levels=xrange(3))
+ self.assertTrue(cat.equals(exp))
+
+
def test_from_codes(self):
# too few levels
@@ -133,7 +219,7 @@ def f():
self.assertRaises(ValueError, f)
- exp = Categorical(["a","b","c"])
+ exp = Categorical(["a","b","c"], ordered=False)
res = Categorical.from_codes([0,1,2], ["a","b","c"])
self.assertTrue(exp.equals(res))
@@ -178,6 +264,62 @@ def test_comparisons(self):
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
+ # comparisons with categoricals
+ cat_rev = pd.Categorical(["a","b","c"], levels=["c","b","a"])
+ cat_rev_base = pd.Categorical(["b","b","b"], levels=["c","b","a"])
+ cat = pd.Categorical(["a","b","c"])
+ cat_base = pd.Categorical(["b","b","b"], levels=cat.levels)
+
+ # comparisons need to take level ordering into account
+ res_rev = cat_rev > cat_rev_base
+ exp_rev = np.array([True, False, False])
+ self.assert_numpy_array_equal(res_rev, exp_rev)
+
+ res_rev = cat_rev < cat_rev_base
+ exp_rev = np.array([False, False, True])
+ self.assert_numpy_array_equal(res_rev, exp_rev)
+
+ res = cat > cat_base
+ exp = np.array([False, False, True])
+ self.assert_numpy_array_equal(res, exp)
+
+ # Only categories with same levels can be compared
+ def f():
+ cat > cat_rev
+ self.assertRaises(TypeError, f)
+
+ cat_rev_base2 = pd.Categorical(["b","b","b"], levels=["c","b","a","d"])
+ def f():
+ cat_rev > cat_rev_base2
+ self.assertRaises(TypeError, f)
+
+ # Only categories with same ordering information can be compared
+ cat_unorderd = cat.copy()
+ cat_unorderd.ordered = False
+ self.assertFalse((cat > cat).any())
+ def f():
+ cat > cat_unorderd
+ self.assertRaises(TypeError, f)
+
+ # comparison (in both directions) with Series will raise
+ s = Series(["b","b","b"])
+ self.assertRaises(TypeError, lambda: cat > s)
+ self.assertRaises(TypeError, lambda: cat_rev > s)
+ self.assertRaises(TypeError, lambda: s < cat)
+ self.assertRaises(TypeError, lambda: s < cat_rev)
+
+ # comparison with numpy.array will raise in both direction, but only on newer
+ # numpy versions
+ a = np.array(["b","b","b"])
+ self.assertRaises(TypeError, lambda: cat > a)
+ self.assertRaises(TypeError, lambda: cat_rev > a)
+
+ # The following work via '__array_priority__ = 1000'
+ # works only on numpy >= 1.7.1 and not on PY3.2
+ if LooseVersion(np.__version__) > "1.7.1" and not compat.PY3_2:
+ self.assertRaises(TypeError, lambda: a < cat)
+ self.assertRaises(TypeError, lambda: a < cat_rev)
+
def test_na_flags_int_levels(self):
# #1457
@@ -204,6 +346,16 @@ def test_describe(self):
).set_index('levels')
tm.assert_frame_equal(desc, expected)
+ # check unused levels
+ cat = self.factor.copy()
+ cat.levels = ["a","b","c","d"]
+ desc = cat.describe()
+ expected = DataFrame.from_dict(dict(counts=[3, 2, 3, np.nan],
+ freqs=[3/8., 2/8., 3/8., np.nan],
+ levels=['a', 'b', 'c', 'd'])
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
# check an integer one
desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
expected = DataFrame.from_dict(dict(counts=[5, 3, 3],
@@ -213,6 +365,47 @@ def test_describe(self):
).set_index('levels')
tm.assert_frame_equal(desc, expected)
+ # https://github.com/pydata/pandas/issues/3678
+ # describe should work with NaN
+ cat = pd.Categorical([np.nan,1, 2, 2])
+ desc = cat.describe()
+ expected = DataFrame.from_dict(dict(counts=[1, 2, 1],
+ freqs=[1/4., 2/4., 1/4.],
+ levels=[1,2,np.nan]
+ )
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
+ # having NaN as level and as "not available" should also print two NaNs in describe!
+ cat = pd.Categorical([np.nan,1, 2, 2])
+ cat.levels = [1,2,np.nan]
+ desc = cat.describe()
+ expected = DataFrame.from_dict(dict(counts=[1, 2, np.nan, 1],
+ freqs=[1/4., 2/4., np.nan, 1/4.],
+ levels=[1,2,np.nan,np.nan]
+ )
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
+ # empty levels show up as NA
+ cat = Categorical(["a","b","b","b"], levels=['a','b','c'], ordered=True)
+ result = cat.describe()
+
+ expected = DataFrame([[1,0.25],[3,0.75],[np.nan,np.nan]],
+ columns=['counts','freqs'],
+ index=Index(['a','b','c'],name='levels'))
+ tm.assert_frame_equal(result,expected)
+
+ # NA as a level
+ cat = pd.Categorical(["a","c","c",np.nan], levels=["b","a","c",np.nan] )
+ result = cat.describe()
+
+ expected = DataFrame([[np.nan, np.nan],[1,0.25],[2,0.5], [1,0.25]],
+ columns=['counts','freqs'],
+ index=Index(['b','a','c',np.nan],name='levels'))
+ tm.assert_frame_equal(result,expected)
+
+
def test_print(self):
expected = [" a", " b", " b", " a", " a", " c", " c", " c",
"Levels (3, object): [a < b < c]"]
@@ -361,6 +554,23 @@ def test_nan_handling(self):
self.assert_numpy_array_equal(c.levels , np.array(["a","b",np.nan],dtype=np.object_))
self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0]))
+ def test_isnull(self):
+ exp = np.array([False, False, True])
+ c = Categorical(["a","b",np.nan])
+ res = c.isnull()
+ self.assert_numpy_array_equal(res, exp)
+
+ c = Categorical(["a","b",np.nan], levels=["a","b",np.nan])
+ res = c.isnull()
+ self.assert_numpy_array_equal(res, exp)
+
+ exp = np.array([True, False, True])
+ c = Categorical(["a","b",np.nan])
+ c.levels = ["a","b",np.nan]
+ c[0] = np.nan
+ res = c.isnull()
+ self.assert_numpy_array_equal(res, exp)
+
def test_codes_immutable(self):
# Codes should be read only
@@ -492,6 +702,54 @@ def test_slicing_directly(self):
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.levels, expected.levels)
+ def test_set_item_nan(self):
+ cat = pd.Categorical([1,2,3])
+ exp = pd.Categorical([1,np.nan,3], levels=[1,2,3])
+ cat[1] = np.nan
+ self.assertTrue(cat.equals(exp))
+
+ # if nan in levels, the proper code should be set!
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1] = np.nan
+ exp = np.array([0,3,2,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1:3] = np.nan
+ exp = np.array([0,3,3,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1:3] = [np.nan, 1]
+ exp = np.array([0,3,0,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1:3] = [np.nan, np.nan]
+ exp = np.array([0,3,3,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2, np.nan, 3], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[pd.isnull(cat)] = np.nan
+ exp = np.array([0,1,3,2])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ def test_deprecated_labels(self):
+ # labels is deprecated and should be removed in 0.18 or 2017, whichever is earlier
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ exp = cat.codes
+ with tm.assert_produces_warning(FutureWarning):
+ res = cat.labels
+ self.assert_numpy_array_equal(res, exp)
+ self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
+
+
+
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
@@ -592,7 +850,7 @@ def test_creation_astype(self):
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either the series or the
- # categorical should not change the values in the other one!
+ # categorical should not change the values in the other one, IF you specify copy!
cat = Categorical(["a","b","c","a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
@@ -612,7 +870,7 @@ def test_sideeffects_free(self):
# so this WILL change values
cat = Categorical(["a","b","c","a"])
s = pd.Series(cat)
- self.assertTrue(s.cat is cat)
+ self.assertTrue(s.values is cat)
s.cat.levels = [1,2,3]
exp_s = np.array([1,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
@@ -628,20 +886,32 @@ def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a","b",np.nan,"a"]))
self.assert_numpy_array_equal(s.cat.levels, np.array(["a","b"]))
- self.assert_numpy_array_equal(s.cat._codes, np.array([0,1,-1,0]))
+ self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0]))
# If levels have nan included, the label should point to that instead
s2 = Series(Categorical(["a","b",np.nan,"a"], levels=["a","b",np.nan]))
self.assert_numpy_array_equal(s2.cat.levels,
np.array(["a","b",np.nan], dtype=np.object_))
- self.assert_numpy_array_equal(s2.cat._codes, np.array([0,1,2,0]))
+ self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0]))
# Changing levels should also make the replaced level np.nan
s3 = Series(Categorical(["a","b","c","a"]))
s3.cat.levels = ["a","b",np.nan]
self.assert_numpy_array_equal(s3.cat.levels,
np.array(["a","b",np.nan], dtype=np.object_))
- self.assert_numpy_array_equal(s3.cat._codes, np.array([0,1,2,0]))
+ self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0]))
+
+ def test_cat_accessor(self):
+ s = Series(Categorical(["a","b",np.nan,"a"]))
+ self.assert_numpy_array_equal(s.cat.levels, np.array(["a","b"]))
+ self.assertEqual(s.cat.ordered, True)
+ exp = Categorical(["a","b",np.nan,"a"], levels=["b","a"])
+ s.cat.reorder_levels(["b", "a"])
+ self.assertTrue(s.values.equals(exp))
+ exp = Categorical(["a","b",np.nan,"a"], levels=["b","a"])
+ s[:] = "a"
+ s.cat.remove_unused_levels()
+ self.assert_numpy_array_equal(s.cat.levels, np.array(["a"]))
def test_sequence_like(self):
@@ -651,8 +921,8 @@ def test_sequence_like(self):
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
- result = list(df.grade.cat)
- expected = np.array(df.grade.cat).tolist()
+ result = list(df.grade.values)
+ expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result,expected)
# iteration
@@ -694,7 +964,7 @@ def test_series_delegations(self):
exp_values = np.array(["a","b","c","a"])
s.cat.reorder_levels(["c","b","a"])
self.assert_numpy_array_equal(s.cat.levels, exp_levels)
- self.assert_numpy_array_equal(s.cat.__array__(), exp_values)
+ self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused levels
@@ -703,7 +973,7 @@ def test_series_delegations(self):
exp_values = np.array(["a","b","b","a"])
s.cat.remove_unused_levels()
self.assert_numpy_array_equal(s.cat.levels, exp_levels)
- self.assert_numpy_array_equal(s.cat.__array__(), exp_values)
+ self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error on wrong inputs:
@@ -749,10 +1019,6 @@ def test_assignment_to_dataframe(self):
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s))
- # FIXME?
- #### what does this compare to? ###
- result = df.sort_index()
-
cat = pd.Categorical([1,2,3,10], levels=[1,2,3,4,10])
df = pd.DataFrame(pd.Series(cat))
@@ -762,31 +1028,16 @@ def test_describe(self):
result = self.cat.describe()
self.assertEquals(len(result.columns),1)
- # empty levels show up as NA
- s = Series(Categorical(["a","b","b","b"], levels=['a','b','c'], ordered=True))
- result = s.cat.describe()
- expected = DataFrame([[1,0.25],[3,0.75],[np.nan,np.nan]],
- columns=['counts','freqs'],
- index=Index(['a','b','c'],name='levels'))
- tm.assert_frame_equal(result,expected)
+ # In a frame, describe() for the cat should be the same as for string arrays (count, unique,
+ # top, freq)
+ cat = Categorical(["a","b","b","b"], levels=['a','b','c'], ordered=True)
+ s = Series(cat)
result = s.describe()
expected = Series([4,2,"b",3],index=['count','unique','top', 'freq'])
tm.assert_series_equal(result,expected)
- # NA as a level
- cat = pd.Categorical(["a","c","c",np.nan], levels=["b","a","c",np.nan] )
- result = cat.describe()
-
- expected = DataFrame([[np.nan, np.nan],[1,0.25],[2,0.5], [1,0.25]],
- columns=['counts','freqs'],
- index=Index(['b','a','c',np.nan],name='levels'))
- tm.assert_frame_equal(result,expected)
-
-
- # In a frame, describe() for the cat should be the same as for string arrays (count, unique,
- # top, freq)
cat = pd.Series(pd.Categorical(["a","b","c","c"]))
df3 = pd.DataFrame({"cat":cat, "s":["a","b","c","c"]})
res = df3.describe()
@@ -966,7 +1217,7 @@ def test_sort(self):
# Cats must be sorted in a dataframe
res = df.sort(columns=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
- self.assert_numpy_array_equal(res["sort"].cat.__array__(), exp)
+ self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort(columns=["sort"], ascending=False)
@@ -1009,17 +1260,29 @@ def f():
res = cat.order(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a", np.nan],dtype=object)
exp_levels = np.array(["a","b","c","d"],dtype=object)
- # FIXME: IndexError: Out of bounds on buffer access (axis 0)
- #self.assert_numpy_array_equal(res.__array__(), exp_val)
- #self.assert_numpy_array_equal(res.levels, exp_levels)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.order(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_levels = np.array(["a","b","c","d"],dtype=object)
- # FIXME: IndexError: Out of bounds on buffer access (axis 0)
- #self.assert_numpy_array_equal(res.__array__(), exp_val)
- #self.assert_numpy_array_equal(res.levels, exp_levels)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
+
+ cat = Categorical(["a","c","b","d", np.nan], ordered=True)
+ res = cat.order(ascending=False, na_position='first')
+ exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
+ exp_levels = np.array(["a","b","c","d"],dtype=object)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
+
+ cat = Categorical(["a","c","b","d", np.nan], ordered=True)
+ res = cat.order(ascending=False, na_position='last')
+ exp_val = np.array(["d","c","b","a",np.nan],dtype=object)
+ exp_levels = np.array(["a","b","c","d"],dtype=object)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
def test_slicing(self):
cat = Series(Categorical([1,2,3,4]))
@@ -1253,6 +1516,12 @@ def test_assigning_ops(self):
df.iloc[2,0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
+
+ df = orig.copy()
+ df.iloc[df.index == "j",0] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+
# - assign a single value not in the current level set
def f():
df = orig.copy()
@@ -1310,6 +1579,10 @@ def f():
df.loc["j","cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
+ df = orig.copy()
+ df.loc[df.index == "j","cats"] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
# - assign a single value not in the current level set
def f():
df = orig.copy()
@@ -1367,6 +1640,10 @@ def f():
df.ix["j",0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
+ df = orig.copy()
+ df.ix[df.index == "j",0] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
# - assign a single value not in the current level set
def f():
df = orig.copy()
@@ -1469,6 +1746,92 @@ def f():
df.loc[2:3,"b"] = pd.Categorical(["b","b"], levels=["a","b"])
tm.assert_frame_equal(df, exp)
+ ######### Series ##########
+ orig = Series(pd.Categorical(["b","b"], levels=["a","b"]))
+ s = orig.copy()
+ s[:] = "a"
+ exp = Series(pd.Categorical(["a","a"], levels=["a","b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s[1] = "a"
+ exp = Series(pd.Categorical(["b","a"], levels=["a","b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s[s.index > 0] = "a"
+ exp = Series(pd.Categorical(["b","a"], levels=["a","b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s[[False, True]] = "a"
+ exp = Series(pd.Categorical(["b","a"], levels=["a","b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.index = ["x", "y"]
+ s["y"] = "a"
+ exp = Series(pd.Categorical(["b","a"], levels=["a","b"]), index=["x", "y"])
+ tm.assert_series_equal(s, exp)
+
+ # ensure that one can set something to np.nan
+ s = Series(Categorical([1,2,3]))
+ exp = Series(Categorical([1,np.nan,3]))
+ s[1] = np.nan
+ tm.assert_series_equal(s, exp)
+
+
+ def test_comparisons(self):
+ tests_data = [(list("abc"), list("cba"), list("bbb")),
+ ([1,2,3], [3,2,1], [2,2,2])]
+ for data , reverse, base in tests_data:
+ cat_rev = pd.Series(pd.Categorical(data, levels=reverse))
+ cat_rev_base = pd.Series(pd.Categorical(base, levels=reverse))
+ cat = pd.Series(pd.Categorical(data))
+ cat_base = pd.Series(pd.Categorical(base, levels=cat.cat.levels))
+ s = Series(base)
+ a = np.array(base)
+
+ # comparisons need to take level ordering into account
+ res_rev = cat_rev > cat_rev_base
+ exp_rev = Series([True, False, False])
+ tm.assert_series_equal(res_rev, exp_rev)
+
+ res_rev = cat_rev < cat_rev_base
+ exp_rev = Series([False, False, True])
+ tm.assert_series_equal(res_rev, exp_rev)
+
+ res = cat > cat_base
+ exp = Series([False, False, True])
+ tm.assert_series_equal(res, exp)
+
+ # Only categories with same levels can be compared
+ def f():
+ cat > cat_rev
+ self.assertRaises(TypeError, f)
+
+ # categorical cannot be compared to Series or numpy array, and also not the other way
+ # around
+ self.assertRaises(TypeError, lambda: cat > s)
+ self.assertRaises(TypeError, lambda: cat_rev > s)
+ self.assertRaises(TypeError, lambda: cat > a)
+ self.assertRaises(TypeError, lambda: cat_rev > a)
+
+ self.assertRaises(TypeError, lambda: s < cat)
+ self.assertRaises(TypeError, lambda: s < cat_rev)
+
+ self.assertRaises(TypeError, lambda: a < cat)
+ self.assertRaises(TypeError, lambda: a < cat_rev)
+
+ # Categoricals can be compared to scalar values
+ res = cat_rev > base[0]
+ tm.assert_series_equal(res, exp)
+
+ # And test NaN handling...
+ cat = pd.Series(pd.Categorical(["a","b","c", np.nan]))
+ exp = Series([True, True, True, False])
+ res = (cat == cat)
+ tm.assert_series_equal(res, exp)
def test_concat(self):
cat = pd.Categorical(["a","b"], levels=["a","b"])
@@ -1558,6 +1921,16 @@ def f():
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
+ # make sure that fillna takes both missing values and NA levels into account
+ c = Categorical(["a","b",np.nan])
+ c.levels = ["a","b",np.nan]
+ c[0] = np.nan
+ df = pd.DataFrame({"cats":c, "vals":[1,2,3]})
+ df_exp = pd.DataFrame({"cats": Categorical(["a","b","a"]), "vals": [1,2,3]})
+ res = df.fillna("a")
+ tm.assert_frame_equal(res, df_exp)
+
+
def test_astype_to_other(self):
s = self.cat['value_group']
@@ -1607,6 +1980,18 @@ def test_numeric_like_ops(self):
# invalid ufunc
self.assertRaises(TypeError, lambda : np.log(s))
+ def test_cat_tab_completition(self):
+ # test the tab completion display
+ ok_for_cat = ['levels','ordered','reorder_levels','remove_unused_levels']
+ def get_dir(s):
+ results = [ r for r in s.cat.__dir__() if not r.startswith('_') ]
+ return list(sorted(set(results)))
+
+ s = Series(list('aabbcde')).astype('category')
+ results = get_dir(s)
+ tm.assert_almost_equal(results,list(sorted(set(ok_for_cat))))
+
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| replaces #7768, #8006
closes #3678
Categorical: preserve ints when NaN are present
`Categorical([1, np.nan])` would end up with a single `1.` float level.
This commit ensures that if `values` is a list of ints and contains np.nan,
the float conversation does not take place.
Categorical: fix describe with np.nan
Categorical: ensure that one can assign np.nan
Categorical: fix assigning NaN if NaN in levels
API: change default Categorical.from_codes() to ordered=False
In the normal constructor `ordered=True` is only assumed if the levels
are given or the values are sortable (which is most of the cases), but
in `from_codes(...)` we can't asssume this so the default should be
`False`.
Categorical: add some links to Categorical in the other docs
Categorical: use s.values when calling private methods
s.values is the underlying Categorical object, s.cat will be changed
to only expose the API methods/properties.
Categorical: Change series.cat to only expose the API
Categorical: Fix order and na_position
Categorical: Fix comparison of Categoricals and Series|Categorical|np.array
Categorical can only be comapred to another Categorical with the same levels
and the same ordering or to a scalar value.
If the Categorical has no order defined (cat.ordered == False), only equal
(and not equal) are defined.
Categorical: Tab completition tests
Categorical: Fix for NA handling/float converting in levels
Categorical: make sure fillna gets both missing values and NA-levels
Categorical: add back labels as deprecated property
Categorical: Fix assigment to a Series(Categorical)) and remove Series.cat.codes
Categorical: declare most methods in Categorical NON-API
| https://api.github.com/repos/pandas-dev/pandas/pulls/8007 | 2014-08-12T18:14:31Z | 2014-08-19T13:58:05Z | 2014-08-19T13:58:05Z | 2014-08-29T18:41:49Z |
CLN/DOC/TST: categorical fixups (GH7768) | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 985f112979a7e..6424b82779f0f 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -66,7 +66,8 @@ Creating a ``DataFrame`` by passing a dict of objects that can be converted to s
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
- 'E' : 'foo' })
+ 'E' : pd.Categorical(["test","train","test","train"]),
+ 'F' : 'foo' })
df2
Having specific :ref:`dtypes <basics.dtypes>`
@@ -635,6 +636,32 @@ the quarter end:
ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
ts.head()
+Categoricals
+------------
+
+Since version 0.15, pandas can include categorical data in a `DataFrame`. For full docs, see the
+:ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>` .
+
+.. ipython:: python
+
+ df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
+
+ # convert the raw grades to a categorical
+ df["grade"] = pd.Categorical(df["raw_grade"])
+
+ # Alternative: df["grade"] = df["raw_grade"].astype("category")
+ df["grade"]
+
+ # Rename the levels
+ df["grade"].cat.levels = ["very good", "good", "very bad"]
+
+ # Reorder the levels and simultaneously add the missing levels
+ df["grade"].cat.reorder_levels(["very bad", "bad", "medium", "good", "very good"])
+ df["grade"]
+ df.sort("grade")
+ df.groupby("grade").size()
+
+
Plotting
--------
diff --git a/doc/source/api.rst b/doc/source/api.rst
index ec6e2aff870c6..158fe5624087e 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -528,11 +528,17 @@ and has the following usable methods and properties (all available as
:toctree: generated/
Categorical
- Categorical.from_codes
Categorical.levels
Categorical.ordered
Categorical.reorder_levels
Categorical.remove_unused_levels
+
+The following methods are considered API when using ``Categorical`` directly:
+
+.. autosummary::
+ :toctree: generated/
+
+ Categorical.from_codes
Categorical.min
Categorical.max
Categorical.mode
@@ -547,7 +553,7 @@ the Categorical back to a numpy array, so levels and order information is not pr
Categorical.__array__
To create compatibility with `pandas.Series` and `numpy` arrays, the following (non-API) methods
-are also introduced.
+are also introduced and available when ``Categorical`` is used directly.
.. autosummary::
:toctree: generated/
@@ -563,7 +569,8 @@ are also introduced.
Categorical.order
Categorical.argsort
Categorical.fillna
-
+ Categorical.notnull
+ Categorical.isnull
Plotting
~~~~~~~~
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index c08351eb87a79..95229c4bef3a8 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -90,6 +90,7 @@ By using some special functions:
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)
df.head(10)
+See :ref:`documentation <reshaping.tile.cut>` for :func:`~pandas.cut`.
`Categoricals` have a specific ``category`` :ref:`dtype <basics.dtypes>`:
@@ -210,11 +211,9 @@ Renaming levels is done by assigning new values to the ``Category.levels`` or
Levels must be unique or a `ValueError` is raised:
.. ipython:: python
+ :okexcept:
- try:
- s.cat.levels = [1,1,1]
- except ValueError as e:
- print("ValueError: " + str(e))
+ s.cat.levels = [1,1,1]
Appending levels can be done by assigning a levels list longer than the current levels:
@@ -268,12 +267,11 @@ meaning and certain operations are possible. If the categorical is unordered, a
raised.
.. ipython:: python
+ :okexcept:
s = pd.Series(pd.Categorical(["a","b","c","a"], ordered=False))
- try:
- s.sort()
- except TypeError as e:
- print("TypeError: " + str(e))
+ s.sort()
+
s = pd.Series(pd.Categorical(["a","b","c","a"], ordered=True))
s.sort()
s
@@ -331,6 +329,44 @@ Operations
The following operations are possible with categorical data:
+Comparing `Categoricals` with other objects is possible in two cases:
+ * comparing a `Categorical` to another `Categorical`, when `level` and `ordered` is the same or
+ * comparing a `Categorical` to a scalar.
+All other comparisons will raise a TypeError.
+
+.. ipython:: python
+
+ cat = pd.Series(pd.Categorical([1,2,3], levels=[3,2,1]))
+ cat
+ cat_base = pd.Series(pd.Categorical([2,2,2], levels=[3,2,1]))
+ cat_base
+ cat_base2 = pd.Series(pd.Categorical([2,2,2]))
+ cat_base2
+
+ cat > cat_base
+ cat > 2
+
+This doesn't work because the levels are not the same
+
+.. ipython:: python
+ :okexcept:
+
+ cat > cat_base2
+
+.. note::
+
+ Comparisons with `Series`, `np.array` or a `Categorical` with different levels or ordering
+ will raise an `TypeError` because custom level ordering would result in two valid results:
+ one with taking in account the ordering and one without. If you want to compare a `Categorical`
+ with such a type, you need to be explicit and convert the `Categorical` to values:
+
+.. ipython:: python
+ :okexcept:
+
+ base = np.array([1,2,3])
+ cat > base
+ np.asarray(cat) > base
+
Getting the minimum and maximum, if the categorical is ordered:
.. ipython:: python
@@ -454,21 +490,22 @@ Setting values in a categorical column (or `Series`) works as long as the value
df.iloc[2:4,:] = [["b",2],["b",2]]
df
- try:
- df.iloc[2:4,:] = [["c",3],["c",3]]
- except ValueError as e:
- print("ValueError: " + str(e))
+
+The value is not included in the levels here.
+
+.. ipython:: python
+ :okexcept:
+
+ df.iloc[2:4,:] = [["c",3],["c",3]]
Setting values by assigning a `Categorical` will also check that the `levels` match:
.. ipython:: python
+ :okexcept:
df.loc["j":"k","cats"] = pd.Categorical(["a","a"], levels=["a","b"])
df
- try:
- df.loc["j":"k","cats"] = pd.Categorical(["b","b"], levels=["a","b","c"])
- except ValueError as e:
- print("ValueError: " + str(e))
+ df.loc["j":"k","cats"] = pd.Categorical(["b","b"], levels=["a","b","c"])
Assigning a `Categorical` to parts of a column of other types will use the values:
@@ -489,27 +526,30 @@ but the levels of these `Categoricals` need to be the same:
.. ipython:: python
- cat = pd.Categorical(["a","b"], levels=["a","b"])
- vals = [1,2]
- df = pd.DataFrame({"cats":cat, "vals":vals})
- res = pd.concat([df,df])
- res
- res.dtypes
+ cat = pd.Categorical(["a","b"], levels=["a","b"])
+ vals = [1,2]
+ df = pd.DataFrame({"cats":cat, "vals":vals})
+ res = pd.concat([df,df])
+ res
+ res.dtypes
- df_different = df.copy()
- df_different["cats"].cat.levels = ["a","b","c"]
+ df_different = df.copy()
+ df_different["cats"].cat.levels = ["a","b","c"]
- try:
- pd.concat([df,df])
- except ValueError as e:
- print("ValueError: " + str(e))
+These levels are not the same
+
+.. ipython:: python
+ :okexcept:
+
+ pd.concat([df,df])
The same applies to ``df.append(df)``.
Getting Data In/Out
-------------------
-Writing data (`Series`, `Frames`) to a HDF store that contains a ``category`` dtype will currently raise ``NotImplementedError``.
+Writing data (`Series`, `Frames`) to a HDF store that contains a ``category`` dtype will currently
+raise ``NotImplementedError``.
Writing to a CSV file will convert the data, effectively removing any information about the
`Categorical` (levels and ordering). So if you read back the CSV file you have to convert the
@@ -575,33 +615,26 @@ object and not as a low level `numpy` array dtype. This leads to some problems.
`numpy` itself doesn't know about the new `dtype`:
.. ipython:: python
+ :okexcept:
- try:
- np.dtype("category")
- except TypeError as e:
- print("TypeError: " + str(e))
+ np.dtype("category")
+ dtype = pd.Categorical(["a"]).dtype
+ np.dtype(dtype)
- dtype = pd.Categorical(["a"]).dtype
- try:
- np.dtype(dtype)
- except TypeError as e:
- print("TypeError: " + str(e))
-
- # dtype comparisons work:
- dtype == np.str_
- np.str_ == dtype
+ # dtype comparisons work:
+ dtype == np.str_
+ np.str_ == dtype
Using `numpy` functions on a `Series` of type ``category`` should not work as `Categoricals`
are not numeric data (even in the case that ``.levels`` is numeric).
.. ipython:: python
+ :okexcept:
- s = pd.Series(pd.Categorical([1,2,3,4]))
- try:
- np.sum(s)
- #same with np.log(s),..
- except TypeError as e:
- print("TypeError: " + str(e))
+ s = pd.Series(pd.Categorical([1,2,3,4]))
+
+ #same with np.log(s),..
+ np.sum(s)
.. note::
If such a function works, please file a bug at https://github.com/pydata/pandas!
@@ -647,14 +680,14 @@ Both `Series` and `Categorical` have a method ``.reorder_levels()`` but for diff
Series of type ``category`` this means that there is some danger to confuse both methods.
.. ipython:: python
+ :okexcept:
s = pd.Series(pd.Categorical([1,2,3,4]))
print(s.cat.levels)
+
# wrong and raises an error:
- try:
- s.reorder_levels([4,3,2,1])
- except Exception as e:
- print("Exception: " + str(e))
+ s.reorder_levels([4,3,2,1])
+
# right
s.cat.reorder_levels([4,3,2,1])
print(s.cat.levels)
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 92a35d0276e22..3d40be37dbbb3 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -503,3 +503,10 @@ handling of NaN:
pd.factorize(x, sort=True)
np.unique(x, return_inverse=True)[::-1]
+
+.. note::
+ If you just want to handle one column as a categorical variable (like R's factor),
+ you can use ``df["cat_col"] = pd.Categorical(df["col"])`` or
+ ``df["cat_col"] = df["col"].astype("category")``. For full docs on :class:`~pandas.Categorical`,
+ see the :ref:`Categorical introduction <categorical>` and the
+ :ref:`API documentation <api.categorical>`. This feature was introduced in version 0.15.
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 322bcba9664d9..aa28796061599 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -226,7 +226,8 @@ Categoricals in Series/DataFrame
methods to manipulate. Thanks to Jan Schultz for much of this API/implementation. (:issue:`3943`, :issue:`5313`, :issue:`5314`,
:issue:`7444`, :issue:`7839`, :issue:`7848`, :issue:`7864`, :issue:`7914`).
-For full docs, see the :ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`.
+For full docs, see the :ref:`Categorical introduction <categorical>` and the
+:ref:`API documentation <api.categorical>`.
.. ipython:: python
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index c9674aea4a715..43217f2abe240 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -2,12 +2,13 @@
import numpy as np
from warnings import warn
+import types
from pandas import compat
from pandas.compat import u
-from pandas.core.algorithms import factorize, unique
-from pandas.core.base import PandasObject
+from pandas.core.algorithms import factorize
+from pandas.core.base import PandasObject, PandasDelegate
from pandas.core.index import Index, _ensure_index
from pandas.core.indexing import _is_null_slice
from pandas.tseries.period import PeriodIndex
@@ -18,16 +19,36 @@
def _cat_compare_op(op):
def f(self, other):
- if isinstance(other, (Categorical, np.ndarray)):
- values = np.asarray(self)
- f = getattr(values, op)
- return f(np.asarray(other))
- else:
+ # On python2, you can usually compare any type to any type, and Categoricals can be
+ # seen as a custom type, but having different results depending whether a level are
+ # the same or not is kind of insane, so be a bit stricter here and use the python3 idea
+ # of comparing only things of equal type.
+ if not self.ordered:
+ if op in ['__lt__', '__gt__','__le__','__ge__']:
+ raise TypeError("Unordered Categoricals can only compare equality or not")
+ if isinstance(other, Categorical):
+ # Two Categoricals can only be be compared if the levels are the same
+ if (len(self.levels) != len(other.levels)) or not ((self.levels == other.levels).all()):
+ raise TypeError("Categoricals can only be compared if 'levels' are the same")
+ if not (self.ordered == other.ordered):
+ raise TypeError("Categoricals can only be compared if 'ordered' is the same")
+ na_mask = (self._codes == -1) | (other._codes == -1)
+ f = getattr(self._codes, op)
+ ret = f(other._codes)
+ if na_mask.any():
+ # In other series, the leads to False, so do that here too
+ ret[na_mask] = False
+ return ret
+ elif np.isscalar(other):
if other in self.levels:
i = self.levels.get_loc(other)
return getattr(self._codes, op)(i)
else:
return np.repeat(False, len(self))
+ else:
+ msg = "Cannot compare a Categorical for op {op} with type {typ}. If you want to \n" \
+ "compare values, use 'np.asarray(cat) <op> other'."
+ raise TypeError(msg.format(op=op,typ=type(other)))
f.__name__ = op
@@ -109,9 +130,9 @@ class Categorical(PandasObject):
Attributes
----------
- levels : ndarray
+ levels : Index
The levels of this categorical
- codes : Index
+ codes : ndarray
The codes (integer positions, which point to the levels) of this categorical, read only
ordered : boolean
Whether or not this Categorical is ordered
@@ -171,6 +192,9 @@ class Categorical(PandasObject):
Categorical.max
"""
+ # For comparisons, so that numpy uses our implementation if the compare ops, which raise
+ __array_priority__ = 1000
+
def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False, compat=False):
if fastpath:
@@ -208,8 +232,23 @@ def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False,
# under certain versions of numpy as well
inferred = com._possibly_infer_to_datetimelike(values)
if not isinstance(inferred, np.ndarray):
+
+ # isnull doesn't work with generators/xrange, so convert all to lists
+ if com._is_sequence(values) or isinstance(values, types.GeneratorType):
+ values = list(values)
+ elif np.isscalar(values):
+ values = [values]
+
from pandas.core.series import _sanitize_array
- values = _sanitize_array(values, None)
+ # On list with NaNs, int values will be converted to float. Use "object" dtype
+ # to prevent this. In the end objects will be casted to int/... in the level
+ # assignment step.
+ # tuple are list_like but com.isnull(<tuple>) will return a single bool,
+ # which then raises an AttributeError: 'bool' object has no attribute 'any'
+ has_null = (com.is_list_like(values) and not isinstance(values, tuple)
+ and com.isnull(values).any())
+ dtype = 'object' if has_null else None
+ values = _sanitize_array(values, None, dtype=dtype)
if levels is None:
try:
@@ -277,7 +316,7 @@ def from_array(cls, data):
return Categorical(data)
@classmethod
- def from_codes(cls, codes, levels, ordered=True, name=None):
+ def from_codes(cls, codes, levels, ordered=False, name=None):
"""
Make a Categorical type from codes and levels arrays.
@@ -294,7 +333,7 @@ def from_codes(cls, codes, levels, ordered=True, name=None):
The levels for the categorical. Items need to be unique.
ordered : boolean, optional
Whether or not this categorical is treated as a ordered categorical. If not given,
- the resulting categorical will be ordered.
+ the resulting categorical will be unordered.
name : str, optional
Name for the Categorical variable.
"""
@@ -429,9 +468,13 @@ def __array__(self, dtype=None):
Returns
-------
values : numpy array
- A numpy array of the same dtype as categorical.levels.dtype
+ A numpy array of either the specified dtype or, if dtype==None (default), the same
+ dtype as categorical.levels.dtype
"""
- return com.take_1d(self.levels.values, self._codes)
+ ret = com.take_1d(self.levels.values, self._codes)
+ if dtype and dtype != self.levels.dtype:
+ return np.asarray(ret, dtype)
+ return ret
@property
def T(self):
@@ -503,10 +546,27 @@ def order(self, inplace=False, ascending=True, na_position='last', **kwargs):
if na_position not in ['last','first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
- codes = np.sort(self._codes.copy())
+ codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
+ # NaN handling
+ na_mask = (codes==-1)
+ if na_mask.any():
+ n_nans = len(codes[na_mask])
+ if na_position=="first" and not ascending:
+ # in this case sort to the front
+ new_codes = codes.copy()
+ new_codes[0:n_nans] = -1
+ new_codes[n_nans:] = codes[~na_mask]
+ codes = new_codes
+ elif na_position=="last" and not ascending:
+ # ... and to the end
+ new_codes = codes.copy()
+ pos = len(codes)-n_nans
+ new_codes[0:pos] = codes[~na_mask]
+ new_codes[pos:] = -1
+ codes = new_codes
if inplace:
self._codes = codes
return
@@ -542,6 +602,32 @@ def sort(self, inplace=True, ascending=True, na_position='last', **kwargs):
"""
return self.order(inplace=inplace, ascending=ascending, **kwargs)
+ def isnull(self):
+ """
+ Returns
+ -------
+ a boolean array of whether my values are null
+
+ """
+
+ ret = self._codes == -1
+
+ # String/object and float levels can hold np.nan
+ if self.levels.dtype.kind in ['S', 'O', 'f']:
+ if np.nan in self.levels:
+ nan_pos = np.where(com.isnull(self.levels))
+ ret = ret | self == nan_pos
+ return ret
+
+ def notnull(self):
+ """
+ Returns
+ -------
+ a boolean array of whether my values are not null
+
+ """
+ return ~self.isnull()
+
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
@@ -760,7 +846,8 @@ def __setitem__(self, key, value):
rvalue = value if com.is_list_like(value) else [value]
to_add = Index(rvalue)-self.levels
- if len(to_add):
+ # no assignments of values not in levels, but it's always ok to set something to np.nan
+ if len(to_add) and not com.isnull(to_add).all():
raise ValueError("cannot setitem on a Categorical with a new level,"
" set the levels first")
@@ -786,6 +873,13 @@ def __setitem__(self, key, value):
key = self._codes[key]
lindexer = self.levels.get_indexer(rvalue)
+
+ # float levels do currently return -1 for np.nan, even if np.nan is included in the index
+ # "repair" this here
+ if com.isnull(rvalue).any() and com.isnull(self.levels).any():
+ nan_pos = np.where(com.isnull(self.levels))
+ lindexer[lindexer == -1] = nan_pos
+
self._codes[key] = lindexer
#### reduction ops ####
@@ -916,16 +1010,67 @@ def describe(self):
'values' : self._codes }
).groupby('codes').count()
- counts.index = self.levels.take(counts.index)
- counts = counts.reindex(self.levels)
freqs = counts / float(counts.sum())
from pandas.tools.merge import concat
result = concat([counts,freqs],axis=1)
- result.index.name = 'levels'
result.columns = ['counts','freqs']
+
+ # fill in the real levels
+ check = result.index == -1
+ if check.any():
+ # Sort -1 (=NaN) to the last position
+ index = np.arange(0, len(self.levels)+1)
+ index[-1] = -1
+ result = result.reindex(index)
+ # build new index
+ levels = np.arange(0,len(self.levels)+1 ,dtype=object)
+ levels[:-1] = self.levels
+ levels[-1] = np.nan
+ result.index = levels.take(result.index)
+ else:
+ result.index = self.levels.take(result.index)
+ result = result.reindex(self.levels)
+ result.index.name = 'levels'
+
return result
+##### The Series.cat accessor #####
+
+class CategoricalProperties(PandasDelegate):
+ """
+ Accessor object for categorical properties of the Series values.
+
+ Examples
+ --------
+ >>> s.cat.levels
+ >>> s.cat.levels = list('abc')
+ >>> s.cat.reorder_levels('cab')
+
+ Allows accessing to specific getter and access methods
+ """
+
+ def __init__(self, values, index):
+ self.categorical = values
+ self.index = index
+
+ def _delegate_property_get(self, name):
+ return getattr(self.categorical, name)
+
+ def _delegate_property_set(self, name, new_values):
+ return setattr(self.categorical, name, new_values)
+
+ def _delegate_method(self, name, *args, **kwargs):
+ method = getattr(self.categorical, name)
+ return method(*args, **kwargs)
+
+CategoricalProperties._add_delegate_accessors(delegate=Categorical,
+ accessors=["levels", "codes", "ordered"],
+ typ='property')
+CategoricalProperties._add_delegate_accessors(delegate=Categorical,
+ accessors=["reorder_levels", "remove_unused_levels"],
+ typ='method')
+
##### utility routines #####
def _get_codes_for_values(values, levels):
diff --git a/pandas/core/common.py b/pandas/core/common.py
index bc4c95ed3323e..8ec47a94d3c73 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -276,15 +276,22 @@ def _isnull_ndarraylike(obj):
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
- # Working around NumPy ticket 1542
- shape = values.shape
-
- if dtype.kind in ('S', 'U'):
- result = np.zeros(values.shape, dtype=bool)
+ if is_categorical_dtype(values):
+ from pandas import Categorical
+ if not isinstance(values, Categorical):
+ values = values.values
+ result = values.isnull()
else:
- result = np.empty(shape, dtype=bool)
- vec = lib.isnullobj(values.ravel())
- result[...] = vec.reshape(shape)
+
+ # Working around NumPy ticket 1542
+ shape = values.shape
+
+ if dtype.kind in ('S', 'U'):
+ result = np.zeros(values.shape, dtype=bool)
+ else:
+ result = np.empty(shape, dtype=bool)
+ vec = lib.isnullobj(values.ravel())
+ result[...] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
@@ -299,7 +306,6 @@ def _isnull_ndarraylike(obj):
return result
-
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
@@ -2448,7 +2454,7 @@ def _get_callable_name(obj):
# instead of the empty string in this case to allow
# distinguishing between no name and a name of ''
return None
-
+
_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,
compat.text_type)))
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 8f749d07296a7..0539d803a42a4 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -177,7 +177,7 @@ def _get_footer(self):
# level infos are added to the end and in a new line, like it is done for Categoricals
# Only added when we request a name
if self.name and com.is_categorical_dtype(self.series.dtype):
- level_info = self.series.cat._repr_level_info()
+ level_info = self.series.values._repr_level_info()
if footer:
footer += "\n"
footer += level_info
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 9f29570af6f4f..de3b8d857617f 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -524,6 +524,10 @@ def _comp_method_SERIES(op, name, str_rep, masker=False):
code duplication.
"""
def na_op(x, y):
+ if com.is_categorical_dtype(x) != com.is_categorical_dtype(y):
+ msg = "Cannot compare a Categorical for op {op} with type {typ}. If you want to \n" \
+ "compare values, use 'series <op> np.asarray(cat)'."
+ raise TypeError(msg.format(op=op,typ=type(y)))
if x.dtype == np.object_:
if isinstance(y, list):
y = lib.list_to_object_array(y)
@@ -555,11 +559,16 @@ def wrapper(self, other):
index=self.index, name=name)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
- elif isinstance(other, (pa.Array, pd.Series, pd.Index)):
+ elif isinstance(other, (pa.Array, pd.Index)):
if len(self) != len(other):
raise ValueError('Lengths must match to compare')
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
+ elif isinstance(other, pd.Categorical):
+ if not com.is_categorical_dtype(self):
+ msg = "Cannot compare a Categorical for op {op} with Series of dtype {typ}.\n"\
+ "If you want to compare values, use 'series <op> np.asarray(other)'."
+ raise TypeError(msg.format(op=op,typ=self.dtype))
else:
mask = isnull(self)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5a490992c478c..ef6bdf99915b1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -900,7 +900,7 @@ def _repr_footer(self):
# Categorical
if com.is_categorical_dtype(self.dtype):
- level_info = self.cat._repr_level_info()
+ level_info = self.values._repr_level_info()
return u('%sLength: %d, dtype: %s\n%s') % (namestr,
len(self),
str(self.dtype.name),
@@ -2415,11 +2415,12 @@ def dt(self):
#------------------------------------------------------------------------------
# Categorical methods
- @property
+ @cache_readonly
def cat(self):
+ from pandas.core.categorical import CategoricalProperties
if not com.is_categorical_dtype(self.dtype):
raise TypeError("Can only use .cat accessor with a 'category' dtype")
- return self.values
+ return CategoricalProperties(self.values, self.index)
Series._setup_axes(['index'], info_axis=0, stat_axis=0,
aliases={'rows': 0})
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 421e05f5a3bc7..dbe7aad723ee7 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -111,6 +111,50 @@ def test_constructor(self):
cat = pd.Categorical([1,2,3,np.nan], levels=[1,2,3])
self.assertTrue(com.is_integer_dtype(cat.levels))
+ # https://github.com/pydata/pandas/issues/3678
+ cat = pd.Categorical([np.nan,1, 2, 3])
+ self.assertTrue(com.is_integer_dtype(cat.levels))
+
+ # this should result in floats
+ cat = pd.Categorical([np.nan, 1, 2., 3 ])
+ self.assertTrue(com.is_float_dtype(cat.levels))
+
+ cat = pd.Categorical([np.nan, 1., 2., 3. ])
+ self.assertTrue(com.is_float_dtype(cat.levels))
+
+ # corner cases
+ cat = pd.Categorical([1])
+ self.assertTrue(len(cat.levels) == 1)
+ self.assertTrue(cat.levels[0] == 1)
+ self.assertTrue(len(cat.codes) == 1)
+ self.assertTrue(cat.codes[0] == 0)
+
+ cat = pd.Categorical(["a"])
+ self.assertTrue(len(cat.levels) == 1)
+ self.assertTrue(cat.levels[0] == "a")
+ self.assertTrue(len(cat.codes) == 1)
+ self.assertTrue(cat.codes[0] == 0)
+
+ # Scalars should be converted to lists
+ cat = pd.Categorical(1)
+ self.assertTrue(len(cat.levels) == 1)
+ self.assertTrue(cat.levels[0] == 1)
+ self.assertTrue(len(cat.codes) == 1)
+ self.assertTrue(cat.codes[0] == 0)
+
+
+ def test_constructor_with_generator(self):
+ # This was raising an Error in isnull(single_val).any() because isnull returned a scalar
+ # for a generator
+
+ a = (a for x in [1,2])
+ cat = Categorical(a)
+
+ # This does actually a xrange, which is a sequence instead of a generator
+ from pandas.core.index import MultiIndex
+ MultiIndex.from_product([range(5), ['a', 'b', 'c']])
+
+
def test_from_codes(self):
# too few levels
@@ -134,7 +178,7 @@ def f():
self.assertRaises(ValueError, f)
- exp = Categorical(["a","b","c"])
+ exp = Categorical(["a","b","c"], ordered=False)
res = Categorical.from_codes([0,1,2], ["a","b","c"])
self.assertTrue(exp.equals(res))
@@ -179,6 +223,63 @@ def test_comparisons(self):
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
+ # comparisons with categoricals
+ cat_rev = pd.Categorical(["a","b","c"], levels=["c","b","a"])
+ cat_rev_base = pd.Categorical(["b","b","b"], levels=["c","b","a"])
+ cat = pd.Categorical(["a","b","c"])
+ cat_base = pd.Categorical(["b","b","b"], levels=cat.levels)
+
+ # comparisons need to take level ordering into account
+ res_rev = cat_rev > cat_rev_base
+ exp_rev = np.array([True, False, False])
+ self.assert_numpy_array_equal(res_rev, exp_rev)
+
+ res_rev = cat_rev < cat_rev_base
+ exp_rev = np.array([False, False, True])
+ self.assert_numpy_array_equal(res_rev, exp_rev)
+
+ res = cat > cat_base
+ exp = np.array([False, False, True])
+ self.assert_numpy_array_equal(res, exp)
+
+ # Only categories with same levels can be compared
+ def f():
+ cat > cat_rev
+ self.assertRaises(TypeError, f)
+
+ cat_rev_base2 = pd.Categorical(["b","b","b"], levels=["c","b","a","d"])
+ def f():
+ cat_rev > cat_rev_base2
+ self.assertRaises(TypeError, f)
+
+ # Only categories with same ordering information can be compared
+ cat_unorderd = cat.copy()
+ cat_unorderd.ordered = False
+ self.assertFalse((cat > cat).any())
+ def f():
+ cat > cat_unorderd
+ self.assertRaises(TypeError, f)
+
+ # comparison (in both directions) with Series will raise
+ s = Series(["b","b","b"])
+ self.assertRaises(TypeError, lambda: cat > s)
+ self.assertRaises(TypeError, lambda: cat_rev > s)
+ self.assertRaises(TypeError, lambda: s < cat)
+ self.assertRaises(TypeError, lambda: s < cat_rev)
+
+ # comparison with numpy.array will raise in both direction, but only on newer
+ # numpy versions
+ a = np.array(["b","b","b"])
+ self.assertRaises(TypeError, lambda: cat > a)
+ self.assertRaises(TypeError, lambda: cat_rev > a)
+
+ # The following work via '__array_priority__ = 1000'
+ # and py3_2 is not friendly
+ tm._skip_if_not_numpy17_friendly()
+ if not compat.PY3_2:
+ self.assertRaises(TypeError, lambda: a < cat)
+ self.assertRaises(TypeError, lambda: a < cat_rev)
+
def test_na_flags_int_levels(self):
# #1457
@@ -205,6 +306,16 @@ def test_describe(self):
).set_index('levels')
tm.assert_frame_equal(desc, expected)
+ # check unused levels
+ cat = self.factor.copy()
+ cat.levels = ["a","b","c","d"]
+ desc = cat.describe()
+ expected = DataFrame.from_dict(dict(counts=[3, 2, 3, np.nan],
+ freqs=[3/8., 2/8., 3/8., np.nan],
+ levels=['a', 'b', 'c', 'd'])
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
# check an integer one
desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
expected = DataFrame.from_dict(dict(counts=[5, 3, 3],
@@ -214,6 +325,47 @@ def test_describe(self):
).set_index('levels')
tm.assert_frame_equal(desc, expected)
+ # https://github.com/pydata/pandas/issues/3678
+ # describe should work with NaN
+ cat = pd.Categorical([np.nan,1, 2, 2])
+ desc = cat.describe()
+ expected = DataFrame.from_dict(dict(counts=[1, 2, 1],
+ freqs=[1/4., 2/4., 1/4.],
+ levels=[1,2,np.nan]
+ )
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
+ # having NaN as level and as "not available" should also print two NaNs in describe!
+ cat = pd.Categorical([np.nan,1, 2, 2])
+ cat.levels = [1,2,np.nan]
+ desc = cat.describe()
+ expected = DataFrame.from_dict(dict(counts=[1, 2, np.nan, 1],
+ freqs=[1/4., 2/4., np.nan, 1/4.],
+ levels=[1,2,np.nan,np.nan]
+ )
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
+ # empty levels show up as NA
+ cat = Categorical(["a","b","b","b"], levels=['a','b','c'], ordered=True)
+ result = cat.describe()
+
+ expected = DataFrame([[1,0.25],[3,0.75],[np.nan,np.nan]],
+ columns=['counts','freqs'],
+ index=Index(['a','b','c'],name='levels'))
+ tm.assert_frame_equal(result,expected)
+
+ # NA as a level
+ cat = pd.Categorical(["a","c","c",np.nan], levels=["b","a","c",np.nan] )
+ result = cat.describe()
+
+ expected = DataFrame([[np.nan, np.nan],[1,0.25],[2,0.5], [1,0.25]],
+ columns=['counts','freqs'],
+ index=Index(['b','a','c',np.nan],name='levels'))
+ tm.assert_frame_equal(result,expected)
+
+
def test_print(self):
expected = [" a", " b", " b", " a", " a", " c", " c", " c",
"Levels (3, object): [a < b < c]"]
@@ -496,6 +648,44 @@ def test_slicing_directly(self):
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.levels, expected.levels)
+ def test_set_item_nan(self):
+ cat = pd.Categorical([1,2,3])
+ exp = pd.Categorical([1,np.nan,3], levels=[1,2,3])
+ cat[1] = np.nan
+ self.assertTrue(cat.equals(exp))
+
+ # if nan in levels, the proper code should be set!
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1] = np.nan
+ exp = np.array([0,3,2,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1:3] = np.nan
+ exp = np.array([0,3,3,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1:3] = [np.nan, 1]
+ exp = np.array([0,3,0,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[1:3] = [np.nan, np.nan]
+ exp = np.array([0,3,3,-1])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+ cat = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
+ cat.levels = [1,2,3, np.nan]
+ cat[pd.isnull(cat)] = np.nan
+ exp = np.array([0,1,2,3])
+ self.assert_numpy_array_equal(cat.codes, exp)
+
+
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
@@ -616,7 +806,7 @@ def test_sideeffects_free(self):
# so this WILL change values
cat = Categorical(["a","b","c","a"])
s = pd.Series(cat)
- self.assertTrue(s.cat is cat)
+ self.assertTrue(s.values is cat)
s.cat.levels = [1,2,3]
exp_s = np.array([1,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
@@ -632,20 +822,20 @@ def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a","b",np.nan,"a"]))
self.assert_numpy_array_equal(s.cat.levels, np.array(["a","b"]))
- self.assert_numpy_array_equal(s.cat._codes, np.array([0,1,-1,0]))
+ self.assert_numpy_array_equal(s.cat.codes, np.array([0,1,-1,0]))
# If levels have nan included, the label should point to that instead
s2 = Series(Categorical(["a","b",np.nan,"a"], levels=["a","b",np.nan]))
self.assert_numpy_array_equal(s2.cat.levels,
np.array(["a","b",np.nan], dtype=np.object_))
- self.assert_numpy_array_equal(s2.cat._codes, np.array([0,1,2,0]))
+ self.assert_numpy_array_equal(s2.cat.codes, np.array([0,1,2,0]))
# Changing levels should also make the replaced level np.nan
s3 = Series(Categorical(["a","b","c","a"]))
s3.cat.levels = ["a","b",np.nan]
self.assert_numpy_array_equal(s3.cat.levels,
np.array(["a","b",np.nan], dtype=np.object_))
- self.assert_numpy_array_equal(s3.cat._codes, np.array([0,1,2,0]))
+ self.assert_numpy_array_equal(s3.cat.codes, np.array([0,1,2,0]))
def test_sequence_like(self):
@@ -655,8 +845,8 @@ def test_sequence_like(self):
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
- result = list(df.grade.cat)
- expected = np.array(df.grade.cat).tolist()
+ result = list(df.grade.values)
+ expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result,expected)
# iteration
@@ -698,7 +888,7 @@ def test_series_delegations(self):
exp_values = np.array(["a","b","c","a"])
s.cat.reorder_levels(["c","b","a"])
self.assert_numpy_array_equal(s.cat.levels, exp_levels)
- self.assert_numpy_array_equal(s.cat.__array__(), exp_values)
+ self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused levels
@@ -707,7 +897,7 @@ def test_series_delegations(self):
exp_values = np.array(["a","b","b","a"])
s.cat.remove_unused_levels()
self.assert_numpy_array_equal(s.cat.levels, exp_levels)
- self.assert_numpy_array_equal(s.cat.__array__(), exp_values)
+ self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error on wrong inputs:
@@ -716,6 +906,16 @@ def f():
self.assertRaises(Exception, f)
# right: s.cat.reorder_levels([4,3,2,1])
+ # test the tab completion display
+ ok_for_cat = ['levels','codes','ordered','reorder_levels','remove_unused_levels']
+ def get_dir(s):
+ results = [ r for r in s.cat.__dir__() if not r.startswith('_') ]
+ return list(sorted(set(results)))
+
+ s = Series(list('aabbcde')).astype('category')
+ results = get_dir(s)
+ tm.assert_almost_equal(results,list(sorted(set(ok_for_cat))))
+
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
@@ -766,31 +966,16 @@ def test_describe(self):
result = self.cat.describe()
self.assertEquals(len(result.columns),1)
- # empty levels show up as NA
- s = Series(Categorical(["a","b","b","b"], levels=['a','b','c'], ordered=True))
- result = s.cat.describe()
- expected = DataFrame([[1,0.25],[3,0.75],[np.nan,np.nan]],
- columns=['counts','freqs'],
- index=Index(['a','b','c'],name='levels'))
- tm.assert_frame_equal(result,expected)
+ # In a frame, describe() for the cat should be the same as for string arrays (count, unique,
+ # top, freq)
+ cat = Categorical(["a","b","b","b"], levels=['a','b','c'], ordered=True)
+ s = Series(cat)
result = s.describe()
expected = Series([4,2,"b",3],index=['count','unique','top', 'freq'])
tm.assert_series_equal(result,expected)
- # NA as a level
- cat = pd.Categorical(["a","c","c",np.nan], levels=["b","a","c",np.nan] )
- result = cat.describe()
-
- expected = DataFrame([[np.nan, np.nan],[1,0.25],[2,0.5], [1,0.25]],
- columns=['counts','freqs'],
- index=Index(['b','a','c',np.nan],name='levels'))
- tm.assert_frame_equal(result,expected)
-
-
- # In a frame, describe() for the cat should be the same as for string arrays (count, unique,
- # top, freq)
cat = pd.Series(pd.Categorical(["a","b","c","c"]))
df3 = pd.DataFrame({"cat":cat, "s":["a","b","c","c"]})
res = df3.describe()
@@ -970,7 +1155,7 @@ def test_sort(self):
# Cats must be sorted in a dataframe
res = df.sort(columns=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
- self.assert_numpy_array_equal(res["sort"].cat.__array__(), exp)
+ self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort(columns=["sort"], ascending=False)
@@ -1013,17 +1198,29 @@ def f():
res = cat.order(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a", np.nan],dtype=object)
exp_levels = np.array(["a","b","c","d"],dtype=object)
- # FIXME: IndexError: Out of bounds on buffer access (axis 0)
- #self.assert_numpy_array_equal(res.__array__(), exp_val)
- #self.assert_numpy_array_equal(res.levels, exp_levels)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
+
+ cat = Categorical(["a","c","b","d", np.nan], ordered=True)
+ res = cat.order(ascending=False, na_position='first')
+ exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
+ exp_levels = np.array(["a","b","c","d"],dtype=object)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.order(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_levels = np.array(["a","b","c","d"],dtype=object)
- # FIXME: IndexError: Out of bounds on buffer access (axis 0)
- #self.assert_numpy_array_equal(res.__array__(), exp_val)
- #self.assert_numpy_array_equal(res.levels, exp_levels)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
+
+ cat = Categorical(["a","c","b","d", np.nan], ordered=True)
+ res = cat.order(ascending=False, na_position='last')
+ exp_val = np.array(["d","c","b","a",np.nan],dtype=object)
+ exp_levels = np.array(["a","b","c","d"],dtype=object)
+ self.assert_numpy_array_equal(res.__array__(), exp_val)
+ self.assert_numpy_array_equal(res.levels, exp_levels)
def test_slicing(self):
cat = Series(Categorical([1,2,3,4]))
@@ -1473,6 +1670,63 @@ def f():
df.loc[2:3,"b"] = pd.Categorical(["b","b"], levels=["a","b"])
tm.assert_frame_equal(df, exp)
+ # ensure that one can set something to np.nan
+ s = Series(Categorical([1,2,3]))
+ exp = Series(Categorical([1,np.nan,3]))
+ s[1] = np.nan
+ tm.assert_series_equal(s, exp)
+
+ def test_comparisons(self):
+ tests_data = [(list("abc"), list("cba"), list("bbb")),
+ ([1,2,3], [3,2,1], [2,2,2])]
+ for data , reverse, base in tests_data:
+ cat_rev = pd.Series(pd.Categorical(data, levels=reverse))
+ cat_rev_base = pd.Series(pd.Categorical(base, levels=reverse))
+ cat = pd.Series(pd.Categorical(data))
+ cat_base = pd.Series(pd.Categorical(base, levels=cat.cat.levels))
+ s = Series(base)
+ a = np.array(base)
+
+ # comparisons need to take level ordering into account
+ res_rev = cat_rev > cat_rev_base
+ exp_rev = Series([True, False, False])
+ tm.assert_series_equal(res_rev, exp_rev)
+
+ res_rev = cat_rev < cat_rev_base
+ exp_rev = Series([False, False, True])
+ tm.assert_series_equal(res_rev, exp_rev)
+
+ res = cat > cat_base
+ exp = Series([False, False, True])
+ tm.assert_series_equal(res, exp)
+
+ # Only categories with same levels can be compared
+ def f():
+ cat > cat_rev
+ self.assertRaises(TypeError, f)
+
+ # categorical cannot be compared to Series or numpy array, and also not the other way
+ # around
+ self.assertRaises(TypeError, lambda: cat > s)
+ self.assertRaises(TypeError, lambda: cat_rev > s)
+ self.assertRaises(TypeError, lambda: cat > a)
+ self.assertRaises(TypeError, lambda: cat_rev > a)
+
+ self.assertRaises(TypeError, lambda: s < cat)
+ self.assertRaises(TypeError, lambda: s < cat_rev)
+
+ self.assertRaises(TypeError, lambda: a < cat)
+ self.assertRaises(TypeError, lambda: a < cat_rev)
+
+ # Categoricals can be compared to scalar values
+ res = cat_rev > base[0]
+ tm.assert_series_equal(res, exp)
+
+ # And test NaN handling...
+ cat = pd.Series(pd.Categorical(["a","b","c", np.nan]))
+ exp = Series([True, True, True, False])
+ res = (cat == cat)
+ tm.assert_series_equal(res, exp)
def test_concat(self):
cat = pd.Categorical(["a","b"], levels=["a","b"])
| replaces #7768
closes #3678
Categorical([1, np.nan]) would end up with a single `1.` float level.
This commit ensures that if `values` is a list of ints and contains np.nan,
the float conversation does not take place.
Categorical: fix describe with np.nan
Categorical: ensure that one can assign np.nan
Categorical: fix assigning NaN if NaN in levels
API: change default Categorical.from_codes() to ordered=False
In the normal constructor `ordered=True` is only assumed if the levels
are given or the values are sortable (which is most of the cases), but
in `from_codes(...)` we can't asssume this so the default should be
`False`.
Categorical: add some links to Categorical in the other docs
Categorical: use s.values when calling private methods
s.values is the underlying Categorical object, s.cat will be changed
to only expose the API methods/properties.
Categorical: Change series.cat to only expose the API
Categorical: Fix order and na_position
Categorical: Fix comparison of Categoricals and Series|Categorical|np.array
Categorical can only be comapred to another Categorical with the same levels
and the same ordering or to a scalar value.
If the Categorical has no order defined (cat.ordered == False), only equal
(and not equal) are defined.
DOC: use okexcept in docs rather than try: except:
CLN: revised isnull treatement
TST: test for tab completion
| https://api.github.com/repos/pandas-dev/pandas/pulls/8006 | 2014-08-12T16:18:20Z | 2014-08-12T18:14:25Z | null | 2014-08-12T18:15:28Z |
API: consistency in .loc indexing when no values are found in a list-like indexer GH7999) | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 25233d970b3a6..e5c58bf817a36 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -282,7 +282,7 @@ Selection By Label
See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`
pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol.
-**ALL** of the labels for which you ask, must be in the index or a ``KeyError`` will be raised! When slicing, the start bound is *included*, **AND** the stop bound is *included*. Integers are valid labels, but they refer to the label **and not the position**.
+**at least 1** of the labels for which you ask, must be in the index or a ``KeyError`` will be raised! When slicing, the start bound is *included*, **AND** the stop bound is *included*. Integers are valid labels, but they refer to the label **and not the position**.
The ``.loc`` attribute is the primary access method. The following are valid inputs:
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 322bcba9664d9..0cbcff9670379 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -172,6 +172,50 @@ API changes
as the ``left`` argument. (:issue:`7737`)
- Histogram from ``DataFrame.plot`` with ``kind='hist'`` (:issue:`7809`), See :ref:`the docs<visualization.hist>`.
+- Consistency when indexing with ``.loc`` and a list-like indexer when no values are found.
+
+ .. ipython:: python
+
+ df = DataFrame([['a'],['b']],index=[1,2])
+ df
+
+ In prior versions there was a difference in these two constructs:
+
+ - ``df.loc[[3]]`` would (prior to 0.15.0) return a frame reindexed by 3 (with all ``np.nan`` values)
+ - ``df.loc[[3],:]`` would raise ``KeyError``.
+
+ Both will now raise a ``KeyError``. The rule is that *at least 1* indexer must be found when using a list-like and ``.loc`` (:issue:`7999`)
+
+ There was also a difference between ``df.loc[[1,3]]`` (returns a frame reindexed by ``[1, 3]``) and ``df.loc[[1, 3],:]`` (would raise ``KeyError`` prior to 0.15.0). Both will now return a reindexed frame.
+
+ .. ipython:: python
+
+ df.loc[[1,3]]
+ df.loc[[1,3],:]
+
+ This can also be seen in multi-axis indexing with a ``Panel``.
+
+ .. ipython:: python
+
+ p = Panel(np.arange(2*3*4).reshape(2,3,4),
+ items=['ItemA','ItemB'],major_axis=[1,2,3],minor_axis=['A','B','C','D'])
+ p
+
+ The following would raise ``KeyError`` prior to 0.15.0:
+
+ .. ipython:: python
+
+ p.loc[['ItemA','ItemD'],:,'D']
+
+ Furthermore, ``.loc`` will raise If no values are found in a multi-index with a list-like indexer:
+
+ .. ipython:: python
+ :okexcept:
+
+ s = Series(np.arange(3,dtype='int64'),index=MultiIndex.from_product([['A'],['foo','bar','baz']],
+ names=['one','two'])).sortlevel()
+ s
+ s.loc[['D']]
.. _whatsnew_0150.dt:
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 6ee03eab4bab8..dfc552e8df0d7 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -132,6 +132,16 @@ def _has_valid_tuple(self, key):
raise ValueError("Location based indexing can only have [%s] "
"types" % self._valid_types)
+ def _should_validate_iterable(self, axis=0):
+ """ return a boolean whether this axes needs validation for a passed iterable """
+ ax = self.obj._get_axis(axis)
+ if isinstance(ax, MultiIndex):
+ return False
+ elif ax.is_floating():
+ return False
+
+ return True
+
def _is_nested_tuple_indexer(self, tup):
if any([ isinstance(ax, MultiIndex) for ax in self.obj.axes ]):
return any([ _is_nested_tuple(tup,ax) for ax in self.obj.axes ])
@@ -762,7 +772,7 @@ def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
- return self._getitem_axis(tup, axis=axis, validate_iterable=True)
+ return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
@@ -825,7 +835,7 @@ def _getitem_nested_tuple(self, tup):
return result
# this is a series with a multi-index specified a tuple of selectors
- return self._getitem_axis(tup, axis=0, validate_iterable=True)
+ return self._getitem_axis(tup, axis=0)
# handle the multi-axis by taking sections and reducing
# this is iterative
@@ -838,7 +848,7 @@ def _getitem_nested_tuple(self, tup):
continue
current_ndim = obj.ndim
- obj = getattr(obj, self.name)._getitem_axis(key, axis=axis, validate_iterable=True)
+ obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
@@ -859,9 +869,11 @@ def _getitem_nested_tuple(self, tup):
return obj
- def _getitem_axis(self, key, axis=0, validate_iterable=False):
+ def _getitem_axis(self, key, axis=0):
+
+ if self._should_validate_iterable(axis):
+ self._has_valid_type(key, axis)
- self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
@@ -888,17 +900,29 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False):
return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
+ if self._should_validate_iterable(axis):
+ self._has_valid_type(key, axis)
+
labels = self.obj._get_axis(axis)
def _reindex(keys, level=None):
+
try:
- return self.obj.reindex_axis(keys, axis=axis, level=level)
+ result = self.obj.reindex_axis(keys, axis=axis, level=level)
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
return self.obj.reindex(keys, level=level)
+ # this is an error as we are trying to find
+ # keys in a multi-index that don't exist
+ if isinstance(labels, MultiIndex) and level is not None:
+ if hasattr(result,'ndim') and not np.prod(result.shape) and len(keys):
+ raise KeyError("cannot index a multi-index axis with these keys")
+
+ return result
+
if com._is_bool_indexer(key):
key = _check_bool_indexer(labels, key)
inds, = key.nonzero()
@@ -1149,7 +1173,7 @@ def __getitem__(self, key):
else:
return self._getitem_axis(key, axis=0)
- def _getitem_axis(self, key, axis=0, validate_iterable=False):
+ def _getitem_axis(self, key, axis=0):
raise NotImplementedError()
def _getbool_axis(self, key, axis=0):
@@ -1223,11 +1247,11 @@ def _has_valid_type(self, key, axis):
if isinstance(key, tuple) and isinstance(ax, MultiIndex):
return True
- # require all elements in the index
+ # require at least 1 element in the index
idx = _ensure_index(key)
- if not idx.isin(ax).all():
+ if len(idx) and not idx.isin(ax).any():
- raise KeyError("[%s] are not in ALL in the [%s]" %
+ raise KeyError("None of [%s] are in the [%s]" %
(key, self.obj._get_axis_name(axis)))
return True
@@ -1256,7 +1280,7 @@ def error():
return True
- def _getitem_axis(self, key, axis=0, validate_iterable=False):
+ def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
@@ -1280,9 +1304,6 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
- if validate_iterable:
- self._has_valid_type(key, axis)
-
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
@@ -1389,7 +1410,7 @@ def _get_slice_axis(self, slice_obj, axis=0):
else:
return self.obj.take(slice_obj, axis=axis, convert=False)
- def _getitem_axis(self, key, axis=0, validate_iterable=False):
+ def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
self._has_valid_type(key, axis)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 3552c75900745..e7bb716de60f3 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -743,11 +743,14 @@ def test_loc_getitem_label_list(self):
self.check_result('list lbl', 'loc', [Timestamp('20130102'),Timestamp('20130103')], 'ix',
[Timestamp('20130102'),Timestamp('20130103')], typs = ['ts'], axes=0)
- # fails
self.check_result('list lbl', 'loc', [0,1,2], 'indexer', [0,1,2], typs = ['empty'], fails = KeyError)
self.check_result('list lbl', 'loc', [0,2,3], 'ix', [0,2,3], typs = ['ints'], axes=0, fails = KeyError)
- self.check_result('list lbl', 'loc', [3,6,7], 'ix', [3,6,9], typs = ['ints'], axes=1, fails = KeyError)
- self.check_result('list lbl', 'loc', [4,8,10], 'ix', [4,8,12], typs = ['ints'], axes=2, fails = KeyError)
+ self.check_result('list lbl', 'loc', [3,6,7], 'ix', [3,6,7], typs = ['ints'], axes=1, fails = KeyError)
+ self.check_result('list lbl', 'loc', [4,8,10], 'ix', [4,8,10], typs = ['ints'], axes=2, fails = KeyError)
+
+ # fails
+ self.check_result('list lbl', 'loc', [20,30,40], 'ix', [20,30,40], typs = ['ints'], axes=1, fails = KeyError)
+ self.check_result('list lbl', 'loc', [20,30,40], 'ix', [20,30,40], typs = ['ints'], axes=2, fails = KeyError)
# array like
self.check_result('array like', 'loc', Series(index=[0,2,4]).index, 'ix', [0,2,4], typs = ['ints'], axes=0)
@@ -815,14 +818,9 @@ def test_loc_to_fail(self):
s.loc['a'] = 2
self.assertRaises(KeyError, lambda : s.loc[-1])
+ self.assertRaises(KeyError, lambda : s.loc[[-1, -2]])
- result = s.loc[[-1, -2]]
- expected = Series(np.nan,index=[-1,-2])
- assert_series_equal(result, expected)
-
- result = s.loc[['4']]
- expected = Series(np.nan,index=['4'])
- assert_series_equal(result, expected)
+ self.assertRaises(KeyError, lambda : s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1,-2]]
@@ -830,15 +828,25 @@ def test_loc_to_fail(self):
assert_series_equal(result, expected)
s['a'] = 2
- result = s.loc[[-2]]
- expected = Series([np.nan],index=[-2])
- assert_series_equal(result, expected)
+ self.assertRaises(KeyError, lambda : s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
+ # inconsistency between .loc[values] and .loc[values,:]
+ # GH 7999
+ df = DataFrame([['a'],['b']],index=[1,2],columns=['value'])
+
+ def f():
+ df.loc[[3],:]
+ self.assertRaises(KeyError, f)
+
+ def f():
+ df.loc[[3]]
+ self.assertRaises(KeyError, f)
+
def test_loc_getitem_label_slice(self):
# label slices (with ints)
@@ -1575,11 +1583,13 @@ def f():
self.assertRaises(ValueError, f)
# ambiguous cases
- # these can be multiply interpreted
- # but we can catch this in some cases
- def f():
- df.loc[(slice(None),[1])]
- self.assertRaises(KeyError, f)
+ # these can be multiply interpreted (e.g. in this case
+ # as df.loc[slice(None),[1]] as well
+ self.assertRaises(KeyError, lambda : df.loc[slice(None),[1]])
+
+ result = df.loc[(slice(None),[1]),:]
+ expected = df.iloc[[0,3]]
+ assert_frame_equal(result, expected)
# not lexsorted
self.assertEqual(df.index.lexsort_depth,2)
@@ -1960,9 +1970,12 @@ def f():
result = s.loc[['A','D']]
assert_series_equal(result,expected)
- # empty series
- result = s.loc[['D']]
- expected = s.loc[[]]
+ # not any values found
+ self.assertRaises(KeyError, lambda : s.loc[['D']])
+
+ # empty ok
+ result = s.loc[[]]
+ expected = s.iloc[[]]
assert_series_equal(result,expected)
idx = pd.IndexSlice
@@ -2788,9 +2801,8 @@ def test_series_partial_set(self):
result = ser.loc[[3, 2, 3]]
assert_series_equal(result, expected)
- expected = Series([np.nan, np.nan, np.nan], index=[3, 3, 3])
- result = ser.loc[[3, 3, 3]]
- assert_series_equal(result, expected)
+ # raises as nothing in in the index
+ self.assertRaises(KeyError, lambda : ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
| closes #7999
| https://api.github.com/repos/pandas-dev/pandas/pulls/8003 | 2014-08-12T15:22:40Z | 2014-08-14T17:13:13Z | 2014-08-14T17:13:12Z | 2014-08-14T17:13:13Z |
ENH/CLN: add BoxPlot class inheriting MPLPlot | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 4a39dd73da7d0..95bf2918f8992 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -216,6 +216,7 @@ API changes
as the ``left`` argument. (:issue:`7737`)
- Histogram from ``DataFrame.plot`` with ``kind='hist'`` (:issue:`7809`), See :ref:`the docs<visualization.hist>`.
+- Boxplot from ``DataFrame.plot`` with ``kind='box'`` (:issue:`7998`), See :ref:`the docs<visualization.box>`.
- Consistency when indexing with ``.loc`` and a list-like indexer when no values are found.
.. ipython:: python
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index d517e08a34b2d..1cce55cd53e11 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -124,6 +124,7 @@ These include:
* :ref:`'bar' <visualization.barplot>` or :ref:`'barh' <visualization.barplot>` for bar plots
* :ref:`'hist' <visualization.hist>` for histogram
+* :ref:`'box' <visualization.box>` for boxplot
* :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots
* :ref:`'area' <visualization.area_plot>` for area plots
* :ref:`'scatter' <visualization.scatter>` for scatter plots
@@ -244,7 +245,7 @@ See the :meth:`hist <matplotlib.axes.Axes.hist>` method and the
`matplotlib hist documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more.
-The previous interface ``DataFrame.hist`` to plot histogram still can be used.
+The existing interface ``DataFrame.hist`` to plot histogram still can be used.
.. ipython:: python
@@ -288,12 +289,65 @@ The ``by`` keyword can be specified to plot grouped histograms:
Box Plots
~~~~~~~~~
-DataFrame has a :meth:`~DataFrame.boxplot` method that allows you to visualize the
-distribution of values within each column.
+Boxplot can be drawn calling a ``Series`` and ``DataFrame.plot`` with ``kind='box'``,
+or ``DataFrame.boxplot`` to visualize the distribution of values within each column.
+
+.. versionadded:: 0.15.0
+
+``plot`` method now supports ``kind='box'`` to draw boxplot.
For instance, here is a boxplot representing five trials of 10 observations of
a uniform random variable on [0,1).
+.. ipython:: python
+ :suppress:
+
+ np.random.seed(123456)
+
+.. ipython:: python
+
+ df = DataFrame(rand(10, 5), columns=['A', 'B', 'C', 'D', 'E'])
+
+ @savefig box_plot_new.png
+ df.plot(kind='box')
+
+Boxplot can be colorized by passing ``color`` keyword. You can pass a ``dict``
+whose keys are ``boxes``, ``whiskers``, ``medians`` and ``caps``.
+If some keys are missing in the ``dict``, default colors are used
+for the corresponding artists. Also, boxplot has ``sym`` keyword to specify fliers style.
+
+When you pass other type of arguments via ``color`` keyword, it will be directly
+passed to matplotlib for all the ``boxes``, ``whiskers``, ``medians`` and ``caps``
+colorization.
+
+The colors are applied to every boxes to be drawn. If you want
+more complicated colorization, you can get each drawn artists by passing
+:ref:`return_type <visualization.box.return>`.
+
+.. ipython:: python
+
+ color = dict(boxes='DarkGreen', whiskers='DarkOrange',
+ medians='DarkBlue', caps='Gray')
+
+ @savefig box_new_colorize.png
+ df.plot(kind='box', color=color, sym='r+')
+
+Also, you can pass other keywords supported by matplotlib ``boxplot``.
+For example, horizontal and custom-positioned boxplot can be drawn by
+``vert=False`` and ``positions`` keywords.
+
+.. ipython:: python
+
+ @savefig box_new_kwargs.png
+ df.plot(kind='box', vert=False, positions=[1, 4, 5, 6, 8])
+
+
+See the :meth:`boxplot <matplotlib.axes.Axes.boxplot>` method and the
+`matplotlib boxplot documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot>`__ for more.
+
+
+The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
+
.. ipython:: python
:suppress:
@@ -354,18 +408,23 @@ columns:
.. _visualization.box.return:
-The return type of ``boxplot`` depends on two keyword arguments: ``by`` and ``return_type``.
-When ``by`` is ``None``:
+Basically, plot functions return :class:`matplotlib Axes <matplotlib.axes.Axes>` as a return value.
+In ``boxplot``, the return type can be changed by argument ``return_type``, and whether the subplots is enabled (``subplots=True`` in ``plot`` or ``by`` is specified in ``boxplot``).
+
+When ``subplots=False`` / ``by`` is ``None``:
* if ``return_type`` is ``'dict'``, a dictionary containing the :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned. The keys are "boxes", "caps", "fliers", "medians", and "whiskers".
- This is the default.
+ This is the default of ``boxplot`` in historical reason.
+ Note that ``plot(kind='box')`` returns ``Axes`` as default as the same as other plots.
* if ``return_type`` is ``'axes'``, a :class:`matplotlib Axes <matplotlib.axes.Axes>` containing the boxplot is returned.
* if ``return_type`` is ``'both'`` a namedtuple containging the :class:`matplotlib Axes <matplotlib.axes.Axes>`
and :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned
-When ``by`` is some column of the DataFrame, a dict of ``return_type`` is returned, where
-the keys are the columns of the DataFrame. The plot has a facet for each column of
-the DataFrame, with a separate box for each value of ``by``.
+When ``subplots=True`` / ``by`` is some column of the DataFrame:
+
+* A dict of ``return_type`` is returned, where the keys are the columns
+ of the DataFrame. The plot has a facet for each column of
+ the DataFrame, with a separate box for each value of ``by``.
Finally, when calling boxplot on a :class:`Groupby` object, a dict of ``return_type``
is returned, where the keys are the same as the Groupby object. The plot has a
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 6435f8e741f96..2c99b9befd42b 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -365,7 +365,8 @@ def _check_has_errorbars(self, axes, xerr=0, yerr=0):
self.assertEqual(xerr, xerr_count)
self.assertEqual(yerr, yerr_count)
- def _check_box_return_type(self, returned, return_type, expected_keys=None):
+ def _check_box_return_type(self, returned, return_type, expected_keys=None,
+ check_ax_title=True):
"""
Check box returned type is correct
@@ -377,6 +378,10 @@ def _check_box_return_type(self, returned, return_type, expected_keys=None):
expected_keys : list-like, optional
group labels in subplot case. If not passed,
the function checks assuming boxplot uses single ax
+ check_ax_title : bool
+ Whether to check the ax.title is the same as expected_key
+ Intended to be checked by calling from ``boxplot``.
+ Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
"""
from matplotlib.axes import Axes
types = {'dict': dict, 'axes': Axes, 'both': tuple}
@@ -402,14 +407,17 @@ def _check_box_return_type(self, returned, return_type, expected_keys=None):
self.assertTrue(isinstance(value, types[return_type]))
# check returned dict has correct mapping
if return_type == 'axes':
- self.assertEqual(value.get_title(), key)
+ if check_ax_title:
+ self.assertEqual(value.get_title(), key)
elif return_type == 'both':
- self.assertEqual(value.ax.get_title(), key)
+ if check_ax_title:
+ self.assertEqual(value.ax.get_title(), key)
self.assertIsInstance(value.ax, Axes)
self.assertIsInstance(value.lines, dict)
elif return_type == 'dict':
line = value['medians'][0]
- self.assertEqual(line.get_axes().get_title(), key)
+ if check_ax_title:
+ self.assertEqual(line.get_axes().get_title(), key)
else:
raise AssertionError
@@ -452,7 +460,7 @@ def test_plot(self):
_check_plot_works(self.ts.plot, kind='area', stacked=False)
_check_plot_works(self.iseries.plot)
- for kind in ['line', 'bar', 'barh', 'kde', 'hist']:
+ for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
@@ -767,6 +775,15 @@ def test_hist_kde_color(self):
self.assertEqual(len(lines), 1)
self._check_colors(lines, ['r'])
+ @slow
+ def test_boxplot_series(self):
+ ax = self.ts.plot(kind='box', logy=True)
+ self._check_ax_scales(ax, yaxis='log')
+ xlabels = ax.get_xticklabels()
+ self._check_text_labels(xlabels, [self.ts.name])
+ ylabels = ax.get_yticklabels()
+ self._check_text_labels(ylabels, [''] * len(ylabels))
+
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
@@ -1650,6 +1667,99 @@ def test_bar_log_subplots(self):
@slow
def test_boxplot(self):
+ df = self.hist_df
+ series = df['height']
+ numeric_cols = df._get_numeric_data().columns
+ labels = [com.pprint_thing(c) for c in numeric_cols]
+
+ ax = _check_plot_works(df.plot, kind='box')
+ self._check_text_labels(ax.get_xticklabels(), labels)
+ assert_array_equal(ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1))
+ self.assertEqual(len(ax.lines), 8 * len(numeric_cols))
+
+ axes = _check_plot_works(df.plot, kind='box', subplots=True, logy=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ self._check_ax_scales(axes, yaxis='log')
+ for ax, label in zip(axes, labels):
+ self._check_text_labels(ax.get_xticklabels(), [label])
+ self.assertEqual(len(ax.lines), 8)
+
+ axes = series.plot(kind='box', rot=40)
+ self._check_ticks_props(axes, xrot=40, yrot=0)
+ tm.close()
+
+ ax = _check_plot_works(series.plot, kind='box')
+
+ positions = np.array([1, 6, 7])
+ ax = df.plot(kind='box', positions=positions)
+ numeric_cols = df._get_numeric_data().columns
+ labels = [com.pprint_thing(c) for c in numeric_cols]
+ self._check_text_labels(ax.get_xticklabels(), labels)
+ assert_array_equal(ax.xaxis.get_ticklocs(), positions)
+ self.assertEqual(len(ax.lines), 8 * len(numeric_cols))
+
+ @slow
+ def test_boxplot_vertical(self):
+ df = self.hist_df
+ series = df['height']
+ numeric_cols = df._get_numeric_data().columns
+ labels = [com.pprint_thing(c) for c in numeric_cols]
+
+ # if horizontal, yticklabels are rotated
+ ax = df.plot(kind='box', rot=50, fontsize=8, vert=False)
+ self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
+ self._check_text_labels(ax.get_yticklabels(), labels)
+ self.assertEqual(len(ax.lines), 8 * len(numeric_cols))
+
+ axes = _check_plot_works(df.plot, kind='box', subplots=True,
+ vert=False, logx=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
+ self._check_ax_scales(axes, xaxis='log')
+ for ax, label in zip(axes, labels):
+ self._check_text_labels(ax.get_yticklabels(), [label])
+ self.assertEqual(len(ax.lines), 8)
+
+ positions = np.array([3, 2, 8])
+ ax = df.plot(kind='box', positions=positions, vert=False)
+ self._check_text_labels(ax.get_yticklabels(), labels)
+ assert_array_equal(ax.yaxis.get_ticklocs(), positions)
+ self.assertEqual(len(ax.lines), 8 * len(numeric_cols))
+
+ @slow
+ def test_boxplot_return_type(self):
+ df = DataFrame(randn(6, 4),
+ index=list(string.ascii_letters[:6]),
+ columns=['one', 'two', 'three', 'four'])
+ with tm.assertRaises(ValueError):
+ df.plot(kind='box', return_type='NOTATYPE')
+
+ result = df.plot(kind='box', return_type='dict')
+ self._check_box_return_type(result, 'dict')
+
+ result = df.plot(kind='box', return_type='axes')
+ self._check_box_return_type(result, 'axes')
+
+ result = df.plot(kind='box', return_type='both')
+ self._check_box_return_type(result, 'both')
+
+ @slow
+ def test_boxplot_subplots_return_type(self):
+ df = self.hist_df
+
+ # normal style: return_type=None
+ result = df.plot(kind='box', subplots=True)
+ self.assertIsInstance(result, np.ndarray)
+ self._check_box_return_type(result, None,
+ expected_keys=['height', 'weight', 'category'])
+
+ for t in ['dict', 'axes', 'both']:
+ returned = df.plot(kind='box', return_type=t, subplots=True)
+ self._check_box_return_type(returned, t,
+ expected_keys=['height', 'weight', 'category'],
+ check_ax_title=False)
+
+ @slow
+ def test_boxplot_legacy(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
@@ -1693,7 +1803,7 @@ def test_boxplot(self):
self.assertEqual(len(ax.get_lines()), len(lines))
@slow
- def test_boxplot_return_type(self):
+ def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pydata/pandas/pull/7096
import matplotlib as mpl
@@ -2315,6 +2425,61 @@ def test_kde_colors(self):
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+ @slow
+ def test_boxplot_colors(self):
+
+ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', fliers_c='b'):
+ self._check_colors(bp['boxes'], linecolors=[box_c] * len(bp['boxes']))
+ self._check_colors(bp['whiskers'], linecolors=[whiskers_c] * len(bp['whiskers']))
+ self._check_colors(bp['medians'], linecolors=[medians_c] * len(bp['medians']))
+ self._check_colors(bp['fliers'], linecolors=[fliers_c] * len(bp['fliers']))
+ self._check_colors(bp['caps'], linecolors=[caps_c] * len(bp['caps']))
+
+ default_colors = self.plt.rcParams.get('axes.color_cycle')
+
+ df = DataFrame(randn(5, 5))
+ bp = df.plot(kind='box', return_type='dict')
+ _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
+ tm.close()
+
+ dict_colors = dict(boxes='#572923', whiskers='#982042',
+ medians='#804823', caps='#123456')
+ bp = df.plot(kind='box', color=dict_colors, sym='r+', return_type='dict')
+ _check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
+ dict_colors['medians'], dict_colors['caps'], 'r')
+ tm.close()
+
+ # partial colors
+ dict_colors = dict(whiskers='c', medians='m')
+ bp = df.plot(kind='box', color=dict_colors, return_type='dict')
+ _check_colors(bp, default_colors[0], 'c', 'm')
+ tm.close()
+
+ from matplotlib import cm
+ # Test str -> colormap functionality
+ bp = df.plot(kind='box', colormap='jet', return_type='dict')
+ jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
+ _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
+ tm.close()
+
+ # Test colormap functionality
+ bp = df.plot(kind='box', colormap=cm.jet, return_type='dict')
+ _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
+ tm.close()
+
+ # string color is applied to all artists except fliers
+ bp = df.plot(kind='box', color='DodgerBlue', return_type='dict')
+ _check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
+ 'DodgerBlue')
+
+ # tuple is also applied to all artists except fliers
+ bp = df.plot(kind='box', color=(0, 1, 0), sym='#123456', return_type='dict')
+ _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), '#123456')
+
+ with tm.assertRaises(ValueError):
+ # Color contains invalid key results in ValueError
+ df.plot(kind='box', color=dict(boxes='red', xxxx='blue'))
+
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
plt.rcParams['axes.color_cycle'] = list('rgbk')
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 5fa326a88b682..11f267d55fa09 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1256,10 +1256,7 @@ def _get_style(self, i, col_name):
def _get_colors(self, num_colors=None, color_kwds='color'):
from pandas.core.frame import DataFrame
if num_colors is None:
- if isinstance(self.data, DataFrame):
- num_colors = len(self.data.columns)
- else:
- num_colors = 1
+ num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
@@ -1980,7 +1977,6 @@ def _post_plot_logic(self):
class PiePlot(MPLPlot):
-
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
@@ -2031,12 +2027,152 @@ def _make_plot(self):
self._add_legend_handle(p, l)
-class BoxPlot(MPLPlot):
- pass
+class BoxPlot(LinePlot):
+ _layout_type = 'horizontal'
+
+ _valid_return_types = (None, 'axes', 'dict', 'both')
+ # namedtuple to hold results
+ BP = namedtuple("Boxplot", ['ax', 'lines'])
+
+ def __init__(self, data, return_type=None, **kwargs):
+ # Do not call LinePlot.__init__ which may fill nan
+ if return_type not in self._valid_return_types:
+ raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
+
+ self.return_type = return_type
+ MPLPlot.__init__(self, data, **kwargs)
+
+ def _args_adjust(self):
+ if self.subplots:
+ # Disable label ax sharing. Otherwise, all subplots shows last column label
+ if self.orientation == 'vertical':
+ self.sharex = False
+ else:
+ self.sharey = False
+
+ def _get_plot_function(self):
+ def plotf(ax, y, column_num=None, **kwds):
+ if y.ndim == 2:
+ y = [remove_na(v) for v in y]
+ else:
+ y = remove_na(y)
+ bp = ax.boxplot(y, **kwds)
+
+ if self.return_type == 'dict':
+ return bp, bp
+ elif self.return_type == 'both':
+ return self.BP(ax=ax, lines=bp), bp
+ else:
+ return ax, bp
+ return plotf
+
+ def _validate_color_args(self):
+ if 'color' in self.kwds:
+ if self.colormap is not None:
+ warnings.warn("'color' and 'colormap' cannot be used "
+ "simultaneously. Using 'color'")
+ self.color = self.kwds.pop('color')
+
+ if isinstance(self.color, dict):
+ valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
+ for key, values in compat.iteritems(self.color):
+ if key not in valid_keys:
+ raise ValueError("color dict contains invalid key '{0}' "
+ "The key must be either {1}".format(key, valid_keys))
+ else:
+ self.color = None
+
+ # get standard colors for default
+ colors = _get_standard_colors(num_colors=3,
+ colormap=self.colormap,
+ color=None)
+ # use 2 colors by default, for box/whisker and median
+ # flier colors isn't needed here
+ # because it can be specified by ``sym`` kw
+ self._boxes_c = colors[0]
+ self._whiskers_c = colors[0]
+ self._medians_c = colors[2]
+ self._caps_c = 'k' # mpl default
+
+ def _get_colors(self, num_colors=None, color_kwds='color'):
+ pass
+
+ def maybe_color_bp(self, bp):
+ if isinstance(self.color, dict):
+ boxes = self.color.get('boxes', self._boxes_c)
+ whiskers = self.color.get('whiskers', self._whiskers_c)
+ medians = self.color.get('medians', self._medians_c)
+ caps = self.color.get('caps', self._caps_c)
+ else:
+ # Other types are forwarded to matplotlib
+ # If None, use default colors
+ boxes = self.color or self._boxes_c
+ whiskers = self.color or self._whiskers_c
+ medians = self.color or self._medians_c
+ caps = self.color or self._caps_c
+
+ from matplotlib.artist import setp
+ setp(bp['boxes'], color=boxes, alpha=1)
+ setp(bp['whiskers'], color=whiskers, alpha=1)
+ setp(bp['medians'], color=medians, alpha=1)
+ setp(bp['caps'], color=caps, alpha=1)
+
+ def _make_plot(self):
+ plotf = self._get_plot_function()
+ if self.subplots:
+ self._return_obj = compat.OrderedDict()
+
+ for i, (label, y) in enumerate(self._iter_data()):
+ ax = self._get_ax(i)
+ kwds = self.kwds.copy()
+
+ ret, bp = plotf(ax, y, column_num=i, **kwds)
+ self.maybe_color_bp(bp)
+ self._return_obj[label] = ret
+
+ label = [com.pprint_thing(label)]
+ self._set_ticklabels(ax, label)
+ else:
+ y = self.data.values.T
+ ax = self._get_ax(0)
+ kwds = self.kwds.copy()
+
+ ret, bp = plotf(ax, y, column_num=0, **kwds)
+ self.maybe_color_bp(bp)
+ self._return_obj = ret
+
+ labels = [l for l, y in self._iter_data()]
+ labels = [com.pprint_thing(l) for l in labels]
+ if not self.use_index:
+ labels = [com.pprint_thing(key) for key in range(len(labels))]
+ self._set_ticklabels(ax, labels)
+
+ def _set_ticklabels(self, ax, labels):
+ if self.orientation == 'vertical':
+ ax.set_xticklabels(labels)
+ else:
+ ax.set_yticklabels(labels)
+
+ def _post_plot_logic(self):
+ pass
+
+ @property
+ def orientation(self):
+ if self.kwds.get('vert', True):
+ return 'vertical'
+ else:
+ return 'horizontal'
+
+ @property
+ def result(self):
+ if self.return_type is None:
+ return super(BoxPlot, self).result
+ else:
+ return self._return_obj
# kinds supported by both dataframe and series
-_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist']
+_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
@@ -2044,7 +2180,7 @@ class BoxPlot(MPLPlot):
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot,
- 'kde': KdePlot, 'hist': HistPlot,
+ 'kde': KdePlot, 'hist': HistPlot, 'box': BoxPlot,
'scatter': ScatterPlot, 'hexbin': HexBinPlot,
'area': AreaPlot, 'pie': PiePlot}
@@ -2091,13 +2227,14 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
ax : matplotlib axis object, default None
style : list or dict
matplotlib line style per column
- kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'scatter', 'hexbin'}
+ kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'box', 'scatter', 'hexbin'}
line : line plot
bar : vertical bar plot
barh : horizontal bar plot
hist : histogram
kde/density : Kernel Density Estimation plot
area : area plot
+ box : box plot
scatter : scatter plot
hexbin : hexbin plot
logx : boolean, default False
@@ -2237,13 +2374,14 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
Parameters
----------
label : label argument to provide to plot
- kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area'}
+ kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'box'}
line : line plot
bar : vertical bar plot
barh : horizontal bar plot
hist : histogram
kde/density : Kernel Density Estimation plot
area : area plot
+ box : box plot
use_index : boolean, default True
Plot index as axis tick labels
rot : int, default None
@@ -2373,8 +2511,8 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None,
# validate return_type:
valid_types = (None, 'axes', 'dict', 'both')
- if return_type not in valid_types:
- raise ValueError("return_type")
+ if return_type not in BoxPlot._valid_return_types:
+ raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
@@ -2391,8 +2529,6 @@ def maybe_color_bp(bp):
setp(bp['whiskers'],color=colors[0],alpha=1)
setp(bp['medians'],color=colors[2],alpha=1)
- BP = namedtuple("Boxplot", ['ax', 'lines']) # namedtuple to hold results
-
def plot_group(keys, values, ax):
keys = [com.pprint_thing(x) for x in keys]
values = [remove_na(v) for v in values]
@@ -2407,7 +2543,7 @@ def plot_group(keys, values, ax):
if return_type == 'dict':
return bp
elif return_type == 'both':
- return BP(ax=ax, lines=bp)
+ return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
| Like #7809, added `DataFrame.plot(kind='box')` to plot boxplot under the common plotting method.
**NOTE:** `DataFrame.boxplot` remains untouched (few lines are modified to share some definitions).
**NOTE:** This is based on #7736.
```
import pandas as pd
import numpy as np
pd.options.display.mpl_style = 'default'
df = pd.DataFrame(np.random.randn(100, 5))
ax = df.plot(kind='box')
```

One enhancement is easier colorization by passing `color` keyword. You can pass `dict` with keys are `boxes`, `whiskers`, `medians` and `caps`. If some keys are missing in the `dict`, default colors are used for the corresponding artists. Also, boxplot has `sym` keyword to specify fliers style (This is mpl keyword and simply forward to mpl).
```
color = dict(boxes='DarkGreen', whiskers='DarkOrange',
medians='DarkBlue', caps='Gray')
ax = df.plot(kind='box', color=color)
```

| https://api.github.com/repos/pandas-dev/pandas/pulls/7998 | 2014-08-12T13:37:51Z | 2014-08-25T04:10:14Z | 2014-08-25T04:10:14Z | 2014-08-30T01:19:34Z |
DOC: Release note format correction | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index b91c306e9b193..d15a48535f1eb 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -151,7 +151,7 @@ API changes
Out[3]: array([ True, False, True, True, False, True], dtype=bool)
- ``tz_localize(None)`` for tz-aware ``Timestamp`` and ``DatetimeIndex`` now removes timezone holding local time,
-previously results in ``Exception`` or ``TypeError`` (:issue:`7812`)
+ previously results in ``Exception`` or ``TypeError`` (:issue:`7812`)
.. ipython:: python
@@ -305,8 +305,8 @@ Enhancements
- ``PeriodIndex`` supports ``resolution`` as the same as ``DatetimeIndex`` (:issue:`7708`)
--``pandas.tseries.holiday`` has added support for additional holidays and ways to observe holidays (:issue: `7070`)
--``pandas.tseries.holiday.Holiday`` now supports a list of offsets in Python3 (:issue: `7070`)
+- ``pandas.tseries.holiday`` has added support for additional holidays and ways to observe holidays (:issue:`7070`)
+- ``pandas.tseries.holiday.Holiday`` now supports a list of offsets in Python3 (:issue:`7070`)
| https://api.github.com/repos/pandas-dev/pandas/pulls/7995 | 2014-08-12T01:59:18Z | 2014-08-12T06:13:18Z | 2014-08-12T06:13:18Z | 2014-08-12T22:11:19Z | |
COMPAT: change pytables to use 3.0.0 API (GH7990) | diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt
index c5addb93be38d..5b77428a0f6d7 100644
--- a/ci/requirements-2.7.txt
+++ b/ci/requirements-2.7.txt
@@ -5,7 +5,7 @@ numpy==1.8.1
cython==0.19.1
bottleneck==0.6.0
numexpr==2.2.2
-tables==2.3.1
+tables==3.0.0
matplotlib==1.3.1
openpyxl==1.6.2
xlsxwriter==0.4.6
diff --git a/doc/source/install.rst b/doc/source/install.rst
index c30a086295f00..5595f60c6789c 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -26,7 +26,7 @@ Installing pandas
Trying out pandas, no installation required!
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The easiest way to start experimenting with pandas doesn't involve installing
+The easiest way to start experimenting with pandas doesn't involve installing
pandas at all.
`Wakari <https://wakari.io>`__ is a free service that provides a hosted
@@ -35,10 +35,10 @@ pandas at all.
Simply create an account, and have access to pandas from within your brower via
an `IPython Notebook <http://ipython.org/notebook.html>`__ in a few minutes.
-Installing pandas with Anaconda
+Installing pandas with Anaconda
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Installing pandas and the rest of the `NumPy <http://www.numpy.org/>`__ and
+Installing pandas and the rest of the `NumPy <http://www.numpy.org/>`__ and
`SciPy <http://www.scipy.org/>`__ stack can be a little
difficult for inexperienced users.
@@ -57,8 +57,8 @@ anything else, and without needing to wait for any software to be compiled.
Installation instructions for `Anaconda <http://docs.continuum.io/anaconda/>`__
`can be found here <http://docs.continuum.io/anaconda/install.html>`__.
-A full list of the packages available as part of the
-`Anaconda <http://docs.continuum.io/anaconda/>`__ distribution
+A full list of the packages available as part of the
+`Anaconda <http://docs.continuum.io/anaconda/>`__ distribution
`can be found here <http://docs.continuum.io/anaconda/pkg-docs.html>`__.
An additional advantage of installing with Anaconda is that you don't require
@@ -78,7 +78,7 @@ If you want to have more control on which packages, or have a limited internet
bandwidth, then installing pandas with
`Miniconda <http://conda.pydata.org/miniconda.html>`__ may be a better solution.
-`Conda <http://conda.pydata.org/docs/>`__ is the package manager that the
+`Conda <http://conda.pydata.org/docs/>`__ is the package manager that the
`Anaconda <http://docs.continuum.io/anaconda/>`__ distribution is built upon.
It is a package manager that is both cross-platform and language agnostic
(it can play a similar role to a pip and virtualenv combination).
@@ -90,7 +90,7 @@ minimal self contained Python installation, and then use the
First you will need `Conda <http://conda.pydata.org/docs/>`__ to be installed and
downloading and running the `Miniconda
<http://conda.pydata.org/miniconda.html>`__
-will do this for you. The installer
+will do this for you. The installer
`can be found here <http://conda.pydata.org/miniconda.html>`__
The next step is to create a new conda environment (these are analogous to a
@@ -98,7 +98,7 @@ virtualenv but they also allow you to specify precisely which Python version
to install also). Run the following commands from a terminal window::
conda create -n name_of_my_env python
-
+
This will create a minimal environment with only Python installed in it.
To put your self inside this environment run::
@@ -108,7 +108,7 @@ On Windows the command is::
activate name_of_my_env
-The final step required is to install pandas. This can be done with the
+The final step required is to install pandas. This can be done with the
following command::
conda install pandas
@@ -143,7 +143,7 @@ pandas can be installed via pip from
pip install pandas
This will likely require the installation of a number of dependencies,
-including NumPy, will require a compiler to compile required bits of code,
+including NumPy, will require a compiler to compile required bits of code,
and can take a few minutes to complete.
Installing using your Linux distribution's package manager.
@@ -259,6 +259,7 @@ Recommended Dependencies
* `numexpr <http://code.google.com/p/numexpr/>`__: for accelerating certain numerical operations.
``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups.
+ If installed, must be Version 2.1 or higher.
* `bottleneck <http://berkeleyanalytics.com/bottleneck>`__: for accelerating certain types of ``nan``
evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups.
@@ -277,7 +278,7 @@ Optional Dependencies
* `Cython <http://www.cython.org>`__: Only necessary to build development
version. Version 0.17.1 or higher.
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
- * `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage
+ * `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required.
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended.
* `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting
* `statsmodels <http://statsmodels.sourceforge.net/>`__
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 40977aee44cdd..f4065d736a674 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2199,12 +2199,9 @@ the high performance HDF5 format using the excellent `PyTables
<http://www.pytables.org/>`__ library. See the :ref:`cookbook <cookbook.hdf>`
for some advanced strategies
-.. note::
+.. warning::
- ``PyTables`` 3.0.0 was recently released to enable support for Python 3.
- pandas should be fully compatible (and previously written stores should be
- backwards compatible) with all ``PyTables`` >= 2.3. For ``python >= 3.2``,
- ``pandas >= 0.12.0`` is required for compatibility.
+ As of version 0.15.0, pandas requires ``PyTables`` >= 3.0.0. Stores written with prior versions of pandas / ``PyTables`` >= 2.3 are fully compatible (this was the previous minimum ``PyTables`` required version).
.. ipython:: python
:suppress:
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index d15a48535f1eb..322bcba9664d9 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -11,7 +11,8 @@ users upgrade to this version.
- The ``Categorical`` type was integrated as a first-class pandas type, see :ref:`here <whatsnew_0150.cat>`
- Internal refactoring of the ``Index`` class to no longer sub-class ``ndarray``, see :ref:`Internal Refactoring <whatsnew_0150.refactoring>`
- - New datetimelike properties accessor ``.dt`` for Series, see :ref:`Dateimelike Properties <whatsnew_0150.dt>`
+ - New datetimelike properties accessor ``.dt`` for Series, see :ref:`Datetimelike Properties <whatsnew_0150.dt>`
+ - dropping support for ``PyTables`` less than version 3.0.0, and ``numexpr`` less than version 2.1 (:issue:`7990`)
- :ref:`Other Enhancements <whatsnew_0150.enhancements>`
diff --git a/pandas/computation/eval.py b/pandas/computation/eval.py
index 82c68fb10e7d6..e3096a85ca7d7 100644
--- a/pandas/computation/eval.py
+++ b/pandas/computation/eval.py
@@ -42,9 +42,9 @@ def _check_engine(engine):
"if 'numexpr' is not installed")
else:
ne_version = numexpr.__version__
- if ne_version < LooseVersion('2.0'):
+ if ne_version < LooseVersion('2.1'):
raise ImportError("'numexpr' version is %s, "
- "must be >= 2.0" % ne_version)
+ "must be >= 2.1" % ne_version)
def _check_parser(parser):
diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py
index 47d3fce618f89..bd00dbbb444b6 100644
--- a/pandas/computation/expressions.py
+++ b/pandas/computation/expressions.py
@@ -13,7 +13,12 @@
try:
import numexpr as ne
- _NUMEXPR_INSTALLED = ne.__version__ >= LooseVersion('2.0')
+ ver = ne.__version__
+ _NUMEXPR_INSTALLED = ver >= LooseVersion('2.1')
+ if not _NUMEXPR_INSTALLED:
+ warnings.warn("The installed version of numexpr {ver} is not supported "
+ "in pandas and will be not be used".format(ver=ver), UserWarning)
+
except ImportError: # pragma: no cover
_NUMEXPR_INSTALLED = False
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 5489893df06b9..56d6ccd0abd9b 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -81,7 +81,6 @@ def _is_py3_complex_incompat(result, expected):
_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms)
-
class TestEvalNumexprPandas(tm.TestCase):
@classmethod
@@ -1515,9 +1514,9 @@ def testit():
except ImportError:
raise nose.SkipTest("no numexpr")
else:
- if ne.__version__ < LooseVersion('2.0'):
+ if ne.__version__ < LooseVersion('2.1'):
with tm.assertRaisesRegexp(ImportError, "'numexpr' version is "
- ".+, must be >= 2.0"):
+ ".+, must be >= 2.1"):
testit()
else:
testit()
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 78e7c43de678f..989249994d953 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -228,20 +228,18 @@ class DuplicateWarning(Warning):
# oh the troubles to reduce import time
_table_mod = None
-_table_supports_index = False
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
- global _table_supports_index
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# version requirements
- ver = tables.__version__
- _table_supports_index = LooseVersion(ver) >= '2.3'
+ if LooseVersion(tables.__version__) < '3.0.0':
+ raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
@@ -509,7 +507,7 @@ def open(self, mode='a', **kwargs):
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
- See HDFStore docstring or tables.openFile for info about modes
+ See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
@@ -542,11 +540,11 @@ def open(self, mode='a', **kwargs):
fletcher32=self._fletcher32)
try:
- self._handle = tables.openFile(self._path, self._mode, **kwargs)
+ self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print('Opening %s in read-only mode' % self._path)
- self._handle = tables.openFile(self._path, 'r', **kwargs)
+ self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
@@ -561,7 +559,7 @@ def open(self, mode='a', **kwargs):
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 which allows\n"
"files to be opened multiple times at once\n".format(version=tables.__version__,
- hdf_version=tables.getHDF5Version()))
+ hdf_version=tables.get_hdf5_version()))
raise e
@@ -1018,9 +1016,6 @@ def create_table_index(self, key, **kwargs):
# version requirements
_tables()
- if not _table_supports_index:
- raise ValueError("PyTables >= 2.3 is required for table indexing")
-
s = self.get_storer(key)
if s is None:
return
@@ -1037,7 +1032,7 @@ def groups(self):
_tables()
self._check_if_open()
return [
- g for g in self._handle.walkNodes()
+ g for g in self._handle.walk_nodes()
if (getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
@@ -1050,7 +1045,7 @@ def get_node(self, key):
try:
if not key.startswith('/'):
key = '/' + key
- return self._handle.getNode(self.root, key)
+ return self._handle.get_node(self.root, key)
except:
return None
@@ -1235,7 +1230,7 @@ def _write_to_group(self, key, value, format, index=True, append=False,
# remove the node if we are not appending
if group is not None and not append:
- self._handle.removeNode(group, recursive=True)
+ self._handle.remove_node(group, recursive=True)
group = None
# we don't want to store a table node at all if are object is 0-len
@@ -1257,7 +1252,7 @@ def _write_to_group(self, key, value, format, index=True, append=False,
new_path += p
group = self.get_node(new_path)
if group is None:
- group = self._handle.createGroup(path, p)
+ group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
@@ -2162,7 +2157,7 @@ def write(self, **kwargs):
def delete(self, where=None, start=None, stop=None, **kwargs):
""" support fully deleting the node in its entirety (only) - where specification must be None """
if where is None and start is None and stop is None:
- self._handle.removeNode(self.group, recursive=True)
+ self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
@@ -2404,7 +2399,7 @@ def write_array_empty(self, key, value):
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
- self._handle.createArray(self.group, key, arr)
+ self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
@@ -2414,7 +2409,7 @@ def _is_empty_array(self, shape):
def write_array(self, key, value, items=None):
if key in self.group:
- self._handle.removeNode(self.group, key)
+ self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = self._is_empty_array(value.shape)
@@ -2438,7 +2433,7 @@ def write_array(self, key, value, items=None):
if atom is not None:
# create an empty chunked array and fill it from value
if not empty_array:
- ca = self._handle.createCArray(self.group, key, atom,
+ ca = self._handle.create_carray(self.group, key, atom,
value.shape,
filters=self._filters)
ca[:] = value
@@ -2466,7 +2461,7 @@ def write_array(self, key, value, items=None):
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning)
- vlarr = self._handle.createVLArray(self.group, key,
+ vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
vlarr.append(value)
else:
@@ -2474,15 +2469,15 @@ def write_array(self, key, value, items=None):
self.write_array_empty(key, value)
else:
if value.dtype.type == np.datetime64:
- self._handle.createArray(self.group, key, value.view('i8'))
+ self._handle.create_array(self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'datetime64'
elif value.dtype.type == np.timedelta64:
- self._handle.createArray(self.group, key, value.view('i8'))
+ self._handle.create_array(self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'timedelta64'
else:
- self._handle.createArray(self.group, key, value)
+ self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
@@ -2586,7 +2581,7 @@ def write(self, obj, **kwargs):
for name, ss in compat.iteritems(obj):
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
- node = self._handle.createGroup(self.group, key)
+ node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
@@ -2622,7 +2617,7 @@ def write(self, obj, **kwargs):
for name, sdf in compat.iteritems(obj):
key = 'sparse_frame_%s' % name
if key not in self.group._v_children:
- node = self._handle.createGroup(self.group, key)
+ node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseFrameFixed(self.parent, node)
@@ -3043,18 +3038,18 @@ def create_index(self, columns=None, optlevel=None, kind=None):
cur_kind = index.kind
if kind is not None and cur_kind != kind:
- v.removeIndex()
+ v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
- v.removeIndex()
+ v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
- v.createIndex(**kw)
+ v.create_index(**kw)
def read_axes(self, where, **kwargs):
"""create and return the axes sniffed from the table: return boolean
@@ -3617,7 +3612,7 @@ def write(self, obj, axes=None, append=False, complib=None,
chunksize=None, expectedrows=None, dropna=True, **kwargs):
if not append and self.is_exists:
- self._handle.removeNode(self.group, 'table')
+ self._handle.remove_node(self.group, 'table')
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
@@ -3636,7 +3631,7 @@ def write(self, obj, axes=None, append=False, complib=None,
self.set_attrs()
# create the table
- table = self._handle.createTable(self.group, **options)
+ table = self._handle.create_table(self.group, **options)
else:
table = self.table
@@ -3765,12 +3760,12 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
- self._handle.removeNode(self.group, recursive=True)
+ self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
- nrows = self.table.removeRows(start=start, stop=stop)
+ nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
@@ -3809,7 +3804,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
pg = groups.pop()
for g in reversed(groups):
rows = l.take(lrange(g, pg))
- table.removeRows(start=rows[rows.index[0]
+ table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
@@ -4352,10 +4347,10 @@ def select(self):
generate the selection
"""
if self.condition is not None:
- return self.table.table.readWhere(self.condition.format(),
+ return self.table.table.read_where(self.condition.format(),
start=self.start, stop=self.stop)
elif self.coordinates is not None:
- return self.table.table.readCoordinates(self.coordinates)
+ return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
@@ -4374,7 +4369,7 @@ def select_coords(self):
stop += nrows
if self.condition is not None:
- return self.table.table.getWhereList(self.condition.format(),
+ return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 89809b47d76eb..2a0796e90e418 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -373,7 +373,7 @@ def test_repr(self):
warnings.filterwarnings('always', category=PerformanceWarning)
# make a random group in hdf space
- store._handle.createGroup(store._handle.root,'bah')
+ store._handle.create_group(store._handle.root,'bah')
repr(store)
str(store)
@@ -541,11 +541,8 @@ def test_open_args(self):
store.close()
- # only supported on pytable >= 3.0.0
- if LooseVersion(tables.__version__) >= '3.0.0':
-
- # the file should not have actually been written
- self.assertFalse(os.path.exists(path))
+ # the file should not have actually been written
+ self.assertFalse(os.path.exists(path))
def test_flush(self):
@@ -881,8 +878,6 @@ def check(format,index):
def test_encoding(self):
- if LooseVersion(tables.__version__) < '3.0.0':
- raise nose.SkipTest('tables version does not support proper encoding')
if sys.byteorder != 'little':
raise nose.SkipTest('system byteorder is not little')
@@ -1471,29 +1466,6 @@ def col(t,column):
store.put('f2', df)
self.assertRaises(TypeError, store.create_table_index, 'f2')
- # try to change the version supports flag
- from pandas.io import pytables
- pytables._table_supports_index = False
- self.assertRaises(Exception, store.create_table_index, 'f')
-
- # test out some versions
- original = tables.__version__
-
- for v in ['2.2', '2.2b']:
- pytables._table_mod = None
- pytables._table_supports_index = False
- tables.__version__ = v
- self.assertRaises(Exception, store.create_table_index, 'f')
-
- for v in ['2.3.1', '2.3.1b', '2.4dev', '2.4', '3.0.0', '3.1.0', original]:
- pytables._table_mod = None
- pytables._table_supports_index = False
- tables.__version__ = v
- store.create_table_index('f')
- pytables._table_mod = None
- pytables._table_supports_index = False
- tables.__version__ = original
-
def test_big_table_frame(self):
raise nose.SkipTest('no big table frame')
| closes #7990
| https://api.github.com/repos/pandas-dev/pandas/pulls/7994 | 2014-08-11T22:54:44Z | 2014-08-12T13:04:01Z | 2014-08-12T13:04:01Z | 2014-08-20T08:07:14Z |
Allow deprecate_kwarg to transform arguments | diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py
new file mode 100644
index 0000000000000..76b49a5f976bd
--- /dev/null
+++ b/pandas/tests/test_util.py
@@ -0,0 +1,64 @@
+
+import warnings
+
+import nose
+
+import pandas.util
+from pandas.util.decorators import deprecate_kwarg
+import pandas.util.testing as tm
+
+class TestDecorators(tm.TestCase):
+ def setUp(self):
+ @deprecate_kwarg('old', 'new')
+ def _f1(new=False):
+ return new
+
+ @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
+ def _f2(new=False):
+ return new
+
+ @deprecate_kwarg('old', 'new', lambda x: x+1)
+ def _f3(new=0):
+ return new
+
+ self.f1 = _f1
+ self.f2 = _f2
+ self.f3 = _f3
+
+ def test_deprecate_kwarg(self):
+ x = 78
+ with tm.assert_produces_warning(FutureWarning):
+ result = self.f1(old=x)
+ self.assertIs(result, x)
+ with tm.assert_produces_warning(None):
+ self.f1(new=x)
+
+ def test_dict_deprecate_kwarg(self):
+ x = 'yes'
+ with tm.assert_produces_warning(FutureWarning):
+ result = self.f2(old=x)
+ self.assertEqual(result, True)
+
+ def test_missing_deprecate_kwarg(self):
+ x = 'bogus'
+ with tm.assert_produces_warning(FutureWarning):
+ result = self.f2(old=x)
+ self.assertEqual(result, 'bogus')
+
+ def test_callable_deprecate_kwarg(self):
+ x = 5
+ with tm.assert_produces_warning(FutureWarning):
+ result = self.f3(old=x)
+ self.assertEqual(result, x+1)
+ with tm.assertRaises(TypeError):
+ self.f3(old='hello')
+
+ def test_bad_deprecate_kwarg(self):
+ with tm.assertRaises(TypeError):
+ @deprecate_kwarg('old', 'new', 0)
+ def f4(new=None):
+ pass
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index d94897a6685a2..288ec164198e4 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -15,7 +15,7 @@ def wrapper(*args, **kwargs):
return wrapper
-def deprecate_kwarg(old_arg_name, new_arg_name):
+def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None):
"""Decorator to deprecate a keyword argument of a function
Parameters
@@ -24,6 +24,10 @@ def deprecate_kwarg(old_arg_name, new_arg_name):
Name of argument in function to deprecate
new_arg_name : str
Name of prefered argument in function
+ mapping : dict or callable
+ If mapping is present, use it to translate old arguments to
+ new arguments. A callable must do its own value checking;
+ values not found in a dict will be forwarded unchanged.
Examples
--------
@@ -31,7 +35,7 @@ def deprecate_kwarg(old_arg_name, new_arg_name):
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
- ... print columns
+ ... print(columns)
...
>>> f(columns='should work ok')
should work ok
@@ -41,22 +45,46 @@ def deprecate_kwarg(old_arg_name, new_arg_name):
should raise warning
>>> f(cols='should error', columns="can't pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
+ >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no', False})
+ ... def f(new=False):
+ ... print('yes!' if new else 'no!')
+ ...
+ >>> f(old='yes')
+ FutureWarning: old='yes' is deprecated, use new=True instead
+ warnings.warn(msg, FutureWarning)
+ yes!
"""
+ if mapping is not None and not hasattr(mapping, 'get') and \
+ not callable(mapping):
+ raise TypeError("mapping from old to new argument values "
+ "must be dict or callable!")
def _deprecate_kwarg(func):
@wraps(func)
def wrapper(*args, **kwargs):
old_arg_value = kwargs.pop(old_arg_name, None)
if old_arg_value is not None:
- msg = "the '%s' keyword is deprecated, use '%s' instead" % \
- (old_arg_name, new_arg_name)
+ if mapping is not None:
+ if hasattr(mapping, 'get'):
+ new_arg_value = mapping.get(old_arg_value,
+ old_arg_value)
+ else:
+ new_arg_value = mapping(old_arg_value)
+ msg = "the %s=%r keyword is deprecated, " \
+ "use %s=%r instead" % \
+ (old_arg_name, old_arg_value,
+ new_arg_name, new_arg_value)
+ else:
+ new_arg_value = old_arg_value
+ msg = "the '%s' keyword is deprecated, " \
+ "use '%s' instead" % (old_arg_name, new_arg_name)
warnings.warn(msg, FutureWarning)
if kwargs.get(new_arg_name, None) is not None:
msg = "Can only specify '%s' or '%s', not both" % \
(old_arg_name, new_arg_name)
raise TypeError(msg)
else:
- kwargs[new_arg_name] = old_arg_value
+ kwargs[new_arg_name] = new_arg_value
return func(*args, **kwargs)
return wrapper
return _deprecate_kwarg
| This will allow deprecated kwargs to be transformed before being passed to a function. For example, in #7963, one could write
``` python
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def tz_localize(self, tz, ambiguous=None):
...
```
Open issues:
- should we test for a bad mapping at creation time? (in this PR: yes)
- how much error checking should we do? (in this PR: pass args unrecognized by a mapping through unchanged; assume a function will do its own error handling)
- should we support both mappings and callables? (in this PR: yes)
- if so, how should we recognize mappings? (in this PR: existence of a `get` method)
| https://api.github.com/repos/pandas-dev/pandas/pulls/7991 | 2014-08-11T21:17:48Z | 2014-09-09T19:46:29Z | 2014-09-09T19:46:29Z | 2014-09-09T20:09:08Z |
bug fix for 7987 and add day of week functionality to Holiday | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 0223a11d8a011..5c85e9e684bf3 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -367,7 +367,7 @@ Enhancements
- ``PeriodIndex`` supports ``resolution`` as the same as ``DatetimeIndex`` (:issue:`7708`)
- ``pandas.tseries.holiday`` has added support for additional holidays and ways to observe holidays (:issue:`7070`)
- ``pandas.tseries.holiday.Holiday`` now supports a list of offsets in Python3 (:issue:`7070`)
-
+- ``pandas.tseries.holiday.Holiday`` now supports a days_of_week parameter (:issue:`7070`)
@@ -529,6 +529,7 @@ Bug Fixes
- ``Period`` and ``PeriodIndex`` addition/subtraction with ``np.timedelta64`` results in incorrect internal representations (:issue:`7740`)
+- ``Holiday`` bug in Holiday with no offset or observance (:issue:`7987`)
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index f42ad174b8f0f..ea85f35cd4ca2 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -4,6 +4,7 @@
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
from pandas.tseries.offsets import Easter, Day
+
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
@@ -116,7 +117,8 @@ class Holiday(object):
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
- observance=None, start_date=None, end_date=None):
+ observance=None, start_date=None, end_date=None,
+ days_of_week=None):
"""
Parameters
----------
@@ -127,6 +129,24 @@ class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
+ days_of_week:
+ provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday
+ Monday=0,..,Sunday=6
+
+ Examples
+ --------
+ >>> from pandas.tseries.holiday import Holiday, nearest_workday
+ >>> from pandas import DateOffset
+ >>> from dateutil.relativedelta import MO
+ >>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
+ offset=DateOffset(weekday=MO(1)))
+ >>> USLaborDay = Holiday('Labor Day', month=9, day=1,
+ offset=DateOffset(weekday=MO(1)))
+ >>> July3rd = Holiday('July 3rd', month=7, day=3,)
+ >>> NewYears = Holiday('New Years Day', month=1, day=1,
+ observance=nearest_workday),
+ >>> July3rd = Holiday('July 3rd', month=7, day=3,
+ days_of_week=(0, 1, 2, 3))
"""
self.name = name
self.year = year
@@ -136,6 +156,8 @@ class from pandas.tseries.offsets
self.start_date = start_date
self.end_date = end_date
self.observance = observance
+ assert (days_of_week is None or type(days_of_week) == tuple)
+ self.days_of_week = days_of_week
def __repr__(self):
info = ''
@@ -183,11 +205,15 @@ def dates(self, start_date, end_date, return_name=False):
year_offset = DateOffset(years=1)
base_date = Timestamp(datetime(start_date.year, self.month, self.day))
dates = DatetimeIndex(start=base_date, end=end_date, freq=year_offset)
- holiday_dates = list(self._apply_rule(dates))
-
+ holiday_dates = self._apply_rule(dates)
+ if self.days_of_week is not None:
+ holiday_dates = list(filter(lambda x: x is not None and
+ x.dayofweek in self.days_of_week,
+ holiday_dates))
+ else:
+ holiday_dates = list(filter(lambda x: x is not None, holiday_dates))
if return_name:
return Series(self.name, index=holiday_dates)
-
return holiday_dates
def _apply_rule(self, dates):
@@ -207,14 +233,13 @@ def _apply_rule(self, dates):
if self.observance is not None:
return map(lambda d: self.observance(d), dates)
- if not isinstance(self.offset, list):
- offsets = [self.offset]
- else:
- offsets = self.offset
-
- for offset in offsets:
- dates = list(map(lambda d: d + offset, dates))
-
+ if self.offset is not None:
+ if not isinstance(self.offset, list):
+ offsets = [self.offset]
+ else:
+ offsets = self.offset
+ for offset in offsets:
+ dates = list(map(lambda d: d + offset, dates))
return dates
holiday_calendars = {}
diff --git a/pandas/tseries/tests/test_holiday.py b/pandas/tseries/tests/test_holiday.py
index adc2c0d237265..c2300481eca43 100644
--- a/pandas/tseries/tests/test_holiday.py
+++ b/pandas/tseries/tests/test_holiday.py
@@ -72,6 +72,22 @@ def test_usmemorialday(self):
]
self.assertEqual(list(holidays), holidayList)
+ def test_non_observed_holiday(self):
+ july_3rd = Holiday('July 4th Eve', month=7, day=3)
+ result = july_3rd.dates("2001-01-01", "2003-03-03")
+ expected = [Timestamp('2001-07-03 00:00:00'),
+ Timestamp('2002-07-03 00:00:00')]
+ self.assertEqual(list(result), expected)
+ july_3rd = Holiday('July 4th Eve', month=7, day=3,
+ days_of_week=(0, 1, 2, 3))
+ result = july_3rd.dates("2001-01-01", "2008-03-03")
+ expected = [Timestamp('2001-07-03 00:00:00'),
+ Timestamp('2002-07-03 00:00:00'),
+ Timestamp('2003-07-03 00:00:00'),
+ Timestamp('2006-07-03 00:00:00'),
+ Timestamp('2007-07-03 00:00:00')]
+ self.assertEqual(list(result), expected)
+
def test_easter(self):
holidays = EasterMonday.dates(self.start_date,
self.end_date)
| This should fix issue 7987 where there is no offset or observance.
closes https://github.com/pydata/pandas/issues/7987
| https://api.github.com/repos/pandas-dev/pandas/pulls/7988 | 2014-08-11T18:43:05Z | 2014-08-19T17:07:30Z | 2014-08-19T17:07:30Z | 2014-08-19T19:33:11Z |
Fix DataFrame.to_latex() midrule positioning with MultiIndex columns | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 401cbb6e219f6..81b6c38c74a57 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -429,7 +429,7 @@ Bug Fixes
- ``Period`` and ``PeriodIndex`` addition/subtraction with ``np.timedelta64`` results in incorrect internal representations (:issue:`7740`)
-
+- Bug in ``DataFrame.to_latex`` formatting when columns or index is a ``MultiIndex`` (:issue:`7982`).
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 8f749d07296a7..41637cf60cc96 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -532,7 +532,7 @@ def write(buf, frame, column_format, strcols, longtable=False):
buf.write('\\begin{longtable}{%s}\n' % column_format)
buf.write('\\toprule\n')
- nlevels = frame.index.nlevels
+ nlevels = frame.columns.nlevels
for i, row in enumerate(zip(*strcols)):
if i == nlevels:
buf.write('\\midrule\n') # End of header
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 5d785df355aa3..d010222038a09 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2083,6 +2083,31 @@ def test_to_latex(self):
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
+ def test_to_latex_multiindex(self):
+ df = DataFrame({('x', 'y'): ['a']})
+ result = df.to_latex()
+ expected = r"""\begin{tabular}{ll}
+\toprule
+{} & x \\
+{} & y \\
+\midrule
+0 & a \\
+\bottomrule
+\end{tabular}
+"""
+ self.assertEqual(result, expected)
+
+ result = df.T.to_latex()
+ expected = r"""\begin{tabular}{ll}
+\toprule
+{} & 0 \\
+\midrule
+x y & a \\
+\bottomrule
+\end{tabular}
+"""
+ self.assertEqual(result, expected)
+
def test_to_latex_escape(self):
a = 'a'
b = 'b'
| Currently, the positioning of \midrule is determined by the number of index
levels, not columns levels:
```
>>> print pd.DataFrame({('x', 'y'): ['a']}).to_latex()
\begin{tabular}{ll}
\toprule
{} & x \\
\midrule
{} & y \\
0 & a \\
\bottomrule
\end{tabular}
```
The fix is simple: use the number of column levels instead of the number
of index levels.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7982 | 2014-08-10T20:38:49Z | 2014-08-19T17:51:26Z | 2014-08-19T17:51:26Z | 2014-09-09T05:46:21Z |
ENH: Add duplicated/drop_duplicates to Index | diff --git a/doc/source/api.rst b/doc/source/api.rst
index ec6e2aff870c6..feb4da700354d 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -374,6 +374,8 @@ Reindexing / Selection / Label manipulation
Series.align
Series.drop
+ Series.drop_duplicates
+ Series.duplicated
Series.equals
Series.first
Series.head
@@ -1165,6 +1167,8 @@ Modifying and Computations
Index.diff
Index.sym_diff
Index.drop
+ Index.drop_duplicates
+ Index.duplicated
Index.equals
Index.factorize
Index.identical
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index c6e784ac93e92..db3fea7d1e24f 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -179,6 +179,15 @@ API changes
- Histogram from ``DataFrame.plot`` with ``kind='hist'`` (:issue:`7809`), See :ref:`the docs<visualization.hist>`.
+- ``Index`` now supports ``duplicated`` and ``drop_duplicates``. (:issue:`4060`)
+
+ .. ipython:: python
+
+ idx = Index([1, 2, 3, 4, 1, 2])
+ idx
+ idx.duplicated()
+ idx.drop_duplicates()
+
.. _whatsnew_0150.dt:
.dt accessor
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 1655d2a4e4e23..348fb4f23cefc 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -8,8 +8,14 @@
from pandas.core import common as com
import pandas.core.nanops as nanops
import pandas.tslib as tslib
+import pandas.lib as lib
from pandas.util.decorators import Appender, cache_readonly
+
+_shared_docs = dict()
+_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='')
+
+
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
@@ -474,12 +480,66 @@ def searchsorted(self, key, side='left'):
#### needs tests/doc-string
return self.values.searchsorted(key, side=side)
+ _shared_docs['drop_duplicates'] = (
+ """Return %(klass)s with duplicate values removed
+
+ Parameters
+ ----------
+ take_last : boolean, default False
+ Take the last observed index in a group. Default first
+ %(inplace)s
+
+ Returns
+ -------
+ deduplicated : %(klass)s
+ """)
+
+ @Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
+ def drop_duplicates(self, take_last=False, inplace=False):
+ duplicated = self.duplicated(take_last=take_last)
+ result = self[~duplicated.values]
+ if inplace:
+ return self._update_inplace(result)
+ else:
+ return result
+
+ _shared_docs['duplicated'] = (
+ """Return boolean %(klass)s denoting duplicate values
+
+ Parameters
+ ----------
+ take_last : boolean, default False
+ Take the last observed index in a group. Default first
+
+ Returns
+ -------
+ duplicated : %(klass)s
+ """)
+
+ @Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
+ def duplicated(self, take_last=False):
+ keys = com._ensure_object(self.values)
+ duplicated = lib.duplicated(keys, take_last=take_last)
+ try:
+ return self._constructor(duplicated,
+ index=self.index).__finalize__(self)
+ except AttributeError:
+ from pandas.core.index import Index
+ return Index(duplicated)
+
#----------------------------------------------------------------------
# unbox reductions
all = _unbox(np.ndarray.all)
any = _unbox(np.ndarray.any)
+ #----------------------------------------------------------------------
+ # abstracts
+
+ def _update_inplace(self, result):
+ raise NotImplementedError
+
+
class DatetimeIndexOpsMixin(object):
""" common ops mixin to support a unified inteface datetimelike Index """
@@ -497,7 +557,6 @@ def _box_values(self, values):
"""
apply box func to passed values
"""
- import pandas.lib as lib
return lib.map_infer(values, self._box_func)
@cache_readonly
diff --git a/pandas/core/index.py b/pandas/core/index.py
index a58a3331f9759..0c0969cce8018 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -12,7 +12,7 @@
import pandas.algos as _algos
import pandas.index as _index
from pandas.lib import Timestamp, is_datetime_array
-from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin
+from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs
from pandas.util.decorators import Appender, cache_readonly, deprecate
from pandas.core.common import isnull, array_equivalent
import pandas.core.common as com
@@ -30,6 +30,8 @@
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
+_index_doc_kwargs = dict(klass='Index', inplace='')
+
def _try_get_item(x):
try:
@@ -209,6 +211,10 @@ def _simple_new(cls, values, name=None, **kwargs):
result._reset_identity()
return result
+ def _update_inplace(self, result):
+ # guard when called from IndexOpsMixin
+ raise TypeError("Index can't be updated inplace")
+
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
@@ -2019,6 +2025,15 @@ def drop(self, labels):
raise ValueError('labels %s not contained in axis' % labels[mask])
return self.delete(indexer)
+ @Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs)
+ def drop_duplicates(self, take_last=False):
+ result = super(Index, self).drop_duplicates(take_last=take_last)
+ return self._constructor(result)
+
+ @Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
+ def duplicated(self, take_last=False):
+ return super(Index, self).duplicated(take_last=take_last)
+
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable """
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5a490992c478c..2f0e651bfc5b1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -52,10 +52,13 @@
__all__ = ['Series']
+
_shared_doc_kwargs = dict(
axes='index',
klass='Series',
- axes_single_arg="{0,'index'}"
+ axes_single_arg="{0,'index'}",
+ inplace="""inplace : boolean, default False
+ If True, performs operation inplace and returns None."""
)
@@ -265,6 +268,9 @@ def _set_subtyp(self, is_all_dates):
else:
object.__setattr__(self, '_subtyp', 'series')
+ def _update_inplace(self, result):
+ return generic.NDFrame._update_inplace(self, result)
+
# ndarray compatibility
@property
def dtype(self):
@@ -1114,45 +1120,14 @@ def mode(self):
from pandas.core.algorithms import mode
return mode(self)
+ @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
def drop_duplicates(self, take_last=False, inplace=False):
- """
- Return Series with duplicate values removed
-
- Parameters
- ----------
- take_last : boolean, default False
- Take the last observed index in a group. Default first
- inplace : boolean, default False
- If True, performs operation inplace and returns None.
-
- Returns
- -------
- deduplicated : Series
- """
- duplicated = self.duplicated(take_last=take_last)
- result = self[-duplicated]
- if inplace:
- return self._update_inplace(result)
- else:
- return result
+ return super(Series, self).drop_duplicates(take_last=take_last,
+ inplace=inplace)
+ @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
def duplicated(self, take_last=False):
- """
- Return boolean Series denoting duplicate values
-
- Parameters
- ----------
- take_last : boolean, default False
- Take the last observed index in a group. Default first
-
- Returns
- -------
- duplicated : Series
- """
- keys = _ensure_object(self.values)
- duplicated = lib.duplicated(keys, take_last=take_last)
- return self._constructor(duplicated,
- index=self.index).__finalize__(self)
+ return super(Series, self).duplicated(take_last=take_last)
def idxmin(self, axis=None, out=None, skipna=True):
"""
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index b171b31528a55..8b0605dd391be 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -339,9 +339,13 @@ def test_value_counts_unique_nunique(self):
# freq must be specified because repeat makes freq ambiguous
expected_index = o[::-1]
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
- else:
+ elif isinstance(o, Index):
expected_index = values[::-1]
o = klass(np.repeat(values, range(1, len(o) + 1)))
+ else:
+ expected_index = values[::-1]
+ idx = np.repeat(o.index.values, range(1, len(o) + 1))
+ o = klass(np.repeat(values, range(1, len(o) + 1)), index=idx)
expected_s = Series(range(10, 0, -1), index=expected_index, dtype='int64')
tm.assert_series_equal(o.value_counts(), expected_s)
@@ -374,11 +378,16 @@ def test_value_counts_unique_nunique(self):
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
+ # freq must be specified because repeat makes freq ambiguous
expected_index = o
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
- else:
+ elif isinstance(o, Index):
expected_index = values
o = klass(np.repeat(values, range(1, len(o) + 1)))
+ else:
+ expected_index = values
+ idx = np.repeat(o.index.values, range(1, len(o) + 1))
+ o = klass(np.repeat(values, range(1, len(o) + 1)), index=idx)
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=expected_index[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=expected_index[9:1:-1], dtype='int64')
@@ -571,6 +580,63 @@ def test_factorize(self):
expected = o[5:].append(o[:5])
self.assertTrue(uniques.equals(expected))
+ def test_duplicated_drop_duplicates(self):
+ # GH 4060
+ for original in self.objs:
+
+ if isinstance(original, Index):
+ # original doesn't have duplicates
+ expected = Index([False] * len(original))
+ tm.assert_index_equal(original.duplicated(), expected)
+ result = original.drop_duplicates()
+ tm.assert_index_equal(result, original)
+ self.assertFalse(result is original)
+
+ # create repeated values, 3rd and 5th values are duplicated
+ idx = original[list(range(len(original))) + [5, 3]]
+ expected = Index([False] * len(original) + [True, True])
+ tm.assert_index_equal(idx.duplicated(), expected)
+ tm.assert_index_equal(idx.drop_duplicates(), original)
+
+ last_base = [False] * len(idx)
+ last_base[3] = True
+ last_base[5] = True
+ expected = Index(last_base)
+ tm.assert_index_equal(idx.duplicated(take_last=True), expected)
+ tm.assert_index_equal(idx.drop_duplicates(take_last=True),
+ idx[~np.array(last_base)])
+
+ with tm.assertRaisesRegexp(TypeError,
+ "drop_duplicates\(\) got an unexpected keyword argument"):
+ idx.drop_duplicates(inplace=True)
+
+ else:
+ expected = Series([False] * len(original), index=original.index)
+ tm.assert_series_equal(original.duplicated(), expected)
+ result = original.drop_duplicates()
+ tm.assert_series_equal(result, original)
+ self.assertFalse(result is original)
+
+ idx = original.index[list(range(len(original))) + [5, 3]]
+ values = original.values[list(range(len(original))) + [5, 3]]
+ s = Series(values, index=idx)
+
+ expected = Series([False] * len(original) + [True, True], index=idx)
+ tm.assert_series_equal(s.duplicated(), expected)
+ tm.assert_series_equal(s.drop_duplicates(), original)
+
+ last_base = [False] * len(idx)
+ last_base[3] = True
+ last_base[5] = True
+ expected = Series(last_base, index=idx)
+ expected
+ tm.assert_series_equal(s.duplicated(take_last=True), expected)
+ tm.assert_series_equal(s.drop_duplicates(take_last=True),
+ s[~np.array(last_base)])
+
+ s.drop_duplicates(inplace=True)
+ tm.assert_series_equal(s, original)
+
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index ed078ae5749de..a0c5d3ce5959a 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2031,6 +2031,20 @@ def test_duplicate_mi(self):
result = df.loc[('foo','bar')]
assert_frame_equal(result,expected)
+ def test_duplicated_drop_duplicates(self):
+ # GH 4060
+ idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2 ,3], [1, 1, 1, 1, 2, 2]))
+
+ expected = Index([False, False, False, True, False, False])
+ tm.assert_index_equal(idx.duplicated(), expected)
+ expected = MultiIndex.from_arrays(([1, 2, 3, 2 ,3], [1, 1, 1, 2, 2]))
+ tm.assert_index_equal(idx.drop_duplicates(), expected)
+
+ expected = Index([True, False, False, False, False, False])
+ tm.assert_index_equal(idx.duplicated(take_last=True), expected)
+ expected = MultiIndex.from_arrays(([2, 3, 1, 2 ,3], [1, 1, 1, 2, 2]))
+ tm.assert_index_equal(idx.drop_duplicates(take_last=True), expected)
+
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
| Closes #4060.
```
idx = pd.Index([1, 2, 3, 4, 1, 2])
idx.duplicated()
# Index([False, False, False, False, True, True], dtype='bool')
idx.drop_duplicates()
# Int64Index([1, 2, 3, 4], dtype='int64')
idx.duplicated(take_last=True)
# Index([True, True, False, False, False, False], dtype='bool')
idx.drop_duplicates(take_last=True)
# Int64Index([3, 4, 1, 2], dtype='int64')
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/7979 | 2014-08-10T15:25:48Z | 2014-08-15T12:54:51Z | 2014-08-15T12:54:51Z | 2014-08-15T13:01:47Z |
PERF: perf improvements for Series.transform (revised) (GH6496) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index e6c442159336b..16033dd75204c 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -162,7 +162,7 @@ previously results in ``Exception`` or ``TypeError`` (:issue:`7812`)
didx
didx.tz_localize(None)
-- ``DataFrame.tz_localize`` and ``DataFrame.tz_convert`` now accepts an optional ``level`` argument
+- ``DataFrame.tz_localize`` and ``DataFrame.tz_convert`` now accepts an optional ``level`` argument
for localizing a specific level of a MultiIndex (:issue:`7846`)
.. _whatsnew_0150.refactoring:
@@ -302,6 +302,7 @@ Performance
- Performance improvements in ``DatetimeIndex.__iter__`` to allow faster iteration (:issue:`7683`)
- Performance improvements in ``Period`` creation (and ``PeriodIndex`` setitem) (:issue:`5155`)
+- Improvements in Series.transform for significant performance gains (revised) (:issue:`6496`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 484d1d413c6c6..f26a7269772a3 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2270,14 +2270,21 @@ def transform(self, func, *args, **kwargs):
-------
transformed : Series
"""
- dtype = self._selected_obj.dtype
+ # if string function
if isinstance(func, compat.string_types):
- wrapper = lambda x: getattr(x, func)(*args, **kwargs)
- else:
- wrapper = lambda x: func(x, *args, **kwargs)
+ return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
+
+ # do we have a cython function
+ cyfunc = _intercept_cython(func)
+ if cyfunc and not args and not kwargs:
+ return self._transform_fast(cyfunc)
+ # reg transform
+ dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
+
+ wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
@@ -2302,6 +2309,29 @@ def transform(self, func, *args, **kwargs):
index=self._selected_obj.index,
name=self._selected_obj.name)
+ def _transform_fast(self, func):
+ """
+ fast version of transform, only applicable to builtin/cythonizable functions
+ """
+ if isinstance(func, compat.string_types):
+ func = getattr(self,func)
+ values = func().values
+ counts = self.count().values
+ values = np.repeat(values, counts)
+
+ # the values/counts are repeated according to the group index
+ indices = self.indices
+
+ # shortcut of we have an already ordered grouper
+ if Index(self.grouper.group_info[0]).is_monotonic:
+ result = Series(values, index=self.obj.index)
+ else:
+ index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
+ result = Series(values, index=index).sort_index()
+ result.index = self.obj.index
+
+ return result
+
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index c88ba836886bf..f621b0fb94eaf 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -795,6 +795,20 @@ def test_transform(self):
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
+ def test_transform_fast(self):
+
+ df = DataFrame( { 'id' : np.arange( 100000 ) / 3,
+ 'val': np.random.randn( 100000) } )
+
+ grp=df.groupby('id')['val']
+
+ expected = pd.Series(np.repeat(grp.mean().values, grp.count().values),index=df.index)
+ result = grp.transform(np.mean)
+ assert_series_equal(result,expected)
+
+ result = grp.transform('mean')
+ assert_series_equal(result,expected)
+
def test_transform_broadcast(self):
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
@@ -858,12 +872,14 @@ def test_transform_select_columns(self):
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(self):
+
+ # this also tests orderings in transform between
+ # series/frame to make sure its consistent
expected = {}
grouped = self.df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
-
result = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py
index 9f520df122c2f..788f228c81edd 100644
--- a/vb_suite/groupby.py
+++ b/vb_suite/groupby.py
@@ -444,5 +444,13 @@ def f(g):
df = DataFrame({ 'signal' : np.random.rand(N)})
"""
-
groupby_transform_series = Benchmark("df['signal'].groupby(g).transform(np.mean)", setup)
+
+setup = common_setup + """
+np.random.seed(0)
+
+df=DataFrame( { 'id' : np.arange( 100000 ) / 3,
+ 'val': np.random.randn( 100000) } )
+"""
+
+groupby_transform_series2 = Benchmark("df.groupby('id')['val'].transform(np.mean)", setup)
| xref #6496
Additional perf improvements for Series.transform (when specifying cythonizable functions)
```
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
groupby_transform_series2 | 98.7293 | 3621.1437 | 0.0273 |
groupby_transform2 | 19.5080 | 168.8046 | 0.1156 |
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/7975 | 2014-08-10T01:24:50Z | 2014-08-10T01:40:24Z | 2014-08-10T01:40:24Z | 2014-08-10T01:40:24Z |
BUG: Allow __name__less callables as groupby hows (GH7929) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 16033dd75204c..5824e5824e8b5 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -389,9 +389,8 @@ Bug Fixes
- Bug in ``GroupBy.transform()`` where int groups with a transform that
didn't preserve the index were incorrectly truncated (:issue:`7972`).
-
-
-
+- Bug in ``groupby`` where callable objects without name attributes would take the wrong path,
+ and produce a ``DataFrame`` instead of a ``Series`` (:issue:`7929`)
- Bug in ``read_html`` where the ``infer_types`` argument forced coercion of
diff --git a/pandas/core/common.py b/pandas/core/common.py
index d8314977742a4..bc4c95ed3323e 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -9,6 +9,7 @@
import csv
import types
from datetime import datetime, timedelta
+from functools import partial
from numpy.lib.format import read_array, write_array
import numpy as np
@@ -2432,7 +2433,22 @@ def _is_sequence(x):
except (TypeError, AttributeError):
return False
-
+def _get_callable_name(obj):
+ # typical case has name
+ if hasattr(obj, '__name__'):
+ return getattr(obj, '__name__')
+ # some objects don't; could recurse
+ if isinstance(obj, partial):
+ return _get_callable_name(obj.func)
+ # fall back to class name
+ if hasattr(obj, '__call__'):
+ return obj.__class__.__name__
+ # everything failed (probably because the argument
+ # wasn't actually callable); we return None
+ # instead of the empty string in this case to allow
+ # distinguishing between no name and a name of ''
+ return None
+
_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,
compat.text_type)))
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index f26a7269772a3..1f89bfe4cec9a 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1221,7 +1221,8 @@ def apply(self, f, data, axis=0):
group_keys = self._get_group_keys()
# oh boy
- if (f.__name__ not in _plotting_methods and
+ f_name = com._get_callable_name(f)
+ if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
@@ -2185,11 +2186,11 @@ def _aggregate_multiple_funcs(self, arg):
if isinstance(f, compat.string_types):
columns.append(f)
else:
- columns.append(f.__name__)
+ # protect against callables without names
+ columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
-
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index a52be0ee6a82e..5e91adbe1a2fa 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -38,6 +38,26 @@ def __getitem__(self):
assert(not is_seq(A()))
+def test_get_callable_name():
+ from functools import partial
+ getname = com._get_callable_name
+
+ def fn(x):
+ return x
+ lambda_ = lambda x: x
+ part1 = partial(fn)
+ part2 = partial(part1)
+ class somecall(object):
+ def __call__(self):
+ return x
+
+ assert getname(fn) == 'fn'
+ assert getname(lambda_)
+ assert getname(part1) == 'fn'
+ assert getname(part2) == 'fn'
+ assert getname(somecall()) == 'somecall'
+ assert getname(1) is None
+
def test_notnull():
assert notnull(1.)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f621b0fb94eaf..3a744129f0685 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -25,6 +25,7 @@
from pandas.core.panel import Panel
from pandas.tools.merge import concat
from collections import defaultdict
+from functools import partial
import pandas.core.common as com
import numpy as np
@@ -2910,6 +2911,24 @@ def test_multi_function_flexible_mix(self):
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
+ def test_agg_callables(self):
+ # GH 7929
+ df = DataFrame({'foo' : [1,2], 'bar' :[3,4]}).astype(np.int64)
+
+ class fn_class(object):
+ def __call__(self, x):
+ return sum(x)
+
+ equiv_callables = [sum, np.sum,
+ lambda x: sum(x),
+ lambda x: x.sum(),
+ partial(sum), fn_class()]
+
+ expected = df.groupby("foo").agg(sum)
+ for ecall in equiv_callables:
+ result = df.groupby('foo').agg(ecall)
+ assert_frame_equal(result, expected)
+
def test_set_group_name(self):
def f(group):
assert group.name is not None
@@ -4530,6 +4549,8 @@ def test_transform_doesnt_clobber_ints(self):
tm.assert_frame_equal(result, expected)
+
+
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index ff8b6945a23be..f4a96f5defab0 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -1,6 +1,7 @@
# pylint: disable=E1101
from datetime import datetime, timedelta
+from functools import partial
from pandas.compat import range, lrange, zip, product
import numpy as np
@@ -140,6 +141,30 @@ def _ohlc(group):
exc.args += ('how=%s' % arg,)
raise
+ def test_resample_how_callables(self):
+ # GH 7929
+ data = np.arange(5, dtype=np.int64)
+ ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
+ df = pd.DataFrame({"A": data, "B": data}, index=ind)
+
+ def fn(x, a=1):
+ return str(type(x))
+
+ class fn_class:
+ def __call__(self, x):
+ return str(type(x))
+
+ df_standard = df.resample("M", how=fn)
+ df_lambda = df.resample("M", how=lambda x: str(type(x)))
+ df_partial = df.resample("M", how=partial(fn))
+ df_partial2 = df.resample("M", how=partial(fn, a=2))
+ df_class = df.resample("M", how=fn_class())
+
+ assert_frame_equal(df_standard, df_lambda)
+ assert_frame_equal(df_standard, df_partial)
+ assert_frame_equal(df_standard, df_partial2)
+ assert_frame_equal(df_standard, df_class)
+
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
@@ -765,6 +790,7 @@ def test_resample_timegrouper(self):
assert_frame_equal(result, expected)
+
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
| Allow `functools.partial` objects (and similar callables without `__name__` attributes) to be used as `groupby` functions, which also adds support along the tree (e.g. `resample`, which is specifically tested.)
Closes #7929.
This does not address the potential enhancements allowing automatic naming of duplicate-named functions (relatively minor) and the use of a Series to specify names (more useful).
| https://api.github.com/repos/pandas-dev/pandas/pulls/7974 | 2014-08-10T01:13:29Z | 2014-08-10T03:47:08Z | 2014-08-10T03:47:08Z | 2014-08-10T03:47:49Z |
BUG: fix transform with integers | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index a9266c24df8ee..e6c442159336b 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -385,7 +385,8 @@ Bug Fixes
-
+- Bug in ``GroupBy.transform()`` where int groups with a transform that
+ didn't preserve the index were incorrectly truncated (:issue:`7972`).
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 212e5086ee543..484d1d413c6c6 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2858,7 +2858,9 @@ def transform(self, func, *args, **kwargs):
# a grouped that doesn't preserve the index, remap index based on the grouper
# and broadcast it
- if not isinstance(obj.index,MultiIndex) and type(result.index) != type(obj.index):
+ if ((not isinstance(obj.index,MultiIndex) and
+ type(result.index) != type(obj.index)) or
+ len(result.index) != len(obj.index)):
results = obj.values.copy()
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
@@ -2868,7 +2870,7 @@ def transform(self, func, *args, **kwargs):
# we can merge the result in
# GH 7383
names = result.columns
- result = obj.merge(result, how='outer', left_index=True, right_index=True).ix[:,-result.shape[1]:]
+ result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 8e9503b4fe1a3..c88ba836886bf 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3103,7 +3103,6 @@ def test_groupby_categorical_no_compress(self):
exp = np.array([1,2,4,np.nan])
self.assert_numpy_array_equivalent(result, exp)
-
def test_groupby_first_datetime64(self):
df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
df[1] = df[1].view('M8[ns]')
@@ -4500,6 +4499,20 @@ def test_nsmallest(self):
[0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
+ def test_transform_doesnt_clobber_ints(self):
+ # GH 7972
+ n = 6
+ x = np.arange(n)
+ df = DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})
+ df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})
+
+ gb = df.groupby('a')
+ result = gb.transform('mean')
+
+ gb2 = df2.groupby('a')
+ expected = gb2.transform('mean')
+ tm.assert_frame_equal(result, expected)
+
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
| closes #7972
| https://api.github.com/repos/pandas-dev/pandas/pulls/7973 | 2014-08-10T00:03:55Z | 2014-08-10T00:27:49Z | 2014-08-10T00:27:49Z | 2014-08-10T00:27:51Z |
CI/WIP: Use conda for most deps | diff --git a/.travis.yml b/.travis.yml
index d13509805e0f8..a4d323ef8ba95 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,6 +2,7 @@
language: python
env:
+
global:
# scatterci API key
#- secure: "Bx5umgo6WjuGY+5XFa004xjCiX/vq0CyMZ/ETzcs7EIBI1BE/0fIDXOoWhoxbY9HPfdPGlDnDgB9nGqr5wArO2s+BavyKBWg6osZ3dmkfuJPMOWeyCa92EeP+sfKw8e5HSU5MizW9e319wHWOF/xkzdHR7T67Qd5erhv91x4DnQ="
@@ -19,6 +20,7 @@ matrix:
- NOSE_ARGS="not slow and not network and not disabled"
- CLIPBOARD=xclip
- LOCALE_OVERRIDE="it_IT.UTF-8"
+ - BUILD_TYPE=conda
- JOB_NAME: "26_nslow_nnet"
- python: 2.7
env:
@@ -26,12 +28,14 @@ matrix:
- LOCALE_OVERRIDE="zh_CN.GB18030"
- FULL_DEPS=true
- JOB_TAG=_LOCALE
+ - BUILD_TYPE=conda
- JOB_NAME: "27_slow_nnet_LOCALE"
- python: 2.7
env:
- NOSE_ARGS="not slow and not disabled"
- FULL_DEPS=true
- CLIPBOARD_GUI=gtk2
+ - BUILD_TYPE=conda
- JOB_NAME: "27_nslow"
- DOC_BUILD=true # if rst files were changed, build docs in parallel with tests
- python: 3.3
@@ -39,18 +43,21 @@ matrix:
- NOSE_ARGS="not slow and not disabled"
- FULL_DEPS=true
- CLIPBOARD=xsel
+ - BUILD_TYPE=conda
- JOB_NAME: "33_nslow"
- python: 3.4
env:
- NOSE_ARGS="not slow and not disabled"
- FULL_DEPS=true
- CLIPBOARD=xsel
+ - BUILD_TYPE=conda
- JOB_NAME: "34_nslow"
- python: 3.2
env:
- NOSE_ARGS="not slow and not disabled"
- FULL_DEPS=true
- CLIPBOARD_GUI=qt4
+ - BUILD_TYPE=pydata
- JOB_NAME: "32_nslow"
- python: 2.7
env:
@@ -59,6 +66,7 @@ matrix:
- JOB_NAME: "27_numpy_master"
- JOB_TAG=_NUMPY_DEV_master
- NUMPY_BUILD=master
+ - BUILD_TYPE=pydata
- PANDAS_TESTING_MODE="deprecate"
allow_failures:
- python: 3.2
@@ -66,6 +74,7 @@ matrix:
- NOSE_ARGS="not slow and not disabled"
- FULL_DEPS=true
- CLIPBOARD_GUI=qt4
+ - BUILD_TYPE=pydata
- JOB_NAME: "32_nslow"
- python: 2.7
env:
@@ -74,11 +83,14 @@ matrix:
- JOB_NAME: "27_numpy_master"
- JOB_TAG=_NUMPY_DEV_master
- NUMPY_BUILD=master
+ - BUILD_TYPE=pydata
- PANDAS_TESTING_MODE="deprecate"
before_install:
- echo "before_install"
- echo $VIRTUAL_ENV
+ - export PATH="$HOME/miniconda/bin:$PATH"
+ - sudo apt-get install ccache
- df -h
- date
- pwd
@@ -92,7 +104,7 @@ before_install:
install:
- echo "install"
- ci/prep_ccache.sh
- - ci/install.sh
+ - ci/install_${BUILD_TYPE}.sh
- ci/submit_ccache.sh
before_script:
@@ -106,6 +118,6 @@ script:
after_script:
- if [ -f /tmp/doc.log ]; then cat /tmp/doc.log; fi
- - ci/print_versions.py
+ - source activate pandas && ci/print_versions.py
- ci/print_skipped.py /tmp/nosetests.xml
- ci/after_script.sh
diff --git a/ci/install_conda.sh b/ci/install_conda.sh
new file mode 100755
index 0000000000000..ec0aa5fef84ae
--- /dev/null
+++ b/ci/install_conda.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+# There are 2 distinct pieces that get zipped and cached
+# - The venv site-packages dir including the installed dependencies
+# - The pandas build artifacts, using the build cache support via
+# scripts/use_build_cache.py
+#
+# if the user opted in to use the cache and we're on a whitelisted fork
+# - if the server doesn't hold a cached version of venv/pandas build,
+# do things the slow way, and put the results on the cache server
+# for the next time.
+# - if the cache files are available, instal some necessaries via apt
+# (no compiling needed), then directly goto script and collect 200$.
+#
+
+function edit_init()
+{
+ if [ -n "$LOCALE_OVERRIDE" ]; then
+ echo "Adding locale to the first line of pandas/__init__.py"
+ rm -f pandas/__init__.pyc
+ sedc="3iimport locale\nlocale.setlocale(locale.LC_ALL, '$LOCALE_OVERRIDE')\n"
+ sed -i "$sedc" pandas/__init__.py
+ echo "head -4 pandas/__init__.py"
+ head -4 pandas/__init__.py
+ echo
+ fi
+}
+
+edit_init
+
+python_major_version="${TRAVIS_PYTHON_VERSION:0:1}"
+[ "$python_major_version" == "2" ] && python_major_version=""
+
+home_dir=$(pwd)
+echo "home_dir: [$home_dir]"
+
+if [ -n "$LOCALE_OVERRIDE" ]; then
+ # make sure the locale is available
+ # probably useless, since you would need to relogin
+ time sudo locale-gen "$LOCALE_OVERRIDE"
+fi
+
+# Need to enable for locale testing. The location of the locale file(s) is
+# distro specific. For example, on Arch Linux all of the locales are in a
+# commented file--/etc/locale.gen--that must be commented in to be used
+# whereas Ubuntu looks in /var/lib/locales/supported.d/* and generates locales
+# based on what's in the files in that folder
+time echo 'it_CH.UTF-8 UTF-8' | sudo tee -a /var/lib/locales/supported.d/it
+time sudo locale-gen
+
+
+# install gui for clipboard testing
+if [ -n "$CLIPBOARD_GUI" ]; then
+ echo "Using CLIPBOARD_GUI: $CLIPBOARD_GUI"
+ [ -n "$python_major_version" ] && py="py"
+ python_cb_gui_pkg=python${python_major_version}-${py}${CLIPBOARD_GUI}
+ time sudo apt-get $APT_ARGS install $python_cb_gui_pkg
+fi
+
+
+# install a clipboard if $CLIPBOARD is not empty
+if [ -n "$CLIPBOARD" ]; then
+ echo "Using clipboard: $CLIPBOARD"
+ time sudo apt-get $APT_ARGS install $CLIPBOARD
+fi
+
+python_major_version="${TRAVIS_PYTHON_VERSION:0:1}"
+[ "$python_major_version" == "2" ] && python_major_version=""
+
+wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh || exit 1
+bash miniconda.sh -b -p $HOME/miniconda || exit 1
+
+conda config --set always_yes yes --set changeps1 no || exit 1
+conda update -q conda || exit 1
+conda config --add channels http://conda.binstar.org/pandas || exit 1
+
+# Useful for debugging any issues with conda
+conda info -a || exit 1
+
+conda create -n pandas python=$TRAVIS_PYTHON_VERSION || exit 1
+conda install -n pandas --file=ci/requirements-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.txt || exit 1
+
+conda install -n pandas pip setuptools nose || exit 1
+conda remove -n pandas pandas
+
+source activate pandas
+
+# set the compiler cache to work
+if [ "$IRON_TOKEN" ]; then
+ export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH
+ gcc=$(which gcc)
+ echo "gcc: $gcc"
+ ccache=$(which ccache)
+ echo "ccache: $ccache"
+ export CC='ccache gcc'
+fi
+
+python setup.py build_ext --inplace && python setup.py develop
+
+for package in beautifulsoup4 'python-dateutil'; do
+ pip uninstall --yes $package
+done
+
+true
diff --git a/ci/install.sh b/ci/install_pydata.sh
similarity index 97%
rename from ci/install.sh
rename to ci/install_pydata.sh
index f146f3ba7ee82..33a6d3854da22 100755
--- a/ci/install.sh
+++ b/ci/install_pydata.sh
@@ -137,11 +137,8 @@ if [ "$IRON_TOKEN" ]; then
fi
# build pandas
-time python setup.py sdist
-pip uninstall cython -y
-
-# install pandas
-time pip install $(find dist | grep gz | head -n 1)
+python setup.py build_ext --inplace
+python setup.py develop
# restore cython (if not numpy building)
if [ -z "$NUMPY_BUILD" ]; then
diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt
index 3a845f4ee0540..9b338cee26801 100644
--- a/ci/requirements-2.6.txt
+++ b/ci/requirements-2.6.txt
@@ -1,16 +1,16 @@
-numpy==1.7.0
-cython==0.19.1
-python-dateutil==1.5
-pytz==2013b
-http://www.crummy.com/software/BeautifulSoup/bs4/download/4.2/beautifulsoup4-4.2.0.tar.gz
-html5lib==1.0b2
-numexpr==1.4.2
-sqlalchemy==0.7.10
-pymysql==0.6.0
-psycopg2==2.5
-scipy==0.11.0
-statsmodels==0.4.3
-xlwt==0.7.5
-openpyxl==2.0.3
-xlsxwriter==0.4.6
-xlrd==0.9.2
+numpy=1.7.0
+cython=0.19.1
+dateutil=1.5
+pytz=2013b
+scipy=0.11.0
+xlwt=0.7.5
+xlrd=0.9.2
+openpyxl=2.0.3
+statsmodels=0.4.3
+html5lib=1.0b2
+beautiful-soup=4.2.0
+psycopg2=2.5.1
+numexpr=1.4.2
+pymysql=0.6.0
+sqlalchemy=0.7.8
+xlsxwriter=0.4.6
diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt
index 5b77428a0f6d7..f3df26e7a0c24 100644
--- a/ci/requirements-2.7.txt
+++ b/ci/requirements-2.7.txt
@@ -1,25 +1,25 @@
-python-dateutil==2.1
-pytz==2013b
-xlwt==0.7.5
-numpy==1.8.1
-cython==0.19.1
-bottleneck==0.6.0
-numexpr==2.2.2
-tables==3.0.0
-matplotlib==1.3.1
-openpyxl==1.6.2
-xlsxwriter==0.4.6
-xlrd==0.9.2
-patsy==0.1.0
-sqlalchemy==0.9.6
-pymysql==0.6.1
-psycopg2==2.5.2
-html5lib==1.0b2
-lxml==3.2.1
-scipy==0.13.3
-beautifulsoup4==4.2.1
-statsmodels==0.5.0
-boto==2.26.1
-httplib2==0.8
-python-gflags==2.0
-google-api-python-client==1.2
+dateutil=2.1
+pytz=2013b
+xlwt=0.7.5
+numpy=1.7.0
+cython=0.19.1
+numexpr=2.2.2
+pytables=3.0.0
+matplotlib=1.3.1
+openpyxl=1.6.2
+xlrd=0.9.2
+sqlalchemy=0.9.6
+lxml=3.2.1
+scipy
+xlsxwriter=0.4.6
+statsmodels
+boto=2.26.1
+bottleneck=0.8.0
+psycopg2=2.5.2
+patsy
+pymysql=0.6.1
+html5lib=1.0b2
+beautiful-soup=4.2.1
+httplib2=0.8
+python-gflags=2.0
+google-api-python-client=1.2
diff --git a/ci/requirements-2.7_LOCALE.txt b/ci/requirements-2.7_LOCALE.txt
index 9af33fe96d58c..036e597e5b788 100644
--- a/ci/requirements-2.7_LOCALE.txt
+++ b/ci/requirements-2.7_LOCALE.txt
@@ -1,18 +1,18 @@
-python-dateutil
-pytz==2013b
-xlwt==0.7.5
-openpyxl==1.6.2
-xlsxwriter==0.4.6
-xlrd==0.9.2
-numpy==1.7.1
-cython==0.19.1
-bottleneck==0.6.0
-matplotlib==1.3.0
-patsy==0.1.0
-sqlalchemy==0.8.1
-html5lib==1.0b2
-lxml==3.2.1
-scipy==0.10.0
-beautifulsoup4==4.2.1
-statsmodels==0.4.3
-bigquery==2.0.17
+dateutil
+pytz=2013b
+xlwt=0.7.5
+openpyxl=1.6.2
+xlsxwriter=0.4.6
+xlrd=0.9.2
+numpy=1.7.1
+cython=0.19.1
+bottleneck=0.8.0
+matplotlib=1.3.0
+patsy=0.1.0
+sqlalchemy=0.8.1
+html5lib=1.0b2
+lxml=3.2.1
+scipy=0.11.0
+beautiful-soup=4.2.1
+statsmodels=0.4.3
+bigquery=2.0.17
diff --git a/ci/requirements-3.3.txt b/ci/requirements-3.3.txt
index fc8cb04387a55..c9beec81236fb 100644
--- a/ci/requirements-3.3.txt
+++ b/ci/requirements-3.3.txt
@@ -1,17 +1,17 @@
-python-dateutil==2.2
-pytz==2013b
-openpyxl==1.6.2
-xlsxwriter==0.4.6
-xlrd==0.9.2
-html5lib==1.0b2
-numpy==1.8.0
-cython==0.19.1
-numexpr==2.3
-tables==3.1.0
-bottleneck==0.8.0
-matplotlib==1.2.1
-patsy==0.1.0
-lxml==3.2.1
-scipy==0.13.3
-beautifulsoup4==4.2.1
-statsmodels==0.5.0
+dateutil
+pytz=2013b
+openpyxl=1.6.2
+xlsxwriter=0.4.6
+xlrd=0.9.2
+html5lib=1.0b2
+numpy=1.8.0
+cython=0.19.1
+numexpr
+pytables
+bottleneck=0.8.0
+matplotlib
+patsy
+lxml=3.2.1
+scipy
+beautiful-soup=4.2.1
+statsmodels
diff --git a/ci/requirements-3.4.txt b/ci/requirements-3.4.txt
index 0747e6f54cd73..33d3b3b4dc459 100644
--- a/ci/requirements-3.4.txt
+++ b/ci/requirements-3.4.txt
@@ -1,19 +1,19 @@
-python-dateutil
+dateutil
pytz
openpyxl
xlsxwriter
xlrd
html5lib
-numpy==1.8.0
-cython==0.20.2
-scipy==0.13.3
-numexpr==2.4
-tables==3.1.0
-bottleneck==0.8.0
-matplotlib==1.3.1
patsy
-lxml==3.3.5
-sqlalchemy==0.9.6
-pymysql==0.6.1
-psycopg2==2.5.2
-beautifulsoup4
+beautiful-soup
+numpy
+cython
+scipy
+numexpr
+pytables
+matplotlib
+lxml
+sqlalchemy
+bottleneck
+pymysql
+psycopg2
diff --git a/ci/script.sh b/ci/script.sh
index 152a2f1ebdcf9..b1ba7ba79c816 100755
--- a/ci/script.sh
+++ b/ci/script.sh
@@ -2,22 +2,23 @@
echo "inside $0"
+source activate pandas
+
if [ -n "$LOCALE_OVERRIDE" ]; then
export LC_ALL="$LOCALE_OVERRIDE";
echo "Setting LC_ALL to $LOCALE_OVERRIDE"
- curdir="$(pwd)"
- cd /tmp
+
pycmd='import pandas; print("pandas detected console encoding: %s" % pandas.get_option("display.encoding"))'
python -c "$pycmd"
- cd "$curdir"
fi
# conditionally build and upload docs to GH/pandas-docs/pandas-docs/travis
"$TRAVIS_BUILD_DIR"/ci/build_docs.sh 2>&1 > /tmp/doc.log &
# doc build log will be shown after tests
-echo nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
-nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
+
+echo nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
+nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
RET="$?"
| closes #6870
- [x] use conda.binstar.org/cpcloud channel for versions/packages not in conda defaults
- [x] possibly bring back the ccache, not totally sure what this is doing
this doesn't remove maintenance, but it sure does simplify things, since conda recipes are very easy to create
| https://api.github.com/repos/pandas-dev/pandas/pulls/7971 | 2014-08-09T18:35:35Z | 2014-10-02T17:02:28Z | 2014-10-02T17:02:28Z | 2014-10-02T17:03:41Z |
ENH Warn about panel.to_frame() discarding NaN GH7879 | diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 1e6ed56386f63..2a8c3844f8f75 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -8,6 +8,7 @@
from pandas import compat
import sys
import numpy as np
+import warnings
from pandas.core.common import (PandasError, _try_sort, _default_index,
_infer_dtype_from_scalar, notnull)
from pandas.core.categorical import Categorical
@@ -846,6 +847,7 @@ def to_frame(self, filter_observations=True):
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
+ Default will be changed to False in future versions
Returns
-------
@@ -858,6 +860,13 @@ def to_frame(self, filter_observations=True):
mask = com.notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
+ if not selector.all():
+ warnings.warn("Panel to_frame method discards entries with "
+ "NaN/None in the data by default, use "
+ "filter_observations = False to save them. "
+ "This will be default behaviour "
+ "in future versions.",
+ FutureWarning)
else:
# size = N * K
selector = slice(None, None)
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index fb1f1c1693fdd..07bad6fe19447 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1496,7 +1496,11 @@ def test_to_frame_multi_major(self):
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
- result = wp.to_frame()
+
+ with tm.assert_produces_warning(FutureWarning):
+ setattr(panelm, '__warningregistry__', {})
+ result = wp.to_frame()
+
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'),
| Also there was unimported module warnings with calls to it. In pandas/core/panel.py lines 718, 748 for example.
closes #7879
| https://api.github.com/repos/pandas-dev/pandas/pulls/7970 | 2014-08-09T12:35:21Z | 2015-08-15T23:57:54Z | null | 2015-08-15T23:57:54Z |
ENH/BUG: Period and PeriodIndex ops supports timedelta-like | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 7a912361d0e14..c69cd12673463 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -4,7 +4,7 @@
.. ipython:: python
:suppress:
- from datetime import datetime
+ from datetime import datetime, timedelta
import numpy as np
np.random.seed(123456)
from pandas import *
@@ -1098,6 +1098,36 @@ frequency.
p - 3
+If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have same freq. Otherise, ``ValueError`` will be raised.
+
+.. ipython:: python
+
+ p = Period('2014-07-01 09:00', freq='H')
+ p + Hour(2)
+ p + timedelta(minutes=120)
+ p + np.timedelta64(7200, 's')
+
+.. code-block:: python
+
+ In [1]: p + Minute(5)
+ Traceback
+ ...
+ ValueError: Input has different freq from Period(freq=H)
+
+If ``Period`` has other freqs, only the same ``offsets`` can be added. Otherwise, ``ValueError`` will be raised.
+
+.. ipython:: python
+
+ p = Period('2014-07', freq='M')
+ p + MonthEnd(3)
+
+.. code-block:: python
+
+ In [1]: p + MonthBegin(3)
+ Traceback
+ ...
+ ValueError: Input has different freq from Period(freq=M)
+
Taking the difference of ``Period`` instances with the same frequency will
return the number of frequency units between them:
@@ -1129,6 +1159,18 @@ objects:
ps = Series(randn(len(prng)), prng)
ps
+``PeriodIndex`` supports addition and subtraction as the same rule as ``Period``.
+
+.. ipython:: python
+
+ idx = period_range('2014-07-01 09:00', periods=5, freq='H')
+ idx
+ idx + Hour(2)
+
+ idx = period_range('2014-07', periods=5, freq='M')
+ idx
+ idx + MonthEnd(3)
+
PeriodIndex Partial String Indexing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index a9266c24df8ee..61a012ab5b503 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -271,10 +271,21 @@ Enhancements
+- ``Period`` and ``PeriodIndex`` supports addition/subtraction with ``timedelta``-likes (:issue:`7966`)
+ If ``Period`` freq is ``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``, ``timedelta``-like can be added if the result can have same freq. Otherwise, only the same ``offsets`` can be added.
+ .. ipython:: python
+ idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H')
+ idx
+ idx + pd.offsets.Hour(2)
+ idx + timedelta(minutes=120)
+ idx + np.timedelta64(7200, 's')
+ idx = pd.period_range('2014-07', periods=5, freq='M')
+ idx
+ idx + pd.offsets.MonthEnd(3)
@@ -414,6 +425,10 @@ Bug Fixes
+- ``Period`` and ``PeriodIndex`` addition/subtraction with ``np.timedelta64`` results in incorrect internal representations (:issue:`7740`)
+
+
+
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 356984ea88f43..211bd03262fac 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -915,6 +915,8 @@ def test_resolution(self):
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
+ tm._skip_if_not_numpy17_friendly()
+
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
@@ -968,11 +970,64 @@ def test_add_iadd(self):
tm.assert_index_equal(rng, expected)
# offset
- for delta in [pd.offsets.Hour(2), timedelta(hours=2)]:
- rng = pd.period_range('2000-01-01', '2000-02-01')
- with tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'):
+ # DateOffset
+ rng = pd.period_range('2014', '2024', freq='A')
+ result = rng + pd.offsets.YearEnd(5)
+ expected = pd.period_range('2019', '2029', freq='A')
+ tm.assert_index_equal(result, expected)
+ rng += pd.offsets.YearEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng + o
+
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ result = rng + pd.offsets.MonthEnd(5)
+ expected = pd.period_range('2014-06', '2017-05', freq='M')
+ tm.assert_index_equal(result, expected)
+ rng += pd.offsets.MonthEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng + o
+
+ # Tick
+ offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
+ pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
+ for delta in offsets:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ result = rng + delta
+ expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
+ tm.assert_index_equal(result, expected)
+ rng += delta
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23)]:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng + o
+
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
+ pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
+ for delta in offsets:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ result = rng + delta
+ expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
+ tm.assert_index_equal(result, expected)
+ rng += delta
+ tm.assert_index_equal(rng, expected)
+
+ for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
result = rng + delta
- with tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'):
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng += delta
# int
@@ -984,6 +1039,8 @@ def test_add_iadd(self):
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
+ tm._skip_if_not_numpy17_friendly()
+
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
@@ -1027,10 +1084,65 @@ def test_sub_isub(self):
tm.assert_index_equal(rng, expected)
# offset
- for delta in [pd.offsets.Hour(2), timedelta(hours=2)]:
- with tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'):
+ # DateOffset
+ rng = pd.period_range('2014', '2024', freq='A')
+ result = rng - pd.offsets.YearEnd(5)
+ expected = pd.period_range('2009', '2019', freq='A')
+ tm.assert_index_equal(result, expected)
+ rng -= pd.offsets.YearEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ rng = pd.period_range('2014', '2024', freq='A')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng - o
+
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ result = rng - pd.offsets.MonthEnd(5)
+ expected = pd.period_range('2013-08', '2016-07', freq='M')
+ tm.assert_index_equal(result, expected)
+ rng -= pd.offsets.MonthEnd(5)
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ rng = pd.period_range('2014-01', '2016-12', freq='M')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng - o
+
+ # Tick
+ offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
+ pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
+ for delta in offsets:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ result = rng - delta
+ expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
+ tm.assert_index_equal(result, expected)
+ rng -= delta
+ tm.assert_index_equal(rng, expected)
+
+ for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23)]:
+ rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ rng - o
+
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
+ pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
+ for delta in offsets:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ result = rng - delta
+ expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
+ tm.assert_index_equal(result, expected)
+ rng -= delta
+ tm.assert_index_equal(rng, expected)
+
+ for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
+ rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
result = rng + delta
- with tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'):
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng += delta
# int
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index e80fdf28c4089..f12badc080f12 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1,7 +1,7 @@
# pylint: disable=E1101,E1103,W0232
import operator
-from datetime import datetime, date
+from datetime import datetime, date, timedelta
import numpy as np
from pandas.core.base import PandasObject
@@ -10,6 +10,7 @@
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.core.base import DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
+import pandas.tseries.offsets as offsets
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
@@ -169,8 +170,37 @@ def __ne__(self, other):
def __hash__(self):
return hash((self.ordinal, self.freq))
+ def _add_delta(self, other):
+ if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
+ offset = frequencies.to_offset(self.freq)
+ if isinstance(offset, offsets.Tick):
+ nanos = tslib._delta_to_nanoseconds(other)
+ offset_nanos = tslib._delta_to_nanoseconds(offset)
+
+ if nanos % offset_nanos == 0:
+ if self.ordinal == tslib.iNaT:
+ ordinal = self.ordinal
+ else:
+ ordinal = self.ordinal + (nanos // offset_nanos)
+ return Period(ordinal=ordinal, freq=self.freq)
+ elif isinstance(other, offsets.DateOffset):
+ freqstr = frequencies.get_standard_freq(other)
+ base = frequencies.get_base_alias(freqstr)
+
+ if base == self.freq:
+ if self.ordinal == tslib.iNaT:
+ ordinal = self.ordinal
+ else:
+ ordinal = self.ordinal + other.n
+ return Period(ordinal=ordinal, freq=self.freq)
+
+ raise ValueError("Input has different freq from Period(freq={0})".format(self.freq))
+
def __add__(self, other):
- if com.is_integer(other):
+ if isinstance(other, (timedelta, np.timedelta64,
+ offsets.Tick, offsets.DateOffset)):
+ return self._add_delta(other)
+ elif com.is_integer(other):
if self.ordinal == tslib.iNaT:
ordinal = self.ordinal
else:
@@ -180,13 +210,17 @@ def __add__(self, other):
return NotImplemented
def __sub__(self, other):
- if com.is_integer(other):
+ if isinstance(other, (timedelta, np.timedelta64,
+ offsets.Tick, offsets.DateOffset)):
+ neg_other = -other
+ return self + neg_other
+ elif com.is_integer(other):
if self.ordinal == tslib.iNaT:
ordinal = self.ordinal
else:
ordinal = self.ordinal - other
return Period(ordinal=ordinal, freq=self.freq)
- if isinstance(other, Period):
+ elif isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
@@ -862,6 +896,22 @@ def to_timestamp(self, freq=None, how='start'):
new_data = tslib.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
+ def _add_delta(self, other):
+ if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
+ offset = frequencies.to_offset(self.freq)
+ if isinstance(offset, offsets.Tick):
+ nanos = tslib._delta_to_nanoseconds(other)
+ offset_nanos = tslib._delta_to_nanoseconds(offset)
+ if nanos % offset_nanos == 0:
+ return self.shift(nanos // offset_nanos)
+ elif isinstance(other, offsets.DateOffset):
+ freqstr = frequencies.get_standard_freq(other)
+ base = frequencies.get_base_alias(freqstr)
+
+ if base == self.freq:
+ return self.shift(other.n)
+ raise ValueError("Input has different freq from PeriodIndex(freq={0})".format(self.freq))
+
def shift(self, n):
"""
Specialized shift which produces an PeriodIndex
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index b7abedbafa7b0..3fae251b433e6 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -16,6 +16,7 @@
from pandas.tseries.index import DatetimeIndex, date_range, Index
from pandas.tseries.tools import to_datetime
import pandas.tseries.period as period
+import pandas.tseries.offsets as offsets
import pandas.core.datetools as datetools
import pandas as pd
@@ -26,7 +27,7 @@
from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
- assertRaisesRegexp)
+ assertRaisesRegexp, _skip_if_not_numpy17_friendly)
import pandas.util.testing as tm
from pandas import compat
from numpy.testing import assert_array_equal
@@ -2484,6 +2485,190 @@ def test_add(self):
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + dt2
+ def test_add_offset(self):
+ _skip_if_not_numpy17_friendly()
+
+ # freq is DateOffset
+ p = Period('2011', freq='A')
+ self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq='A'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ p + o
+
+ p = Period('2011-03', freq='M')
+ self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq='M'))
+ self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq='M'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ p + o
+
+ # freq is Tick
+ p = Period('2011-04-01', freq='D')
+ self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq='D'))
+ self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq='D'))
+ self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq='D'))
+ self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq='D'))
+ self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq='D'))
+ self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq='D'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ p + o
+
+ p = Period('2011-04-01 09:00', freq='H')
+ self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq='H'))
+ self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq='H'))
+ self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq='H'))
+ self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq='H'))
+ self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq='H'))
+ self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq='H'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ p + o
+
+ def test_add_offset_nat(self):
+ _skip_if_not_numpy17_friendly()
+
+ # freq is DateOffset
+ p = Period('NaT', freq='A')
+ for o in [offsets.YearEnd(2)]:
+ self.assertEqual((p + o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaises(ValueError):
+ p + o
+
+ p = Period('NaT', freq='M')
+ for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
+ self.assertEqual((p + o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ p + o
+
+ # freq is Tick
+ p = Period('NaT', freq='D')
+ for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
+ np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
+ self.assertEqual((p + o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ p + o
+
+ p = Period('NaT', freq='H')
+ for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
+ np.timedelta64(3600, 's'), timedelta(minutes=120),
+ timedelta(days=4, minutes=180)]:
+ self.assertEqual((p + o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
+ p + o
+
+ def test_sub_offset(self):
+ _skip_if_not_numpy17_friendly()
+
+ # freq is DateOffset
+ p = Period('2011', freq='A')
+ self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq='A'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
+ p = Period('2011-03', freq='M')
+ self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq='M'))
+ self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq='M'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
+ # freq is Tick
+ p = Period('2011-04-01', freq='D')
+ self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq='D'))
+ self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq='D'))
+ self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq='D'))
+ self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq='D'))
+ self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq='D'))
+ self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq='D'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
+ p = Period('2011-04-01 09:00', freq='H')
+ self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq='H'))
+ self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq='H'))
+ self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq='H'))
+ self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq='H'))
+ self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq='H'))
+ self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq='H'))
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
+ def test_sub_offset_nat(self):
+ _skip_if_not_numpy17_friendly()
+
+ # freq is DateOffset
+ p = Period('NaT', freq='A')
+ for o in [offsets.YearEnd(2)]:
+ self.assertEqual((p - o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
+ p = Period('NaT', freq='M')
+ for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
+ self.assertEqual((p - o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(365, 'D'), timedelta(365)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
+ # freq is Tick
+ p = Period('NaT', freq='D')
+ for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
+ np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
+ self.assertEqual((p - o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(4, 'h'), timedelta(hours=23)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
+ p = Period('NaT', freq='H')
+ for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
+ np.timedelta64(3600, 's'), timedelta(minutes=120),
+ timedelta(days=4, minutes=180)]:
+ self.assertEqual((p - o).ordinal, tslib.iNaT)
+
+ for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
+ np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
+ with tm.assertRaises(ValueError):
+ p - o
+
def test_nat_ops(self):
p = Period('NaT', freq='M')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
| Must be revisited after #7954 to remove `_skip_if_not_numpy17_friendly`
Closes #7740.
Allow `Period` and `PeriodIndex` `add` and `sub` to support `timedelta`-like. If period freq is `offsets.Tick`, offsets can be added if the result can have same freq. Otherwise, `ValueError` will be raised.
```
idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H')
idx + pd.offsets.Hour(2)
# <class 'pandas.tseries.period.PeriodIndex'>
# [2014-07-01 11:00, ..., 2014-07-01 15:00]
# Length: 5, Freq: H
idx + datetime.timedelta(minutes=120)
# <class 'pandas.tseries.period.PeriodIndex'>
# [2014-07-01 11:00, ..., 2014-07-01 15:00]
# Length: 5, Freq: H
idx + np.timedelta64(7200, 's')
# <class 'pandas.tseries.period.PeriodIndex'>
# [2014-07-01 11:00, ..., 2014-07-01 15:00]
# Length: 5, Freq: H
idx + pd.offsets.Minute(5)
# ValueError: Input has different freq from PeriodIndex(freq=H)
```
If period freq isn't `Tick`, only the same offset can be added. Otherwise, `ValueError` will be raised.
```
idx = pd.period_range('2014-07', periods=5, freq='M')
idx + pd.offsets.MonthEnd(3)
# <class 'pandas.tseries.period.PeriodIndex'>
# [2014-10, ..., 2015-02]
# Length: 5, Freq: M
idx + pd.offsets.MonthBegin(3)
# ValueError: Input has different freq from PeriodIndex(freq=M)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/7966 | 2014-08-08T16:58:44Z | 2014-08-10T14:53:57Z | 2014-08-10T14:53:57Z | 2014-08-10T15:20:26Z |
Support is_dst indicators in tz_localize | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 8f96ec98df6f2..a23d067cefa4f 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1357,6 +1357,9 @@ Pandas provides rich support for working with timestamps in different time zones
``dateutil`` support is new [in 0.14.1] and currently only supported for fixed offset and tzfile zones. The default library is ``pytz``.
Support for ``dateutil`` is provided for compatibility with other applications e.g. if you use ``dateutil`` in other python packages.
+Working with Time Zones
+~~~~~~~~~~~~~~~~~~~~~~~
+
By default, pandas objects are time zone unaware:
.. ipython:: python
@@ -1488,10 +1491,29 @@ TimeSeries, aligning the data on the UTC timestamps:
result
result.index
+To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or ``tz_convert(None)``.
+``tz_localize(None)`` will remove timezone holding local time representations.
+``tz_convert(None)`` will remove timezone after converting to UTC time.
+
+.. ipython:: python
+
+ didx = DatetimeIndex(start='2014-08-01 09:00', freq='H', periods=10, tz='US/Eastern')
+ didx
+ didx.tz_localize(None)
+ didx.tz_convert(None)
+
+ # tz_convert(None) is identical with tz_convert('UTC').tz_localize(None)
+ didx.tz_convert('UCT').tz_localize(None)
+
+.. _timeseries.timezone_ambiguous:
+
+Ambiguous Times when Localizing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
In some cases, localize cannot determine the DST and non-DST hours when there are
-duplicates. This often happens when reading files that simply duplicate the hours.
-The infer_dst argument in tz_localize will attempt
-to determine the right offset.
+duplicates. This often happens when reading files or database records that simply
+duplicate the hours. Passing ``ambiguous='infer'`` (``infer_dst`` argument in prior
+releases) into ``tz_localize`` will attempt to determine the right offset.
.. ipython:: python
:okexcept:
@@ -1500,21 +1522,23 @@ to determine the right offset.
'11/06/2011 01:00', '11/06/2011 02:00',
'11/06/2011 03:00'])
rng_hourly.tz_localize('US/Eastern')
- rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', infer_dst=True)
+ rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
rng_hourly_eastern.values
-
-To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or ``tz_convert(None)``. ``tz_localize(None)`` will remove timezone holding local time representations. ``tz_convert(None)`` will remove timezone after converting to UTC time.
+In addition to 'infer', there are several other arguments supported. Passing
+an array-like of bools or 0s/1s where True represents a DST hour and False a
+non-DST hour, allows for distinguishing more than one DST
+transition (e.g., if you have multiple records in a database each with their
+own DST transition). Or passing 'NaT' will fill in transition times
+with not-a-time values. These methods are available in the ``DatetimeIndex``
+constructor as well as ``tz_localize``.
.. ipython:: python
+
+ rng_hourly_dst = np.array([1, 1, 0, 0, 0])
+ rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).values
+ rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').values
- didx = DatetimeIndex(start='2014-08-01 09:00', freq='H', periods=10, tz='US/Eastern')
- didx
- didx.tz_localize(None)
- didx.tz_convert(None)
-
- # tz_convert(None) is identical with tz_convert('UTC').tz_localize(None)
- didx.tz_convert('UCT').tz_localize(None)
.. _timeseries.timedeltas:
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index b2581a4f2aab3..21ba7ebbc940c 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -344,7 +344,6 @@ API changes
- ``Series.to_csv()`` now returns a string when ``path=None``, matching the behaviour of
``DataFrame.to_csv()`` (:issue:`8215`).
-
.. _whatsnew_0150.index_set_ops:
- The Index set operations ``+`` and ``-`` were deprecated in order to provide these for numeric type operations on certain index types. ``+`` can be replace by ``.union()`` or ``|``, and ``-`` by ``.difference()``. Further the method name ``Index.diff()`` is deprecated and can be replaced by ``Index.difference()`` (:issue:`8226`)
@@ -466,6 +465,10 @@ Deprecations
- The ``convert_dummies`` method has been deprecated in favor of
``get_dummies`` (:issue:`8140`)
+- The ``infer_dst`` argument in ``tz_localize`` will be deprecated in favor of
+ ``ambiguous`` to allow for more flexibility in dealing with DST transitions.
+ Replace ``infer_dst=True`` with ``ambiguous='infer'`` for the same behavior (:issue:`7943`).
+ See :ref:`the docs<timeseries.timezone_ambiguous>` for more details.
.. _whatsnew_0150.knownissues:
@@ -544,7 +547,10 @@ Enhancements
-
+- ``tz_localize`` now accepts the ``ambiguous`` keyword which allows for passing an array of bools
+ indicating whether the date belongs in DST or not, 'NaT' for setting transition times to NaT,
+ 'infer' for inferring DST/non-DST, and 'raise' (default) for an AmbiguousTimeError to be raised (:issue:`7943`).
+ See :ref:`the docs<timeseries.timezone_ambiguous>` for more details.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3a75f145587c0..dddfa3bf7d56a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -23,7 +23,7 @@
_maybe_box_datetimelike, ABCSeries,
SettingWithCopyError, SettingWithCopyWarning)
import pandas.core.nanops as nanops
-from pandas.util.decorators import Appender, Substitution
+from pandas.util.decorators import Appender, Substitution, deprecate_kwarg
from pandas.core import config
# goal is to be able to define the docs close to function, while still being
@@ -3558,8 +3558,11 @@ def _tz_convert(ax, tz):
result = self._constructor(self._data, copy=copy)
result.set_axis(axis,ax)
return result.__finalize__(self)
-
- def tz_localize(self, tz, axis=0, level=None, copy=True, infer_dst=False):
+
+ @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
+ mapping={True: 'infer', False: 'raise'})
+ def tz_localize(self, tz, axis=0, level=None, copy=True,
+ ambiguous='raise'):
"""
Localize tz-naive TimeSeries to target time zone
@@ -3572,16 +3575,22 @@ def tz_localize(self, tz, axis=0, level=None, copy=True, infer_dst=False):
must be None
copy : boolean, default True
Also make a copy of the underlying data
- infer_dst : boolean, default False
- Attempt to infer fall dst-transition times based on order
-
+ ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
+ - 'infer' will attempt to infer fall dst-transition hours based on order
+ - bool-ndarray where True signifies a DST time, False designates
+ a non-DST time (note that this flag is only applicable for ambiguous times)
+ - 'NaT' will return NaT where there are ambiguous times
+ - 'raise' will raise an AmbiguousTimeError if there are ambiguous times
+ infer_dst : boolean, default False (DEPRECATED)
+ Attempt to infer fall dst-transition hours based on order
+
Returns
-------
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
- def _tz_localize(ax, tz, infer_dst):
+ def _tz_localize(ax, tz, ambiguous):
if not hasattr(ax, 'tz_localize'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
@@ -3590,19 +3599,19 @@ def _tz_localize(ax, tz, infer_dst):
else:
ax = DatetimeIndex([],tz=tz)
else:
- ax = ax.tz_localize(tz, infer_dst=infer_dst)
+ ax = ax.tz_localize(tz, ambiguous=ambiguous)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
- new_level = _tz_localize(ax.levels[level], tz, infer_dst)
+ new_level = _tz_localize(ax.levels[level], tz, ambiguous)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
- ax = _tz_localize(ax, tz, infer_dst)
+ ax = _tz_localize(ax, tz, ambiguous)
result = self._constructor(self._data, copy=copy)
result.set_axis(axis,ax)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 2acdcfffb7d9a..e2cb8216bb270 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -6,6 +6,8 @@
import numpy as np
+import warnings
+
from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE,
_values_from_object, _maybe_box,
ABCSeries)
@@ -18,7 +20,7 @@
from pandas.core.base import DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
-from pandas.util.decorators import cache_readonly
+from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
@@ -145,6 +147,15 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index):
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
+ tz : pytz.timezone or dateutil.tz.tzfile
+ ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
+ - 'infer' will attempt to infer fall dst-transition hours based on order
+ - bool-ndarray where True signifies a DST time, False signifies
+ a non-DST time (note that this flag is only applicable for ambiguous times)
+ - 'NaT' will return NaT where there are ambiguous times
+ - 'raise' will raise an AmbiguousTimeError if there are ambiguous times
+ infer_dst : boolean, default False (DEPRECATED)
+ Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
"""
@@ -180,15 +191,17 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index):
'is_quarter_start','is_quarter_end','is_year_start','is_year_end']
_is_numeric_dtype = False
+
+ @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
+ mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
- closed=None, **kwargs):
+ closed=None, ambiguous='raise', **kwargs):
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
- infer_dst = kwargs.pop('infer_dst', False)
freq_infer = False
if not isinstance(freq, DateOffset):
@@ -214,7 +227,7 @@ def __new__(cls, data=None,
if data is None:
return cls._generate(start, end, periods, name, freq,
tz=tz, normalize=normalize, closed=closed,
- infer_dst=infer_dst)
+ ambiguous=ambiguous)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if np.isscalar(data):
@@ -240,7 +253,7 @@ def __new__(cls, data=None,
data.name = name
if tz is not None:
- return data.tz_localize(tz, infer_dst=infer_dst)
+ return data.tz_localize(tz, ambiguous=ambiguous)
return data
@@ -309,7 +322,7 @@ def __new__(cls, data=None,
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = tslib.tz_localize_to_utc(ints, tz,
- infer_dst=infer_dst)
+ ambiguous=ambiguous)
subarr = subarr.view(_NS_DTYPE)
@@ -333,7 +346,7 @@ def __new__(cls, data=None,
@classmethod
def _generate(cls, start, end, periods, name, offset,
- tz=None, normalize=False, infer_dst=False, closed=None):
+ tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
@@ -447,7 +460,7 @@ def _generate(cls, start, end, periods, name, offset,
if tz is not None and getattr(index, 'tz', None) is None:
index = tslib.tz_localize_to_utc(com._ensure_int64(index), tz,
- infer_dst=infer_dst)
+ ambiguous=ambiguous)
index = index.view(_NS_DTYPE)
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
@@ -1645,7 +1658,9 @@ def tz_convert(self, tz):
# No conversion since timestamps are all UTC to begin with
return self._shallow_copy(tz=tz)
- def tz_localize(self, tz, infer_dst=False):
+ @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
+ mapping={True: 'infer', False: 'raise'})
+ def tz_localize(self, tz, ambiguous='raise'):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
or remove timezone from tz-aware DatetimeIndex
@@ -1656,7 +1671,13 @@ def tz_localize(self, tz, infer_dst=False):
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
- infer_dst : boolean, default False
+ ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
+ - 'infer' will attempt to infer fall dst-transition hours based on order
+ - bool-ndarray where True signifies a DST time, False signifies
+ a non-DST time (note that this flag is only applicable for ambiguous times)
+ - 'NaT' will return NaT where there are ambiguous times
+ - 'raise' will raise an AmbiguousTimeError if there are ambiguous times
+ infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
Returns
@@ -1671,7 +1692,9 @@ def tz_localize(self, tz, infer_dst=False):
else:
tz = tslib.maybe_get_tz(tz)
# Convert to UTC
- new_dates = tslib.tz_localize_to_utc(self.asi8, tz, infer_dst=infer_dst)
+
+ new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
+ ambiguous=ambiguous)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index 5635bb75dd9ce..9fbdb714d8cfa 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -423,31 +423,98 @@ def test_with_tz_ambiguous_times(self):
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=datetools.Minute(30), tz=pytz.utc)
- def test_infer_dst(self):
+ def test_ambiguous_infer(self):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
tz = self.tz('US/Eastern')
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=datetools.Hour())
- self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize,
- tz, infer_dst=True)
+ self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# With repeated hours, we can infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=datetools.Hour(), tz=tz)
- di = DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00',
- '11/06/2011 01:00', '11/06/2011 02:00',
- '11/06/2011 03:00'])
- localized = di.tz_localize(tz, infer_dst=True)
+ times = ['11/06/2011 00:00', '11/06/2011 01:00',
+ '11/06/2011 01:00', '11/06/2011 02:00',
+ '11/06/2011 03:00']
+ di = DatetimeIndex(times)
+ localized = di.tz_localize(tz, ambiguous='infer')
self.assert_numpy_array_equal(dr, localized)
-
+ localized_old = di.tz_localize(tz, infer_dst=True)
+ self.assert_numpy_array_equal(dr, localized_old)
+ self.assert_numpy_array_equal(dr, DatetimeIndex(times, tz=tz, ambiguous='infer'))
+
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=datetools.Hour())
localized = dr.tz_localize(tz)
- localized_infer = dr.tz_localize(tz, infer_dst=True)
+ localized_infer = dr.tz_localize(tz, ambiguous='infer')
self.assert_numpy_array_equal(localized, localized_infer)
+ localized_infer_old = dr.tz_localize(tz, infer_dst=True)
+ self.assert_numpy_array_equal(localized, localized_infer_old)
+
+ def test_ambiguous_flags(self):
+ # November 6, 2011, fall back, repeat 2 AM hour
+ tz = self.tz('US/Eastern')
+
+ # Pass in flags to determine right dst transition
+ dr = date_range(datetime(2011, 11, 6, 0), periods=5,
+ freq=datetools.Hour(), tz=tz)
+ times = ['11/06/2011 00:00', '11/06/2011 01:00',
+ '11/06/2011 01:00', '11/06/2011 02:00',
+ '11/06/2011 03:00']
+
+ # Test tz_localize
+ di = DatetimeIndex(times)
+ is_dst = [1, 1, 0, 0, 0]
+ localized = di.tz_localize(tz, ambiguous=is_dst)
+ self.assert_numpy_array_equal(dr, localized)
+ self.assert_numpy_array_equal(dr, DatetimeIndex(times, tz=tz, ambiguous=is_dst))
+
+ localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
+ self.assert_numpy_array_equal(dr, localized)
+
+ localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype('bool'))
+ self.assert_numpy_array_equal(dr, localized)
+
+ # Test constructor
+ localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
+ self.assert_numpy_array_equal(dr, localized)
+
+ # Test duplicate times where infer_dst fails
+ times += times
+ di = DatetimeIndex(times)
+
+ # When the sizes are incompatible, make sure error is raised
+ self.assertRaises(Exception, di.tz_localize, tz, ambiguous=is_dst)
+
+ # When sizes are compatible and there are repeats ('infer' won't work)
+ is_dst = np.hstack((is_dst, is_dst))
+ localized = di.tz_localize(tz, ambiguous=is_dst)
+ dr = dr.append(dr)
+ self.assert_numpy_array_equal(dr, localized)
+ # When there is no dst transition, nothing special happens
+ dr = date_range(datetime(2011, 6, 1, 0), periods=10,
+ freq=datetools.Hour())
+ is_dst = np.array([1] * 10)
+ localized = dr.tz_localize(tz)
+ localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
+ self.assert_numpy_array_equal(localized, localized_is_dst)
+
+ def test_ambiguous_nat(self):
+ tz = self.tz('US/Eastern')
+ times = ['11/06/2011 00:00', '11/06/2011 01:00',
+ '11/06/2011 01:00', '11/06/2011 02:00',
+ '11/06/2011 03:00']
+ di = DatetimeIndex(times)
+ localized = di.tz_localize(tz, ambiguous='NaT')
+
+ times = ['11/06/2011 00:00', np.NaN,
+ np.NaN, '11/06/2011 02:00',
+ '11/06/2011 03:00']
+ di_test = DatetimeIndex(times, tz='US/Eastern')
+ self.assert_numpy_array_equal(di_test, localized)
# test utility methods
def test_infer_tz(self):
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 61fc3652fb8a4..9adcbb4ea4a41 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -232,6 +232,17 @@ def test_tz(self):
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv.nanosecond, 5)
self.assertEqual(conv.hour, 19)
+
+ def test_tz_localize_ambiguous(self):
+
+ ts = Timestamp('2014-11-02 01:00')
+ ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
+ ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
+
+ rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern')
+ self.assertEqual(rng[1], ts_dst)
+ self.assertEqual(rng[2], ts_no_dst)
+ self.assertRaises(ValueError, ts.tz_localize, 'US/Eastern', ambiguous='infer')
# GH 8025
with tm.assertRaisesRegexp(TypeError, 'Cannot localize tz-aware Timestamp, use '
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 36c40f8ca39af..c05d85a39441e 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -100,9 +100,9 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, offset=None, box=False):
offset = to_offset(offset)
if box:
- func_create = create_timestamp_from_ts
+ func_create = create_timestamp_from_ts
else:
- func_create = create_datetime_from_ts
+ func_create = create_datetime_from_ts
if tz is not None:
if _is_utc(tz):
@@ -359,7 +359,7 @@ class Timestamp(_Timestamp):
def is_year_end(self):
return self._get_start_end_field('is_year_end')
- def tz_localize(self, tz, infer_dst=False):
+ def tz_localize(self, tz, ambiguous='raise'):
"""
Convert naive Timestamp to local time zone, or remove
timezone from tz-aware Timestamp.
@@ -369,18 +369,26 @@ class Timestamp(_Timestamp):
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time which Timestamp will be converted to.
None will remove timezone holding local time.
- infer_dst : boolean, default False
- Attempt to infer fall dst-transition hours based on order
-
+ ambiguous : bool, 'NaT', default 'raise'
+ - bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates)
+ - 'NaT' will return NaT for an ambiguous time
+ - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+
Returns
-------
localized : Timestamp
"""
+ if ambiguous == 'infer':
+ raise ValueError('Cannot infer offset with only one time.')
+
if self.tzinfo is None:
# tz naive, localize
tz = maybe_get_tz(tz)
+ if not isinstance(ambiguous, basestring):
+ ambiguous = [ambiguous]
value = tz_localize_to_utc(np.array([self.value]), tz,
- infer_dst=infer_dst)[0]
+ ambiguous=ambiguous)[0]
return Timestamp(value, tz=tz)
else:
if tz is None:
@@ -1330,12 +1338,12 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
else:
try:
if len(val) == 0:
- iresult[i] = iNaT
- continue
+ iresult[i] = iNaT
+ continue
elif val in _nat_strings:
- iresult[i] = iNaT
- continue
+ iresult[i] = iNaT
+ continue
_string_to_dts(val, &dts, &out_local, &out_tzoffset)
value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
@@ -1349,8 +1357,8 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
py_dt = parse_datetime_string(val, dayfirst=dayfirst)
except Exception:
if coerce:
- iresult[i] = iNaT
- continue
+ iresult[i] = iNaT
+ continue
raise TypeError
try:
@@ -1491,7 +1499,7 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
return ts.astype('timedelta64[ns]')
def repr_timedelta64(object value, format=None):
- """
+ """
provide repr for timedelta64
Parameters
@@ -1503,60 +1511,60 @@ def repr_timedelta64(object value, format=None):
-------
converted : Timestamp
- """
- cdef object ivalue
+ """
+ cdef object ivalue
- ivalue = value.view('i8')
+ ivalue = value.view('i8')
- # put frac in seconds
- frac = float(ivalue)/1e9
- sign = np.sign(frac)
- frac = np.abs(frac)
+ # put frac in seconds
+ frac = float(ivalue)/1e9
+ sign = np.sign(frac)
+ frac = np.abs(frac)
- if frac >= 86400:
- days = int(frac / 86400)
- frac -= days * 86400
- else:
- days = 0
+ if frac >= 86400:
+ days = int(frac / 86400)
+ frac -= days * 86400
+ else:
+ days = 0
- if frac >= 3600:
- hours = int(frac / 3600)
- frac -= hours * 3600
- else:
- hours = 0
+ if frac >= 3600:
+ hours = int(frac / 3600)
+ frac -= hours * 3600
+ else:
+ hours = 0
- if frac >= 60:
- minutes = int(frac / 60)
- frac -= minutes * 60
- else:
- minutes = 0
+ if frac >= 60:
+ minutes = int(frac / 60)
+ frac -= minutes * 60
+ else:
+ minutes = 0
- if frac >= 1:
- seconds = int(frac)
- frac -= seconds
- else:
- seconds = 0
+ if frac >= 1:
+ seconds = int(frac)
+ frac -= seconds
+ else:
+ seconds = 0
- if frac == int(frac):
- seconds_pretty = "%02d" % seconds
- else:
- sp = abs(round(1e6*frac))
- seconds_pretty = "%02d.%06d" % (seconds, sp)
+ if frac == int(frac):
+ seconds_pretty = "%02d" % seconds
+ else:
+ sp = abs(round(1e6*frac))
+ seconds_pretty = "%02d.%06d" % (seconds, sp)
- if sign < 0:
- sign_pretty = "-"
- else:
- sign_pretty = ""
+ if sign < 0:
+ sign_pretty = "-"
+ else:
+ sign_pretty = ""
- if days or format == 'long':
- if (hours or minutes or seconds or frac) or format != 'short':
- return "%s%d days, %02d:%02d:%s" % (sign_pretty, days, hours, minutes,
- seconds_pretty)
- else:
- return "%s%d days" % (sign_pretty, days)
+ if days or format == 'long':
+ if (hours or minutes or seconds or frac) or format != 'short':
+ return "%s%d days, %02d:%02d:%s" % (sign_pretty, days, hours, minutes,
+ seconds_pretty)
+ else:
+ return "%s%d days" % (sign_pretty, days)
- return "%s%02d:%02d:%s" % (sign_pretty, hours, minutes, seconds_pretty)
+ return "%s%02d:%02d:%s" % (sign_pretty, hours, minutes, seconds_pretty)
def array_strptime(ndarray[object] values, object fmt, coerce=False):
@@ -1765,8 +1773,8 @@ def array_strptime(ndarray[object] values, object fmt, coerce=False):
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
- else: # Assume that if they bothered to include Julian day it will
- # be accurate.
+ else: # Assume that if they bothered to include Julian day it will
+ # be accurate.
datetime_result = datetime_date.fromordinal(
(julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
@@ -1850,7 +1858,7 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except -1:
base = <int64_t> ts
frac = ts-base
if p:
- frac = round(frac,p)
+ frac = round(frac,p)
return <int64_t> (base*m) + <int64_t> (frac*m)
def cast_to_nanoseconds(ndarray arr):
@@ -2183,7 +2191,7 @@ cpdef ndarray _unbox_utcoffsets(object transinfo):
@cython.boundscheck(False)
@cython.wraparound(False)
-def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False):
+def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None):
"""
Localize tzinfo-naive DateRange to given time zone (using pytz). If
there are ambiguities in the values, raise AmbiguousTimeError.
@@ -2199,6 +2207,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False):
int64_t v, left, right
ndarray[int64_t] result, result_a, result_b, dst_hours
pandas_datetimestruct dts
+ bint infer_dst = False, is_dst = False, fill = False
# Vectorized version of DstTzInfo.localize
@@ -2220,6 +2229,16 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False):
result[i] = v - delta
return result
+ if isinstance(ambiguous, string_types):
+ if ambiguous == 'infer':
+ infer_dst = True
+ elif ambiguous == 'NaT':
+ fill = True
+ elif hasattr(ambiguous, '__iter__'):
+ is_dst = True
+ if len(ambiguous) != len(vals):
+ raise ValueError("Length of ambiguous bool-array must be the same size as vals")
+
trans = _get_transitions(tz) # transition dates
deltas = _get_deltas(tz) # utc offsets
@@ -2307,10 +2326,17 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False):
else:
if infer_dst and dst_hours[i] != NPY_NAT:
result[i] = dst_hours[i]
+ elif is_dst:
+ if ambiguous[i]:
+ result[i] = left
+ else:
+ result[i] = right
+ elif fill:
+ result[i] = NPY_NAT
else:
stamp = Timestamp(vals[i])
raise pytz.AmbiguousTimeError("Cannot infer dst time from %r, "\
- "try using the 'infer_dst' argument"
+ "try using the 'ambiguous' argument"
% stamp)
elif left != NPY_NAT:
result[i] = left
@@ -3328,7 +3354,7 @@ cdef object _period_strftime(int64_t value, int freq, object fmt):
result = result.replace(str_extra_fmts[i], repl)
if PY2:
- result = result.decode('utf-8', 'ignore')
+ result = result.decode('utf-8', 'ignore')
return result
| The indicators are useful for assigning the correct offset when there are ambiguous transition times
closes #7943.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7963 | 2014-08-08T12:01:42Z | 2014-09-13T19:27:34Z | 2014-09-13T19:27:34Z | 2014-09-13T22:07:44Z |
BUG: fix checking of table name in read_sql (GH7826) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index ecfd7b5ada055..ac475d637f9cf 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -400,7 +400,7 @@ For full docs, see the :ref:`Categorical introduction <categorical>` and the
-
+- Bug in checking of table name in ``read_sql`` in certain cases (:issue:`7826`).
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 914ade45adaa1..d9d20c3b8b835 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -420,7 +420,12 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates)
- if pandas_sql.has_table(sql):
+ try:
+ _is_table_name = pandas_sql.has_table(sql)
+ except:
+ _is_table_name = False
+
+ if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
| Closes #7826.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7961 | 2014-08-08T09:57:11Z | 2014-08-22T08:50:47Z | 2014-08-22T08:50:47Z | 2014-08-22T08:50:47Z |
DOC: Removed unpaired right-paren. Rephrased references. | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 2111bb2d72dcb..985f112979a7e 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -528,8 +528,8 @@ the function.
Reshaping
---------
-See the section on :ref:`Hierarchical Indexing <indexing.hierarchical>` and
-see the section on :ref:`Reshaping <reshaping.stacking>`).
+See the sections on :ref:`Hierarchical Indexing <indexing.hierarchical>` and
+:ref:`Reshaping <reshaping.stacking>`.
Stack
~~~~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/7959 | 2014-08-08T00:50:46Z | 2014-08-08T01:30:22Z | 2014-08-08T01:30:22Z | 2014-08-09T00:59:22Z | |
CI: Drop numpy 1.6 support | diff --git a/README.md b/README.md
index 79a84440d6a5c..6a645dc64123d 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,7 @@ pip install pandas
```
## Dependencies
-- [NumPy](http://www.numpy.org): 1.6.1 or higher
+- [NumPy](http://www.numpy.org): 1.7.0 or higher
- [python-dateutil](http://labix.org/python-dateutil): 1.5 or higher
- [pytz](http://pytz.sourceforge.net)
- Needed for time zone support with ``pandas.date_range``
diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt
index 117d14005e175..fec0a96a3d077 100644
--- a/ci/requirements-2.6.txt
+++ b/ci/requirements-2.6.txt
@@ -1,4 +1,4 @@
-numpy==1.6.1
+numpy==1.7.0
cython==0.19.1
python-dateutil==1.5
pytz==2013b
diff --git a/ci/requirements-2.7_LOCALE.txt b/ci/requirements-2.7_LOCALE.txt
index a4d2b857f92c1..9af33fe96d58c 100644
--- a/ci/requirements-2.7_LOCALE.txt
+++ b/ci/requirements-2.7_LOCALE.txt
@@ -4,7 +4,7 @@ xlwt==0.7.5
openpyxl==1.6.2
xlsxwriter==0.4.6
xlrd==0.9.2
-numpy==1.6.1
+numpy==1.7.1
cython==0.19.1
bottleneck==0.6.0
matplotlib==1.3.0
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 5595f60c6789c..fb22a86096b59 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -247,7 +247,7 @@ installed), make sure you have `nose
Dependencies
------------
- * `NumPy <http://www.numpy.org>`__: 1.6.1 or higher
+ * `NumPy <http://www.numpy.org>`__: 1.7.0 or higher
* `python-dateutil <http://labix.org/python-dateutil>`__ 1.5
* `pytz <http://pytz.sourceforge.net/>`__
* Needed for time zone support
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index d12e0fd7c7f9e..8f96ec98df6f2 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1685,23 +1685,3 @@ yields another ``timedelta64[ns]`` dtypes Series.
td * -1
td * Series([1,2,3,4])
-
-Numpy < 1.7 Compatibility
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Numpy < 1.7 has a broken ``timedelta64`` type that does not correctly work
-for arithmetic. pandas bypasses this, but for frequency conversion as above,
-you need to create the divisor yourself. The ``np.timetimedelta64`` type only
-has 1 argument, the number of **micro** seconds.
-
-The following are equivalent statements in the two versions of numpy.
-
-.. code-block:: python
-
- from distutils.version import LooseVersion
- if LooseVersion(np.__version__) <= '1.6.2':
- y / np.timedelta(86400*int(1e6))
- y / np.timedelta(int(1e6))
- else:
- y / np.timedelta64(1,'D')
- y / np.timedelta64(1,'s')
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 322bcba9664d9..c6e784ac93e92 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -7,6 +7,12 @@ This is a major release from 0.14.1 and includes a small number of API changes,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to this version.
+.. warning::
+
+ pandas >= 0.15.0 will no longer support compatibility with NumPy versions <
+ 1.7.0. If you want to use the latest versions of pandas, please upgrade to
+ NumPy >= 1.7.0.
+
- Highlights include:
- The ``Categorical`` type was integrated as a first-class pandas type, see :ref:`here <whatsnew_0150.cat>`
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 6eda049835526..df5e6f567e3a6 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -1,5 +1,6 @@
# pylint: disable-msg=W0614,W0401,W0611,W0622
+
__docformat__ = 'restructuredtext'
try:
@@ -18,6 +19,7 @@
from datetime import datetime
import numpy as np
+
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
@@ -27,14 +29,20 @@
# numpy versioning
from distutils.version import LooseVersion
_np_version = np.version.short_version
-_np_version_under1p6 = LooseVersion(_np_version) < '1.6'
-_np_version_under1p7 = LooseVersion(_np_version) < '1.7'
_np_version_under1p8 = LooseVersion(_np_version) < '1.8'
_np_version_under1p9 = LooseVersion(_np_version) < '1.9'
+
from pandas.version import version as __version__
from pandas.info import __doc__
+
+if LooseVersion(_np_version) < '1.7.0':
+ raise ImportError('pandas {0} is incompatible with numpy < 1.7.0, '
+ 'your numpy version is {1}. Please upgrade numpy to'
+ ' >= 1.7.0 to use pandas version {0}'.format(__version__,
+ _np_version))
+
# let init-time option registration happen
import pandas.core.config_init
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 021f4474130bd..1655d2a4e4e23 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -298,7 +298,12 @@ def ndim(self):
def item(self):
""" return the first element of the underlying data as a python scalar """
- return self.values.item()
+ try:
+ return self.values.item()
+ except IndexError:
+ # copy numpy's message here because Py26 raises an IndexError
+ raise ValueError('can only convert an array of size 1 to a '
+ 'Python scalar')
@property
def data(self):
diff --git a/pandas/core/common.py b/pandas/core/common.py
index bc4c95ed3323e..48fb75f59ac34 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1848,6 +1848,8 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
+ from pandas.tseries.timedeltas import _possibly_cast_to_timedelta
+ from pandas.tseries.tools import to_datetime
if dtype is not None:
if isinstance(dtype, compat.string_types):
@@ -1886,13 +1888,11 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
elif np.prod(value.shape) and value.dtype != dtype:
try:
if is_datetime64:
- from pandas.tseries.tools import to_datetime
value = to_datetime(value, coerce=coerce).values
elif is_timedelta64:
- from pandas.tseries.timedeltas import \
- _possibly_cast_to_timedelta
- value = _possibly_cast_to_timedelta(value, coerce='compat', dtype=dtype)
- except:
+ value = _possibly_cast_to_timedelta(value,
+ dtype=dtype)
+ except (AttributeError, ValueError):
pass
else:
@@ -1901,28 +1901,20 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
- if (is_array and value.dtype.kind in ['M','m']):
+ if is_array and value.dtype.kind in ['M', 'm']:
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
- from pandas.tseries.timedeltas import \
- _possibly_cast_to_timedelta
- value = _possibly_cast_to_timedelta(value, coerce='compat')
+ value = _possibly_cast_to_timedelta(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
- elif (is_array and not (
- issubclass(value.dtype.type, np.integer) or
- value.dtype == np.object_)):
- pass
-
- # try to infer if we have a datetimelike here
- # otherwise pass thru
- else:
+ elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
+ value.dtype == np.object_)):
value = _possibly_infer_to_datetimelike(value)
return value
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 83110d143e8bc..5064545404fb0 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -16,7 +16,7 @@
from pandas.core.internals import BlockManager
import pandas.core.common as com
import pandas.core.datetools as datetools
-from pandas import compat, _np_version_under1p7
+from pandas import compat
from pandas.compat import map, zip, lrange, string_types, isidentifier, lmap
from pandas.core.common import (isnull, notnull, is_list_like,
_values_from_object, _maybe_promote,
@@ -3613,21 +3613,6 @@ def abs(self):
-------
abs: type of caller
"""
-
- # suprimo numpy 1.6 hacking
- # for timedeltas
- if _np_version_under1p7:
-
- def _convert_timedeltas(x):
- if x.dtype.kind == 'm':
- return np.abs(x.view('i8')).astype(x.dtype)
- return np.abs(x)
-
- if self.ndim == 1:
- return _convert_timedeltas(self)
- elif self.ndim == 2:
- return self.apply(_convert_timedeltas)
-
return np.abs(self)
_shared_docs['describe'] = """
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 7c24c339d4f16..ce57a9c03d570 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -26,7 +26,6 @@
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
-from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
@@ -2764,18 +2763,21 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
# normally use vstack as its faster than concat
# and if we have mi-columns
- if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
- stacked_values = np.vstack([np.asarray(x) for x in values])
- result = DataFrame(stacked_values,index=key_index,columns=index)
+ if isinstance(v.index, MultiIndex) or key_index is None:
+ stacked_values = np.vstack(map(np.asarray, values))
+ result = DataFrame(stacked_values, index=key_index,
+ columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
- result = concat(values,keys=key_index,names=key_index.names,
+ result = concat(values, keys=key_index,
+ names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
- stacked_values = np.vstack([np.asarray(x) for x in values])
- result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
+ stacked_values = np.vstack(map(np.asarray, values))
+ result = DataFrame(stacked_values.T, index=v.index,
+ columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index da36d95a3ad9e..f3b8a54034d56 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -25,7 +25,7 @@
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp
-from pandas import compat, _np_version_under1p7
+from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
@@ -1298,10 +1298,8 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs):
def get_values(self, dtype=None):
# return object dtypes as datetime.timedeltas
if dtype == object:
- if _np_version_under1p7:
- return self.values.astype('object')
return lib.map_infer(self.values.ravel(),
- lambda x: timedelta(microseconds=x.item()/1000)
+ lambda x: timedelta(microseconds=x.item() / 1000)
).reshape(self.values.shape)
return self.values
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 9f29570af6f4f..16e6e40802a95 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -258,9 +258,7 @@ def __init__(self, left, right, name):
self.is_datetime_lhs = com.is_datetime64_dtype(left)
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
self.is_datetime_rhs = com.is_datetime64_dtype(rvalues)
- self.is_timedelta_rhs = (com.is_timedelta64_dtype(rvalues)
- or (not self.is_datetime_rhs
- and pd._np_version_under1p7))
+ self.is_timedelta_rhs = com.is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
self._validate()
@@ -318,7 +316,7 @@ def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import _possibly_cast_to_timedelta
- coerce = 'compat' if pd._np_version_under1p7 else True
+ coerce = True
if not is_list_like(values):
values = np.array([values])
inferred_type = lib.infer_dtype(values)
@@ -648,13 +646,7 @@ def _radd_compat(left, right):
try:
output = radd(left, right)
except TypeError:
- cond = (pd._np_version_under1p6 and
- left.dtype == np.object_)
- if cond: # pragma: no cover
- output = np.empty_like(left)
- output.flat[:] = [radd(x, right) for x in left.flat]
- else:
- raise
+ raise
return output
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 989249994d953..5150729ed6f79 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -14,7 +14,7 @@
import numpy as np
from pandas import (Series, TimeSeries, DataFrame, Panel, Panel4D, Index,
- MultiIndex, Int64Index, Timestamp, _np_version_under1p7)
+ MultiIndex, Int64Index, Timestamp)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
@@ -1721,9 +1721,6 @@ def set_atom(self, block, block_items, existing_col, min_itemsize,
if inferred_type == 'datetime64':
self.set_atom_datetime64(block)
elif dtype == 'timedelta64[ns]':
- if _np_version_under1p7:
- raise TypeError(
- "timdelta64 is not supported under under numpy < 1.7")
self.set_atom_timedelta64(block)
elif inferred_type == 'date':
raise TypeError(
@@ -2240,9 +2237,6 @@ def read_array(self, key):
if dtype == u('datetime64'):
ret = np.array(ret, dtype='M8[ns]')
elif dtype == u('timedelta64'):
- if _np_version_under1p7:
- raise TypeError(
- "timedelta64 is not supported under under numpy < 1.7")
ret = np.array(ret, dtype='m8[ns]')
if transposed:
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index 62d729ccdaa88..5732bc90573fd 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -601,8 +601,6 @@ def test_url(self):
self.assertEqual(result[c].dtype, 'datetime64[ns]')
def test_timedelta(self):
- tm._skip_if_not_numpy17_friendly()
-
from datetime import timedelta
converter = lambda x: pd.to_timedelta(x,unit='ms')
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 2a0796e90e418..9cdecd16755c7 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -22,7 +22,7 @@
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
-from pandas import compat, _np_version_under1p7
+from pandas import compat
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
@@ -2159,8 +2159,6 @@ def setTZ(tz):
setTZ(orig_tz)
def test_append_with_timedelta(self):
- tm._skip_if_not_numpy17_friendly()
-
# GH 3577
# append timedelta
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index eadcb2c9f1fdb..4d7eb2d04af21 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -36,7 +36,6 @@
import pandas.io.sql as sql
import pandas.util.testing as tm
-from pandas import _np_version_under1p7
try:
@@ -509,8 +508,6 @@ def test_date_and_index(self):
def test_timedelta(self):
# see #6921
- tm._skip_if_not_numpy17_friendly()
-
df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 5dd8d072595cb..b171b31528a55 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import re
from datetime import datetime, timedelta
import numpy as np
@@ -8,7 +9,6 @@
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas.tseries.common import is_datetimelike
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
-from pandas import _np_version_under1p7
import pandas.tslib as tslib
import nose
@@ -128,6 +128,7 @@ def test_values(self):
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
+
class TestPandasDelegate(tm.TestCase):
def setUp(self):
@@ -175,6 +176,7 @@ def f():
delegate.foo()
self.assertRaises(TypeError, f)
+
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
@@ -238,6 +240,7 @@ def check_ops_properties(self, props, filter=None, ignore_failures=False):
else:
self.assertRaises(AttributeError, lambda : getattr(o,op))
+
class TestIndexOps(Ops):
def setUp(self):
@@ -250,29 +253,25 @@ def test_ndarray_compat_properties(self):
for o in self.objs:
# check that we work
- for p in ['shape','dtype','base','flags','T',
- 'strides','itemsize','nbytes']:
- self.assertIsNotNone(getattr(o,p,None))
+ for p in ['shape', 'dtype', 'base', 'flags', 'T',
+ 'strides', 'itemsize', 'nbytes']:
+ self.assertIsNotNone(getattr(o, p, None))
# if we have a datetimelike dtype then needs a view to work
# but the user is responsible for that
try:
self.assertIsNotNone(o.data)
- except (ValueError):
+ except ValueError:
pass
- # len > 1
- self.assertRaises(ValueError, lambda : o.item())
-
- self.assertTrue(o.ndim == 1)
-
- self.assertTrue(o.size == len(o))
+ self.assertRaises(ValueError, o.item) # len > 1
+ self.assertEqual(o.ndim, 1)
+ self.assertEqual(o.size, len(o))
- self.assertTrue(Index([1]).item() == 1)
- self.assertTrue(Series([1]).item() == 1)
+ self.assertEqual(Index([1]).item(), 1)
+ self.assertEqual(Series([1]).item(), 1)
def test_ops(self):
- tm._skip_if_not_numpy17_friendly()
for op in ['max','min']:
for o in self.objs:
result = getattr(o,op)()
@@ -734,10 +733,7 @@ def test_add_iadd(self):
tm.assert_index_equal(rng, expected)
# offset
- if _np_version_under1p7:
- offsets = [pd.offsets.Hour(2), timedelta(hours=2)]
- else:
- offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
@@ -781,10 +777,7 @@ def test_sub_isub(self):
tm.assert_index_equal(rng, expected)
# offset
- if _np_version_under1p7:
- offsets = [pd.offsets.Hour(2), timedelta(hours=2)]
- else:
- offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
+ offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
@@ -961,8 +954,6 @@ def test_resolution(self):
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
- tm._skip_if_not_numpy17_friendly()
-
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
@@ -1085,8 +1076,6 @@ def test_add_iadd(self):
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
- tm._skip_if_not_numpy17_friendly()
-
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 421e05f5a3bc7..d07adeadb640c 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -7,8 +7,7 @@
import numpy as np
import pandas as pd
-from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
- Timestamp, _np_version_under1p7)
+from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp
import pandas.core.common as com
import pandas.compat as compat
@@ -379,10 +378,7 @@ def f():
codes= c.codes
def f():
codes[4] = 1
- if _np_version_under1p7:
- self.assertRaises(RuntimeError, f)
- else:
- self.assertRaises(ValueError, f)
+ self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be writeable!
c[4] = "a"
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 27f5ab3c63d81..c6a9192d7bb79 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -14,7 +14,7 @@
from numpy.random import randn
import numpy as np
-from pandas import DataFrame, Series, Index, _np_version_under1p7, Timestamp, MultiIndex
+from pandas import DataFrame, Series, Index, Timestamp, MultiIndex
import pandas.core.format as fmt
import pandas.util.testing as tm
@@ -2727,10 +2727,6 @@ def test_format(self):
class TestRepr_timedelta64(tm.TestCase):
- @classmethod
- def setUpClass(cls):
- tm._skip_if_not_numpy17_friendly()
-
def test_legacy(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
@@ -2775,10 +2771,6 @@ def test_long(self):
class TestTimedelta64Formatter(tm.TestCase):
- @classmethod
- def setUpClass(cls):
- tm._skip_if_not_numpy17_friendly()
-
def test_mixed(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 7912debd0d409..cf845a18092af 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3803,8 +3803,6 @@ def test_operators_timedelta64(self):
self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_datetimelike_setitem_with_inference(self):
- tm._skip_if_not_numpy17_friendly()
-
# GH 7592
# assignment of timedeltas with NaT
@@ -9668,8 +9666,6 @@ def test_apply(self):
self.assertRaises(ValueError, df.apply, lambda x: x, 2)
def test_apply_mixed_datetimelike(self):
- tm._skip_if_not_numpy17_friendly()
-
# mixed datetimelike
# GH 7778
df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') })
@@ -13112,7 +13108,6 @@ def test_select_dtypes_exclude_include(self):
tm.assert_frame_equal(r, e)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
- tm._skip_if_not_numpy17_friendly()
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 8d80962eb9902..001d6f489e934 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -160,8 +160,6 @@ def f():
self.assertRaises(ValueError, lambda : not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
- tm._skip_if_not_numpy17_friendly()
-
# GH 4435
# numpy in 1.7 tries to pass addtional arguments to pandas functions
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index d0045c2282aba..60105719179ad 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -32,7 +32,6 @@
import pandas as pd
from pandas.lib import Timestamp
-from pandas import _np_version_under1p7
class Base(object):
""" base class for index sub-class tests """
@@ -392,8 +391,6 @@ def test_asof(self):
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_nanosecond_index_access(self):
- tm._skip_if_not_numpy17_friendly()
-
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
@@ -1630,7 +1627,7 @@ def test_pickle_compat_construction(self):
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
- if not (_np_version_under1p7 or compat.PY3_2):
+ if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
@@ -2227,12 +2224,11 @@ def test_get_level_values_na(self):
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
- if not _np_version_under1p7:
- arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
- index = pd.MultiIndex.from_arrays(arrays)
- values = index.get_level_values(1)
- expected = pd.DatetimeIndex([0, 1, pd.NaT])
- assert_array_equal(values.values, expected.values)
+ arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
+ index = pd.MultiIndex.from_arrays(arrays)
+ values = index.get_level_values(1)
+ expected = pd.DatetimeIndex([0, 1, pd.NaT])
+ assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 4ecb9a1430eba..24282fdc280af 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -15,8 +15,8 @@
import numpy.ma as ma
import pandas as pd
-from pandas import (Index, Series, DataFrame, isnull, notnull,
- bdate_range, date_range, period_range, _np_version_under1p7)
+from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,
+ date_range, period_range)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp, DatetimeIndex
@@ -27,7 +27,7 @@
import pandas.core.datetools as datetools
import pandas.core.nanops as nanops
-from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long, PY3_2
+from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_almost_equal,
@@ -80,7 +80,6 @@ def test_dt_namespace_accessor(self):
ok_for_period = ok_for_base + ['qyear']
ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end']
- ok_for_both = ok_for_dt
def get_expected(s, name):
result = getattr(Index(s.values),prop)
@@ -726,15 +725,14 @@ def test_constructor_dtype_datetime64(self):
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, dates)
- # numpy < 1.7 is very odd about astyping
- if not _np_version_under1p7:
- for dtype in ['s','D','ms','us','ns']:
- values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
- result = Series(values1, dates)
- assert_series_equal(result,expected)
+ for dtype in ['s', 'D', 'ms', 'us', 'ns']:
+ values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
+ result = Series(values1, dates)
+            assert_series_equal(result, expected)
# leave datetime.date alone
- dates2 = np.array([ d.date() for d in dates.to_pydatetime() ],dtype=object)
+ dates2 = np.array([d.date() for d in dates.to_pydatetime()],
+ dtype=object)
series1 = Series(dates2, dates)
self.assert_numpy_array_equal(series1.values,dates2)
self.assertEqual(series1.dtype,object)
@@ -1343,7 +1341,7 @@ def test_reshape_2d_return_array(self):
self.assertNotIsInstance(result, Series)
result2 = np.reshape(x, (-1, 1))
- self.assertNotIsInstance(result, Series)
+ self.assertNotIsInstance(result2, Series)
result = x[:, None]
expected = x.reshape((-1, 1))
@@ -1929,11 +1927,10 @@ def test_timeseries_repr_object_dtype(self):
self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
- repr(ts).splitlines()[-1]
+ repr(ts2).splitlines()[-1]
def test_timeseries_periodindex(self):
# GH2891
- import pickle
from pandas import period_range
prng = period_range('1/1/2011', '1/1/2012', freq='M')
ts = Series(np.random.randn(len(prng)), prng)
@@ -2297,11 +2294,10 @@ def test_quantile(self):
q = dts.quantile(.2)
self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
- if not _np_version_under1p7:
- # timedelta64[ns] dtype
- tds = dts.diff()
- q = tds.quantile(.25)
- self.assertEqual(q, pd.to_timedelta('24:00:00'))
+ # timedelta64[ns] dtype
+ tds = dts.diff()
+ q = tds.quantile(.25)
+ self.assertEqual(q, pd.to_timedelta('24:00:00'))
def test_quantile_multi(self):
from numpy import percentile
@@ -2488,9 +2484,7 @@ def check_comparators(series, other):
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
-
- # it works!
- _ = s1 * s2
+ tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_constructor_dtype_timedelta64(self):
@@ -2501,9 +2495,8 @@ def test_constructor_dtype_timedelta64(self):
td = Series([timedelta(days=1)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
- if not _np_version_under1p7:
- td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])
- self.assertEqual(td.dtype, 'timedelta64[ns]')
+ td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])
+ self.assertEqual(td.dtype, 'timedelta64[ns]')
# mixed with NaT
from pandas import tslib
@@ -2530,9 +2523,8 @@ def test_constructor_dtype_timedelta64(self):
td = Series([pd.NaT, np.timedelta64(300000000)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
- if not _np_version_under1p7:
- td = Series([np.timedelta64(1,'s')])
- self.assertEqual(td.dtype, 'timedelta64[ns]')
+ td = Series([np.timedelta64(1,'s')])
+ self.assertEqual(td.dtype, 'timedelta64[ns]')
# these are frequency conversion astypes
#for t in ['s', 'D', 'us', 'ms']:
@@ -2554,16 +2546,14 @@ def f():
self.assertEqual(td.dtype, 'object')
# these will correctly infer a timedelta
- # but only on numpy > 1.7 as the cython path will only be used
- if not _np_version_under1p7:
- s = Series([None, pd.NaT, '1 Day'])
- self.assertEqual(s.dtype,'timedelta64[ns]')
- s = Series([np.nan, pd.NaT, '1 Day'])
- self.assertEqual(s.dtype,'timedelta64[ns]')
- s = Series([pd.NaT, None, '1 Day'])
- self.assertEqual(s.dtype,'timedelta64[ns]')
- s = Series([pd.NaT, np.nan, '1 Day'])
- self.assertEqual(s.dtype,'timedelta64[ns]')
+ s = Series([None, pd.NaT, '1 Day'])
+ self.assertEqual(s.dtype,'timedelta64[ns]')
+ s = Series([np.nan, pd.NaT, '1 Day'])
+ self.assertEqual(s.dtype,'timedelta64[ns]')
+ s = Series([pd.NaT, None, '1 Day'])
+ self.assertEqual(s.dtype,'timedelta64[ns]')
+ s = Series([pd.NaT, np.nan, '1 Day'])
+ self.assertEqual(s.dtype,'timedelta64[ns]')
def test_operators_timedelta64(self):
@@ -2666,22 +2656,20 @@ def test_timedeltas_with_DateOffset(self):
[Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])
assert_series_equal(result, expected)
- if not _np_version_under1p7:
-
- # operate with np.timedelta64 correctly
- result = s + np.timedelta64(1, 's')
- result2 = np.timedelta64(1, 's') + s
- expected = Series(
- [Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])
- assert_series_equal(result, expected)
- assert_series_equal(result2, expected)
+ # operate with np.timedelta64 correctly
+ result = s + np.timedelta64(1, 's')
+ result2 = np.timedelta64(1, 's') + s
+ expected = Series(
+ [Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])
+ assert_series_equal(result, expected)
+ assert_series_equal(result2, expected)
- result = s + np.timedelta64(5, 'ms')
- result2 = np.timedelta64(5, 'ms') + s
- expected = Series(
- [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
- assert_series_equal(result, expected)
- assert_series_equal(result2, expected)
+ result = s + np.timedelta64(5, 'ms')
+ result2 = np.timedelta64(5, 'ms') + s
+ expected = Series(
+ [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
+ assert_series_equal(result, expected)
+ assert_series_equal(result2, expected)
# valid DateOffsets
for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
@@ -2720,22 +2708,21 @@ def test_timedelta64_operations_with_timedeltas(self):
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
- if not _np_version_under1p7:
- td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
- td2 = pd.to_timedelta('00:05:04')
- result = td1 - td2
- expected = Series([timedelta(seconds=0)] * 3) -Series(
- [timedelta(seconds=1)] * 3)
- self.assertEqual(result.dtype, 'm8[ns]')
- assert_series_equal(result, expected)
+ td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
+ td2 = pd.to_timedelta('00:05:04')
+ result = td1 - td2
+        expected = Series([timedelta(seconds=0)] * 3) - Series(
+            [timedelta(seconds=1)] * 3)
+ self.assertEqual(result.dtype, 'm8[ns]')
+ assert_series_equal(result, expected)
- result2 = td2 - td1
- expected = (Series([timedelta(seconds=1)] * 3) -
- Series([timedelta(seconds=0)] * 3))
- assert_series_equal(result2, expected)
+ result2 = td2 - td1
+ expected = (Series([timedelta(seconds=1)] * 3) -
+ Series([timedelta(seconds=0)] * 3))
+ assert_series_equal(result2, expected)
- # roundtrip
- assert_series_equal(result + td2,td1)
+ # roundtrip
+ assert_series_equal(result + td2,td1)
def test_timedelta64_operations_with_integers(self):
@@ -2800,8 +2787,6 @@ def test_timedelta64_operations_with_integers(self):
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
- tm._skip_if_not_numpy17_friendly()
-
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
@@ -2836,11 +2821,6 @@ def test_timedelta64_equal_timedelta_supported_ops(self):
'm': 60 * 1000000, 's': 1000000, 'us': 1}
def timedelta64(*args):
- if _np_version_under1p7:
- coeffs = np.array(args)
- terms = np.array([npy16_mappings[interval]
- for interval in intervals])
- return np.timedelta64(coeffs.dot(terms))
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
@@ -2913,8 +2893,6 @@ def run_ops(ops, get_ser, test_ser):
dt1 + td1
def test_ops_datetimelike_align(self):
- tm._skip_if_not_numpy17_friendly()
-
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
@@ -2976,8 +2954,6 @@ def test_timedelta64_functions(self):
assert_series_equal(result, expected)
def test_timedelta_fillna(self):
- tm._skip_if_not_numpy17_friendly()
-
#GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
@@ -3183,8 +3159,6 @@ def test_bfill(self):
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_sub_of_datetime_from_TimeSeries(self):
- tm._skip_if_not_numpy17_friendly()
-
from pandas.tseries.timedeltas import _possibly_cast_to_timedelta
from datetime import datetime
a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))
@@ -5640,9 +5614,8 @@ def test_isin_with_i8(self):
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
- if not _np_version_under1p7:
- result = s.isin(s[0:2].values.astype('datetime64[D]'))
- assert_series_equal(result, expected)
+ result = s.isin(s[0:2].values.astype('datetime64[D]'))
+ assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
@@ -5651,19 +5624,15 @@ def test_isin_with_i8(self):
assert_series_equal(result, expected2)
# timedelta64[ns]
- if not _np_version_under1p7:
- s = Series(pd.to_timedelta(lrange(5),unit='d'))
- result = s.isin(s[0:2])
- assert_series_equal(result, expected)
+ s = Series(pd.to_timedelta(lrange(5),unit='d'))
+ result = s.isin(s[0:2])
+ assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# TimeSeries-specific
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
- if _np_version_under1p7 and sys.version_info[0] == 3 and sys.version_info[1] == 2:
- raise nose.SkipTest("failure of GH6270 on numpy < 1.7 and py 3.2")
-
def cummin(x):
return np.minimum.accumulate(x)
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 7ac9f900c615e..749f15af0d916 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas.compat import range, lrange, lzip, zip, StringIO
-from pandas import compat, _np_version_under1p7
+from pandas import compat
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
@@ -822,7 +822,6 @@ def test_join_append_timedeltas(self):
# timedelta64 issues with join/merge
# GH 5695
- tm._skip_if_not_numpy17_friendly()
d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
df = DataFrame(columns=list('dt'))
@@ -2013,9 +2012,6 @@ def test_concat_datetime64_block(self):
self.assertTrue((result.iloc[10:]['time'] == rng).all())
def test_concat_timedelta64_block(self):
-
- # not friendly for < 1.7
- tm._skip_if_not_numpy17_friendly()
from pandas import to_timedelta
rng = to_timedelta(np.arange(10),unit='s')
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index d2c9acedcee94..cd37f4000e5a2 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -11,8 +11,6 @@
import pandas.tslib as tslib
from pandas.tslib import Timestamp, OutOfBoundsDatetime
-from pandas import _np_version_under1p7
-
import functools
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
@@ -2062,7 +2060,7 @@ class Micro(Tick):
class Nano(Tick):
- _inc = np.timedelta64(1, 'ns') if not _np_version_under1p7 else 1
+ _inc = np.timedelta64(1, 'ns')
_prefix = 'N'
@@ -2181,9 +2179,7 @@ def generate_range(start=None, end=None, periods=None,
FY5253Quarter,
])
-if not _np_version_under1p7:
- # Only 1.7+ supports nanosecond resolution
- prefix_mapping['N'] = Nano
+prefix_mapping['N'] = Nano
def _make_offset(key):
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index 24deb8a298688..b251ae50e22d6 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -16,7 +16,6 @@
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
-from pandas import _np_version_under1p7
import pandas.util.testing as tm
def test_to_offset_multiple():
@@ -48,11 +47,10 @@ def test_to_offset_multiple():
expected = offsets.Milli(10075)
assert(result == expected)
- if not _np_version_under1p7:
- freqstr = '2800N'
- result = frequencies.to_offset(freqstr)
- expected = offsets.Nano(2800)
- assert(result == expected)
+ freqstr = '2800N'
+ result = frequencies.to_offset(freqstr)
+ expected = offsets.Nano(2800)
+ assert(result == expected)
# malformed
try:
@@ -137,7 +135,6 @@ def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
- tm._skip_if_not_numpy17_friendly()
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 065aa9236e539..f6f91760e8ad8 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -30,8 +30,6 @@
LastWeekOfMonth, FY5253, FY5253Quarter, WeekDay
from pandas.tseries.holiday import USFederalHolidayCalendar
-from pandas import _np_version_under1p7
-
_multiprocess_can_split_ = True
@@ -96,18 +94,13 @@ class Base(tm.TestCase):
_offset = None
_offset_types = [getattr(offsets, o) for o in offsets.__all__]
- skip_np_u1p7 = [offsets.CustomBusinessDay, offsets.CDay, offsets.CustomBusinessMonthBegin,
- offsets.CustomBusinessMonthEnd, offsets.Nano]
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
@property
def offset_types(self):
- if _np_version_under1p7:
- return [o for o in self._offset_types if o not in self.skip_np_u1p7]
- else:
- return self._offset_types
+ return self._offset_types
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
@@ -133,8 +126,6 @@ def _get_offset(self, klass, value=1, normalize=False):
def test_apply_out_of_range(self):
if self._offset is None:
return
- if _np_version_under1p7 and self._offset in self.skip_np_u1p7:
- raise nose.SkipTest('numpy >= 1.7 required')
# try to create an out-of-bounds result timestamp; if we can't create the offset
# skip
@@ -2857,8 +2848,6 @@ def test_Microsecond():
def test_NanosecondGeneric():
- tm._skip_if_not_numpy17_friendly()
-
timestamp = Timestamp(datetime(2010, 1, 1))
assert timestamp.nanosecond == 0
@@ -2870,8 +2859,6 @@ def test_NanosecondGeneric():
def test_Nanosecond():
- tm._skip_if_not_numpy17_friendly()
-
timestamp = Timestamp(datetime(2010, 1, 1))
assertEq(Nano(), timestamp, timestamp + np.timedelta64(1, 'ns'))
assertEq(Nano(-1), timestamp + np.timedelta64(1, 'ns'), timestamp)
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 3fae251b433e6..e6e6b48ccb573 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -27,7 +27,7 @@
from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
- assertRaisesRegexp, _skip_if_not_numpy17_friendly)
+ assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import compat
from numpy.testing import assert_array_equal
@@ -2486,8 +2486,6 @@ def test_add(self):
dt1 + dt2
def test_add_offset(self):
- _skip_if_not_numpy17_friendly()
-
# freq is DateOffset
p = Period('2011', freq='A')
self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq='A'))
@@ -2534,8 +2532,6 @@ def test_add_offset(self):
p + o
def test_add_offset_nat(self):
- _skip_if_not_numpy17_friendly()
-
# freq is DateOffset
p = Period('NaT', freq='A')
for o in [offsets.YearEnd(2)]:
@@ -2578,8 +2574,6 @@ def test_add_offset_nat(self):
p + o
def test_sub_offset(self):
- _skip_if_not_numpy17_friendly()
-
# freq is DateOffset
p = Period('2011', freq='A')
self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq='A'))
@@ -2626,8 +2620,6 @@ def test_sub_offset(self):
p - o
def test_sub_offset_nat(self):
- _skip_if_not_numpy17_friendly()
-
# freq is DateOffset
p = Period('NaT', freq='A')
for o in [offsets.YearEnd(2)]:
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 9d85c599c840c..769062f293cf9 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -15,8 +15,7 @@
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
- ensure_clean,
- _skip_if_not_numpy17_friendly)
+ ensure_clean)
import pandas.util.testing as tm
class TestTimedeltas(tm.TestCase):
@@ -26,8 +25,6 @@ def setUp(self):
pass
def test_numeric_conversions(self):
- _skip_if_not_numpy17_friendly()
-
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
@@ -38,15 +35,11 @@ def test_numeric_conversions(self):
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
- _skip_if_not_numpy17_friendly()
-
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
- _skip_if_not_numpy17_friendly()
-
def conv(v):
return v.astype('m8[ns]')
@@ -93,8 +86,6 @@ def conv(v):
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
- _skip_if_not_numpy17_friendly()
-
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
@@ -116,14 +107,10 @@ def conv(v):
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
- _skip_if_not_numpy17_friendly()
-
self.assertEqual(to_timedelta('nat',box=False).astype('int64'), tslib.iNaT)
self.assertEqual(to_timedelta('nan',box=False).astype('int64'), tslib.iNaT)
def test_to_timedelta(self):
- _skip_if_not_numpy17_friendly()
-
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
@@ -231,8 +218,6 @@ def testit(unit, transform):
self.assertRaises(ValueError, lambda : to_timedelta(1,unit='foo'))
def test_to_timedelta_via_apply(self):
- _skip_if_not_numpy17_friendly()
-
# GH 5458
expected = Series([np.timedelta64(1,'s')])
result = Series(['00:00:01']).apply(to_timedelta)
@@ -242,8 +227,6 @@ def test_to_timedelta_via_apply(self):
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
- _skip_if_not_numpy17_friendly()
-
# GH4984
# make sure ops return timedeltas
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
@@ -271,8 +254,6 @@ def test_timedelta_ops(self):
tm.assert_almost_equal(result, expected)
def test_timedelta_ops_scalar(self):
- _skip_if_not_numpy17_friendly()
-
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
@@ -305,8 +286,6 @@ def test_timedelta_ops_scalar(self):
self.assertEqual(result, expected_sub)
def test_to_timedelta_on_missing_values(self):
- _skip_if_not_numpy17_friendly()
-
# GH5438
timedelta_NaT = np.timedelta64('NaT')
@@ -324,8 +303,6 @@ def test_to_timedelta_on_missing_values(self):
self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64'))
def test_timedelta_ops_with_missing_values(self):
- _skip_if_not_numpy17_friendly()
-
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
@@ -403,8 +380,6 @@ def test_timedelta_ops_with_missing_values(self):
assert_frame_equal(actual, dfn)
def test_apply_to_timedelta(self):
- _skip_if_not_numpy17_friendly()
-
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index f94910d9dec89..3da97074a93fd 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -34,7 +34,7 @@
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
-from pandas import _np_version_under1p7, _np_version_under1p8
+from pandas import _np_version_under1p8
from numpy.testing.decorators import slow
@@ -288,10 +288,7 @@ def test_indexing(self):
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
- if _np_version_under1p7:
- freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
- else:
- freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
+ freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
@@ -768,19 +765,6 @@ def test_index_cast_datetime64_other_units(self):
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
- def test_index_astype_datetime64(self):
- # valid only under 1.7!
- if not _np_version_under1p7:
- raise nose.SkipTest("test only valid in numpy < 1.7")
-
- idx = Index([datetime(2012, 1, 1)], dtype=object)
- casted = idx.astype(np.dtype('M8[D]'))
-
- casted = idx.astype(np.dtype('M8[D]'))
- expected = DatetimeIndex(idx.values)
- tm.assert_isinstance(casted, DatetimeIndex)
- self.assertTrue(casted.equals(expected))
-
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
@@ -2713,8 +2697,6 @@ def assert_index_parameters(self, index):
assert index.inferred_freq == '40960N'
def test_ns_index(self):
- tm._skip_if_not_numpy17_friendly()
-
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
@@ -2862,10 +2844,9 @@ def test_datetimeindex_accessors(self):
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
- if not _np_version_under1p7:
- bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
- dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
- self.assertRaises(ValueError, lambda: dti.is_month_start)
+ bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
+ dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
+ self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
@@ -3545,18 +3526,7 @@ def test_timestamp_compare_scalars(self):
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
-
- if pd._np_version_under1p7:
- # you have to convert to timestamp for this to work with numpy
- # scalars
- expected = left_f(Timestamp(lhs), rhs)
-
- # otherwise a TypeError is thrown
- if left not in ('eq', 'ne'):
- with tm.assertRaises(TypeError):
- left_f(lhs, rhs)
- else:
- expected = left_f(lhs, rhs)
+ expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index bcfb2357b668d..5635bb75dd9ce 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -23,8 +23,6 @@
from pandas.util.testing import assert_frame_equal
from pandas.compat import lrange, zip
-from pandas import _np_version_under1p7
-
try:
import pytz
@@ -1195,9 +1193,8 @@ def test_tzaware_offset(self):
offset = dates + offsets.Hour(5)
self.assertTrue(offset.equals(expected))
- if not _np_version_under1p7:
- offset = dates + np.timedelta64(5, 'h')
- self.assertTrue(offset.equals(expected))
+ offset = dates + np.timedelta64(5, 'h')
+ self.assertTrue(offset.equals(expected))
offset = dates + timedelta(hours=5)
self.assertTrue(offset.equals(expected))
@@ -1227,14 +1224,13 @@ def test_nat(self):
expected = ['2010-12-01 05:00', '2010-12-02 05:00', NaT]
self.assertTrue(idx.equals(DatetimeIndex(expected, tz='US/Pacific')))
- if not _np_version_under1p7:
- idx = idx + np.timedelta64(3, 'h')
- expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
- self.assertTrue(idx.equals(DatetimeIndex(expected, tz='US/Pacific')))
+ idx = idx + np.timedelta64(3, 'h')
+ expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
+ self.assertTrue(idx.equals(DatetimeIndex(expected, tz='US/Pacific')))
- idx = idx.tz_convert('US/Eastern')
- expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT]
- self.assertTrue(idx.equals(DatetimeIndex(expected, tz='US/Eastern')))
+ idx = idx.tz_convert('US/Eastern')
+ expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT]
+ self.assertTrue(idx.equals(DatetimeIndex(expected, tz='US/Eastern')))
if __name__ == '__main__':
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index a700a617b0dee..57dc5f4404621 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -10,7 +10,6 @@
from pandas.tseries.index import date_range
from pandas.tseries.frequencies import get_freq
import pandas.tseries.offsets as offsets
-from pandas import _np_version_under1p7
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
@@ -140,10 +139,7 @@ def test_constructor_with_stringoffset(self):
def test_repr(self):
dates = ['2014-03-07', '2014-01-01 09:00', '2014-01-01 00:00:00.000000001']
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
- if _np_version_under1p7:
- freqs = ['D', 'M', 'S']
- else:
- freqs = ['D', 'M', 'S', 'N']
+ freqs = ['D', 'M', 'S', 'N']
for date in dates:
for tz in timezones:
@@ -431,7 +427,6 @@ def test_parsing_timezone_offsets(self):
class TestTimestampNsOperations(tm.TestCase):
def setUp(self):
- tm._skip_if_not_numpy17_friendly()
self.timestamp = Timestamp(datetime.datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
@@ -539,15 +534,6 @@ def test_nat_arithmetic(self):
with tm.assertRaises(TypeError):
right - left
- if _np_version_under1p7:
- self.assertEqual(nat + np.timedelta64(1, 'h'), tslib.NaT)
- with tm.assertRaises(TypeError):
- np.timedelta64(1, 'h') + nat
-
- self.assertEqual(nat - np.timedelta64(1, 'h'), tslib.NaT)
- with tm.assertRaises(TypeError):
- np.timedelta64(1, 'h') - nat
-
class TestTslib(tm.TestCase):
@@ -655,10 +641,9 @@ def test_timestamp_and_series(self):
timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', tz='US/Eastern'))
first_timestamp = timestamp_series[0]
- if not _np_version_under1p7:
- delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])
- assert_series_equal(timestamp_series - first_timestamp, delta_series)
- assert_series_equal(first_timestamp - timestamp_series, -delta_series)
+ delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])
+ assert_series_equal(timestamp_series - first_timestamp, delta_series)
+ assert_series_equal(first_timestamp - timestamp_series, -delta_series)
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time objects
@@ -676,11 +661,10 @@ def test_addition_subtraction_types(self):
self.assertEqual(type(timestamp_instance + timedelta_instance), Timestamp)
self.assertEqual(type(timestamp_instance - timedelta_instance), Timestamp)
- if not _np_version_under1p7:
- # Timestamp +/- datetime64 not supported, so not tested (could possibly assert error raised?)
- timedelta64_instance = np.timedelta64(1, 'D')
- self.assertEqual(type(timestamp_instance + timedelta64_instance), Timestamp)
- self.assertEqual(type(timestamp_instance - timedelta64_instance), Timestamp)
+ # Timestamp +/- datetime64 not supported, so not tested (could possibly assert error raised?)
+ timedelta64_instance = np.timedelta64(1, 'D')
+ self.assertEqual(type(timestamp_instance + timedelta64_instance), Timestamp)
+ self.assertEqual(type(timestamp_instance - timedelta64_instance), Timestamp)
def test_addition_subtraction_preserve_frequency(self):
timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0]
@@ -691,10 +675,9 @@ def test_addition_subtraction_preserve_frequency(self):
self.assertEqual((timestamp_instance + timedelta_instance).freq, original_freq)
self.assertEqual((timestamp_instance - timedelta_instance).freq, original_freq)
- if not _np_version_under1p7:
- timedelta64_instance = np.timedelta64(1, 'D')
- self.assertEqual((timestamp_instance + timedelta64_instance).freq, original_freq)
- self.assertEqual((timestamp_instance - timedelta64_instance).freq, original_freq)
+ timedelta64_instance = np.timedelta64(1, 'D')
+ self.assertEqual((timestamp_instance + timedelta64_instance).freq, original_freq)
+ self.assertEqual((timestamp_instance - timedelta64_instance).freq, original_freq)
def test_resolution(self):
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index 0d6d74db6f18c..e762ebe9d85cf 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -7,9 +7,10 @@
import numpy as np
import pandas.tslib as tslib
-from pandas import compat, _np_version_under1p7
-from pandas.core.common import (ABCSeries, is_integer, is_integer_dtype, is_timedelta64_dtype,
- _values_from_object, is_list_like, isnull, _ensure_object)
+from pandas import compat
+from pandas.core.common import (ABCSeries, is_integer, is_integer_dtype,
+ is_timedelta64_dtype, _values_from_object,
+ is_list_like, isnull, _ensure_object)
repr_timedelta = tslib.repr_timedelta64
repr_timedelta64 = tslib.repr_timedelta64
@@ -29,9 +30,6 @@ def to_timedelta(arg, box=True, unit='ns'):
-------
ret : timedelta64/arrays of timedelta64 if parsing succeeded
"""
- if _np_version_under1p7:
- raise ValueError("to_timedelta is not support for numpy < 1.7")
-
unit = _validate_timedelta_unit(unit)
def _convert_listlike(arg, box, unit):
@@ -187,46 +185,9 @@ def _possibly_cast_to_timedelta(value, coerce=True, dtype=None):
sure that we are [ns] (as numpy 1.6.2 is very buggy in this regards,
don't force the conversion unless coerce is True
- if coerce='compat' force a compatibilty coercerion (to timedeltas) if needeed
if dtype is passed then this is the target dtype
"""
- # coercion compatability
- if coerce == 'compat' and _np_version_under1p7:
-
- def convert(td, dtype):
-
- # we have an array with a non-object dtype
- if hasattr(td,'item'):
- td = td.astype(np.int64).item()
- if td == tslib.iNaT:
- return td
- if dtype == 'm8[us]':
- td *= 1000
- return td
-
- if isnull(td) or td == tslib.compat_NaT or td == tslib.iNaT:
- return tslib.iNaT
-
- # convert td value to a nanosecond value
- d = td.days
- s = td.seconds
- us = td.microseconds
-
- if dtype == 'object' or dtype == 'm8[ns]':
- td = 1000*us + (s + d * 24 * 3600) * 10 ** 9
- else:
- raise ValueError("invalid conversion of dtype in np < 1.7 [%s]" % dtype)
-
- return td
-
- # < 1.7 coercion
- if not is_list_like(value):
- value = np.array([ value ])
-
- dtype = value.dtype
- return np.array([ convert(v,dtype) for v in value ], dtype='m8[ns]')
-
# deal with numpy not being able to handle certain timedelta operations
if isinstance(value, (ABCSeries, np.ndarray)):
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 7084184b7d423..3bdd422d9fc06 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -42,12 +42,6 @@ from pandas.compat import parse_date, string_types
from sys import version_info
-# numpy compat
-from distutils.version import LooseVersion
-_np_version = np.version.short_version
-_np_version_under1p6 = LooseVersion(_np_version) < '1.6'
-_np_version_under1p7 = LooseVersion(_np_version) < '1.7'
-
# GH3363
cdef bint PY2 = version_info[0] == 2
@@ -1472,33 +1466,16 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce):
if util.is_array(ts):
ts = ts.astype('int64').item()
if unit in ['Y','M','W']:
- if _np_version_under1p7:
- raise ValueError("unsupported unit for native timedelta under this numpy {0}".format(unit))
- else:
- ts = np.timedelta64(ts,unit)
+ ts = np.timedelta64(ts, unit)
else:
ts = cast_from_unit(ts, unit)
- if _np_version_under1p7:
- ts = timedelta(microseconds=ts/1000.0)
- else:
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(ts)
elif util.is_string_object(ts):
if ts in _nat_strings or coerce:
return np.timedelta64(iNaT)
else:
raise ValueError("Invalid type for timedelta scalar: %s" % type(ts))
- if _np_version_under1p7:
- if not isinstance(ts, timedelta):
- if coerce:
- return np.timedelta64(iNaT)
- raise ValueError("Invalid type for timedelta scalar: %s" % type(ts))
- if not PY2:
- # convert to microseconds in timedelta64
- ts = np.timedelta64(int(ts.total_seconds()*1e9 + ts.microseconds*1000))
- else:
- return ts
-
if isinstance(ts, timedelta):
ts = np.timedelta64(ts)
elif not isinstance(ts, np.timedelta64):
@@ -2124,9 +2101,6 @@ cdef object _get_transitions(object tz):
arr = np.hstack([np.array([0], dtype='M8[s]'), # place holder for first item
np.array(trans_list, dtype='M8[s]')]).astype('M8[ns]') # all trans listed
arr = arr.view('i8')
- # scale transitions correctly in numpy 1.6
- if _np_version_under1p7:
- arr *= 1000000000
arr[0] = NPY_NAT + 1
elif _is_fixed_offset(tz):
arr = np.array([NPY_NAT + 1], dtype=np.int64)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 42048ec9877fa..c6ddfd20cec7c 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -41,7 +41,7 @@
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
-from pandas import _testing, _np_version_under1p7
+from pandas import _testing
from pandas.io.common import urlopen
@@ -225,11 +225,6 @@ def setUpClass(cls):
cls.setUpClass = setUpClass
return cls
-def _skip_if_not_numpy17_friendly():
- # not friendly for < 1.7
- if _np_version_under1p7:
- import nose
- raise nose.SkipTest("numpy >= 1.7 is required")
def _skip_if_no_scipy():
try:
@@ -351,7 +346,6 @@ def get_locales(prefix=None, normalize=True,
# raw_locales is "\n" seperated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
- locales = raw_locales.split(b'\n')
raw_locales = []
for x in raw_locales:
try:
@@ -1231,7 +1225,7 @@ def dec(f):
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
-if sys.version_info[:2] >= (3,3):
+if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,)
def can_connect(url, error_classes=_network_error_classes):
diff --git a/setup.py b/setup.py
index 844f5742c0e69..f93ade98c26cf 100755
--- a/setup.py
+++ b/setup.py
@@ -35,12 +35,9 @@
_have_setuptools = False
setuptools_kwargs = {}
-min_numpy_ver = '1.6'
+min_numpy_ver = '1.7.0'
if sys.version_info[0] >= 3:
- if sys.version_info[1] >= 3: # 3.3 needs numpy 1.7+
- min_numpy_ver = "1.7.0b2"
-
setuptools_kwargs = {
'zip_safe': False,
'install_requires': ['python-dateutil >= 2',
@@ -53,7 +50,6 @@
"\n$ pip install distribute")
else:
- min_numpy_ver = '1.6.1'
setuptools_kwargs = {
'install_requires': ['python-dateutil',
'pytz >= 2011k',
| closes #7711
| https://api.github.com/repos/pandas-dev/pandas/pulls/7954 | 2014-08-07T17:30:47Z | 2014-08-12T21:59:20Z | 2014-08-12T21:59:20Z | 2014-08-12T21:59:23Z |
API: add Series.dt delegator for datetimelike methods (GH7207) | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 62518bf0d9ffd..ec6e2aff870c6 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -436,12 +436,47 @@ Time series-related
Series.tz_convert
Series.tz_localize
+Datetimelike Properties
+~~~~~~~~~~~~~~~~~~~~~~~
+``Series.dt`` can be used to access the values of the series as
+datetimelike and return several properties.
+Due to implementation details the methods show up here as methods of the
+``DatetimeProperties/PeriodProperties`` classes. These can be accessed like ``Series.dt.<property>``.
+
+.. currentmodule:: pandas.tseries.common
+
+.. autosummary::
+ :toctree: generated/
+
+ DatetimeProperties.date
+ DatetimeProperties.time
+ DatetimeProperties.year
+ DatetimeProperties.month
+ DatetimeProperties.day
+ DatetimeProperties.hour
+ DatetimeProperties.minute
+ DatetimeProperties.second
+ DatetimeProperties.microsecond
+ DatetimeProperties.nanosecond
+ DatetimeProperties.second
+ DatetimeProperties.weekofyear
+ DatetimeProperties.dayofweek
+ DatetimeProperties.weekday
+ DatetimeProperties.dayofyear
+ DatetimeProperties.quarter
+ DatetimeProperties.is_month_start
+ DatetimeProperties.is_month_end
+ DatetimeProperties.is_quarter_start
+ DatetimeProperties.is_quarter_end
+ DatetimeProperties.is_year_start
+ DatetimeProperties.is_year_end
+
String handling
~~~~~~~~~~~~~~~
``Series.str`` can be used to access the values of the series as
strings and apply several methods to it. Due to implementation
details the methods show up here as methods of the
-``StringMethods`` class.
+``StringMethods`` class. These can be acccessed like ``Series.str.<function/property>``.
.. currentmodule:: pandas.core.strings
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 93933140ab11c..e880bb2d6b952 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1099,6 +1099,41 @@ For instance,
for r in df2.itertuples():
print(r)
+.. _basics.dt_accessors:
+
+.dt accessor
+~~~~~~~~~~~~
+
+``Series`` has an accessor to succinctly return datetime like properties for the *values* of the Series, if its a datetime/period like Series.
+This will return a Series, indexed like the existing Series.
+
+.. ipython:: python
+
+ # datetime
+ s = Series(date_range('20130101 09:10:12',periods=4))
+ s
+ s.dt.hour
+ s.dt.second
+ s.dt.day
+
+This enables nice expressions like this:
+
+.. ipython:: python
+
+ s[s.dt.day==2]
+
+.. ipython:: python
+
+ # period
+ s = Series(period_range('20130101',periods=4,freq='D').asobject)
+ s
+ s.dt.year
+ s.dt.day
+
+.. note::
+
+ ``Series.dt`` will raise a ``TypeError`` if you access with a non-datetimelike values
+
.. _basics.string_methods:
Vectorized string methods
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 7a912361d0e14..60e32f8db5305 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -444,6 +444,7 @@ There are several time/date properties that one can access from ``Timestamp`` or
is_year_start,"Logical indicating if first day of year (defined by frequency)"
is_year_end,"Logical indicating if last day of year (defined by frequency)"
+Furthermore, if you have a ``Series`` with datetimelike values, then you can access these properties via the ``.dt`` accessor, see the :ref:`docs <basics.dt_accessors>`
DateOffset objects
------------------
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 5824e5824e8b5..8cdad6a872f49 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -11,6 +11,7 @@ users upgrade to this version.
- The ``Categorical`` type was integrated as a first-class pandas type, see :ref:`here <whatsnew_0150.cat>`
- Internal refactoring of the ``Index`` class to no longer sub-class ``ndarray``, see :ref:`Internal Refactoring <whatsnew_0150.refactoring>`
+ - New datetimelike properties accessor ``.dt`` for Series, see :ref:`Dateimelike Properties <whatsnew_0150.dt>`
- :ref:`Other Enhancements <whatsnew_0150.enhancements>`
@@ -165,6 +166,37 @@ previously results in ``Exception`` or ``TypeError`` (:issue:`7812`)
- ``DataFrame.tz_localize`` and ``DataFrame.tz_convert`` now accepts an optional ``level`` argument
for localizing a specific level of a MultiIndex (:issue:`7846`)
+.. _whatsnew_0150.dt:
+
+.dt accessor
+~~~~~~~~~~~~
+
+``Series`` has gained an accessor to succinctly return datetime like properties for the *values* of the Series, if its a datetime/period like Series. (:issue:`7207`)
+This will return a Series, indexed like the existing Series. See the :ref:`docs <basics.dt_accessors>`
+
+.. ipython:: python
+
+ # datetime
+ s = Series(date_range('20130101 09:10:12',periods=4))
+ s
+ s.dt.hour
+ s.dt.second
+ s.dt.day
+
+This enables nice expressions like this:
+
+.. ipython:: python
+
+ s[s.dt.day==2]
+
+.. ipython:: python
+
+ # period
+ s = Series(period_range('20130101',periods=4,freq='D').asobject)
+ s
+ s.dt.year
+ s.dt.day
+
.. _whatsnew_0150.refactoring:
Internal Refactoring
diff --git a/pandas/core/base.py b/pandas/core/base.py
index c04872ab74bb0..021f4474130bd 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -100,6 +100,62 @@ def _reset_cache(self, key=None):
else:
self._cache.pop(key, None)
+class PandasDelegate(PandasObject):
+ """ an abstract base class for delegating methods/properties """
+
+ def _delegate_property_get(self, name, *args, **kwargs):
+ raise TypeError("You cannot access the property {name}".format(name=name))
+
+ def _delegate_property_set(self, name, value, *args, **kwargs):
+ raise TypeError("The property {name} cannot be set".format(name=name))
+
+ def _delegate_method(self, name, *args, **kwargs):
+ raise TypeError("You cannot call method {name}".format(name=name))
+
+ @classmethod
+ def _add_delegate_accessors(cls, delegate, accessors, typ):
+ """
+ add accessors to cls from the delegate class
+
+ Parameters
+ ----------
+ cls : the class to add the methods/properties to
+ delegate : the class to get methods/properties & doc-strings
+ acccessors : string list of accessors to add
+ typ : 'property' or 'method'
+
+ """
+
+ def _create_delegator_property(name):
+
+ def _getter(self):
+ return self._delegate_property_get(name)
+ def _setter(self, new_values):
+ return self._delegate_property_set(name, new_values)
+
+ _getter.__name__ = name
+ _setter.__name__ = name
+
+ return property(fget=_getter, fset=_setter, doc=getattr(delegate,name).__doc__)
+
+ def _create_delegator_method(name):
+
+ def f(self, *args, **kwargs):
+ return self._delegate_method(name, *args, **kwargs)
+
+ f.__name__ = name
+ f.__doc__ = getattr(delegate,name).__doc__
+
+ return f
+
+ for name in accessors:
+
+ if typ == 'property':
+ f = _create_delegator_property(name)
+ else:
+ f = _create_delegator_method(name)
+
+ setattr(cls,name,f)
class FrozenList(PandasObject, list):
@@ -221,36 +277,6 @@ def f(self, *args, **kwargs):
class IndexOpsMixin(object):
""" common ops mixin to support a unified inteface / docs for Series / Index """
- def _is_allowed_index_op(self, name):
- if not self._allow_index_ops:
- raise TypeError("cannot perform an {name} operations on this type {typ}".format(
- name=name,typ=type(self._get_access_object())))
-
- def _ops_compat(self, name, op_accessor):
-
- obj = self._get_access_object()
- try:
- return self._wrap_access_object(getattr(obj,op_accessor))
- except AttributeError:
- raise TypeError("cannot perform an {name} operations on this type {typ}".format(
- name=name,typ=type(obj)))
-
- def _get_access_object(self):
- if isinstance(self, com.ABCSeries):
- return self.index
- return self
-
- def _wrap_access_object(self, obj):
- # we may need to coerce the input as we don't want non int64 if
- # we have an integer result
- if hasattr(obj,'dtype') and com.is_integer_dtype(obj):
- obj = obj.astype(np.int64)
-
- if isinstance(self, com.ABCSeries):
- return self._constructor(obj,index=self.index).__finalize__(self)
-
- return obj
-
# ndarray compatibility
__array_priority__ = 1000
@@ -449,68 +475,9 @@ def searchsorted(self, key, side='left'):
all = _unbox(np.ndarray.all)
any = _unbox(np.ndarray.any)
-# facilitate the properties on the wrapped ops
-def _field_accessor(name, docstring=None):
- op_accessor = '_{0}'.format(name)
- def f(self):
- return self._ops_compat(name,op_accessor)
-
- f.__name__ = name
- f.__doc__ = docstring
- return property(f)
-
class DatetimeIndexOpsMixin(object):
""" common ops mixin to support a unified inteface datetimelike Index """
- def _is_allowed_datetime_index_op(self, name):
- if not self._allow_datetime_index_ops:
- raise TypeError("cannot perform an {name} operations on this type {typ}".format(
- name=name,typ=type(self._get_access_object())))
-
- def _is_allowed_period_index_op(self, name):
- if not self._allow_period_index_ops:
- raise TypeError("cannot perform an {name} operations on this type {typ}".format(
- name=name,typ=type(self._get_access_object())))
-
- def _ops_compat(self, name, op_accessor):
-
- from pandas.tseries.index import DatetimeIndex
- from pandas.tseries.period import PeriodIndex
- obj = self._get_access_object()
- if isinstance(obj, DatetimeIndex):
- self._is_allowed_datetime_index_op(name)
- elif isinstance(obj, PeriodIndex):
- self._is_allowed_period_index_op(name)
- try:
- return self._wrap_access_object(getattr(obj,op_accessor))
- except AttributeError:
- raise TypeError("cannot perform an {name} operations on this type {typ}".format(
- name=name,typ=type(obj)))
-
- date = _field_accessor('date','Returns numpy array of datetime.date. The date part of the Timestamps')
- time = _field_accessor('time','Returns numpy array of datetime.time. The time part of the Timestamps')
- year = _field_accessor('year', "The year of the datetime")
- month = _field_accessor('month', "The month as January=1, December=12")
- day = _field_accessor('day', "The days of the datetime")
- hour = _field_accessor('hour', "The hours of the datetime")
- minute = _field_accessor('minute', "The minutes of the datetime")
- second = _field_accessor('second', "The seconds of the datetime")
- microsecond = _field_accessor('microsecond', "The microseconds of the datetime")
- nanosecond = _field_accessor('nanosecond', "The nanoseconds of the datetime")
- weekofyear = _field_accessor('weekofyear', "The week ordinal of the year")
- week = weekofyear
- dayofweek = _field_accessor('dayofweek', "The day of the week with Monday=0, Sunday=6")
- weekday = dayofweek
- dayofyear = _field_accessor('dayofyear', "The ordinal day of the year")
- quarter = _field_accessor('quarter', "The quarter of the date")
- qyear = _field_accessor('qyear')
- is_month_start = _field_accessor('is_month_start', "Logical indicating if first day of month (defined by frequency)")
- is_month_end = _field_accessor('is_month_end', "Logical indicating if last day of month (defined by frequency)")
- is_quarter_start = _field_accessor('is_quarter_start', "Logical indicating if first day of quarter (defined by frequency)")
- is_quarter_end = _field_accessor('is_quarter_end', "Logical indicating if last day of quarter (defined by frequency)")
- is_year_start = _field_accessor('is_year_start', "Logical indicating if first day of year (defined by frequency)")
- is_year_end = _field_accessor('is_year_end', "Logical indicating if last day of year (defined by frequency)")
-
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 90c3fa207e3bb..7b8b609fe0f2a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1193,12 +1193,17 @@ def _check_setitem_copy(self, stacklevel=4, t='setting'):
except:
pass
- if t == 'referant':
+ # a custom message
+ if isinstance(self.is_copy, string_types):
+ t = self.is_copy
+
+ elif t == 'referant':
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy")
+
else:
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 4f4fe092a3606..a58a3331f9759 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -37,7 +37,6 @@ def _try_get_item(x):
except AttributeError:
return x
-
def _indexOp(opname):
"""
Wrapper function for index comparison operations, to avoid
@@ -4281,7 +4280,6 @@ def isin(self, values, level=None):
return np.lib.arraysetops.in1d(labs, sought_labels)
MultiIndex._add_numeric_methods_disabled()
-
# For utility purposes
def _sparsify(label_list, start=0, sentinel=''):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 22284df337d97..3901e19968841 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -107,18 +107,6 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_metadata = ['name']
_allow_index_ops = True
- @property
- def _allow_datetime_index_ops(self):
- # disabling to invalidate datetime index ops (GH7206)
- # return self.index.is_all_dates and isinstance(self.index, DatetimeIndex)
- return False
-
- @property
- def _allow_period_index_ops(self):
- # disabling to invalidate period index ops (GH7206)
- # return self.index.is_all_dates and isinstance(self.index, PeriodIndex)
- return False
-
def __init__(self, data=None, index=None, dtype=None, name=None,
copy=False, fastpath=False):
@@ -2405,6 +2393,18 @@ def to_period(self, freq=None, copy=True):
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values,
index=new_index).__finalize__(self)
+
+ #------------------------------------------------------------------------------
+ # Datetimelike delegation methods
+
+ @cache_readonly
+ def dt(self):
+ from pandas.tseries.common import maybe_to_datetimelike
+ try:
+ return maybe_to_datetimelike(self)
+ except (Exception):
+ raise TypeError("Can only use .dt accessor with datetimelike values")
+
#------------------------------------------------------------------------------
# Categorical methods
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 356984ea88f43..179dc4d2948d9 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -4,8 +4,9 @@
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
-from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
+from pandas.core.base import FrozenList, FrozenNDArray, PandasDelegate, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
+from pandas.tseries.common import is_datetimelike
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import pandas.tslib as tslib
@@ -127,6 +128,53 @@ def test_values(self):
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
+class TestPandasDelegate(tm.TestCase):
+
+ def setUp(self):
+ pass
+
+ def test_invalida_delgation(self):
+ # these show that in order for the delegation to work
+ # the _delegate_* methods need to be overriden to not raise a TypeError
+
+ class Delegator(object):
+ _properties = ['foo']
+ _methods = ['bar']
+
+ def _set_foo(self, value):
+ self.foo = value
+
+ def _get_foo(self):
+ return self.foo
+
+ foo = property(_get_foo, _set_foo, doc="foo property")
+
+ def bar(self, *args, **kwargs):
+ """ a test bar method """
+ pass
+
+ class Delegate(PandasDelegate):
+ def __init__(self, obj):
+ self.obj = obj
+ Delegate._add_delegate_accessors(delegate=Delegator,
+ accessors=Delegator._properties,
+ typ='property')
+ Delegate._add_delegate_accessors(delegate=Delegator,
+ accessors=Delegator._methods,
+ typ='method')
+
+ delegate = Delegate(Delegator())
+
+ def f():
+ delegate.foo
+ self.assertRaises(TypeError, f)
+ def f():
+ delegate.foo = 5
+ self.assertRaises(TypeError, f)
+ def f():
+ delegate.foo()
+ self.assertRaises(TypeError, f)
+
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
@@ -526,13 +574,12 @@ def test_factorize(self):
class TestDatetimeIndexOps(Ops):
- _allowed = '_allow_datetime_index_ops'
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
- mask = lambda x: x._allow_datetime_index_ops or x._allow_period_index_ops
+ mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex) or is_datetimelike(x)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
@@ -784,11 +831,10 @@ def test_value_counts_unique(self):
class TestPeriodIndexOps(Ops):
- _allowed = '_allow_period_index_ops'
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
- mask = lambda x: x._allow_datetime_index_ops or x._allow_period_index_ops
+ mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex) or is_datetimelike(x)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 3e5fe1f392445..aa718a11d97cf 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -16,7 +16,7 @@
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, notnull,
- bdate_range, date_range, _np_version_under1p7)
+ bdate_range, date_range, period_range, _np_version_under1p7)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp, DatetimeIndex
@@ -71,6 +71,75 @@ def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEqual(result.name, self.ts.name)
+ def test_dt_namespace_accessor(self):
+
+ # GH 7207
+ # test .dt namespace accessor
+
+ ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter']
+ ok_for_period = ok_for_base + ['qyear']
+ ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
+ 'is_quarter_end', 'is_year_start', 'is_year_end']
+ ok_for_both = ok_for_dt
+
+ def get_expected(s, name):
+ result = getattr(Index(s.values),prop)
+ if isinstance(result, np.ndarray):
+ if com.is_integer_dtype(result):
+ result = result.astype('int64')
+ return Series(result,index=s.index)
+
+ # invalids
+ for s in [Series(np.arange(5)),
+ Series(list('abcde')),
+ Series(np.random.randn(5))]:
+ self.assertRaises(TypeError, lambda : s.dt)
+
+ # datetimeindex
+ for s in [Series(date_range('20130101',periods=5)),
+ Series(date_range('20130101',periods=5,freq='s')),
+ Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:
+
+ for prop in ok_for_dt:
+ tm.assert_series_equal(getattr(s.dt,prop),get_expected(s,prop))
+
+ # both
+ index = date_range('20130101',periods=3,freq='D')
+ s = Series(date_range('20140204',periods=3,freq='s'),index=index)
+ tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))
+ tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))
+ tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))
+
+ # periodindex
+ for s in [Series(period_range('20130101',periods=5,freq='D').asobject)]:
+
+ for prop in ok_for_period:
+ tm.assert_series_equal(getattr(s.dt,prop),get_expected(s,prop))
+
+ # test limited display api
+ def get_dir(s):
+ results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]
+ return list(sorted(set(results)))
+
+ s = Series(date_range('20130101',periods=5,freq='D'))
+ results = get_dir(s)
+ tm.assert_almost_equal(results,list(sorted(set(ok_for_dt))))
+
+ s = Series(period_range('20130101',periods=5,freq='D').asobject)
+ results = get_dir(s)
+ tm.assert_almost_equal(results,list(sorted(set(ok_for_period))))
+
+ # no setting allowed
+ s = Series(date_range('20130101',periods=5,freq='D'))
+ with tm.assertRaisesRegexp(ValueError, "modifications"):
+ s.dt.hour = 5
+
+ # trying to set a copy
+ with pd.option_context('chained_assignment','raise'):
+ def f():
+ s.dt.hour[0] = 5
+ self.assertRaises(com.SettingWithCopyError, f)
+
def test_binop_maybe_preserve_name(self):
# names match, preserve
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
new file mode 100644
index 0000000000000..92ccd1248fac9
--- /dev/null
+++ b/pandas/tseries/common.py
@@ -0,0 +1,115 @@
+## datetimelike delegation ##
+
+import numpy as np
+from pandas.core.base import PandasDelegate
+from pandas.core import common as com
+from pandas import Series, DatetimeIndex, PeriodIndex
+from pandas import lib, tslib
+
+def is_datetimelike(data):
+ """ return a boolean if we can be successfully converted to a datetimelike """
+ try:
+ maybe_to_datetimelike(data)
+ return True
+ except (Exception):
+ pass
+ return False
+
+def maybe_to_datetimelike(data, copy=False):
+ """
+ return a DelegatedClass of a Series that is datetimelike (e.g. datetime64[ns] dtype or a Series of Periods)
+ raise TypeError if this is not possible.
+
+ Parameters
+ ----------
+ data : Series
+ copy : boolean, default False
+ copy the input data
+
+ Returns
+ -------
+ DelegatedClass
+
+ """
+
+ if not isinstance(data, Series):
+ raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
+
+ index = data.index
+ if issubclass(data.dtype.type, np.datetime64):
+ return DatetimeProperties(DatetimeIndex(data, copy=copy), index)
+ else:
+
+ if isinstance(data, PeriodIndex):
+ return PeriodProperties(PeriodIndex(data, copy=copy), index)
+
+ data = com._values_from_object(data)
+ inferred = lib.infer_dtype(data)
+ if inferred == 'period':
+ return PeriodProperties(PeriodIndex(data), index)
+
+ raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
+
+class Properties(PandasDelegate):
+
+ def __init__(self, values, index):
+ self.values = values
+ self.index = index
+
+ def _delegate_property_get(self, name):
+ result = getattr(self.values,name)
+
+ # maybe need to upcast (ints)
+ if isinstance(result, np.ndarray):
+ if com.is_integer_dtype(result):
+ result = result.astype('int64')
+
+ # return the result as a Series, which is by definition a copy
+ result = Series(result, index=self.index)
+
+ # setting this object will show a SettingWithCopyWarning/Error
+ result.is_copy = ("modifications to a property of a datetimelike object are not "
+ "supported and are discarded. Change values on the original.")
+
+ return result
+
+ def _delegate_property_set(self, name, value, *args, **kwargs):
+ raise ValueError("modifications to a property of a datetimelike object are not "
+ "supported. Change values on the original.")
+
+
+class DatetimeProperties(Properties):
+ """
+ Accessor object for datetimelike properties of the Series values.
+
+ Examples
+ --------
+ >>> s.dt.hour
+ >>> s.dt.second
+ >>> s.dt.quarter
+
+ Returns a Series indexed like the original Series.
+ Raises TypeError if the Series does not contain datetimelike values.
+ """
+
+DatetimeProperties._add_delegate_accessors(delegate=DatetimeIndex,
+ accessors=DatetimeIndex._datetimelike_ops,
+ typ='property')
+
+class PeriodProperties(Properties):
+ """
+ Accessor object for datetimelike properties of the Series values.
+
+ Examples
+ --------
+ >>> s.dt.hour
+ >>> s.dt.second
+ >>> s.dt.quarter
+
+ Returns a Series indexed like the original Series.
+ Raises TypeError if the Series does not contain datetimelike values.
+ """
+
+PeriodProperties._add_delegate_accessors(delegate=PeriodIndex,
+ accessors=PeriodIndex._datetimelike_ops,
+ typ='property')
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 3b8cebcb51684..2acdcfffb7d9a 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -174,7 +174,10 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index):
offset = None
_comparables = ['name','freqstr','tz']
_attributes = ['name','freq','tz']
- _allow_datetime_index_ops = True
+ _datetimelike_ops = ['year','month','day','hour','minute','second',
+ 'weekofyear','week','dayofweek','weekday','dayofyear','quarter',
+ 'date','time','microsecond','nanosecond','is_month_start','is_month_end',
+ 'is_quarter_start','is_quarter_end','is_year_start','is_year_end']
_is_numeric_dtype = False
def __new__(cls, data=None,
@@ -1428,30 +1431,31 @@ def freqstr(self):
return None
return self.offset.freqstr
- _year = _field_accessor('year', 'Y')
- _month = _field_accessor('month', 'M', "The month as January=1, December=12")
- _day = _field_accessor('day', 'D')
- _hour = _field_accessor('hour', 'h')
- _minute = _field_accessor('minute', 'm')
- _second = _field_accessor('second', 's')
- _microsecond = _field_accessor('microsecond', 'us')
- _nanosecond = _field_accessor('nanosecond', 'ns')
- _weekofyear = _field_accessor('weekofyear', 'woy')
- _week = _weekofyear
- _dayofweek = _field_accessor('dayofweek', 'dow',
+ year = _field_accessor('year', 'Y', "The year of the datetime")
+ month = _field_accessor('month', 'M', "The month as January=1, December=12")
+ day = _field_accessor('day', 'D', "The days of the datetime")
+ hour = _field_accessor('hour', 'h', "The hours of the datetime")
+ minute = _field_accessor('minute', 'm', "The minutes of the datetime")
+ second = _field_accessor('second', 's', "The seconds of the datetime")
+ millisecond = _field_accessor('millisecond', 'ms', "The milliseconds of the datetime")
+ microsecond = _field_accessor('microsecond', 'us', "The microseconds of the datetime")
+ nanosecond = _field_accessor('nanosecond', 'ns', "The nanoseconds of the datetime")
+ weekofyear = _field_accessor('weekofyear', 'woy', "The week ordinal of the year")
+ week = weekofyear
+ dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
- _weekday = _dayofweek
- _dayofyear = _field_accessor('dayofyear', 'doy')
- _quarter = _field_accessor('quarter', 'q')
- _is_month_start = _field_accessor('is_month_start', 'is_month_start')
- _is_month_end = _field_accessor('is_month_end', 'is_month_end')
- _is_quarter_start = _field_accessor('is_quarter_start', 'is_quarter_start')
- _is_quarter_end = _field_accessor('is_quarter_end', 'is_quarter_end')
- _is_year_start = _field_accessor('is_year_start', 'is_year_start')
- _is_year_end = _field_accessor('is_year_end', 'is_year_end')
+ weekday = dayofweek
+ dayofyear = _field_accessor('dayofyear', 'doy', "The ordinal day of the year")
+ quarter = _field_accessor('quarter', 'q', "The quarter of the date")
+ is_month_start = _field_accessor('is_month_start', 'is_month_start', "Logical indicating if first day of month (defined by frequency)")
+ is_month_end = _field_accessor('is_month_end', 'is_month_end', "Logical indicating if last day of month (defined by frequency)")
+ is_quarter_start = _field_accessor('is_quarter_start', 'is_quarter_start', "Logical indicating if first day of quarter (defined by frequency)")
+ is_quarter_end = _field_accessor('is_quarter_end', 'is_quarter_end', "Logical indicating if last day of quarter (defined by frequency)")
+ is_year_start = _field_accessor('is_year_start', 'is_year_start', "Logical indicating if first day of year (defined by frequency)")
+ is_year_end = _field_accessor('is_year_end', 'is_year_end', "Logical indicating if last day of year (defined by frequency)")
@property
- def _time(self):
+ def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
@@ -1460,7 +1464,7 @@ def _time(self):
return _algos.arrmap_object(self.asobject.values, lambda x: x.time())
@property
- def _date(self):
+ def date(self):
"""
Returns numpy array of datetime.date. The date part of the Timestamps.
"""
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index e80fdf28c4089..b8b97a35cba15 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -33,14 +33,14 @@ def f(self):
return property(f)
-def _field_accessor(name, alias):
+def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field_arr(alias, self.values, base)
f.__name__ = name
+ f.__doc__ = docstring
return property(f)
-
class Period(PandasObject):
"""
Represents an period of time
@@ -572,8 +572,9 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
- _allow_period_index_ops = True
_attributes = ['name','freq']
+ _datetimelike_ops = ['year','month','day','hour','minute','second',
+ 'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear']
_is_numeric_dtype = False
__eq__ = _period_index_cmp('__eq__')
@@ -786,19 +787,19 @@ def asfreq(self, freq=None, how='E'):
def to_datetime(self, dayfirst=False):
return self.to_timestamp()
- _year = _field_accessor('year', 0)
- _month = _field_accessor('month', 3)
- _day = _field_accessor('day', 4)
- _hour = _field_accessor('hour', 5)
- _minute = _field_accessor('minute', 6)
- _second = _field_accessor('second', 7)
- _weekofyear = _field_accessor('week', 8)
- _week = _weekofyear
- _dayofweek = _field_accessor('dayofweek', 10)
- _weekday = _dayofweek
- _dayofyear = day_of_year = _field_accessor('dayofyear', 9)
- _quarter = _field_accessor('quarter', 2)
- _qyear = _field_accessor('qyear', 1)
+ year = _field_accessor('year', 0, "The year of the period")
+ month = _field_accessor('month', 3, "The month as January=1, December=12")
+ day = _field_accessor('day', 4, "The days of the period")
+ hour = _field_accessor('hour', 5, "The hour of the period")
+ minute = _field_accessor('minute', 6, "The minute of the period")
+ second = _field_accessor('second', 7, "The second of the period")
+ weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
+ week = weekofyear
+ dayofweek = _field_accessor('dayofweek', 10, "The day of the week with Monday=0, Sunday=6")
+ weekday = dayofweek
+ dayofyear = day_of_year = _field_accessor('dayofyear', 9, "The ordinal day of the year")
+ quarter = _field_accessor('quarter', 2, "The quarter of the date")
+ qyear = _field_accessor('qyear', 1)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
| closes #7207
```
In [1]: s = Series(date_range('20130101',periods=3))
In [2]: s.dt.
s.dt.date s.dt.dayofweek s.dt.hour s.dt.is_month_start s.dt.is_quarter_start s.dt.is_year_start s.dt.minute s.dt.nanosecond s.dt.second s.dt.week s.dt.year
s.dt.day s.dt.dayofyear s.dt.is_month_end s.dt.is_quarter_end s.dt.is_year_end s.dt.microsecond s.dt.month s.dt.quarter s.dt.time s.dt.weekofyear
In [3]: s.dt.year
Out[3]: array([2013, 2013, 2013])
In [4]: s.dt.hour
Out[4]: array([0, 0, 0])
In [5]: Series(np.arange(5)).dt
TypeError: Can only use .dt accessor with datetimelike values
```
Tab completion is specific to the type of wrapped delegate (DatetimeIndex or PeriodIndex)
```
In [5]: p = Series(period_range('20130101',periods=3,freq='D').asobject)
In [6]: p.dt.
p.dt.day p.dt.dayofweek p.dt.dayofyear p.dt.hour p.dt.minute p.dt.month p.dt.quarter p.dt.qyear p.dt.second p.dt.week p.dt.weekofyear p.dt.year
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/7953 | 2014-08-07T16:59:07Z | 2014-08-10T22:25:36Z | 2014-08-10T22:25:35Z | 2014-08-10T22:25:41Z |
ENH: add schema support to sql functions | diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt
index fec0a96a3d077..baba82f588ed6 100644
--- a/ci/requirements-2.6.txt
+++ b/ci/requirements-2.6.txt
@@ -5,7 +5,7 @@ pytz==2013b
http://www.crummy.com/software/BeautifulSoup/bs4/download/4.2/beautifulsoup4-4.2.0.tar.gz
html5lib==1.0b2
numexpr==1.4.2
-sqlalchemy==0.7.1
+sqlalchemy==0.7.4
pymysql==0.6.0
psycopg2==2.5
scipy==0.11.0
diff --git a/doc/source/io.rst b/doc/source/io.rst
index ad0a5bb3b67c9..d60fc234650e0 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3320,6 +3320,20 @@ to pass to :func:`pandas.to_datetime`:
You can check if a table exists using :func:`~pandas.io.sql.has_table`
+Schema support
+~~~~~~~~~~~~~~
+
+.. versionadded:: 0.15.0
+
+Reading from and writing to different schema's is supported through the ``schema``
+keyword in the :func:`~pandas.read_sql_table` and :func:`~pandas.DataFrame.to_sql`
+functions. Note however that this depends on the database flavor (sqlite does not
+have schema's). For example:
+
+.. code-block:: python
+
+ df.to_sql('table', engine, schema='other_schema')
+ pd.read_sql_table('table', engine, schema='other_schema')
Querying
~~~~~~~~
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 5152e6d8eaf63..d6fdbf59c51f9 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -429,6 +429,13 @@ Enhancements
- Added support for a ``chunksize`` parameter to ``to_sql`` function. This allows DataFrame to be written in chunks and avoid packet-size overflow errors (:issue:`8062`)
- Added support for writing ``datetime.date`` and ``datetime.time`` object columns with ``to_sql`` (:issue:`6932`).
+- Added support for specifying a ``schema`` to read from/write to with ``read_sql_table`` and ``to_sql`` (:issue:`7441`, :issue:`7952`).
+ For example:
+
+.. code-block:: python
+
+ df.to_sql('table', engine, schema='other_schema')
+ pd.read_sql_table('table', engine, schema='other_schema')
- Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d56095b6300a4..42814c7eca4a4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -915,8 +915,8 @@ def to_msgpack(self, path_or_buf=None, **kwargs):
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, **kwargs)
- def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True,
- index_label=None, chunksize=None):
+ def to_sql(self, name, con, flavor='sqlite', schema=None, if_exists='fail',
+ index=True, index_label=None, chunksize=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -932,6 +932,9 @@ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True,
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
+ schema : string, default None
+ Specify the schema (if database flavor supports this). If None, use
+ default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
@@ -949,8 +952,8 @@ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True,
"""
from pandas.io import sql
sql.to_sql(
- self, name, con, flavor=flavor, if_exists=if_exists, index=index,
- index_label=index_label, chunksize=chunksize)
+ self, name, con, flavor=flavor, schema=schema, if_exists=if_exists,
+ index=index, index_label=index_label, chunksize=chunksize)
def to_pickle(self, path):
"""
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 434fc409f671b..b72c41e45c9ca 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -38,7 +38,7 @@ def _is_sqlalchemy_engine(con):
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
-
+
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
@@ -47,7 +47,7 @@ def _is_sqlalchemy_engine(con):
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
-
+
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
@@ -145,7 +145,7 @@ def _safe_fetch(cur):
if not isinstance(result, list):
result = list(result)
return result
- except Exception as e: # pragma: no cover
+ except Exception as e: # pragma: no cover
excName = e.__class__.__name__
if excName == 'OperationalError':
return []
@@ -187,7 +187,7 @@ def tquery(sql, con=None, cur=None, retry=True):
con.commit()
except Exception as e:
excName = e.__class__.__name__
- if excName == 'OperationalError': # pragma: no cover
+ if excName == 'OperationalError': # pragma: no cover
print('Failed to commit, may need to restart interpreter')
else:
raise
@@ -199,7 +199,7 @@ def tquery(sql, con=None, cur=None, retry=True):
if result and len(result[0]) == 1:
# python 3 compat
result = list(lzip(*result)[0])
- elif result is None: # pragma: no cover
+ elif result is None: # pragma: no cover
result = []
return result
@@ -253,8 +253,8 @@ def uquery(sql, con=None, cur=None, retry=True, params=None):
#------------------------------------------------------------------------------
#--- Read and write to DataFrames
-def read_sql_table(table_name, con, index_col=None, coerce_float=True,
- parse_dates=None, columns=None):
+def read_sql_table(table_name, con, schema=None, index_col=None,
+ coerce_float=True, parse_dates=None, columns=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy engine, returns a DataFrame.
@@ -266,6 +266,9 @@ def read_sql_table(table_name, con, index_col=None, coerce_float=True,
Name of SQL table in database
con : SQLAlchemy engine
Sqlite DBAPI connection mode not supported
+ schema : string, default None
+ Name of SQL schema in database to query (if database flavor supports this).
+ If None, use default schema (default).
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
@@ -298,7 +301,7 @@ def read_sql_table(table_name, con, index_col=None, coerce_float=True,
"SQLAlchemy engines.")
import sqlalchemy
from sqlalchemy.schema import MetaData
- meta = MetaData(con)
+ meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name])
except sqlalchemy.exc.InvalidRequestError:
@@ -437,8 +440,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
coerce_float=coerce_float, parse_dates=parse_dates)
-def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
- index_label=None, chunksize=None):
+def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
+ index=True, index_label=None, chunksize=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -455,6 +458,9 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
+ schema : string, default None
+ Name of SQL schema in database to write to (if database flavor supports
+ this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
@@ -473,7 +479,7 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
- pandas_sql = pandasSQL_builder(con, flavor=flavor)
+ pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
@@ -481,10 +487,11 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
raise NotImplementedError
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
- index_label=index_label, chunksize=chunksize)
+ index_label=index_label, schema=schema,
+ chunksize=chunksize)
-def has_table(table_name, con, flavor='sqlite'):
+def has_table(table_name, con, flavor='sqlite', schema=None):
"""
Check if DataBase has named table.
@@ -500,12 +507,15 @@ def has_table(table_name, con, flavor='sqlite'):
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
+ schema : string, default None
+ Name of SQL schema in database to write to (if database flavor supports
+ this). If None, use default schema (default).
Returns
-------
boolean
"""
- pandas_sql = pandasSQL_builder(con, flavor=flavor)
+ pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
@@ -515,7 +525,7 @@ def has_table(table_name, con, flavor='sqlite'):
"and will be removed in future versions. "
"MySQL will be further supported with SQLAlchemy engines.")
-def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):
+def pandasSQL_builder(con, flavor=None, schema=None, meta=None, is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
@@ -523,7 +533,7 @@ def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
if _is_sqlalchemy_engine(con):
- return PandasSQLAlchemy(con, meta=meta)
+ return PandasSQLAlchemy(con, schema=schema, meta=meta)
else:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
@@ -540,24 +550,26 @@ class PandasSQLTable(PandasObject):
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
- if_exists='fail', prefix='pandas', index_label=None):
+ if_exists='fail', prefix='pandas', index_label=None,
+ schema=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
+ self.schema = schema
if frame is not None:
# We want to write a frame
- if self.pd_sql.has_table(self.name):
+ if self.pd_sql.has_table(self.name, self.schema):
if if_exists == 'fail':
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replace':
- self.pd_sql.drop_table(self.name)
+ self.pd_sql.drop_table(self.name, self.schema)
self.table = self._create_table_statement()
self.create()
elif if_exists == 'append':
- self.table = self.pd_sql.get_table(self.name)
+ self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
self.table = self._create_table_statement()
else:
@@ -568,13 +580,13 @@ def __init__(self, name, pandas_sql_engine, frame=None, index=True,
self.create()
else:
# no data provided, read-only mode
- self.table = self.pd_sql.get_table(self.name)
+ self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
- return self.pd_sql.has_table(self.name)
+ return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
@@ -709,7 +721,7 @@ def _create_table_statement(self):
columns = [Column(name, typ)
for name, typ in column_names_and_types]
- return Table(self.name, self.pd_sql.meta, *columns)
+ return Table(self.name, self.pd_sql.meta, *columns, schema=self.schema)
def _harmonize_columns(self, parse_dates=None):
""" Make a data_frame's column type align with an sql_table
@@ -830,11 +842,11 @@ class PandasSQLAlchemy(PandasSQL):
using SQLAlchemy to handle DataBase abstraction
"""
- def __init__(self, engine, meta=None):
+ def __init__(self, engine, schema=None, meta=None):
self.engine = engine
if not meta:
from sqlalchemy.schema import MetaData
- meta = MetaData(self.engine)
+ meta = MetaData(self.engine, schema=schema)
self.meta = meta
@@ -843,9 +855,10 @@ def execute(self, *args, **kwargs):
return self.engine.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
- parse_dates=None, columns=None):
+ parse_dates=None, columns=None, schema=None):
- table = PandasSQLTable(table_name, self, index=index_col)
+ table = PandasSQLTable(
+ table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
@@ -868,26 +881,31 @@ def read_sql(self, sql, index_col=None, coerce_float=True,
return data_frame
def to_sql(self, frame, name, if_exists='fail', index=True,
- index_label=None, chunksize=None):
+ index_label=None, schema=None, chunksize=None):
table = PandasSQLTable(
name, self, frame=frame, index=index, if_exists=if_exists,
- index_label=index_label)
+ index_label=index_label, schema=schema)
table.insert(chunksize)
@property
def tables(self):
return self.meta.tables
- def has_table(self, name):
- return self.engine.has_table(name)
+ def has_table(self, name, schema=None):
+ return self.engine.has_table(name, schema or self.meta.schema)
- def get_table(self, table_name):
- return self.meta.tables.get(table_name)
+ def get_table(self, table_name, schema=None):
+ schema = schema or self.meta.schema
+ if schema:
+ return self.meta.tables.get('.'.join([schema, table_name]))
+ else:
+ return self.meta.tables.get(table_name)
- def drop_table(self, table_name):
- if self.engine.has_table(table_name):
- self.meta.reflect(only=[table_name])
- self.get_table(table_name).drop()
+ def drop_table(self, table_name, schema=None):
+ schema = schema or self.meta.schema
+ if self.engine.has_table(table_name, schema):
+ self.meta.reflect(only=[table_name], schema=schema)
+ self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name):
@@ -1113,7 +1131,7 @@ def _fetchall_as_list(self, cur):
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
- index_label=None, chunksize=None):
+ index_label=None, schema=None, chunksize=None):
"""
Write records stored in a DataFrame to a SQL database.
@@ -1133,7 +1151,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=index_label)
table.insert(chunksize)
- def has_table(self, name):
+ def has_table(self, name, schema=None):
flavor_map = {
'sqlite': ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name='%s';") % name,
@@ -1142,10 +1160,10 @@ def has_table(self, name):
return len(self.execute(query).fetchall()) > 0
- def get_table(self, table_name):
+ def get_table(self, table_name, schema=None):
return None # not supported in Legacy mode
- def drop_table(self, name):
+ def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % name
self.execute(drop_sql)
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 0d55f4c1dbcd8..93c95169a60d1 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -28,7 +28,7 @@
from datetime import datetime, date, time
-from pandas import DataFrame, Series, Index, MultiIndex, isnull
+from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types
@@ -457,12 +457,12 @@ def test_roundtrip(self):
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
- sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
+ sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, flavor='sqlite', chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
- tm.assert_frame_equal(result, self.test_frame1)
+ tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
@@ -591,13 +591,13 @@ def test_to_sql_index_label_multiindex(self):
index_label='C')
def test_multiindex_roundtrip(self):
- df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
+ df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A','B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
- result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
+ result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A','B'])
- tm.assert_frame_equal(df, result, check_index_type=True)
+ tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
@@ -1196,8 +1196,8 @@ class TestPostgreSQLAlchemy(_TestSQLAlchemy):
flavor = 'postgresql'
def connect(self):
- return sqlalchemy.create_engine(
- 'postgresql+{driver}://postgres@localhost/pandas_nosetest'.format(driver=self.driver))
+ url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
+ return sqlalchemy.create_engine(url.format(driver=self.driver))
def setup_driver(self):
try:
@@ -1213,6 +1213,61 @@ def tearDown(self):
for table in c.fetchall():
self.conn.execute("DROP TABLE %s" % table[0])
+ def test_schema_support(self):
+ # only test this for postgresql (schema's not supported in mysql/sqlite)
+ df = DataFrame({'col1':[1, 2], 'col2':[0.1, 0.2], 'col3':['a', 'n']})
+
+ # create a schema
+ self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
+ self.conn.execute("CREATE SCHEMA other;")
+
+ # write dataframe to different schema's
+ df.to_sql('test_schema_public', self.conn, index=False)
+ df.to_sql('test_schema_public_explicit', self.conn, index=False,
+ schema='public')
+ df.to_sql('test_schema_other', self.conn, index=False, schema='other')
+
+ # read dataframes back in
+ res1 = sql.read_sql_table('test_schema_public', self.conn)
+ tm.assert_frame_equal(df, res1)
+ res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
+ tm.assert_frame_equal(df, res2)
+ res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
+ schema='public')
+ tm.assert_frame_equal(df, res3)
+ res4 = sql.read_sql_table('test_schema_other', self.conn,
+ schema='other')
+ tm.assert_frame_equal(df, res4)
+ self.assertRaises(ValueError, sql.read_sql_table, 'test_schema_other',
+ self.conn, schema='public')
+
+ ## different if_exists options
+
+ # create a schema
+ self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
+ self.conn.execute("CREATE SCHEMA other;")
+
+ # write dataframe with different if_exists options
+ df.to_sql('test_schema_other', self.conn, schema='other', index=False)
+ df.to_sql('test_schema_other', self.conn, schema='other', index=False,
+ if_exists='replace')
+ df.to_sql('test_schema_other', self.conn, schema='other', index=False,
+ if_exists='append')
+ res = sql.read_sql_table('test_schema_other', self.conn, schema='other')
+ tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
+
+ ## specifying schema in user-provided meta
+
+ engine2 = self.connect()
+ meta = sqlalchemy.MetaData(engine2, schema='other')
+ pdsql = sql.PandasSQLAlchemy(engine2, meta=meta)
+ pdsql.to_sql(df, 'test_schema_other2', index=False)
+ pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='replace')
+ pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='append')
+ res1 = sql.read_sql_table('test_schema_other2', self.conn, schema='other')
+ res2 = pdsql.read_table('test_schema_other2')
+ tm.assert_frame_equal(res1, res2)
+
#------------------------------------------------------------------------------
#--- Test Sqlite / MySQL fallback
@@ -1295,7 +1350,7 @@ def test_datetime_date(self):
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
-
+
def test_datetime_time(self):
# test support for datetime.time
raise nose.SkipTest("datetime.time not supported for sqlite fallback")
| Related #7441. Work in progress for now
Add support to read/write data from/to a specific database schema through a `schema` keyword. If this is `None`, use the default schema as before.
- [x] support via a `schema` arg in the `read_sql_table` and `to_sql` functions
- [x] not for `read_sql_query` -> is this needed? (this is a direct `execute`, and I don't directly see how this can be aware of schema's) -> indeed not
- [x] tested for postgresql
- [ ] test for other databases (mysql and sqlite don't support schema's -> so can't test on travis)
- [x] docs
- [x] Cause of the failure at the moment: `schema` as arg for `MetaData` is only introduced in sqlalchemy 0.7.4. We test on 0.7.1 => raise required version
| https://api.github.com/repos/pandas-dev/pandas/pulls/7952 | 2014-08-07T14:12:28Z | 2014-08-31T22:29:28Z | 2014-08-31T22:29:28Z | 2016-10-14T04:55:27Z |
DOC/TST: index followup | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 9d443254ae25a..62518bf0d9ffd 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -235,8 +235,8 @@ Constructor
Series
-Attributes and underlying data
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Attributes
+~~~~~~~~~~
**Axes**
* **index**: axis labels
@@ -246,6 +246,14 @@ Attributes and underlying data
Series.values
Series.dtype
Series.ftype
+ Series.shape
+ Series.size
+ Series.nbytes
+ Series.ndim
+ Series.strides
+ Series.itemsize
+ Series.base
+ Series.T
Conversion
~~~~~~~~~~
@@ -1087,11 +1095,36 @@ used before calling these methods directly.**
Index
+Attributes
+~~~~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ Index.values
+ Index.is_monotonic
+ Index.is_unique
+ Index.dtype
+ Index.inferred_type
+ Index.is_all_dates
+ Index.shape
+ Index.size
+ Index.nbytes
+ Index.ndim
+ Index.strides
+ Index.itemsize
+ Index.base
+ Index.T
+
Modifying and Computations
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
+ Index.all
+ Index.any
+ Index.argmin
+ Index.argmax
Index.copy
Index.delete
Index.diff
@@ -1101,6 +1134,8 @@ Modifying and Computations
Index.factorize
Index.identical
Index.insert
+ Index.min
+ Index.max
Index.order
Index.reindex
Index.repeat
@@ -1161,14 +1196,6 @@ Selecting
Index.slice_indexer
Index.slice_locs
-Properties
-~~~~~~~~~~
-.. autosummary::
- :toctree: generated/
-
- Index.is_monotonic
- Index.is_numeric
-
.. _api.datetimeindex:
DatetimeIndex
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 32af1924aee70..40977aee44cdd 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2101,7 +2101,8 @@ any pickled pandas object (or any other pickled object) from file:
.. warning::
- In 0.13, pickle preserves compatibility with pickles created prior to 0.13. These must
+ Several internal refactorings, 0.13 (:ref:`Series Refactoring <whatsnew_0130.refactoring>`), and 0.15 (:ref:`Index Refactoring <whatsnew_0150.refactoring>`),
+ preserve compatibility with pickles created prior to these versions. However, these must
be read with ``pd.read_pickle``, rather than the default python ``pickle.load``.
See `this question <http://stackoverflow.com/questions/20444593/pandas-compiled-from-source-default-pickle-behavior-changed>`__
for a detailed explanation.
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index bb039b4484c7d..2520015581cc8 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -169,10 +169,11 @@ Internal Refactoring
In 0.15.0 ``Index`` has internally been refactored to no longer sub-class ``ndarray``
but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This change allows very easy sub-classing and creation of new index types. This should be
-a transparent change with only very limited API implications (:issue:`5080`,:issue:`7439`,:issue:`7796`)
+a transparent change with only very limited API implications (:issue:`5080`, :issue:`7439`, :issue:`7796`)
- you may need to unpickle pandas version < 0.15.0 pickles using ``pd.read_pickle`` rather than ``pickle.load``. See :ref:`pickle docs <io.pickle>`
-- when plotting with a ``PeriodIndex``. The ``matplotlib`` internal axes will now be arrays of ``Period`` rather than a ``PeriodIndex``. (this is similar to how a ``DatetimeIndex`` passess arrays of ``datetimes`` now)
+- when plotting with a ``PeriodIndex``. The ``matplotlib`` internal axes will now be arrays of ``Period`` rather than a ``PeriodIndex``. (this is similar to how a ``DatetimeIndex`` passes arrays of ``datetimes`` now)
+- MultiIndexes will now raise similary to other pandas objects w.r.t. truth testing, See :ref:`here <gotchas.truth>` (:issue:`7897`).
.. _whatsnew_0150.cat:
diff --git a/pandas/core/base.py b/pandas/core/base.py
index f685edd477b8c..c04872ab74bb0 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -313,10 +313,34 @@ def max(self):
""" The maximum value of the object """
return nanops.nanmax(self.values)
+ def argmax(self, axis=None):
+ """
+ return a ndarray of the maximum argument indexer
+
+ See also
+ --------
+ numpy.ndarray.argmax
+ """
+ return nanops.nanargmax(self.values)
+
def min(self):
""" The minimum value of the object """
return nanops.nanmin(self.values)
+ def argmin(self, axis=None):
+ """
+ return a ndarray of the minimum argument indexer
+
+ See also
+ --------
+ numpy.ndarray.argmin
+ """
+ return nanops.nanargmin(self.values)
+
+ def hasnans(self):
+ """ return if I have any nans; enables various perf speedups """
+ return com.isnull(self).any()
+
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
@@ -554,10 +578,11 @@ def argmin(self, axis=None):
numpy.ndarray.argmin
"""
- ##### FIXME: need some tests (what do do if all NaT?)
i8 = self.asi8
if self.hasnans:
mask = i8 == tslib.iNaT
+ if mask.all():
+ return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
@@ -596,10 +621,11 @@ def argmax(self, axis=None):
numpy.ndarray.argmax
"""
- #### FIXME: need some tests (what do do if all NaT?)
i8 = self.asi8
if self.hasnans:
mask = i8 == tslib.iNaT
+ if mask.all():
+ return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
diff --git a/pandas/core/index.py b/pandas/core/index.py
index c7b1c60a9ddc4..4f4fe092a3606 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -560,6 +560,7 @@ def _mpl_repr(self):
@property
def is_monotonic(self):
+ """ return if the index has monotonic (only equaly or increasing) values """
return self._engine.is_monotonic
def is_lexsorted_for_tuple(self, tup):
@@ -567,6 +568,7 @@ def is_lexsorted_for_tuple(self, tup):
@cache_readonly(allow_setting=True)
def is_unique(self):
+ """ return if the index has unique values """
return self._engine.is_unique
def is_integer(self):
@@ -788,6 +790,7 @@ def _get_level_number(self, level):
@cache_readonly
def inferred_type(self):
+ """ return a string of the type inferred from the values """
return lib.infer_dtype(self)
def is_type_compatible(self, typ):
@@ -835,6 +838,13 @@ def __setstate__(self, state):
def __deepcopy__(self, memo={}):
return self.copy(deep=True)
+ def __nonzero__(self):
+ raise ValueError("The truth value of a {0} is ambiguous. "
+ "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
+ .format(self.__class__.__name__))
+
+ __bool__ = __nonzero__
+
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
@@ -2143,6 +2153,11 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, *
def inferred_type(self):
return 'integer'
+ @cache_readonly
+ def hasnans(self):
+ # by definition
+ return False
+
@property
def asi8(self):
# do not cache or you'll create a memory leak
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 90a36228e816a..356984ea88f43 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -261,6 +261,27 @@ def test_nanops(self):
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
+ # argmin/max
+ obj = Index(np.arange(5,dtype='int64'))
+ self.assertEqual(obj.argmin(),0)
+ self.assertEqual(obj.argmax(),4)
+
+ obj = Index([np.nan, 1, np.nan, 2])
+ self.assertEqual(obj.argmin(),1)
+ self.assertEqual(obj.argmax(),3)
+
+ obj = Index([np.nan])
+ self.assertEqual(obj.argmin(),-1)
+ self.assertEqual(obj.argmax(),-1)
+
+ obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011,11,2),pd.NaT])
+ self.assertEqual(obj.argmin(),1)
+ self.assertEqual(obj.argmax(),2)
+
+ obj = Index([pd.NaT])
+ self.assertEqual(obj.argmin(),-1)
+ self.assertEqual(obj.argmax(),-1)
+
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 5affdbe1c99aa..4162413554d49 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -74,6 +74,15 @@ def test_numeric_compat(self):
"cannot perform floor division",
lambda : 1 // idx)
+ def test_boolean_context_compat(self):
+
+ # boolean context compat
+ idx = self.create_index()
+ def f():
+ if idx:
+ pass
+ tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
+
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
@@ -1656,6 +1665,19 @@ def setUp(self):
def create_index(self):
return self.index
+ def test_boolean_context_compat2(self):
+
+ # boolean context compat
+ # GH7897
+ i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
+ i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
+ common = i1.intersection(i2)
+
+ def f():
+ if common:
+ pass
+ tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
+
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 3ada26a7e5779..ce5a2a319a336 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1490,13 +1490,6 @@ def searchsorted(self, key, side='left'):
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
- def argmin(self):
- # hack to workaround argmin failure
- try:
- return self.values.argmin()
- except Exception: # pragma: no cover
- return self.asi8.argmin()
-
@property
def inferred_type(self):
# b/c datetime is represented as microseconds since the epoch, make
| TST: argmin/max impl, tests, and doc-strings
DOC: release notes corrections
API: raise on `__nonzero__` for Indexes, closes #7897
partial close of #7904
| https://api.github.com/repos/pandas-dev/pandas/pulls/7951 | 2014-08-07T13:15:24Z | 2014-08-07T14:27:08Z | 2014-08-07T14:27:08Z | 2014-08-07T14:27:08Z |
COMPAT: raise SettingWithCopy in even more situations when a view is at hand | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 8ec61496c538a..25233d970b3a6 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1481,7 +1481,8 @@ which can take the values ``['raise','warn',None]``, where showing a warning is
'three', 'two', 'one', 'six'],
'c' : np.arange(7)})
- # passed via reference (will stay)
+ # This will show the SettingWithCopyWarning
+ # but the frame values will be set
dfb['c'][dfb.a.str.startswith('o')] = 42
This however is operating on a copy and will not work.
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 148cf85d0b5ab..6a9daa162cbf9 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -114,7 +114,7 @@ API changes
df
df.dtypes
-- ``SettingWithCopy`` raise/warnings (according to the option ``mode.chained_assignment``) will now be issued when setting a value on a sliced mixed-dtype DataFrame using chained-assignment. (:issue:`7845`)
+- ``SettingWithCopy`` raise/warnings (according to the option ``mode.chained_assignment``) will now be issued when setting a value on a sliced mixed-dtype DataFrame using chained-assignment. (:issue:`7845`, :issue:`7950`)
.. code-block:: python
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7b8b609fe0f2a..83110d143e8bc 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1088,8 +1088,14 @@ def _maybe_cache_changed(self, item, value):
@property
def _is_cached(self):
""" boolean : return if I am cached """
+ return getattr(self, '_cacher', None) is not None
+
+ def _get_cacher(self):
+ """ return my cacher or None """
cacher = getattr(self, '_cacher', None)
- return cacher is not None
+ if cacher is not None:
+ cacher = cacher[1]()
+ return cacher
@property
def _is_view(self):
@@ -1154,8 +1160,35 @@ def _set_is_copy(self, ref=None, copy=True):
else:
self.is_copy = None
- def _check_setitem_copy(self, stacklevel=4, t='setting'):
+ def _check_is_chained_assignment_possible(self):
+ """
+ check if we are a view, have a cacher, and are of mixed type
+ if so, then force a setitem_copy check
+
+ should be called just near setting a value
+
+ will return a boolean if it we are a view and are cached, but a single-dtype
+ meaning that the cacher should be updated following setting
"""
+ if self._is_view and self._is_cached:
+ ref = self._get_cacher()
+ if ref is not None and ref._is_mixed_type:
+ self._check_setitem_copy(stacklevel=4, t='referant', force=True)
+ return True
+ elif self.is_copy:
+ self._check_setitem_copy(stacklevel=4, t='referant')
+ return False
+
+ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
+ """
+
+ Parameters
+ ----------
+ stacklevel : integer, default 4
+ the level to show of the stack when the error is output
+ t : string, the type of setting error
+ force : boolean, default False
+ if True, then force showing an error
validate if we are doing a settitem on a chained copy.
@@ -1177,7 +1210,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting'):
"""
- if self.is_copy:
+ if force or self.is_copy:
value = config.get_option('mode.chained_assignment')
if value is None:
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 91008f9b22aed..6ee03eab4bab8 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -472,6 +472,9 @@ def can_do_equal_len():
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
+ # check for chained assignment
+ self.obj._check_is_chained_assignment_possible()
+
# actually do the set
self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3901e19968841..5a490992c478c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -587,61 +587,68 @@ def _get_values(self, indexer):
return self.values[indexer]
def __setitem__(self, key, value):
- try:
- self._set_with_engine(key, value)
- return
- except (SettingWithCopyError):
- raise
- except (KeyError, ValueError):
- values = self.values
- if (com.is_integer(key)
- and not self.index.inferred_type == 'integer'):
- values[key] = value
+ def setitem(key, value):
+ try:
+ self._set_with_engine(key, value)
return
- elif key is Ellipsis:
- self[:] = value
+ except (SettingWithCopyError):
+ raise
+ except (KeyError, ValueError):
+ values = self.values
+ if (com.is_integer(key)
+ and not self.index.inferred_type == 'integer'):
+
+ values[key] = value
+ return
+ elif key is Ellipsis:
+ self[:] = value
+ return
+ elif _is_bool_indexer(key):
+ pass
+ elif com.is_timedelta64_dtype(self.dtype):
+ # reassign a null value to iNaT
+ if isnull(value):
+ value = tslib.iNaT
+
+ try:
+ self.index._engine.set_value(self.values, key, value)
+ return
+ except (TypeError):
+ pass
+
+ self.loc[key] = value
return
- elif _is_bool_indexer(key):
- pass
- elif com.is_timedelta64_dtype(self.dtype):
- # reassign a null value to iNaT
- if isnull(value):
- value = tslib.iNaT
-
- try:
- self.index._engine.set_value(self.values, key, value)
- return
- except (TypeError):
- pass
-
- self.loc[key] = value
- return
- except TypeError as e:
- if isinstance(key, tuple) and not isinstance(self.index,
- MultiIndex):
- raise ValueError("Can only tuple-index with a MultiIndex")
+ except TypeError as e:
+ if isinstance(key, tuple) and not isinstance(self.index,
+ MultiIndex):
+ raise ValueError("Can only tuple-index with a MultiIndex")
- # python 3 type errors should be raised
- if 'unorderable' in str(e): # pragma: no cover
- raise IndexError(key)
+ # python 3 type errors should be raised
+ if 'unorderable' in str(e): # pragma: no cover
+ raise IndexError(key)
- if _is_bool_indexer(key):
- key = _check_bool_indexer(self.index, key)
- try:
- self.where(~key, value, inplace=True)
- return
- except (InvalidIndexError):
- pass
+ if _is_bool_indexer(key):
+ key = _check_bool_indexer(self.index, key)
+ try:
+ self.where(~key, value, inplace=True)
+ return
+ except (InvalidIndexError):
+ pass
+
+ self._set_with(key, value)
- self._set_with(key, value)
+ # do the setitem
+ cacher_needs_updating = self._check_is_chained_assignment_possible()
+ setitem(key, value)
+ if cacher_needs_updating:
+ self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self.values
try:
self.index._engine.set_value(values, key, value)
- self._check_setitem_copy()
return
except KeyError:
values[self.index.get_loc(key)] = value
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index f6f705201bf18..62d729ccdaa88 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -305,13 +305,13 @@ def test_frame_from_json_nones(self):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
- df[2][0] = np.inf
+ df.loc[0,2] = np.inf
unser = read_json(df.to_json())
self.assertTrue(np.isnan(unser[2][0]))
unser = read_json(df.to_json(), dtype=False)
self.assertTrue(np.isnan(unser[2][0]))
- df[2][0] = np.NINF
+ df.loc[0,2] = np.NINF
unser = read_json(df.to_json())
self.assertTrue(np.isnan(unser[2][0]))
unser = read_json(df.to_json(),dtype=False)
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 4f76f72b8eb66..89809b47d76eb 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1278,8 +1278,8 @@ def test_append_with_data_columns(self):
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
- df_new['string'][1:4] = np.nan
- df_new['string'][5:6] = 'bar'
+ df_new.loc[1:4,'string'] = np.nan
+ df_new.loc[5:6,'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', [Term('string=foo')])
@@ -1317,14 +1317,14 @@ def check_col(key,name,size):
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
- df_new.loc[:,'A'].iloc[0] = 1.
- df_new.loc[:,'B'].iloc[0] = -1.
+ df_new.ix[0,'A'] = 1.
+ df_new.ix[0,'B'] = -1.
df_new['string'] = 'foo'
- df_new['string'][1:4] = np.nan
- df_new['string'][5:6] = 'bar'
+ df_new.loc[1:4,'string'] = np.nan
+ df_new.loc[5:6,'string'] = 'bar'
df_new['string2'] = 'foo'
- df_new['string2'][2:5] = np.nan
- df_new['string2'][7:8] = 'bar'
+ df_new.loc[2:5,'string2'] = np.nan
+ df_new.loc[7:8,'string2'] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 5d785df355aa3..27f5ab3c63d81 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -1348,8 +1348,8 @@ def test_to_string(self):
'B': tm.makeStringIndex(200)},
index=lrange(200))
- biggie['A'][:20] = nan
- biggie['B'][:20] = nan
+ biggie.loc[:20,'A'] = nan
+ biggie.loc[:20,'B'] = nan
s = biggie.to_string()
buf = StringIO()
@@ -1597,8 +1597,8 @@ def test_to_html(self):
'B': tm.makeStringIndex(200)},
index=lrange(200))
- biggie['A'][:20] = nan
- biggie['B'][:20] = nan
+ biggie.loc[:20,'A'] = nan
+ biggie.loc[:20,'B'] = nan
s = biggie.to_html()
buf = StringIO()
@@ -1624,8 +1624,8 @@ def test_to_html_filename(self):
'B': tm.makeStringIndex(200)},
index=lrange(200))
- biggie['A'][:20] = nan
- biggie['B'][:20] = nan
+ biggie.loc[:20,'A'] = nan
+ biggie.loc[:20,'B'] = nan
with tm.ensure_clean('test.html') as path:
biggie.to_html(path)
with open(path, 'r') as f:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 6a31f573951cd..7912debd0d409 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -32,7 +32,8 @@
import pandas.core.format as fmt
import pandas.core.datetools as datetools
from pandas import (DataFrame, Index, Series, notnull, isnull,
- MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv)
+ MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv,
+ option_context)
import pandas as pd
from pandas.parser import CParserError
from pandas.util.misc import is_little_endian
@@ -4437,8 +4438,8 @@ def test_repr_mixed_big(self):
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
- biggie['A'][:20] = nan
- biggie['B'][:20] = nan
+ biggie.loc[:20,'A'] = nan
+ biggie.loc[:20,'B'] = nan
foo = repr(biggie)
@@ -4469,13 +4470,13 @@ def test_repr(self):
def test_repr_dimensions(self):
df = DataFrame([[1, 2,], [3, 4]])
- with pd.option_context('display.show_dimensions', True):
+ with option_context('display.show_dimensions', True):
self.assertTrue("2 rows x 2 columns" in repr(df))
- with pd.option_context('display.show_dimensions', False):
+ with option_context('display.show_dimensions', False):
self.assertFalse("2 rows x 2 columns" in repr(df))
- with pd.option_context('display.show_dimensions', 'truncate'):
+ with option_context('display.show_dimensions', 'truncate'):
self.assertFalse("2 rows x 2 columns" in repr(df))
@slow
@@ -6475,7 +6476,7 @@ def test_info_max_cols(self):
df = DataFrame(np.random.randn(10, 5))
for len_, verbose in [(4, None), (4, False), (9, True)]:
# For verbose always ^ setting ^ summarize ^ full output
- with pd.option_context('max_info_columns', 4):
+ with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
@@ -6484,7 +6485,7 @@ def test_info_max_cols(self):
for len_, verbose in [(9, None), (4, False), (9, True)]:
# max_cols no exceeded
- with pd.option_context('max_info_columns', 5):
+ with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
@@ -6492,14 +6493,14 @@ def test_info_max_cols(self):
for len_, max_cols in [(9, 5), (4, 4)]:
# setting truncates
- with pd.option_context('max_info_columns', 4):
+ with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
self.assertEqual(len(res.split('\n')), len_)
# setting wouldn't truncate
- with pd.option_context('max_info_columns', 5):
+ with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
@@ -7411,19 +7412,19 @@ def test_drop(self):
assert_frame_equal(df,expected)
def test_fillna(self):
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
+ self.tsframe.ix[:5,'A'] = nan
+ self.tsframe.ix[-5:,'A'] = nan
zero_filled = self.tsframe.fillna(0)
- self.assertTrue((zero_filled['A'][:5] == 0).all())
+ self.assertTrue((zero_filled.ix[:5,'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
- self.assertTrue(np.isnan(padded['A'][:5]).all())
- self.assertTrue((padded['A'][-5:] == padded['A'][-5]).all())
+ self.assertTrue(np.isnan(padded.ix[:5,'A']).all())
+ self.assertTrue((padded.ix[-5:,'A'] == padded.ix[-5,'A']).all())
# mixed type
- self.mixed_frame['foo'][5:20] = nan
- self.mixed_frame['A'][-10:] = nan
+ self.mixed_frame.ix[5:20,'foo'] = nan
+ self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
@@ -7432,7 +7433,7 @@ def test_fillna(self):
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A','B','D'])
- mf['A'][-10:] = nan
+ mf.ix[-10:,'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype = dict(C = None))
@@ -7604,8 +7605,8 @@ def test_replace_inplace(self):
self.assertRaises(TypeError, self.tsframe.replace, nan)
# mixed type
- self.mixed_frame['foo'][5:20] = nan
- self.mixed_frame['A'][-10:] = nan
+ self.mixed_frame.ix[5:20,'foo'] = nan
+ self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
@@ -8193,8 +8194,8 @@ def test_replace_convert(self):
assert_series_equal(expec, res)
def test_replace_mixed(self):
- self.mixed_frame['foo'][5:20] = nan
- self.mixed_frame['A'][-10:] = nan
+ self.mixed_frame.ix[5:20,'foo'] = nan
+ self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
@@ -9872,7 +9873,7 @@ def test_apply_modify_traceback(self):
'E': np.random.randn(11),
'F': np.random.randn(11)})
- data['C'][4] = np.nan
+ data.loc[4,'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
@@ -11716,11 +11717,11 @@ def test_rename_objects(self):
self.assertNotIn('foo', renamed)
def test_fill_corner(self):
- self.mixed_frame['foo'][5:20] = nan
- self.mixed_frame['A'][-10:] = nan
+ self.mixed_frame.ix[5:20,'foo'] = nan
+ self.mixed_frame.ix[-10:,'A'] = nan
filled = self.mixed_frame.fillna(value=0)
- self.assertTrue((filled['foo'][5:20] == 0).all())
+ self.assertTrue((filled.ix[5:20,'foo'] == 0).all())
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
@@ -12551,15 +12552,18 @@ def test_idxmax(self):
self.assertRaises(ValueError, frame.idxmax, axis=2)
def test_stale_cached_series_bug_473(self):
- Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
- columns=('e', 'f', 'g', 'h'))
- repr(Y)
- Y['e'] = Y['e'].astype('object')
- Y['g']['c'] = np.NaN
- repr(Y)
- result = Y.sum()
- exp = Y['g'].sum()
- self.assertTrue(isnull(Y['g']['c']))
+
+ # this is chained, but ok
+ with option_context('chained_assignment',None):
+ Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
+ columns=('e', 'f', 'g', 'h'))
+ repr(Y)
+ Y['e'] = Y['e'].astype('object')
+ Y['g']['c'] = np.NaN
+ repr(Y)
+ result = Y.sum()
+ exp = Y['g'].sum()
+ self.assertTrue(isnull(Y['g']['c']))
def test_index_namedtuple(self):
from collections import namedtuple
@@ -12712,6 +12716,7 @@ def __nonzero__(self):
self.assertTrue(r1.all())
def test_strange_column_corruption_issue(self):
+
df = DataFrame(index=[0, 1])
df[0] = nan
wasCol = {}
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index c607ccc3572b2..8d80962eb9902 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -734,8 +734,8 @@ def test_interp_basic(self):
result = df.set_index('C').interpolate()
expected = df.set_index('C')
- expected.A.loc[3] = 3
- expected.B.loc[5] = 9
+ expected.loc[3,'A'] = 3
+ expected.loc[5,'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
@@ -810,8 +810,8 @@ def test_interp_alt_scipy(self):
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
- expected['A'].iloc[2] = 3
- expected['A'].iloc[5] = 6
+ expected.ix[2,'A'] = 3
+ expected.ix[5,'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
@@ -819,15 +819,13 @@ def test_interp_alt_scipy(self):
result = df.interpolate(method='krogh')
expectedk = df.copy()
- # expectedk['A'].iloc[2] = 3
- # expectedk['A'].iloc[5] = 6
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
result = df.interpolate(method='pchip')
- expected['A'].iloc[2] = 3
- expected['A'].iloc[5] = 6.125
+ expected.ix[2,'A'] = 3
+ expected.ix[5,'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
@@ -838,9 +836,9 @@ def test_interp_rowwise(self):
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
- expected[1].loc[3] = 5
- expected[2].loc[0] = 3
- expected[3].loc[1] = 3
+ expected.loc[3,1] = 5
+ expected.loc[0,2] = 3
+ expected.loc[1,3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 3def1b74af0c7..6f39750de9d9b 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1664,7 +1664,7 @@ def test_cythonized_aggers(self):
'B': ['A', 'B'] * 6,
'C': np.random.randn(12)}
df = DataFrame(data)
- df['C'][2:10:2] = nan
+ df.loc[2:10:2,'C'] = nan
def _testit(op):
# single column
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index b8f51d0ca9950..3552c75900745 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -10,6 +10,7 @@
import pandas as pd
import pandas.core.common as com
+from pandas import option_context
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Float64Index, Timestamp)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
@@ -2320,10 +2321,12 @@ def test_ix_assign_column_mixed(self):
assert_frame_equal(df,expected)
# ok, but chained assignments are dangerous
- df = pd.DataFrame({'a': lrange(4) })
- df['b'] = np.nan
- df['b'].ix[[1,3]] = [100,-100]
- assert_frame_equal(df,expected)
+ # if we turn off chained assignement it will work
+ with option_context('chained_assignment',None):
+ df = pd.DataFrame({'a': lrange(4) })
+ df['b'] = np.nan
+ df['b'].ix[[1,3]] = [100,-100]
+ assert_frame_equal(df,expected)
def test_ix_get_set_consistency(self):
@@ -3036,22 +3039,26 @@ def test_cache_updating(self):
self.assertEqual(result, 2)
def test_slice_consolidate_invalidate_item_cache(self):
- # #3970
- df = DataFrame({ "aa":lrange(5), "bb":[2.2]*5})
- # Creates a second float block
- df["cc"] = 0.0
+ # this is chained assignment, but will 'work'
+ with option_context('chained_assignment',None):
+
+ # #3970
+ df = DataFrame({ "aa":lrange(5), "bb":[2.2]*5})
+
+ # Creates a second float block
+ df["cc"] = 0.0
- # caches a reference to the 'bb' series
- df["bb"]
+ # caches a reference to the 'bb' series
+ df["bb"]
- # repr machinery triggers consolidation
- repr(df)
+ # repr machinery triggers consolidation
+ repr(df)
- # Assignment to wrong series
- df['bb'].iloc[0] = 0.17
- df._clear_item_cache()
- self.assertAlmostEqual(df['bb'][0], 0.17)
+ # Assignment to wrong series
+ df['bb'].iloc[0] = 0.17
+ df._clear_item_cache()
+ self.assertAlmostEqual(df['bb'][0], 0.17)
def test_setitem_cache_updating(self):
# GH 5424
@@ -3072,6 +3079,7 @@ def test_setitem_cache_updating(self):
# GH 7084
# not updating cache on series setting with slices
+ expected = DataFrame({'A': [600, 600, 600]}, index=date_range('5/7/2014', '5/9/2014'))
out = DataFrame({'A': [0, 0, 0]}, index=date_range('5/7/2014', '5/9/2014'))
df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]})
@@ -3079,9 +3087,18 @@ def test_setitem_cache_updating(self):
six = Timestamp('5/7/2014')
eix = Timestamp('5/9/2014')
for ix, row in df.iterrows():
- out[row['C']][six:eix] = out[row['C']][six:eix] + row['D']
+ out.loc[six:eix,row['C']] = out.loc[six:eix,row['C']] + row['D']
+
+ assert_frame_equal(out, expected)
+ assert_series_equal(out['A'], expected['A'])
+
+ # try via a chain indexing
+ # this actually works
+ out = DataFrame({'A': [0, 0, 0]}, index=date_range('5/7/2014', '5/9/2014'))
+ for ix, row in df.iterrows():
+ v = out[row['C']][six:eix] + row['D']
+ out[row['C']][six:eix] = v
- expected = DataFrame({'A': [600, 600, 600]}, index=date_range('5/7/2014', '5/9/2014'))
assert_frame_equal(out, expected)
assert_series_equal(out['A'], expected['A'])
@@ -3135,17 +3152,19 @@ def test_detect_chained_assignment(self):
expected = DataFrame([[-5,1],[-6,3]],columns=list('AB'))
df = DataFrame(np.arange(4).reshape(2,2),columns=list('AB'),dtype='int64')
self.assertIsNone(df.is_copy)
-
df['A'][0] = -5
df['A'][1] = -6
assert_frame_equal(df, expected)
- expected = DataFrame([[-5,2],[np.nan,3.]],columns=list('AB'))
+ # test with the chaining
df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
self.assertIsNone(df.is_copy)
- df['A'][0] = -5
- df['A'][1] = np.nan
- assert_frame_equal(df, expected)
+ def f():
+ df['A'][0] = -5
+ self.assertRaises(com.SettingWithCopyError, f)
+ def f():
+ df['A'][1] = np.nan
+ self.assertRaises(com.SettingWithCopyError, f)
self.assertIsNone(df['A'].is_copy)
# using a copy (the chain), fails
@@ -3167,22 +3186,18 @@ def f():
indexer = df.a.str.startswith('o')
df[indexer]['c'] = 42
self.assertRaises(com.SettingWithCopyError, f)
- df['c'][df.a.str.startswith('o')] = 42
- assert_frame_equal(df,expected)
expected = DataFrame({'A':[111,'bbb','ccc'],'B':[1,2,3]})
df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
- df['A'][0] = 111
+ def f():
+ df['A'][0] = 111
+ self.assertRaises(com.SettingWithCopyError, f)
def f():
df.loc[0]['A'] = 111
self.assertRaises(com.SettingWithCopyError, f)
- assert_frame_equal(df,expected)
- # warnings
- pd.set_option('chained_assignment','warn')
- df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
- with tm.assert_produces_warning(expected_warning=com.SettingWithCopyWarning):
- df.loc[0]['A'] = 111
+ df.loc[0,'A'] = 111
+ assert_frame_equal(df,expected)
# make sure that is_copy is picked up reconstruction
# GH5475
@@ -3196,7 +3211,6 @@ def f():
# a suprious raise as we are setting the entire column here
# GH5597
- pd.set_option('chained_assignment','raise')
from string import ascii_letters as letters
def random_text(nobs=100):
@@ -3295,6 +3309,28 @@ def f():
df.iloc[0:5]['group'] = 'a'
self.assertRaises(com.SettingWithCopyError, f)
+ # mixed type setting
+ # same dtype & changing dtype
+ df = DataFrame(dict(A=date_range('20130101',periods=5),B=np.random.randn(5),C=np.arange(5,dtype='int64'),D=list('abcde')))
+
+ def f():
+ df.ix[2]['D'] = 'foo'
+ self.assertRaises(com.SettingWithCopyError, f)
+ def f():
+ df.ix[2]['C'] = 'foo'
+ self.assertRaises(com.SettingWithCopyError, f)
+ def f():
+ df['C'][2] = 'foo'
+ self.assertRaises(com.SettingWithCopyError, f)
+
+ def test_detect_chained_assignment_warnings(self):
+
+ # warnings
+ with option_context('chained_assignment','warn'):
+ df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
+ with tm.assert_produces_warning(expected_warning=com.SettingWithCopyWarning):
+ df.loc[0]['A'] = 111
+
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {256: 2321.0, 1: 78.0, 2: 2716.0, 3: 0.0, 4: 369.0, 5: 0.0, 6: 269.0, 7: 0.0, 8: 0.0, 9: 0.0, 10: 3536.0, 11: 0.0, 12: 24.0, 13: 0.0, 14: 931.0, 15: 0.0, 16: 101.0, 17: 78.0, 18: 9643.0, 19: 0.0, 20: 0.0, 21: 0.0, 22: 63761.0, 23: 0.0, 24: 446.0, 25: 0.0, 26: 34773.0, 27: 0.0, 28: 729.0, 29: 78.0, 30: 0.0, 31: 0.0, 32: 3374.0, 33: 0.0, 34: 1391.0, 35: 0.0, 36: 361.0, 37: 0.0, 38: 61808.0, 39: 0.0, 40: 0.0, 41: 0.0, 42: 6677.0, 43: 0.0, 44: 802.0, 45: 0.0, 46: 2691.0, 47: 0.0, 48: 3582.0, 49: 0.0, 50: 734.0, 51: 0.0, 52: 627.0, 53: 70.0, 54: 2584.0, 55: 0.0, 56: 324.0, 57: 0.0, 58: 605.0, 59: 0.0, 60: 0.0, 61: 0.0, 62: 3989.0, 63: 10.0, 64: 42.0, 65: 0.0, 66: 904.0, 67: 0.0, 68: 88.0, 69: 70.0, 70: 8172.0, 71: 0.0, 72: 0.0, 73: 0.0, 74: 64902.0, 75: 0.0, 76: 347.0, 77: 0.0, 78: 36605.0, 79: 0.0, 80: 379.0, 81: 70.0, 82: 0.0, 83: 0.0, 84: 3001.0, 85: 0.0, 86: 1630.0, 87: 7.0, 88: 364.0, 89: 0.0, 90: 67404.0, 91: 9.0, 92: 0.0, 93: 0.0, 94: 7685.0, 95: 0.0, 96: 1017.0, 97: 0.0, 98: 2831.0, 99: 0.0, 100: 2963.0, 101: 0.0, 102: 854.0, 103: 0.0, 104: 0.0, 105: 0.0, 106: 0.0, 107: 0.0, 108: 0.0, 109: 0.0, 110: 0.0, 111: 0.0, 112: 0.0, 113: 0.0, 114: 0.0, 115: 0.0, 116: 0.0, 117: 0.0, 118: 0.0, 119: 0.0, 120: 0.0, 121: 0.0, 122: 0.0, 123: 0.0, 124: 0.0, 125: 0.0, 126: 67744.0, 127: 22.0, 128: 264.0, 129: 0.0, 260: 197.0, 268: 0.0, 265: 0.0, 269: 0.0, 261: 0.0, 266: 1198.0, 267: 0.0, 262: 2629.0, 258: 775.0, 257: 0.0, 263: 0.0, 259: 0.0, 264: 163.0, 250: 10326.0, 251: 0.0, 252: 1228.0, 253: 0.0, 254: 2769.0, 255: 0.0}
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index aa718a11d97cf..4ecb9a1430eba 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3668,6 +3668,8 @@ def test_underlying_data_conversion(self):
tm.assert_frame_equal(df,expected)
# GH 3970
+ # these are chained assignments as well
+ pd.set_option('chained_assignment',None)
df = DataFrame({ "aa":range(5), "bb":[2.2]*5})
df["cc"] = 0.0
ck = [True]*len(df)
@@ -3675,6 +3677,7 @@ def test_underlying_data_conversion(self):
df_tmp = df.iloc[ck]
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
+ pd.set_option('chained_assignment','raise')
# GH 3217
df = DataFrame(dict(a = [1,3], b = [np.nan, 2]))
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index df2f270346e20..919f30ef2a72f 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -930,8 +930,8 @@ def test_left_join_index_preserve_order(self):
expected = left.copy()
expected['v2'] = np.nan
- expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
- expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
+ expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
+ expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
@@ -948,8 +948,8 @@ def test_left_join_index_preserve_order(self):
expected = left.copy()
expected['v2'] = np.nan
- expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
- expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
+ expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
+ expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
@@ -976,8 +976,8 @@ def _test(dtype1,dtype2):
if dtype2.kind == 'i':
dtype2 = np.dtype('float64')
expected['v2'] = np.array(np.nan,dtype=dtype2)
- expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
- expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
+ expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
+ expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
@@ -1683,7 +1683,7 @@ def test_handle_empty_objects(self):
expected = df.ix[:, ['a', 'b', 'c', 'd', 'foo']]
expected['foo'] = expected['foo'].astype('O')
- expected['foo'][:5] = 'bar'
+ expected.loc[0:4,'foo'] = 'bar'
tm.assert_frame_equal(concatted, expected)
| This will detect even more situations where chained assignment is being used.
FYI technically there are 2 situations where the same error is raised: 1) setting on a copy, 2) chained assignment on a view. Both are really similar and result in usually the same issue. An assignment that doesn't appear to assign anything.
This detected a number of tests that were incorrectly using chaining in the pandas test suite as well (fixed below).
| https://api.github.com/repos/pandas-dev/pandas/pulls/7950 | 2014-08-06T21:45:32Z | 2014-08-11T13:06:48Z | 2014-08-11T13:06:48Z | 2014-08-11T13:06:48Z |
BUG: Fix for to_excel +/- infinity | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 4caf22357b1d3..82eb798e04965 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -621,11 +621,11 @@ Bug Fixes
return a non scalar value that appeared valid but wasn't (:issue:`7870`).
- Bug in ``date_range()``/``DatetimeIndex()`` when the timezone was inferred from input dates yet incorrect
times were returned when crossing DST boundaries (:issue:`7835`, :issue:`7901`).
-
-
+- Bug in ``to_excel()`` where a negative sign was being prepended to positive infinity and was absent for negative infinity (:issue`7949`)
- Bug in area plot draws legend with incorrect ``alpha`` when ``stacked=True`` (:issue:`8027`)
-
- ``Period`` and ``PeriodIndex`` addition/subtraction with ``np.timedelta64`` results in incorrect internal representations (:issue:`7740`)
+ times were returned when crossing DST boundaries (:issue:`7835`, :issue:`7901`).
+- Bug in ``to_excel()`` where a negative sign was being prepended to positive infinity and was absent for negative infinity (:issue`7949`)
- ``Holiday`` bug in Holiday with no offset or observance (:issue:`7987`)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 131a2dbbad348..339cd9344f089 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1512,9 +1512,9 @@ def _format_value(self, val):
val = self.na_rep
elif com.is_float(val):
if np.isposinf(val):
- val = '-%s' % self.inf_rep
- elif np.isneginf(val):
val = self.inf_rep
+ elif np.isneginf(val):
+ val = '-%s' % self.inf_rep
elif self.float_format is not None:
val = float(self.float_format % val)
return val
diff --git a/pandas/src/testing.pyx b/pandas/src/testing.pyx
index bff070421c841..4977a80acc936 100644
--- a/pandas/src/testing.pyx
+++ b/pandas/src/testing.pyx
@@ -122,6 +122,10 @@ cpdef assert_almost_equal(a, b, bint check_less_precise=False):
if np.isinf(a):
assert np.isinf(b), "First object is inf, second isn't"
+ if np.isposinf(a):
+ assert np.isposinf(b), "First object is positive inf, second is negative inf"
+ else:
+ assert np.isneginf(b), "First object is negative inf, second is positive inf"
else:
fa, fb = a, b
| BUG: Previously, a negative sign was being prepended for positive infinity, not for negative infinity. (GH6812)
xref #6812
| https://api.github.com/repos/pandas-dev/pandas/pulls/7949 | 2014-08-06T17:10:27Z | 2014-09-04T22:02:54Z | 2014-09-04T22:02:54Z | 2014-09-04T22:03:08Z |
DOC: add intersphinx mapping to numpy | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 4f01fe4f4b278..9acb1252f3746 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -278,7 +278,8 @@
intersphinx_mapping = {
'statsmodels': ('http://statsmodels.sourceforge.net/devel/', None),
'matplotlib': ('http://matplotlib.org/', None),
- 'python': ('http://docs.python.org/', None)
+ 'python': ('http://docs.python.org/', None),
+ 'numpy': ('http://docs.scipy.org/doc/numpy', None)
}
import glob
autosummary_generate = glob.glob("*.rst")
| There are already some references in the API docs to numpy functions, but the links did not yet work up to now.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7946 | 2014-08-06T13:40:03Z | 2014-08-06T14:26:12Z | 2014-08-06T14:26:12Z | 2014-08-06T14:26:12Z |
Update deprecate_kwarg message | diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index 476a643b34ff7..d94897a6685a2 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -48,7 +48,7 @@ def _deprecate_kwarg(func):
def wrapper(*args, **kwargs):
old_arg_value = kwargs.pop(old_arg_name, None)
if old_arg_value is not None:
- msg = "%s is deprecated, use %s instead" % \
+ msg = "the '%s' keyword is deprecated, use '%s' instead" % \
(old_arg_name, new_arg_name)
warnings.warn(msg, FutureWarning)
if kwargs.get(new_arg_name, None) is not None:
| Small adaptation, I think it is a bit clearer this way.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7945 | 2014-08-06T07:38:50Z | 2014-08-06T14:07:00Z | 2014-08-06T14:07:00Z | 2014-08-06T14:07:00Z |
Add warning about HDFStore concurrent reads | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 91ffb5091e927..f4f3f3d1f89c0 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2956,6 +2956,9 @@ Notes & Caveats
need to serialize these operations in a single thread in a single
process. You will corrupt your data otherwise. See the issue
(:`2397`) for more information.
+ - ``HDFStore`` may not support concurrent reads either, depending on the
+ underlying version of ``PyTables`` being used. See issue (:`7838`)
+ for more information.
- If you use locks to manage write access between multiple processes, you
may want to use :py:func:`~os.fsync` before releasing write locks. For
convenience you can use ``store.flush(fsync=True)`` to do this for you.
| Not too much to say; this is a documentation PR.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7944 | 2014-08-05T21:26:15Z | 2015-07-28T21:52:04Z | null | 2015-07-28T21:52:04Z |
API: Coerce None according to the dtype of the container | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index d3024daaa59c9..69afd861df325 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -105,6 +105,34 @@ pandas objects provide intercompatibility between ``NaT`` and ``NaN``.
df2
df2.get_dtype_counts()
+.. _missing.inserting:
+
+Inserting missing data
+----------------------
+
+You can insert missing values by simply assigning to containers. The
+actual missing value used will be chosen based on the dtype.
+
+For example, numeric containers will always use ``NaN`` regardless of
+the missing value type chosen:
+
+.. ipython:: python
+
+ s = Series([1, 2, 3])
+ s.loc[0] = None
+ s
+
+Likewise, datetime containers will always use ``NaT``.
+
+For object containers, pandas will use the value given:
+
+.. ipython:: python
+
+ s = Series(["a", "b", "c"])
+ s.loc[0] = None
+ s.loc[1] = np.nan
+ s
+
Calculations with missing data
------------------------------
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 4bd55b2172013..a0371f84a5649 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -232,6 +232,31 @@ API changes
idx.duplicated()
idx.drop_duplicates()
+- Assigning values to ``None`` now considers the dtype when choosing an 'empty' value (:issue:`7941`).
+
+ Previously, assigning to ``None`` in numeric containers changed the
+ dtype to object (or errored, depending on the call). It now uses
+ NaN:
+
+ .. ipython:: python
+
+ s = Series([1, 2, 3])
+ s.loc[0] = None
+ s
+
+ ``NaT`` is now used similarly for datetime containers.
+
+ For object containers, we now preserve None values (previously these
+ were converted to NaN values).
+
+ .. ipython:: python
+
+ s = Series(["a", "b", "c"])
+ s.loc[0] = None
+ s
+
+ To insert a NaN, you must explicitly use ``np.nan``. See the :ref:`docs <missing.inserting>`.
+
.. _whatsnew_0150.dt:
.dt accessor
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 48fb75f59ac34..36f89a81836ae 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -368,7 +368,7 @@ def _is_null_datelike_scalar(other):
return isnull(other)
return False
-def array_equivalent(left, right):
+def array_equivalent(left, right, strict_nan=False):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs in
corresponding locations. False otherwise. It is assumed that left and right
@@ -379,6 +379,8 @@ def array_equivalent(left, right):
Parameters
----------
left, right : ndarrays
+ strict_nan : bool, default False
+ If True, consider NaN and None to be different.
Returns
-------
@@ -394,11 +396,32 @@ def array_equivalent(left, right):
"""
left, right = np.asarray(left), np.asarray(right)
if left.shape != right.shape: return False
- # NaNs occur only in object arrays, float or complex arrays.
+
+ # Object arrays can contain None, NaN and NaT.
if issubclass(left.dtype.type, np.object_):
- return ((left == right) | (pd.isnull(left) & pd.isnull(right))).all()
+
+ if not strict_nan:
+ # pd.isnull considers NaN and None to be equivalent.
+ return ((left == right) | (pd.isnull(left) & pd.isnull(right))).all()
+
+ for left_value, right_value in zip(left, right):
+ if left_value is tslib.NaT and right_value is not tslib.NaT:
+ return False
+
+ elif isinstance(left_value, float) and np.isnan(left_value):
+ if not isinstance(right_value, float) or not np.isnan(right_value):
+ return False
+ else:
+ if left_value != right_value:
+ return False
+
+ return True
+
+ # NaNs can occur in float and complex arrays.
if issubclass(left.dtype.type, (np.floating, np.complexfloating)):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
+
+ # NaNs cannot occur otherwise.
return np.array_equal(left, right)
def _iterable_not_string(x):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f3b8a54034d56..0d61475905e75 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -494,6 +494,11 @@ def setitem(self, indexer, value):
compatible shape
"""
+ # coerce None values, if appropriate
+ if value is None:
+ if self.is_numeric:
+ value = np.nan
+
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
@@ -587,7 +592,7 @@ def putmask(self, mask, new, align=True, inplace=False):
mask = mask.values.T
# if we are passed a scalar None, convert it here
- if not is_list_like(new) and isnull(new):
+ if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e7bb716de60f3..967f437fc5ca1 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -2,8 +2,10 @@
import nose
import itertools
import warnings
+from datetime import datetime
from pandas.compat import range, lrange, lzip, StringIO, lmap, map
+from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
@@ -14,7 +16,8 @@
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Float64Index, Timestamp)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
- assert_frame_equal, assert_panel_equal)
+ assert_frame_equal, assert_panel_equal,
+ assert_attr_equal)
from pandas import concat
import pandas.util.testing as tm
@@ -3816,6 +3819,139 @@ def test_float_index_non_scalar_assignment(self):
tm.assert_frame_equal(df,df2)
+class TestSeriesNoneCoercion(tm.TestCase):
+ EXPECTED_RESULTS = [
+ # For numeric series, we should coerce to NaN.
+ ([1, 2, 3], [np.nan, 2, 3]),
+ ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
+
+ # For datetime series, we should coerce to NaT.
+ ([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
+
+ # For objects, we should preserve the None value.
+ (["foo", "bar", "baz"], [None, "bar", "baz"]),
+ ]
+
+ def test_coercion_with_setitem(self):
+ for start_data, expected_result in self.EXPECTED_RESULTS:
+ start_series = Series(start_data)
+ start_series[0] = None
+
+ expected_series = Series(expected_result)
+
+ assert_attr_equal('dtype', start_series, expected_series)
+ self.assert_numpy_array_equivalent(
+ start_series.values,
+ expected_series.values, strict_nan=True)
+
+ def test_coercion_with_loc_setitem(self):
+ for start_data, expected_result in self.EXPECTED_RESULTS:
+ start_series = Series(start_data)
+ start_series.loc[0] = None
+
+ expected_series = Series(expected_result)
+
+ assert_attr_equal('dtype', start_series, expected_series)
+ self.assert_numpy_array_equivalent(
+ start_series.values,
+ expected_series.values, strict_nan=True)
+
+ def test_coercion_with_setitem_and_series(self):
+ for start_data, expected_result in self.EXPECTED_RESULTS:
+ start_series = Series(start_data)
+ start_series[start_series == start_series[0]] = None
+
+ expected_series = Series(expected_result)
+
+ assert_attr_equal('dtype', start_series, expected_series)
+ self.assert_numpy_array_equivalent(
+ start_series.values,
+ expected_series.values, strict_nan=True)
+
+ def test_coercion_with_loc_and_series(self):
+ for start_data, expected_result in self.EXPECTED_RESULTS:
+ start_series = Series(start_data)
+ start_series.loc[start_series == start_series[0]] = None
+
+ expected_series = Series(expected_result)
+
+ assert_attr_equal('dtype', start_series, expected_series)
+ self.assert_numpy_array_equivalent(
+ start_series.values,
+ expected_series.values, strict_nan=True)
+
+
+class TestDataframeNoneCoercion(tm.TestCase):
+ EXPECTED_SINGLE_ROW_RESULTS = [
+ # For numeric series, we should coerce to NaN.
+ ([1, 2, 3], [np.nan, 2, 3]),
+ ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
+
+ # For datetime series, we should coerce to NaT.
+ ([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
+
+ # For objects, we should preserve the None value.
+ (["foo", "bar", "baz"], [None, "bar", "baz"]),
+ ]
+
+ def test_coercion_with_loc(self):
+ for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
+ start_dataframe = DataFrame({'foo': start_data})
+ start_dataframe.loc[0, ['foo']] = None
+
+ expected_dataframe = DataFrame({'foo': expected_result})
+
+ assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
+ self.assert_numpy_array_equivalent(
+ start_dataframe['foo'].values,
+ expected_dataframe['foo'].values, strict_nan=True)
+
+ def test_coercion_with_setitem_and_dataframe(self):
+ for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
+ start_dataframe = DataFrame({'foo': start_data})
+ start_dataframe[start_dataframe['foo'] == start_dataframe['foo'][0]] = None
+
+ expected_dataframe = DataFrame({'foo': expected_result})
+
+ assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
+ self.assert_numpy_array_equivalent(
+ start_dataframe['foo'].values,
+ expected_dataframe['foo'].values, strict_nan=True)
+
+ def test_none_coercion_loc_and_dataframe(self):
+ for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
+ start_dataframe = DataFrame({'foo': start_data})
+ start_dataframe.loc[start_dataframe['foo'] == start_dataframe['foo'][0]] = None
+
+ expected_dataframe = DataFrame({'foo': expected_result})
+
+ assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
+ self.assert_numpy_array_equivalent(
+ start_dataframe['foo'].values,
+ expected_dataframe['foo'].values, strict_nan=True)
+
+ def test_none_coercion_mixed_dtypes(self):
+ start_dataframe = DataFrame({
+ 'a': [1, 2, 3],
+ 'b': [1.0, 2.0, 3.0],
+ 'c': [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ 'd': ['a', 'b', 'c']})
+ start_dataframe.iloc[0] = None
+
+ expected_dataframe = DataFrame({
+ 'a': [np.nan, 2, 3],
+ 'b': [np.nan, 2.0, 3.0],
+ 'c': [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ 'd': [None, 'b', 'c']})
+
+ for column in expected_dataframe.columns:
+ assert_attr_equal('dtype', start_dataframe[column], expected_dataframe[column])
+ self.assert_numpy_array_equivalent(
+ start_dataframe[column].values,
+ expected_dataframe[column].values, strict_nan=True)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index a59994970009f..ef9d7d1566ec2 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -105,7 +105,7 @@ def round_trip_pickle(self, obj, path=None):
pd.to_pickle(obj, path)
return pd.read_pickle(path)
- def assert_numpy_array_equivalent(self, np_array, assert_equal):
+ def assert_numpy_array_equivalent(self, np_array, assert_equal, strict_nan=False):
"""Checks that 'np_array' is equivalent to 'assert_equal'
Two numpy arrays are equivalent if the arrays have equal non-NaN elements, and
@@ -115,7 +115,7 @@ def assert_numpy_array_equivalent(self, np_array, assert_equal):
similar to `assert_numpy_array_equal()`. If the expected array includes `np.nan` use this
function.
"""
- if array_equivalent(np_array, assert_equal):
+ if array_equivalent(np_array, assert_equal, strict_nan=strict_nan):
return
raise AssertionError('{0} is not equivalent to {1}.'.format(np_array, assert_equal))
| This pull request fixes #7939 by ensuring that None coercion always obeys the following rules:
1. If the container is numeric, None should be coerced to NaN
2. If the container is for time data, None should be coerced to NaT
3. If the container contains heterogenous data (i.e. objects), None should be preserved.
## Old behaviour examples
### Assigning None in integer containers
None is sometimes preserved, sometimes coerced to NaN, and sometimes causes an error.
``` python
>>> s = Series([1, 2, 3])
>>> s[0] = None # error
>>> s = Series([1, 2, 3])
>>> s.loc[0] = None
>>> s
0 None
1 2
2 3
dtype: object
>>> s = Series([1, 2, 3])
>>> s[s == 1] = None
>>> s
0 NaN
1 2
2 3
dtype: float64
>>> s = Series([1, 2, 3])
>>> s.loc[s == 1] = None
0 None
1 2
2 3
dtype: object
```
Datetimes are similar.
### Assigning None in object containers
None is sometimes preserved, and sometimes coerced.
``` python
>>> s = Series(["a", "b", "c"])
>>> s[0] = None
0 None
1 b
2 c
dtype: object
>>> s = Series(["a", "b", "c"])
>>> s.loc[0] = None
0 None
1 b
2 c
dtype: object
>>> s = Series(["a", "b", "c"])
>>> s[s == 'a'] = None
0 NaN
1 b
2 c
dtype: object
>>> s = Series(["a", "b", "c"])
>>> s.loc[s == 'a'] = None
0 None
1 b
2 c
dtype: object
```
## New behaviour examples
### None is always coerced to NaN in integer containers
``` python
>>> s = Series([1, 2, 3])
>>> s[0] = None
0 NaN
1 2
2 3
dtype: float64
>>> s = Series([1, 2, 3])
>>> s.loc[0] = None # same
>>> s = Series([1, 2, 3])
>>> s[s == 1] = None # same
>>> s = Series([1, 2, 3])
>>> s.loc[s == 1] = None # same
```
### None is always preserved in object containers
``` python
>>> s = Series(["a", "b", "c"])
>>> s[0] = None
0 None
1 b
2 c
dtype: object
>>> s = Series(["a", "b", "c"])
>>> s.loc[0] = None # same
>>> s = Series(["a", "b", "c"])
>>> s[s == 'a'] = None
>>> s = Series(["a", "b", "c"])
>>> s.loc[s == 'a'] = None # same
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/7941 | 2014-08-05T12:59:05Z | 2014-08-19T16:42:50Z | 2014-08-19T16:42:50Z | 2014-09-04T00:23:14Z |
BUG/API: Consistency in .where() when setting with None for both inplace in a Series (GH7939) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 767cc59882233..a7bda314a8264 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -142,6 +142,7 @@ API changes
In [3]: idx.isin(['a', 'c', 'e'], level=1)
Out[3]: array([ True, False, True, True, False, True], dtype=bool)
+- Consistency in ``.where()`` when setting with ``None`` for both ``inplace=True`` and ``inplace=False`` in a Series (:issue:`7939`)
.. _whatsnew_0150.cat:
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f5cb48fd94022..4d85b8a010015 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -967,6 +967,10 @@ def where(self, other, cond, align=True, raise_on_error=True,
values = values.T
is_transposed = True
+ # if we are passed a scalar None, convert it here
+ if not is_list_like(other) and isnull(other):
+ other = self.fill_value
+
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index fcd4b89377176..1917ba58ebcbb 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1597,6 +1597,24 @@ def test_where_inplace(self):
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
+ # GH 7939
+ # treatment of None different in where inplace
+ s1 = Series(['a', 'b', 'c'])
+ result = s1.where(s1 != 'a', None)
+
+ s2 = Series(['a', 'b', 'c'])
+ s2.where(s1 != 'a', None, inplace=True)
+ assert_series_equal(result, s2)
+
+ # this sets None directly, a little bit inconsistent
+ # but no easy way to deal with this in object arrays
+ s3 = Series(['a', 'b', 'c'])
+ s3[0] = None
+ s3[s3 == 'b'] = None
+ expected = Series([None,np.nan,'c'])
+ assert_series_equal(s3, expected)
+
+
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
| closes #7939
| https://api.github.com/repos/pandas-dev/pandas/pulls/7940 | 2014-08-05T12:51:27Z | 2014-08-05T13:18:00Z | null | 2014-08-06T15:44:30Z |
DOC: mention that stack/unstack implicicly sort | diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 768190975db2d..92a35d0276e22 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -151,6 +151,20 @@ unstacks the **last level**:
stacked.unstack(1)
stacked.unstack(0)
+Notice that the ``stack`` and ``unstack`` methods implicitly sort the index
+levels involved. Hence a call to ``stack`` and then ``unstack``, or viceversa,
+will result in a **sorted** copy of the original DataFrame or Series:
+
+.. ipython:: python
+
+ index = MultiIndex.from_product([[2,1], ['a', 'b']])
+ df = DataFrame(randn(4), index=index, columns=['A'])
+ df
+ all(df.unstack().stack() == df.sort())
+
+while the above code will raise a ``TypeError`` if the call to ``sort`` is
+removed.
+
.. _reshaping.unstack_by_name:
If the indexes have names, you can use the level names instead of specifying
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4b8d13ce30355..81e43fb039554 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3278,6 +3278,7 @@ def stack(self, level=-1, dropna=True):
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
+ The level involved will automatically get sorted.
Parameters
----------
@@ -3317,7 +3318,8 @@ def unstack(self, level=-1):
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
- not a MultiIndex)
+ not a MultiIndex).
+ The level involved will automatically get sorted.
Parameters
----------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c0e1e8a13eea3..d1f861b7f7fd7 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1884,7 +1884,8 @@ def reorder_levels(self, order):
def unstack(self, level=-1):
"""
- Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame
+ Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
+ The level involved will automatically get sorted.
Parameters
----------
| It took me a while to discover what was causing this TypeError... I guess this can be useful to other people.
(Yes, I know there is a note somewhere warning users that some operations depend on the index being sorted, but maybe this is a sligthly different case, since the operations do work after all, the problem is they do something more than what is documented)
| https://api.github.com/repos/pandas-dev/pandas/pulls/7937 | 2014-08-05T09:25:06Z | 2014-08-05T17:03:01Z | 2014-08-05T17:03:01Z | 2014-08-05T22:05:03Z |
BUG: rolling/expanding_* treatment of center | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index bb52cf92a6b93..472114f1dea39 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -93,6 +93,39 @@ API changes
Previously the first ``min_periods`` entries of the result were set to ``NaN``.
The new behavior accords with the existing documentation. (:issue:`7884`)
+- :func:`rolling_max`, :func:`rolling_min`, :func:`rolling_sum`, :func:`rolling_mean`, :func:`rolling_median`,
+ :func:`rolling_std`, :func:`rolling_var`, :func:`rolling_skew`, :func:`rolling_kurt`, and :func:`rolling_quantile`,
+ :func:`rolling_cov`, :func:`rolling_corr`, :func:`rolling_corr_pairwise`,
+ :func:`rolling_window`, and :func:`rolling_apply` with ``center=True`` previously would return a result of the same
+ structure as the input ``arg`` with ``NaN``s in the final ``(window-1)/2`` entries.
+ Now the final ``(window-1)/2`` entries of the result are calculated as if the input ``arg`` were followed
+ by ``(window-1)/2`` ``NaN``s. (:issue:`7925`)
+
+ Prior behavior (note final value is ``NaN``):
+
+ .. code-block:: python
+
+ In [7]: rolling_sum(Series(range(5)), window=3, min_periods=0, center=True)
+ Out[7]:
+ 0 1
+ 1 3
+ 2 6
+ 3 9
+ 4 NaN
+ dtype: float64
+
+ New behavior (note final value is ``7 = sum([3, 4, NaN])``):
+
+ .. ipython:: python
+
+ rolling_sum(Series(range(5)), window=3, min_periods=0, center=True)
+
+- Removed ``center`` argument from :func:`expanding_max`, :func:`expanding_min`, :func:`expanding_sum`,
+ :func:`expanding_mean`, :func:`expanding_median`, :func:`expanding_std`, :func:`expanding_var`,
+ :func:`expanding_skew`, :func:`expanding_kurt`, :func:`expanding_quantile`, :func:`expanding_count`,
+ :func:`expanding_cov`, :func:`expanding_corr`, :func:`expanding_corr_pairwise`, and :func:`expanding_apply`,
+ as the results produced when ``center=True`` did not make much sense. (:issue:`7925`)
+
- Bug in passing a ``DatetimeIndex`` with a timezone that was not being retained in DataFrame construction from a dict (:issue:`7822`)
In prior versions this would drop the timezone.
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 44b8bbd0c9078..74545a08d45b6 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -372,7 +372,10 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
y : type of input
"""
arg = _conv_timerule(arg, freq, how)
- calc = lambda x: func(x, window, minp=minp, args=args, kwargs=kwargs,
+ offset = int((window - 1) / 2.) if center else 0
+ additional_nans = np.array([np.NaN] * offset)
+ calc = lambda x: func(np.concatenate((x, additional_nans)) if center else x,
+ window, minp=minp, args=args, kwargs=kwargs,
**kwds)
return_hook, values = _process_data_structure(arg)
# actually calculate the moment. Faster way to do this?
@@ -381,10 +384,10 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
else:
result = calc(values)
- rs = return_hook(result)
if center:
- rs = _center_window(rs, window, axis)
- return rs
+ result = _center_window(result, window, axis)
+
+ return return_hook(result)
def _center_window(rs, window, axis):
@@ -393,20 +396,13 @@ def _center_window(rs, window, axis):
"dimensions")
offset = int((window - 1) / 2.)
- if isinstance(rs, (Series, DataFrame, Panel)):
- rs = rs.shift(-offset, axis=axis)
- else:
- rs_indexer = [slice(None)] * rs.ndim
- rs_indexer[axis] = slice(None, -offset)
-
- lead_indexer = [slice(None)] * rs.ndim
- lead_indexer[axis] = slice(offset, None)
-
- na_indexer = [slice(None)] * rs.ndim
- na_indexer[axis] = slice(-offset, None)
-
- rs[tuple(rs_indexer)] = np.copy(rs[tuple(lead_indexer)])
- rs[tuple(na_indexer)] = np.nan
+ if offset > 0:
+ if isinstance(rs, (Series, DataFrame, Panel)):
+ rs = rs.slice_shift(-offset, axis=axis)
+ else:
+ lead_indexer = [slice(None)] * rs.ndim
+ lead_indexer[axis] = slice(offset, None)
+ rs = np.copy(rs[tuple(lead_indexer)])
return rs
@@ -821,13 +817,16 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None,
arg = _conv_timerule(arg, freq, how)
return_hook, values = _process_data_structure(arg)
- f = lambda x: algos.roll_window(x, window, minp, avg=mean)
+ offset = int((len(window) - 1) / 2.) if center else 0
+ additional_nans = np.array([np.NaN] * offset)
+ f = lambda x: algos.roll_window(np.concatenate((x, additional_nans)) if center else x,
+ window, minp, avg=mean)
result = np.apply_along_axis(f, axis, values)
- rs = return_hook(result)
if center:
- rs = _center_window(rs, len(window), axis)
- return rs
+ result = _center_window(result, len(window), axis)
+
+ return return_hook(result)
def _validate_win_type(win_type, kwargs):
@@ -856,14 +855,14 @@ def _expanding_func(func, desc, check_minp=_use_window):
@Substitution(desc, _unary_arg, _expanding_kw, _type_of_input_retval, "")
@Appender(_doc_template)
@wraps(func)
- def f(arg, min_periods=1, freq=None, center=False, **kwargs):
+ def f(arg, min_periods=1, freq=None, **kwargs):
window = len(arg)
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
- center=center, **kwargs)
+ **kwargs)
return f
@@ -887,7 +886,7 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
check_minp=_require_min_periods(4))
-def expanding_count(arg, freq=None, center=False):
+def expanding_count(arg, freq=None):
"""
Expanding count of number of non-NaN observations.
@@ -897,8 +896,6 @@ def expanding_count(arg, freq=None, center=False):
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
- center : boolean, default False
- Whether the label should correspond with center of window.
Returns
-------
@@ -910,11 +907,10 @@ def expanding_count(arg, freq=None, center=False):
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
- return rolling_count(arg, len(arg), freq=freq, center=center)
+ return rolling_count(arg, len(arg), freq=freq)
-def expanding_quantile(arg, quantile, min_periods=1, freq=None,
- center=False):
+def expanding_quantile(arg, quantile, min_periods=1, freq=None):
"""Expanding quantile.
Parameters
@@ -928,8 +924,6 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None,
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
- center : boolean, default False
- Whether the label should correspond with center of window.
Returns
-------
@@ -942,14 +936,13 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None,
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
return rolling_quantile(arg, len(arg), quantile, min_periods=min_periods,
- freq=freq, center=center)
+ freq=freq)
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
_expanding_kw+_pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
-def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, center=False,
- pairwise=None):
+def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
@@ -960,14 +953,13 @@ def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, center=False,
window = len(arg1) + len(arg2)
return rolling_cov(arg1, arg2, window,
min_periods=min_periods, freq=freq,
- center=center, pairwise=pairwise)
+ pairwise=pairwise)
@Substitution("Expanding sample correlation.", _binary_arg_flex,
_expanding_kw+_pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
-def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False,
- pairwise=None):
+def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
@@ -978,22 +970,21 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False,
window = len(arg1) + len(arg2)
return rolling_corr(arg1, arg2, window,
min_periods=min_periods,
- freq=freq, center=center, pairwise=pairwise)
+ freq=freq, pairwise=pairwise)
@Substitution("Deprecated. Use expanding_corr(..., pairwise=True) instead.\n\n"
"Pairwise expanding sample correlation", _pairwise_arg,
_expanding_kw, _pairwise_retval, "")
@Appender(_doc_template)
-def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None,
- center=False):
+def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None):
import warnings
warnings.warn("expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)", FutureWarning)
return expanding_corr(df1, df2, min_periods=min_periods,
- freq=freq, center=center, pairwise=True)
+ freq=freq, pairwise=True)
-def expanding_apply(arg, func, min_periods=1, freq=None, center=False,
+def expanding_apply(arg, func, min_periods=1, freq=None,
args=(), kwargs={}):
"""Generic expanding function application.
@@ -1008,8 +999,6 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False,
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
- center : boolean, default False
- Whether the label should correspond with center of window.
args : tuple
Passed on to func
kwargs : dict
@@ -1027,4 +1016,4 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False,
"""
window = len(arg)
return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq,
- center=center, args=args, kwargs=kwargs)
+ args=args, kwargs=kwargs)
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index ce7f9c8a225a8..359868262a681 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -406,24 +406,16 @@ def _check_ndarray(self, func, static_comp, window=50,
result = func(arr, 50)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
+ # GH 7925
if has_center:
if has_min_periods:
result = func(arr, 20, min_periods=15, center=True)
- expected = func(arr, 20, min_periods=15)
+ expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20, min_periods=15)[9:]
else:
result = func(arr, 20, center=True)
- expected = func(arr, 20)
+ expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]
- assert_almost_equal(result[1], expected[10])
- if fill_value is None:
- self.assertTrue(np.isnan(result[-9:]).all())
- else:
- self.assertTrue((result[-9:] == 0).all())
- if has_min_periods:
- self.assertTrue(np.isnan(expected[23]))
- self.assertTrue(np.isnan(result[14]))
- self.assertTrue(np.isnan(expected[-5]))
- self.assertTrue(np.isnan(result[-14]))
+ self.assert_numpy_array_equivalent(result, expected)
if test_stable:
result = func(self.arr + 1e9, window)
@@ -488,11 +480,12 @@ def _check_structures(self, func, static_comp,
assert_almost_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp))
+ # GH 7925
if has_center:
if has_min_periods:
minp = 10
- series_xp = func(self.series, 25, min_periods=minp).shift(-12)
- frame_xp = func(self.frame, 25, min_periods=minp).shift(-12)
+ series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.series.index)
+ frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, min_periods=minp,
center=True)
@@ -500,8 +493,8 @@ def _check_structures(self, func, static_comp,
center=True)
else:
- series_xp = func(self.series, 25).shift(-12)
- frame_xp = func(self.frame, 25).shift(-12)
+ series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.series.index)
+ frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, center=True)
frame_rs = func(self.frame, 25, center=True)
| Closes https://github.com/pydata/pandas/issues/7925.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7934 | 2014-08-04T23:31:33Z | 2014-08-19T14:47:20Z | 2014-08-19T14:47:20Z | 2014-09-14T19:17:18Z |
DOC: update docs to show construction of periodindex when needing out-of bounds spans | diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 438e2f79c5ff3..100588e2db40d 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -358,6 +358,8 @@ such as ``numpy.logical_and``.
See the `this old issue <https://github.com/pydata/pandas/issues/2388>`__ for a more
detailed discussion.
+.. _gotchas.timestamp-limits:
+
Timestamp limitations
---------------------
@@ -375,14 +377,7 @@ can be represented using a 64-bit integer is limited to approximately 584 years:
end = Timestamp.max
end
-If you need to represent time series data outside the nanosecond timespan, use
-PeriodIndex:
-
-.. ipython:: python
-
- span = period_range('1215-01-01', '1381-01-01', freq='D')
- span
-
+See :ref:`here <timeseries.oob>` for ways to represent data outside these bound.
Parsing Dates from Text Files
-----------------------------
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 05fd82b2f448d..c672a3d030bb9 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1272,6 +1272,39 @@ the quarter end:
ts.head()
+.. _timeseries.oob:
+
+Representing out-of-bounds spans
+--------------------------------
+
+If you have data that is outside of the ``Timestamp`` bounds, see :ref:`Timestamp limitations <gotchas.timestamp-limits>`,
+then you can use a ``PeriodIndex`` and/or ``Series`` of ``Periods`` to do computations.
+
+.. ipython:: python
+
+ span = period_range('1215-01-01', '1381-01-01', freq='D')
+ span
+
+To convert from a ``int64`` based YYYYMMDD representation.
+
+.. ipython:: python
+
+ s = Series([20121231, 20141130, 99991231])
+ s
+
+ def conv(x):
+ return Period(year = x // 10000, month = x//100 % 100, day = x%100, freq='D')
+
+ s.apply(conv)
+ s.apply(conv)[2]
+
+These can easily be converted to a ``PeriodIndex``
+
+.. ipython:: python
+
+ span = PeriodIndex(s.apply(conv))
+ span
+
.. _timeseries.timezone:
Time Zone Handling
@@ -1355,13 +1388,13 @@ tz-aware data to another time zone:
Be wary of conversions between libraries. For some zones ``pytz`` and ``dateutil`` have different
definitions of the zone. This is more of a problem for unusual timezones than for
- 'standard' zones like ``US/Eastern``.
+ 'standard' zones like ``US/Eastern``.
-.. warning::
+.. warning::
- Be aware that a timezone definition across versions of timezone libraries may not
- be considered equal. This may cause problems when working with stored data that
- is localized using one version and operated on with a different version.
+ Be aware that a timezone definition across versions of timezone libraries may not
+ be considered equal. This may cause problems when working with stored data that
+ is localized using one version and operated on with a different version.
See :ref:`here<io.hdf5-notes>` for how to handle such a situation.
Under the hood, all timestamps are stored in UTC. Scalar values from a
| https://api.github.com/repos/pandas-dev/pandas/pulls/7933 | 2014-08-04T23:23:49Z | 2014-08-04T23:48:44Z | 2014-08-04T23:48:44Z | 2014-08-04T23:48:44Z | |
BUG: Bug in to_datetime when format='%Y%m%d and coerce=True are specified (GH7930) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 767cc59882233..ef2b91d044d86 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -303,7 +303,8 @@ Bug Fixes
- Bug in ``Series.astype("unicode")`` not calling ``unicode`` on the values correctly (:issue:`7758`)
- Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`)
- Bug in ``HDFStore.select_column()`` not preserving UTC timezone info when selecting a DatetimeIndex (:issue:`7777`)
-
+- Bug in ``to_datetime`` when ``format='%Y%m%d'`` and ``coerce=True`` are specified, where previously an object array was returned (rather than
+ a coerced time-series with ``NaT``), (:issue:`7930`)
- Bug in ``DatetimeIndex`` and ``PeriodIndex`` in-place addition and subtraction cause different result from normal one (:issue:`6527`)
- Bug in adding and subtracting ``PeriodIndex`` with ``PeriodIndex`` raise ``TypeError`` (:issue:`7741`)
- Bug in ``combine_first`` with ``PeriodIndex`` data raises ``TypeError`` (:issue:`3367`)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index c54c133dd2afe..6dbf095189d36 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -3934,6 +3934,16 @@ def test_to_datetime_format_YYYYMMDD(self):
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
+ # coercion
+ # GH 7930
+ s = Series([20121231, 20141231, 99991231])
+ result = pd.to_datetime(s,format='%Y%m%d')
+ expected = np.array([ datetime(2012,12,31), datetime(2014,12,31), datetime(9999,12,31) ], dtype=object)
+ self.assert_numpy_array_equal(result, expected)
+
+ result = pd.to_datetime(s,format='%Y%m%d', coerce=True)
+ expected = Series(['20121231','20141231','NaT'],dtype='M8[ns]')
+ assert_series_equal(result, expected)
def test_to_datetime_format_microsecond(self):
val = '01-Apr-2011 00:00:01.978'
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 457a95deb16d9..45bea00ac104f 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -262,7 +262,7 @@ def _convert_listlike(arg, box, format):
# shortcut formatting here
if format == '%Y%m%d':
try:
- result = _attempt_YYYYMMDD(arg)
+ result = _attempt_YYYYMMDD(arg, coerce=coerce)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
@@ -313,14 +313,14 @@ def _convert_listlike(arg, box, format):
class DateParseError(ValueError):
pass
-def _attempt_YYYYMMDD(arg):
+def _attempt_YYYYMMDD(arg, coerce):
""" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is a passed in as an object dtype, but could really be ints/strings with nan-like/or floats (e.g. with nan) """
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
- return lib.try_parse_year_month_day(carg/10000,carg/100 % 100, carg % 100)
+ return tslib.array_to_datetime(lib.try_parse_year_month_day(carg/10000,carg/100 % 100, carg % 100), coerce=coerce)
def calc_with_mask(carg,mask):
result = np.empty(carg.shape, dtype='M8[ns]')
| closes #7930
| https://api.github.com/repos/pandas-dev/pandas/pulls/7931 | 2014-08-04T21:50:55Z | 2014-08-04T22:42:19Z | 2014-08-04T22:42:19Z | 2014-08-04T22:42:19Z |
BUG: define empty product on Series and DataFrame to be 1 | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 024ee68ced303..ecf9f9aca4f89 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -129,6 +129,10 @@ API changes
strings must contain 244 or fewer characters. Attempting to write Stata
dta files with strings longer than 244 characters raises a ``ValueError``. (:issue:`7858`)
+- Empty product computations now have a default value of 1. This means that
+ during resampling, for example, now instead of ``nan``, the value for empty
+ Periods, Timestamps, etc. will be 1 (:issue:`7889`).
+
.. _whatsnew_0150.cat:
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index aa6140383a27a..b0fcd4c9542c2 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -1,4 +1,3 @@
-import sys
import itertools
import functools
@@ -10,7 +9,6 @@
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
-import pandas.core.common as com
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
from pandas.compat import builtins
@@ -49,53 +47,48 @@ def _f(*args, **kwargs):
return _f
-class bottleneck_switch(object):
+def bottleneck_switch(alt=None, zero_value=None, **kwargs):
+ if alt is None:
+ return functools.partial(bottleneck_switch, zero_value=zero_value,
+ **kwargs)
- def __init__(self, zero_value=None, **kwargs):
- self.zero_value = zero_value
- self.kwargs = kwargs
-
- def __call__(self, alt):
- bn_name = alt.__name__
+ bn_name = alt.__name__
+ try:
+ bn_func = getattr(bn, bn_name)
+ except (AttributeError, NameError): # pragma: no cover
+ bn_func = None
+
+ @functools.wraps(alt)
+ def f(values, axis=None, skipna=True, **kwds):
+ for k, v in compat.iteritems(kwargs):
+ kwds.setdefault(k, v)
try:
- bn_func = getattr(bn, bn_name)
- except (AttributeError, NameError): # pragma: no cover
- bn_func = None
-
- @functools.wraps(alt)
- def f(values, axis=None, skipna=True, **kwds):
- if len(self.kwargs) > 0:
- for k, v in compat.iteritems(self.kwargs):
- if k not in kwds:
- kwds[k] = v
- try:
- if self.zero_value is not None and values.size == 0:
- if values.ndim == 1:
- return 0
- else:
- result_shape = (values.shape[:axis] +
- values.shape[axis + 1:])
- result = np.empty(result_shape)
- result.fill(0)
- return result
-
- if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype,
- bn_name):
- result = bn_func(values, axis=axis, **kwds)
-
- # prefer to treat inf/-inf as NA, but must compute the func
- # twice :(
- if _has_infs(result):
- result = alt(values, axis=axis, skipna=skipna, **kwds)
+ if zero_value is not None and values.size == 0:
+ if values.ndim == 1:
+ return zero_value
else:
+ result_shape = (values.shape[:axis] +
+ values.shape[axis + 1:])
+ result = np.empty(result_shape)
+ result.fill(zero_value)
+ return result
+
+ if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype,
+ bn_name):
+ result = bn_func(values, axis=axis, **kwds)
+
+ # prefer to treat inf/-inf as NA, but must compute the func
+ # twice :(
+ if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
- except Exception:
+ else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
+ except (ValueError, TypeError, ZeroDivisionError):
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
- return result
-
- return f
+ return result
+ return f
def _bn_ok_dtype(dt, name):
@@ -121,7 +114,7 @@ def _has_infs(result):
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
- except (TypeError, NotImplementedError) as e:
+ except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
@@ -260,7 +253,7 @@ def nansum(values, axis=None, skipna=True):
@disallow('M8')
-@bottleneck_switch()
+@bottleneck_switch
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max))
@@ -278,7 +271,7 @@ def nanmean(values, axis=None, skipna=True):
@disallow('M8')
-@bottleneck_switch()
+@bottleneck_switch
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
@@ -365,7 +358,7 @@ def nansem(values, axis=None, skipna=True, ddof=1):
return np.sqrt(var)/np.sqrt(count)
-@bottleneck_switch()
+@bottleneck_switch
def nanmin(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna,
fill_value_typ='+inf')
@@ -395,7 +388,7 @@ def nanmin(values, axis=None, skipna=True):
return _maybe_null_out(result, axis, mask)
-@bottleneck_switch()
+@bottleneck_switch
def nanmax(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna,
fill_value_typ='-inf')
@@ -517,6 +510,7 @@ def nankurt(values, axis=None, skipna=True):
@disallow('M8')
+@bottleneck_switch(zero_value=1)
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not _is_any_int_dtype(values):
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index fcd4b89377176..a9b1ebbba618c 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3018,6 +3018,8 @@ def test_isnull_for_inf(self):
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
+ def test_empty_product(self):
+ tm.assert_equal(Series().prod(), 1)
# TimeSeries-specific
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index ff8b6945a23be..c604bf809a171 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -1363,22 +1363,36 @@ def test_aggregate_with_nat(self):
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
- for func in ['min', 'max', 'prod']:
+ for func in ['min', 'max']:
normal_result = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]],
index=[3], columns=['A', 'B', 'C', 'D'])
expected = normal_result.append(pad)
expected = expected.sort_index()
- expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
+ expected.index = date_range(start='2013-01-01', freq='D', periods=5,
+ name='key')
+ assert_frame_equal(expected, dt_result)
+
+ for func in ['prod']:
+ normal_result = getattr(normal_grouped, func)()
+ dt_result = getattr(dt_grouped, func)()
+ pad = DataFrame([[1] * 4],
+ index=[3], columns=['A', 'B', 'C', 'D'])
+ expected = normal_result.append(pad)
+ expected = expected.sort_index()
+ expected.index = date_range(start='2013-01-01', freq='D', periods=5,
+ name='key')
assert_frame_equal(expected, dt_result)
for func in ['count', 'sum']:
normal_result = getattr(normal_grouped, func)()
- pad = DataFrame([[0, 0, 0, 0]], index=[3], columns=['A', 'B', 'C', 'D'])
+ pad = DataFrame([[0, 0, 0, 0]], index=[3], columns=['A', 'B', 'C',
+ 'D'])
expected = normal_result.append(pad)
expected = expected.sort_index()
- expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
+ expected.index = date_range(start='2013-01-01', freq='D', periods=5,
+ name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
@@ -1387,7 +1401,8 @@ def test_aggregate_with_nat(self):
pad = Series([0], index=[3])
expected = normal_result.append(pad)
expected = expected.sort_index()
- expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
+ expected.index = date_range(start='2013-01-01', freq='D', periods=5,
+ name='key')
dt_result = getattr(dt_grouped, func)()
assert_series_equal(expected, dt_result)
| closes #7889
| https://api.github.com/repos/pandas-dev/pandas/pulls/7928 | 2014-08-04T18:02:16Z | 2015-03-02T11:53:04Z | null | 2016-07-26T05:10:56Z |
API/BUG/ENH: ewmvar/cov debiasing factors; add 'adjust' to ewmvar/std/vol/cov/corr; ewm*() min_periods | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index d5dcacf53ec23..b8559eb51ece8 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -413,6 +413,8 @@ columns using ``ix`` indexing:
@savefig rolling_corr_pairwise_ex.png
correls.ix[:, 'A', 'C'].plot()
+.. _stats.moments.expanding:
+
Expanding window moment functions
---------------------------------
A common alternative to rolling statistics is to use an *expanding* window,
@@ -485,60 +487,79 @@ relative impact of an individual data point. As an example, here is the
@savefig expanding_mean_frame.png
expanding_mean(ts).plot(style='k')
+.. _stats.moments.exponentially_weighted:
+
Exponentially weighted moment functions
---------------------------------------
-A related set of functions are exponentially weighted versions of many of the
-above statistics. A number of EW (exponentially weighted) functions are
-provided using the blending method. For example, where :math:`y_t` is the
-result and :math:`x_t` the input, we compute an exponentially weighted moving
-average as
+A related set of functions are exponentially weighted versions of several of
+the above statistics. A number of expanding EW (exponentially weighted)
+functions are provided:
+
+.. csv-table::
+ :header: "Function", "Description"
+ :widths: 20, 80
+
+ ``ewma``, EW moving average
+ ``ewmvar``, EW moving variance
+ ``ewmstd``, EW moving standard deviation
+ ``ewmcorr``, EW moving correlation
+ ``ewmcov``, EW moving covariance
+
+In general, a weighted moving average is calculated as
.. math::
- y_t = (1 - \alpha) y_{t-1} + \alpha x_t
+ y_t = \frac{\sum_{i=0}^t w_i x_{t-i}}{\sum_{i=0}^t w_i},
-One must have :math:`0 < \alpha \leq 1`, but rather than pass :math:`\alpha`
-directly, it's easier to think about either the **span**, **center of mass
-(com)** or **halflife** of an EW moment:
+where :math:`x_t` is the input at :math:`y_t` is the result.
+
+The EW functions support two variants of exponential weights:
+The default, ``adjust=True``, uses the weights :math:`w_i = (1 - \alpha)^i`.
+When ``adjust=False`` is specified, moving averages are calculated as
.. math::
- \alpha =
- \begin{cases}
- \frac{2}{s + 1}, s = \text{span}\\
- \frac{1}{1 + c}, c = \text{center of mass}\\
- 1 - \exp^{\frac{\log 0.5}{h}}, h = \text{half life}
+ y_0 &= x_0 \\
+ y_t &= (1 - \alpha) y_{t-1} + \alpha x_t,
+
+which is equivalent to using weights
+
+.. math::
+
+ w_i = \begin{cases}
+ \alpha (1 - \alpha)^i & \text{if } i < t \\
+ (1 - \alpha)^i & \text{if } i = t.
\end{cases}
.. note::
- the equation above is sometimes written in the form
+ These equations are sometimes written in terms of :math:`\alpha' = 1 - \alpha`, e.g.
+
+ .. math::
- .. math::
+ y_t = \alpha' y_{t-1} + (1 - \alpha') x_t.
- y_t = \alpha' y_{t-1} + (1 - \alpha') x_t
+One must have :math:`0 < \alpha \leq 1`, but rather than pass :math:`\alpha`
+directly, it's easier to think about either the **span**, **center of mass
+(com)** or **halflife** of an EW moment:
- where :math:`\alpha' = 1 - \alpha`.
+.. math::
-You can pass one of the three to these functions but not more. **Span**
+ \alpha =
+ \begin{cases}
+ \frac{2}{s + 1}, & s = \text{span}\\
+ \frac{1}{1 + c}, & c = \text{center of mass}\\
+ 1 - \exp^{\frac{\log 0.5}{h}}, & h = \text{half life}
+ \end{cases}
+
+One must specify precisely one of the three to the EW functions. **Span**
corresponds to what is commonly called a "20-day EW moving average" for
example. **Center of mass** has a more physical interpretation. For example,
**span** = 20 corresponds to **com** = 9.5. **Halflife** is the period of
-time for the exponential weight to reduce to one half. Here is the list of
-functions available:
-
-.. csv-table::
- :header: "Function", "Description"
- :widths: 20, 80
-
- ``ewma``, EW moving average
- ``ewmvar``, EW moving variance
- ``ewmstd``, EW moving standard deviation
- ``ewmcorr``, EW moving correlation
- ``ewmcov``, EW moving covariance
+time for the exponential weight to reduce to one half.
-Here are an example for a univariate time series:
+Here is an example for a univariate time series:
.. ipython:: python
@@ -548,8 +569,45 @@ Here are an example for a univariate time series:
@savefig ewma_ex.png
ewma(ts, span=20).plot(style='k')
-.. note::
+All the EW functions have a ``min_periods`` argument, which has the same
+meaning it does for all the ``expanding_`` and ``rolling_`` functions:
+no output values will be set until at least ``min_periods`` non-null values
+are encountered in the (expanding) window.
+(This is a change from versions prior to 0.15.0, in which the ``min_periods``
+argument affected only the ``min_periods`` consecutive entries starting at the
+first non-null value.)
+
+All the EW functions also have an ``ignore_na`` argument, which deterines how
+intermediate null values affect the calculation of the weights.
+When ``ignore_na=False`` (the default), weights are calculated based on absolute
+positions, so that intermediate null values affect the result.
+When ``ignore_na=True`` (which reproduces the behavior in versions prior to 0.15.0),
+weights are calculated by ignoring intermediate null values.
+For example, assuming ``adjust=True``, if ``ignore_na=False``, the weighted
+average of ``3, NaN, 5`` would be calculated as
+
+.. math::
+
+ \frac{(1-\alpha)^2 \cdot 3 + 1 \cdot 5}{(1-\alpha)^2 + 1}
+
+Whereas if ``ignore_na=True``, the weighted average would be calculated as
+
+.. math::
+
+ \frac{(1-\alpha) \cdot 3 + 1 \cdot 5}{(1-\alpha) + 1}.
+
+The ``ewmvar``, ``ewmstd``, and ``ewmcov`` functions have a ``bias`` argument,
+specifying whether the result should contain biased or unbiased statistics.
+For example, if ``bias=True``, ``ewmvar(x)`` is calculated as
+``ewmvar(x) = ewma(x**2) - ewma(x)**2``;
+whereas if ``bias=False`` (the default), the biased variance statistics
+are scaled by debiasing factors
+
+.. math::
+
+ \frac{\left(\sum_{i=0}^t w_i\right)^2}{\left(\sum_{i=0}^t w_i\right)^2 - \sum_{i=0}^t w_i^2}.
- The EW functions perform a standard adjustment to the initial observations
- whereby if there are fewer observations than called for in the span, those
- observations are reweighted accordingly.
+(For :math:`w_i = 1`, this reduces to the usual :math:`N / (N - 1)` factor,
+with :math:`N = t + 1`.)
+See http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
+for further details.
\ No newline at end of file
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 721d232a1931e..4789ac280b9d8 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -83,25 +83,8 @@ API changes
rolling_min(s, window=10, min_periods=5)
-- :func:`ewma`, :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcorr`, and :func:`ewmcov`
- now have an optional ``ignore_na`` argument.
- When ``ignore_na=False`` (the default), missing values are taken into account in the weights calculation.
- When ``ignore_na=True`` (which reproduces the pre-0.15.0 behavior), missing values are ignored in the weights calculation.
- (:issue:`7543`)
-
- .. ipython:: python
-
- ewma(Series([None, 1., 100.]), com=2.5)
- ewma(Series([1., None, 100.]), com=2.5, ignore_na=True) # pre-0.15.0 behavior
- ewma(Series([1., None, 100.]), com=2.5, ignore_na=False) # default
-
-- :func:`ewma`, :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcorr`, and :func:`ewmcov`
- now set to ``NaN`` the first ``min_periods-1`` entries of the result (for ``min_periods>1``).
- Previously the first ``min_periods`` entries of the result were set to ``NaN``.
- The new behavior accords with the existing documentation. (:issue:`7884`)
-
- :func:`rolling_max`, :func:`rolling_min`, :func:`rolling_sum`, :func:`rolling_mean`, :func:`rolling_median`,
- :func:`rolling_std`, :func:`rolling_var`, :func:`rolling_skew`, :func:`rolling_kurt`, and :func:`rolling_quantile`,
+ :func:`rolling_std`, :func:`rolling_var`, :func:`rolling_skew`, :func:`rolling_kurt`, :func:`rolling_quantile`,
:func:`rolling_cov`, :func:`rolling_corr`, :func:`rolling_corr_pairwise`,
:func:`rolling_window`, and :func:`rolling_apply` with ``center=True`` previously would return a result of the same
structure as the input ``arg`` with ``NaN`` in the final ``(window-1)/2`` entries.
@@ -112,20 +95,19 @@ API changes
.. code-block:: python
- In [7]: rolling_sum(Series(range(5)), window=3, min_periods=0, center=True)
+ In [7]: rolling_sum(Series(range(4)), window=3, min_periods=0, center=True)
Out[7]:
0 1
1 3
2 6
- 3 9
- 4 NaN
+ 3 NaN
dtype: float64
-
- New behavior (note final value is ``7 = sum([3, 4, NaN])``):
+
+ New behavior (note final value is ``5 = sum([2, 3, NaN])``):
.. ipython:: python
- rolling_sum(Series(range(5)), window=3, min_periods=0, center=True)
+ rolling_sum(Series(range(4)), window=3, min_periods=0, center=True)
- Removed ``center`` argument from :func:`expanding_max`, :func:`expanding_min`, :func:`expanding_sum`,
:func:`expanding_mean`, :func:`expanding_median`, :func:`expanding_std`, :func:`expanding_var`,
@@ -133,6 +115,55 @@ API changes
:func:`expanding_cov`, :func:`expanding_corr`, :func:`expanding_corr_pairwise`, and :func:`expanding_apply`,
as the results produced when ``center=True`` did not make much sense. (:issue:`7925`)
+- :func:`ewma`, :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcov`, and :func:`ewmcorr`
+ now interpret ``min_periods`` in the same manner that the ``rolling_*`` and ``expanding_*`` functions do:
+ a given result entry will be ``NaN`` if the (expanding, in this case) window does not contain
+ at least ``min_periods`` values. The previous behavior was to set to ``NaN`` the ``min_periods`` entries
+ starting with the first non- ``NaN`` value. (:issue:`7977`)
+
+ Prior behavior (note values start at index ``2``, which is ``min_periods`` after index ``0``
+ (the index of the first non-empty value)):
+
+ .. ipython:: python
+
+ s = Series([1, None, None, None, 2, 3])
+
+ .. code-block:: python
+
+ In [51]: ewma(s, com=3., min_periods=2)
+ Out[51]:
+ 0 NaN
+ 1 NaN
+ 2 1.000000
+ 3 1.000000
+ 4 1.571429
+ 5 2.189189
+ dtype: float64
+
+ New behavior (note values start at index ``4``, the location of the 2nd (since ``min_periods=2``) non-empty value):
+
+ .. ipython:: python
+
+ ewma(s, com=3., min_periods=2)
+
+- :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcov`, and :func:`ewmcorr`
+ now have an optional ``adjust`` argument, just like :func:`ewma` does,
+ affecting how the weights are calculated.
+ The default value of ``adjust`` is ``True``, which is backwards-compatible.
+ See :ref:`Exponentially weighted moment functions <stats.moments.exponentially_weighted>` for details. (:issue:`7911`)
+
+- :func:`ewma`, :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcov`, and :func:`ewmcorr`
+ now have an optional ``ignore_na`` argument.
+ When ``ignore_na=False`` (the default), missing values are taken into account in the weights calculation.
+ When ``ignore_na=True`` (which reproduces the pre-0.15.0 behavior), missing values are ignored in the weights calculation.
+ (:issue:`7543`)
+
+ .. ipython:: python
+
+ ewma(Series([None, 1., 8.]), com=2.)
+ ewma(Series([1., None, 8.]), com=2., ignore_na=True) # pre-0.15.0 behavior
+ ewma(Series([1., None, 8.]), com=2., ignore_na=False) # new default
+
- Bug in passing a ``DatetimeIndex`` with a timezone that was not being retained in DataFrame construction from a dict (:issue:`7822`)
In prior versions this would drop the timezone.
@@ -580,12 +611,61 @@ Bug Fixes
- Bug in ``DataFrame.plot`` with ``subplots=True`` may draw unnecessary minor xticks and yticks (:issue:`7801`)
- Bug in ``StataReader`` which did not read variable labels in 117 files due to difference between Stata documentation and implementation (:issue:`7816`)
- Bug in ``StataReader`` where strings were always converted to 244 characters-fixed width irrespective of underlying string size (:issue:`7858`)
-- Bug in ``expanding_cov``, ``expanding_corr``, ``rolling_cov``, ``rolling_cov``, ``ewmcov``, and ``ewmcorr``
+
+- Bug in :func:`expanding_cov`, :func:`expanding_corr`, :func:`rolling_cov`, :func:`rolling_cor`, :func:`ewmcov`, and :func:`ewmcorr`
returning results with columns sorted by name and producing an error for non-unique columns;
now handles non-unique columns and returns columns in original order
(except for the case of two DataFrames with ``pairwise=False``, where behavior is unchanged) (:issue:`7542`)
- Bug in :func:`rolling_count` and ``expanding_*`` functions unnecessarily producing error message for zero-length data (:issue:`8056`)
- Bug in :func:`rolling_apply` and :func:`expanding_apply` interpreting ``min_periods=0`` as ``min_periods=1`` (:issue:`8080`)
+- Bug in :func:`expanding_std` and :func:`expanding_var` for a single value producing a confusing error message (:issue:`7900`)
+- Bug in :func:`rolling_std` and :func:`rolling_var` for a single value producing ``0`` rather than ``NaN`` (:issue:`7900`)
+
+- Bug in :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, and :func:`ewmcov`
+ calculation of de-biasing factors when ``bias=False`` (the default).
+ Previously an incorrect constant factor was used, based on ``adjust=True``, ``ignore_na=True``,
+ and an infinite number of observations.
+ Now a different factor is used for each entry, based on the actual weights
+ (analogous to the usual ``N/(N-1)`` factor).
+ In particular, for a single point a value of ``NaN`` is returned when ``bias=False``,
+ whereas previously a value of (approximately) ``0`` was returned.
+
+ For example, consider the following pre-0.15.0 results for ``ewmvar(..., bias=False)``,
+ and the corresponding debiasing factors:
+
+ .. ipython:: python
+
+ s = Series([1., 2., 0., 4.])
+
+ .. code-block:: python
+
+ In [69]: ewmvar(s, com=2., bias=False)
+ Out[69]:
+ 0 -2.775558e-16
+ 1 3.000000e-01
+ 2 9.556787e-01
+ 3 3.585799e+00
+ dtype: float64
+
+ In [70]: ewmvar(s, com=2., bias=False) / ewmvar(s, com=2., bias=True)
+ Out[70]:
+ 0 1.25
+ 1 1.25
+ 2 1.25
+ 3 1.25
+ dtype: float64
+
+ Note that entry ``0`` is approximately 0, and the debiasing factors are a constant 1.25.
+ By comparison, the following 0.15.0 results have a ``NaN`` for entry ``0``,
+ and the debiasing factors are decreasing (towards 1.25):
+
+ .. ipython:: python
+
+ ewmvar(s, com=2., bias=False)
+ ewmvar(s, com=2., bias=False) / ewmvar(s, com=2., bias=True)
+
+ See :ref:`Exponentially weighted moment functions <stats.moments.exponentially_weighted>` for details. (:issue:`7912`)
+
- Bug in ``DataFrame.plot`` and ``Series.plot`` may ignore ``rot`` and ``fontsize`` keywords (:issue:`7844`)
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index c0f0590c22a25..77d8cea4de507 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -977,7 +977,7 @@ def roll_mean(ndarray[double_t] input,
#-------------------------------------------------------------------------------
# Exponentially weighted moving average
-def ewma(ndarray[double_t] input, double_t com, int adjust, int ignore_na):
+def ewma(ndarray[double_t] input, double_t com, int adjust, int ignore_na, int minp):
'''
Compute exponentially-weighted moving average using center-of-mass.
@@ -987,45 +987,146 @@ def ewma(ndarray[double_t] input, double_t com, int adjust, int ignore_na):
com : float64
adjust: int
ignore_na: int
+ minp: int
Returns
-------
y : ndarray
'''
- cdef double cur, prev, neww, oldw, adj
- cdef Py_ssize_t i
cdef Py_ssize_t N = len(input)
-
cdef ndarray[double_t] output = np.empty(N, dtype=float)
-
if N == 0:
return output
+ minp = max(minp, 1)
+
+ cdef double alpha, old_wt_factor, new_wt, weighted_avg, old_wt, cur
+ cdef Py_ssize_t i, nobs
+
alpha = 1. / (1. + com)
old_wt_factor = 1. - alpha
- new_wt = 1.0 if adjust else alpha
+ new_wt = 1. if adjust else alpha
- output[0] = input[0]
- weighted_avg = output[0]
+ weighted_avg = input[0]
+ is_observation = (weighted_avg == weighted_avg)
+ nobs = int(is_observation)
+ output[0] = weighted_avg if (nobs >= minp) else NaN
old_wt = 1.
for i from 1 <= i < N:
cur = input[i]
+ is_observation = (cur == cur)
+ nobs += int(is_observation)
if weighted_avg == weighted_avg:
- if cur == cur:
- old_wt *= old_wt_factor
- weighted_avg = ((old_wt * weighted_avg) + (new_wt * cur)) / (old_wt + new_wt)
- if adjust:
- old_wt += new_wt
- else:
- old_wt = 1.
- elif not ignore_na:
+ if is_observation or (not ignore_na):
old_wt *= old_wt_factor
- else:
+ if is_observation:
+ if weighted_avg != cur: # avoid numerical errors on constant series
+ weighted_avg = ((old_wt * weighted_avg) + (new_wt * cur)) / (old_wt + new_wt)
+ if adjust:
+ old_wt += new_wt
+ else:
+ old_wt = 1.
+ elif is_observation:
weighted_avg = cur
- output[i] = weighted_avg
+ output[i] = weighted_avg if (nobs >= minp) else NaN
+
+ return output
+
+#-------------------------------------------------------------------------------
+# Exponentially weighted moving covariance
+
+def ewmcov(ndarray[double_t] input_x, ndarray[double_t] input_y,
+ double_t com, int adjust, int ignore_na, int minp, int bias):
+ '''
+ Compute exponentially-weighted moving variance using center-of-mass.
+
+ Parameters
+ ----------
+ input_x : ndarray (float64 type)
+ input_y : ndarray (float64 type)
+ com : float64
+ adjust: int
+ ignore_na: int
+ minp: int
+ bias: int
+
+ Returns
+ -------
+ y : ndarray
+ '''
+
+ cdef Py_ssize_t N = len(input_x)
+ if len(input_y) != N:
+ raise ValueError('arrays are of different lengths (%d and %d)' % (N, len(input_y)))
+ cdef ndarray[double_t] output = np.empty(N, dtype=float)
+ if N == 0:
+ return output
+
+ minp = max(minp, 1)
+
+ cdef double alpha, old_wt_factor, new_wt, mean_x, mean_y, cov
+ cdef double sum_wt, sum_wt2, old_wt, cur_x, cur_y, old_mean_x, old_mean_y
+ cdef Py_ssize_t i, nobs
+
+ alpha = 1. / (1. + com)
+ old_wt_factor = 1. - alpha
+ new_wt = 1. if adjust else alpha
+
+ mean_x = input_x[0]
+ mean_y = input_y[0]
+ is_observation = ((mean_x == mean_x) and (mean_y == mean_y))
+ nobs = int(is_observation)
+ if not is_observation:
+ mean_x = NaN
+ mean_y = NaN
+ output[0] = (0. if bias else NaN) if (nobs >= minp) else NaN
+ cov = 0.
+ sum_wt = 1.
+ sum_wt2 = 1.
+ old_wt = 1.
+
+ for i from 1 <= i < N:
+ cur_x = input_x[i]
+ cur_y = input_y[i]
+ is_observation = ((cur_x == cur_x) and (cur_y == cur_y))
+ nobs += int(is_observation)
+ if mean_x == mean_x:
+ if is_observation or (not ignore_na):
+ sum_wt *= old_wt_factor
+ sum_wt2 *= (old_wt_factor * old_wt_factor)
+ old_wt *= old_wt_factor
+ if is_observation:
+ old_mean_x = mean_x
+ old_mean_y = mean_y
+ if mean_x != cur_x: # avoid numerical errors on constant series
+ mean_x = ((old_wt * old_mean_x) + (new_wt * cur_x)) / (old_wt + new_wt)
+ if mean_y != cur_y: # avoid numerical errors on constant series
+ mean_y = ((old_wt * old_mean_y) + (new_wt * cur_y)) / (old_wt + new_wt)
+ cov = ((old_wt * (cov + ((old_mean_x - mean_x) * (old_mean_y - mean_y)))) +
+ (new_wt * ((cur_x - mean_x) * (cur_y - mean_y)))) / (old_wt + new_wt)
+ sum_wt += new_wt
+ sum_wt2 += (new_wt * new_wt)
+ old_wt += new_wt
+ if not adjust:
+ sum_wt /= old_wt
+ sum_wt2 /= (old_wt * old_wt)
+ old_wt = 1.
+ elif is_observation:
+ mean_x = cur_x
+ mean_y = cur_y
+
+ if nobs >= minp:
+ if not bias:
+ numerator = sum_wt * sum_wt
+ denominator = numerator - sum_wt2
+ output[i] = ((numerator / denominator) * cov) if (denominator > 0.) else NaN
+ else:
+ output[i] = cov
+ else:
+ output[i] = NaN
return output
@@ -1180,7 +1281,7 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1):
mean_x += delta / nobs
ssqdm_x += delta * (val - mean_x)
- if nobs >= minp:
+ if (nobs >= minp) and (nobs > ddof):
#pathological case
if nobs == 1:
val = 0
@@ -1224,7 +1325,7 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1):
ssqdm_x = 0
# Variance is unchanged if no observation is added or removed
- if nobs >= minp:
+ if (nobs >= minp) and (nobs > ddof):
#pathological case
if nobs == 1:
val = 0
@@ -1285,17 +1386,14 @@ def roll_skew(ndarray[double_t] input, int win, int minp):
xxx -= prev * prev * prev
nobs -= 1
-
if nobs >= minp:
A = x / nobs
B = xx / nobs - A * A
C = xxx / nobs - A * A * A - 3 * A * B
-
- R = sqrt(B)
-
- if B == 0 or nobs < 3:
+ if B <= 0 or nobs < 3:
output[i] = NaN
else:
+ R = sqrt(B)
output[i] = ((sqrt(nobs * (nobs - 1.)) * C) /
((nobs-2) * R * R * R))
else:
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index a2c7cc30e4798..49de02c23cc47 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -80,8 +80,8 @@
halflife : float, optional
Specify decay in terms of halflife, :math:`\alpha = 1 - exp(log(0.5) / halflife)`
min_periods : int, default 0
- Number of observations in sample to require (only affects
- beginning)
+ Minimum number of observations in window required to have a value
+ (otherwise result is NA).
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
adjust : boolean, default True
@@ -201,7 +201,8 @@ def rolling_count(arg, window, freq=None, center=False, how=None):
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
arg = _conv_timerule(arg, freq, how)
- window = min(window, len(arg))
+ if not center:
+ window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
@@ -211,7 +212,6 @@ def rolling_count(arg, window, freq=None, center=False, how=None):
# putmask here?
result[np.isnan(result)] = 0
-
return return_hook(result)
@@ -462,50 +462,46 @@ def _get_center_of_mass(com, span, halflife):
@Appender(_doc_template)
def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None,
adjust=True, how=None, ignore_na=False):
- com = _get_center_of_mass(com, span, halflife)
arg = _conv_timerule(arg, freq, how)
+ com = _get_center_of_mass(com, span, halflife)
def _ewma(v):
- result = algos.ewma(v, com, int(adjust), int(ignore_na))
- if min_periods > 1:
- first_index = _first_valid_index(v)
- result[first_index: first_index + min_periods - 1] = NaN
- return result
+ return algos.ewma(v, com, int(adjust), int(ignore_na), int(min_periods))
return_hook, values = _process_data_structure(arg)
- output = np.apply_along_axis(_ewma, 0, values)
+ if values.size == 0:
+ output = values.copy()
+ else:
+ output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
-def _first_valid_index(arr):
- # argmax scans from left
- return notnull(arr).argmax() if len(arr) else 0
-
-
@Substitution("Exponentially-weighted moving variance", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
- freq=None, how=None, ignore_na=False):
- com = _get_center_of_mass(com, span, halflife)
+ freq=None, how=None, ignore_na=False, adjust=True):
arg = _conv_timerule(arg, freq, how)
- moment2nd = ewma(arg * arg, com=com, min_periods=min_periods, ignore_na=ignore_na)
- moment1st = ewma(arg, com=com, min_periods=min_periods, ignore_na=ignore_na)
+ com = _get_center_of_mass(com, span, halflife)
- result = moment2nd - moment1st ** 2
- if not bias:
- result *= (1.0 + 2.0 * com) / (2.0 * com)
+ def _ewmvar(v):
+ return algos.ewmcov(v, v, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
- return result
+ return_hook, values = _process_data_structure(arg)
+ if values.size == 0:
+ output = values.copy()
+ else:
+ output = np.apply_along_axis(_ewmvar, 0, values)
+ return return_hook(output)
@Substitution("Exponentially-weighted moving std", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
- ignore_na=False):
+ ignore_na=False, adjust=True):
result = ewmvar(arg, com=com, span=span, halflife=halflife,
- min_periods=min_periods, bias=bias, ignore_na=ignore_na)
+ min_periods=min_periods, bias=bias, adjust=adjust, ignore_na=ignore_na)
return _zsqrt(result)
ewmvol = ewmstd
@@ -515,7 +511,7 @@ def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
- bias=False, freq=None, pairwise=None, how=None, ignore_na=False):
+ bias=False, freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
@@ -525,17 +521,17 @@ def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
+ com = _get_center_of_mass(com, span, halflife)
def _get_ewmcov(X, Y):
- mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, min_periods=min_periods,
- ignore_na=ignore_na)
- return (mean(X * Y) - mean(X) * mean(Y))
+ # X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
+ return_hook, x_values = _process_data_structure(X)
+ return_hook, y_values = _process_data_structure(Y)
+ cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
+ return return_hook(cov)
+
result = _flex_binary_moment(arg1, arg2, _get_ewmcov,
pairwise=bool(pairwise))
- if not bias:
- com = _get_center_of_mass(com, span, halflife)
- result *= (1.0 + 2.0 * com) / (2.0 * com)
-
return result
@@ -543,7 +539,7 @@ def _get_ewmcov(X, Y):
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
- freq=None, pairwise=None, how=None, ignore_na=False):
+ freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
@@ -553,13 +549,18 @@ def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
+ com = _get_center_of_mass(com, span, halflife)
def _get_ewmcorr(X, Y):
- mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, min_periods=min_periods,
- ignore_na=ignore_na)
- var = lambda x: ewmvar(x, com=com, span=span, halflife=halflife, min_periods=min_periods,
- bias=True, ignore_na=ignore_na)
- return (mean(X * Y) - mean(X) * mean(Y)) / _zsqrt(var(X) * var(Y))
+ # X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
+ return_hook, x_values = _process_data_structure(X)
+ return_hook, y_values = _process_data_structure(Y)
+ cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
+ x_var = algos.ewmcov(x_values, x_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
+ y_var = algos.ewmcov(y_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
+ corr = cov / _zsqrt(x_var * y_var)
+ return return_hook(corr)
+
result = _flex_binary_moment(arg1, arg2, _get_ewmcorr,
pairwise=bool(pairwise))
return result
@@ -886,9 +887,9 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
expanding_std = _expanding_func(_ts_std,
'Unbiased expanding standard deviation.',
- check_minp=_require_min_periods(2))
+ check_minp=_require_min_periods(1))
expanding_var = _expanding_func(algos.roll_var, 'Unbiased expanding variance.',
- check_minp=_require_min_periods(2))
+ check_minp=_require_min_periods(1))
expanding_skew = _expanding_func(
algos.roll_skew, 'Unbiased expanding skewness.',
check_minp=_require_min_periods(3))
@@ -961,7 +962,7 @@ def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
- window = len(arg1) + len(arg2)
+ window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_cov(arg1, arg2, window,
min_periods=min_periods, freq=freq,
pairwise=pairwise)
@@ -978,7 +979,7 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
- window = len(arg1) + len(arg2)
+ window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_corr(arg1, arg2, window,
min_periods=min_periods,
freq=freq, pairwise=pairwise)
@@ -1025,6 +1026,6 @@ def expanding_apply(arg, func, min_periods=1, freq=None,
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
- window = len(arg)
+ window = max(len(arg), min_periods) if min_periods else len(arg)
return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq,
args=args, kwargs=kwargs)
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 2c2a19660f266..1d0be4ce48f4f 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -270,8 +270,12 @@ def test_rolling_std(self):
def test_rolling_std_1obs(self):
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1)
- expected = np.zeros(5)
+ expected = np.array([np.nan] * 5)
+ assert_almost_equal(result, expected)
+ result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
+ 1, min_periods=1, ddof=0)
+ expected = np.zeros(5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
@@ -642,10 +646,9 @@ def _check_ew_ndarray(self, func, preserve_nan=False):
self.assertTrue(np.isnan(result.values[:10]).all())
self.assertFalse(np.isnan(result.values[10:]).any())
else:
- # ewmstd, ewmvol, ewmvar *should* require at least two values,
- # but currently require only one, for some reason
- self.assertTrue(np.isnan(result.values[:10]).all())
- self.assertFalse(np.isnan(result.values[10:]).any())
+ # ewmstd, ewmvol, ewmvar (with bias=False) require at least two values
+ self.assertTrue(np.isnan(result.values[:11]).all())
+ self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), 50, min_periods=min_periods)
@@ -656,9 +659,8 @@ def _check_ew_ndarray(self, func, preserve_nan=False):
if func == mom.ewma:
assert_series_equal(result, Series([1.]))
else:
- # ewmstd, ewmvol, ewmvar *should* require at least two values,
- # so should return NaN, but currently require one, so return 0.
- assert_series_equal(result, Series([0.]))
+ # ewmstd, ewmvol, ewmvar with bias=False require at least two values
+ assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = func(np.arange(50), span=10)
@@ -670,6 +672,342 @@ def _check_ew_structures(self, func):
frame_result = func(self.frame, com=10)
self.assertEqual(type(frame_result), DataFrame)
+ def _test_series(self):
+ return [Series(),
+ Series([np.nan]),
+ Series([np.nan, np.nan]),
+ Series([3.]),
+ Series([np.nan, 3.]),
+ Series([3., np.nan]),
+ Series([1., 3.]),
+ Series([2., 2.]),
+ Series([3., 1.]),
+ Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
+ Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
+ Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
+ Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
+ Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
+ Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
+ Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
+ Series(range(10)),
+ Series(range(20, 0, -2)),
+ ]
+
+ def _test_dataframes(self):
+ return [DataFrame(),
+ DataFrame(columns=['a']),
+ DataFrame(columns=['a', 'a']),
+ DataFrame(columns=['a', 'b']),
+ DataFrame(np.arange(10).reshape((5, 2))),
+ DataFrame(np.arange(25).reshape((5, 5))),
+ DataFrame(np.arange(25).reshape((5, 5)), columns=['a', 'b', 99, 'd', 'd']),
+ ] + [DataFrame(s) for s in self._test_series()]
+
+ def _test_data(self):
+ return self._test_series() + self._test_dataframes()
+
+ def _test_moments_consistency(self,
+ min_periods,
+ count, mean, mock_mean, corr,
+ var_unbiased=None, std_unbiased=None, cov_unbiased=None,
+ var_biased=None, std_biased=None, cov_biased=None,
+ var_debiasing_factors=None):
+
+ def _non_null_values(x):
+ return set([v for v in x.values.reshape(x.values.size) if notnull(v)])
+
+ for x in self._test_data():
+ assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
+ is_constant = (len(_non_null_values(x)) == 1)
+ count_x = count(x)
+ mean_x = mean(x)
+
+ if mock_mean:
+ # check that mean equals mock_mean
+ expected = mock_mean(x)
+ assert_equal(mean_x, expected)
+
+ # check that correlation of a series with itself is either 1 or NaN
+ corr_x_x = corr(x, x)
+ # self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) # restore once rolling_cov(x, x) is identically equal to var(x)
+
+ if is_constant:
+ # check mean of constant series
+ expected = x * np.nan
+ expected[count_x >= max(min_periods, 1)] = x.max().max()
+ assert_equal(mean_x, expected)
+
+ # check correlation of constant series with itself is NaN
+ expected[:] = np.nan
+ assert_equal(corr_x_x, expected)
+
+ if var_unbiased and var_biased and var_debiasing_factors:
+ # check variance debiasing factors
+ var_unbiased_x = var_unbiased(x)
+ var_biased_x = var_biased(x)
+ var_debiasing_factors_x = var_debiasing_factors(x)
+ assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
+
+ for (std, var, cov) in [(std_biased, var_biased, cov_biased),
+ (std_unbiased, var_unbiased, cov_unbiased)]:
+
+ # check that var(x), std(x), and cov(x) are all >= 0
+ var_x = var(x)
+ std_x = std(x)
+ self.assertFalse((var_x < 0).any().any())
+ self.assertFalse((std_x < 0).any().any())
+ if cov:
+ cov_x_x = cov(x, x)
+ self.assertFalse((cov_x_x < 0).any().any())
+
+ # check that var(x) == cov(x, x)
+ assert_equal(var_x, cov_x_x)
+
+ # check that var(x) == std(x)^2
+ assert_equal(var_x, std_x * std_x)
+
+ if var is var_biased:
+ # check that biased var(x) == mean(x^2) - mean(x)^2
+ mean_x2 = mean(x * x)
+ assert_equal(var_x, mean_x2 - (mean_x * mean_x))
+
+ if is_constant:
+ # check that variance of constant series is identically 0
+ self.assertFalse((var_x > 0).any().any())
+ expected = x * np.nan
+ expected[count_x >= max(min_periods, 1)] = 0.
+ if var is var_unbiased:
+ expected[count_x < 2] = np.nan
+ assert_equal(var_x, expected)
+
+ if isinstance(x, Series):
+ for y in self._test_data():
+ if not x.isnull().equals(y.isnull()):
+ # can only easily test two Series with similar structure
+ continue
+
+ # check that cor(x, y) is symmetric
+ corr_x_y = corr(x, y)
+ corr_y_x = corr(y, x)
+ assert_equal(corr_x_y, corr_y_x)
+
+ if cov:
+ # check that cov(x, y) is symmetric
+ cov_x_y = cov(x, y)
+ cov_y_x = cov(y, x)
+ assert_equal(cov_x_y, cov_y_x)
+
+ # check that cov(x, y) == (var(x+y) - var(x) - var(y)) / 2
+ var_x_plus_y = var(x + y)
+ var_y = var(y)
+ assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
+
+ # check that corr(x, y) == cov(x, y) / (std(x) * std(y))
+ std_y = std(y)
+ assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
+
+ if cov is cov_biased:
+ # check that biased cov(x, y) == mean(x*y) - mean(x)*mean(y)
+ mean_y = mean(y)
+ mean_x_times_y = mean(x * y)
+ assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
+
+ def test_ewm_consistency(self):
+
+ def _weights(s, com, adjust, ignore_na):
+ if isinstance(s, DataFrame):
+ w = DataFrame(index=s.index, columns=s.columns)
+ for i, _ in enumerate(s.columns):
+ w.iloc[:, i] = _weights(s.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na)
+ return w
+
+ w = Series(np.nan, index=s.index)
+ alpha = 1. / (1. + com)
+ if ignore_na:
+ w[s.notnull()] = _weights(s[s.notnull()], com=com, adjust=adjust, ignore_na=False)
+ elif adjust:
+ for i in range(len(s)):
+ if s.iat[i] == s.iat[i]:
+ w.iat[i] = pow(1. / (1. - alpha), i)
+ else:
+ sum_wts = 0.
+ prev_i = -1
+ for i in range(len(s)):
+ if s.iat[i] == s.iat[i]:
+ if prev_i == -1:
+ w.iat[i] = 1.
+ else:
+ w.iat[i] = alpha * sum_wts / pow(1. - alpha, i - prev_i)
+ sum_wts += w.iat[i]
+ prev_i = i
+ return w
+
+ def _variance_debiasing_factors(s, com, adjust, ignore_na):
+ weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
+ cum_sum = weights.cumsum().fillna(method='ffill')
+ cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
+ numerator = cum_sum * cum_sum
+ denominator = numerator - cum_sum_sq
+ denominator[denominator <= 0.] = np.nan
+ return numerator / denominator
+
+ def _ewma(s, com, min_periods, adjust, ignore_na):
+ weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
+ result = s.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method='ffill')
+ result[mom.expanding_count(s) < (max(min_periods, 1) if min_periods else 1)] = np.nan
+ return result
+
+ com = 3.
+ for min_periods in [0, 1, 2, 3, 4]:
+ for adjust in [True, False]:
+ for ignore_na in [False, True]:
+ # test consistency between different ewm* moments
+ self._test_moments_consistency(
+ min_periods=min_periods,
+ count=mom.expanding_count,
+ mean=lambda x: mom.ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
+ mock_mean=lambda x: _ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
+ corr=lambda x, y: mom.ewmcorr(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
+ var_unbiased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
+ std_unbiased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
+ cov_unbiased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
+ var_biased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
+ std_biased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
+ cov_biased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
+ var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
+
+ def test_expanding_consistency(self):
+ for min_periods in [0, 1, 2, 3, 4]:
+
+ # test consistency between different expanding_* moments
+ self._test_moments_consistency(
+ min_periods=min_periods,
+ count=mom.expanding_count,
+ mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
+ mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
+ corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
+ var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
+ std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
+ cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
+ var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
+ std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
+ cov_biased=None,
+ var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
+ )
+
+ # test consistency between expanding_xyz() and expanding_apply of Series/DataFrame.xyz()
+ for x in self._test_data():
+ assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
+ for (expanding_f, f, require_min_periods) in [
+ (mom.expanding_count, lambda v: Series(v).count(), None),
+ (mom.expanding_max, lambda v: Series(v).max(), None),
+ (mom.expanding_min, lambda v: Series(v).min(), None),
+ (mom.expanding_sum, lambda v: Series(v).sum(), None),
+ (mom.expanding_mean, lambda v: Series(v).mean(), None),
+ (mom.expanding_std, lambda v: Series(v).std(), 1),
+ (mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
+ (mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
+ (mom.expanding_var, lambda v: Series(v).var(), 1),
+ #(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
+ #(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
+ #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
+ # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
+ (mom.expanding_median, lambda v: Series(v).median(), None),
+ ]:
+ if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
+ continue
+
+ if expanding_f is mom.expanding_count:
+ expanding_f_result = expanding_f(x)
+ expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
+ else:
+ if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
+ expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
+ else:
+ expanding_f_result = expanding_f(x, min_periods=min_periods)
+ expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
+ assert_equal(expanding_f_result, expanding_apply_f_result)
+
+ if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
+ # test pairwise=True
+ expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
+ expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
+ for i, _ in enumerate(x.columns):
+ for j, _ in enumerate(x.columns):
+ expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
+ assert_panel_equal(expanding_f_result, expected)
+
+ def test_rolling_consistency(self):
+ for window in [1, 3, 10, 20]:
+ for min_periods in set([0, 1, 2, 3, 4, window]):
+ if min_periods and (min_periods > window):
+ continue
+ for center in [False, True]:
+
+ # test consistency between different rolling_* moments
+ self._test_moments_consistency(
+ min_periods=min_periods,
+ count=lambda x: mom.rolling_count(x, window=window, center=center),
+ mean=lambda x: mom.rolling_mean(x, window=window, min_periods=min_periods, center=center),
+ mock_mean=lambda x: mom.rolling_sum(x, window=window, min_periods=min_periods, center=center).divide(
+ mom.rolling_count(x, window=window, center=center)),
+ corr=lambda x, y: mom.rolling_corr(x, y, window=window, min_periods=min_periods, center=center),
+ var_unbiased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center),
+ std_unbiased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center),
+ cov_unbiased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center),
+ var_biased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center, ddof=0),
+ std_biased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center, ddof=0),
+ cov_biased=None,
+ var_debiasing_factors=lambda x: mom.rolling_count(x, window=window, center=center).divide(
+ (mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
+ )
+
+ # test consistency between rolling_xyz and rolling_apply of Series/DataFrame.xyz
+ for x in self._test_data():
+ assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
+ for (rolling_f, f, require_min_periods) in [
+ (mom.rolling_count, lambda v: Series(v).count(), None),
+ (mom.rolling_max, lambda v: Series(v).max(), None),
+ (mom.rolling_min, lambda v: Series(v).min(), None),
+ (mom.rolling_sum, lambda v: Series(v).sum(), None),
+ (mom.rolling_mean, lambda v: Series(v).mean(), None),
+ (mom.rolling_std, lambda v: Series(v).std(), 1),
+ (mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
+ (mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
+ (mom.rolling_var, lambda v: Series(v).var(), 1),
+ #(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
+ # (mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
+ #(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
+ # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
+ (mom.rolling_median, lambda v: Series(v).median(), None),
+ ]:
+ if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
+ continue
+
+ if rolling_f is mom.rolling_count:
+ rolling_f_result = rolling_f(x, window=window, center=center)
+ rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
+ min_periods=0, center=center)
+ else:
+ if rolling_f in [mom.rolling_cov, mom.rolling_corr]:
+ rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center, pairwise=False)
+ else:
+ rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center)
+ rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
+ min_periods=min_periods, center=center)
+ assert_equal(rolling_f_result, rolling_apply_f_result)
+
+ if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame):
+ # test pairwise=True
+ rolling_f_result = rolling_f(x, x, window=window, min_periods=min_periods,
+ center=center, pairwise=True)
+ expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
+ for i, _ in enumerate(x.columns):
+ for j, _ in enumerate(x.columns):
+ expected.iloc[:, i, j] = rolling_f(x.iloc[:, i], x.iloc[:, j],
+ window=window, min_periods=min_periods, center=center)
+ assert_panel_equal(rolling_f_result, expected)
+
# binary moments
def test_rolling_cov(self):
A = self.series
@@ -786,14 +1124,9 @@ def _check_binary_ew(self, func):
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
- # binary functions (ewmcov, ewmcorr) *should* require at least two values
- if (func == mom.ewmcov) and (min_periods <= 1):
- # currenty ewmcov requires only one value, for some reason.
- self.assertTrue(np.isnan(result.values[:10]).all())
- self.assertFalse(np.isnan(result.values[10:]).any())
- else:
- self.assertTrue(np.isnan(result.values[:11]).all())
- self.assertFalse(np.isnan(result.values[11:]).any())
+ # binary functions (ewmcov, ewmcorr) with bias=False require at least two values
+ self.assertTrue(np.isnan(result.values[:11]).all())
+ self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
@@ -801,11 +1134,7 @@ def _check_binary_ew(self, func):
# check series of length 1
result = func(Series([1.]), Series([1.]), 50, min_periods=min_periods)
- if (func == mom.ewmcov) and (min_periods <= 1):
- # currenty ewmcov requires only one value, for some reason.
- assert_series_equal(result, Series([0.]))
- else:
- assert_series_equal(result, Series([np.NaN]))
+ assert_series_equal(result, Series([np.NaN]))
self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
| Closes https://github.com/pydata/pandas/issues/7911.
Closes https://github.com/pydata/pandas/issues/7912.
Closes https://github.com/pydata/pandas/issues/7977.
Closes https://github.com/pydata/pandas/issues/7900.
These are all fixed simultaneously in order to enable consistency testing across the various functions.
Enhancement -- add `adjust` to `ewm*()` (https://github.com/pydata/pandas/issues/7911):
This code adds `adjust` arguments to `ewmvar`, `ewmstd`, `ewmvol`, `ewmcov`, and `ewmcorr` (`ewma` already had it). If ok, I'd like to reorder the parameters to be more logical and consistent, but for now I just added `adjust=True` to the end of the arguments list.
Bug fix -- `ewmvar/std/cov` debiasing factors (https://github.com/pydata/pandas/issues/7912):
This code corrects the bias correction factor for exponentially weighted (co)variance calculations. The prior factor was the asymptotic value for an infinite number of observations.
As a result of this change, `ewmvar/cov` will now return `NaN` for the (co)variance of a single value when `bias=False`, and `0` when `bias=True`. Previously they always returned `0` for a single value. See also https://github.com/pydata/pandas/issues/7900.
API -- meaning of `min_periods` (https://github.com/pydata/pandas/issues/7977):
The meaning of the `min_periods` argument to the `ewm*()` functions is now consistent with that of the `expanding_*()` functions, i.e. an entry will be `NaN` if the count of non-`NaN` input values through that point is `<min_periods`. Note that in view of this change, https://github.com/pydata/pandas/issues/7884 / https://github.com/pydata/pandas/pull/7898 is no longer relevant.
Bug fix -- `rolling/expanding_var/std` for a single value (https://github.com/pydata/pandas/issues/7900)
| https://api.github.com/repos/pandas-dev/pandas/pulls/7926 | 2014-08-04T16:34:07Z | 2014-09-10T00:02:26Z | 2014-09-10T00:02:26Z | 2014-09-10T00:11:00Z |
BUG/DOC: Categorical fixes (GH7918) | diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index d8b8168e05d8b..c08351eb87a79 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -509,35 +509,7 @@ The same applies to ``df.append(df)``.
Getting Data In/Out
-------------------
-Writing data (`Series`, `Frames`) to a HDF store and reading it in entirety works. Querying the HDF
-store does not yet work.
-
-.. ipython:: python
- :suppress:
-
- hdf_file = "test.h5"
-
-.. ipython:: python
-
- hdf_file = "test.h5"
- s = pd.Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c'], levels=['a','b','c','d']))
- df = pd.DataFrame({"s":s, "vals":[1,2,3,4,5,6]})
- df.to_hdf(hdf_file, "frame")
- df2 = pd.read_hdf(hdf_file, "frame")
- df2
- try:
- pd.read_hdf(hdf_file, "frame", where = ['index>2'])
- except TypeError as e:
- print("TypeError: " + str(e))
-
-.. ipython:: python
- :suppress:
-
- try:
- os.remove(hdf_file)
- except:
- pass
-
+Writing data (`Series`, `Frames`) to a HDF store that contains a ``category`` dtype will currently raise ``NotImplementedError``.
Writing to a CSV file will convert the data, effectively removing any information about the
`Categorical` (levels and ordering). So if you read back the CSV file you have to convert the
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 024ee68ced303..58d43ab40e610 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -137,7 +137,7 @@ Categoricals in Series/DataFrame
:class:`~pandas.Categorical` can now be included in `Series` and `DataFrames` and gained new
methods to manipulate. Thanks to Jan Schultz for much of this API/implementation. (:issue:`3943`, :issue:`5313`, :issue:`5314`,
-:issue:`7444`, :issue:`7839`, :issue:`7848`, :issue:`7864`).
+:issue:`7444`, :issue:`7839`, :issue:`7848`, :issue:`7864`, :issue:`7914`).
For full docs, see the :ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`.
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 23ba06938825d..f5cb48fd94022 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -82,6 +82,11 @@ def _consolidate_key(self):
def _is_single_block(self):
return self.ndim == 1
+ @property
+ def is_view(self):
+ """ return a boolean if I am possibly a view """
+ return self.values.base is not None
+
@property
def is_datelike(self):
""" return True if I am a non-datelike """
@@ -1558,6 +1563,11 @@ def __init__(self, values, placement,
fastpath=True, placement=placement,
**kwargs)
+ @property
+ def is_view(self):
+ """ I am never a view """
+ return False
+
def to_dense(self):
return self.values.to_dense().view()
@@ -2522,7 +2532,7 @@ def is_datelike_mixed_type(self):
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
- return self.blocks[0].values.base is not None
+ return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c130ed4fc52ba..b95c1ed0b77e9 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1782,7 +1782,7 @@ def set_atom(self, block, block_items, existing_col, min_itemsize,
"[unicode] is not implemented as a table column")
elif dtype == 'category':
- raise NotImplementedError
+ raise NotImplementedError("cannot store a category dtype")
# this is basically a catchall; if say a datetime64 has nans then will
# end up here ###
@@ -2420,6 +2420,9 @@ def write_array(self, key, value, items=None):
empty_array = self._is_empty_array(value.shape)
transposed = False
+ if com.is_categorical_dtype(value):
+ raise NotImplementedError("cannot store a category dtype")
+
if not empty_array:
value = value.T
transposed = True
@@ -3451,10 +3454,10 @@ def read_column(self, column, where=None, start=None, stop=None, **kwargs):
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
- return Series(_set_tz(a.convert(c[start:stop],
+ return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding
- ).take_data(),
+ ).take_data(),
a.tz, True))
raise KeyError("column [%s] not found in the table" % column)
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index f08f7a7f16841..024415409cdca 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -4318,7 +4318,7 @@ def test_tseries_select_index_column(self):
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
-
+
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
@@ -4327,7 +4327,7 @@ def test_tseries_select_index_column(self):
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
-
+
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
@@ -4398,13 +4398,15 @@ def test_categorical(self):
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], levels=['a','b','c','d']))
- self.assertRaises(NotImplementedError, store.append, 's', s, format='table')
+ self.assertRaises(NotImplementedError, store.put, 's_fixed', s, format='fixed')
+ self.assertRaises(NotImplementedError, store.append, 's_table', s, format='table')
#store.append('s', s, format='table')
#result = store.select('s')
#tm.assert_series_equal(s, result)
df = DataFrame({"s":s, "vals":[1,2,3,4,5,6]})
- self.assertRaises(NotImplementedError, store.append, 'df', df, format='table')
+ self.assertRaises(NotImplementedError, store.put, 'df_fixed', df, format='fixed')
+ self.assertRaises(NotImplementedError, store.append, 'df_table', df, format='table')
#store.append('df', df, format='table')
#result = store.select('df')
#tm.assert_frame_equal(df, df2)
@@ -4413,17 +4415,17 @@ def test_categorical(self):
# FIXME: TypeError: cannot pass a where specification when reading from a Fixed format store. this store must be selected in its entirety
#result = store.select('df', where = ['index>2'])
#tm.assert_frame_equal(df[df.index>2],result)
-
+
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
-
+
with ensure_clean_path(self.path) as path:
self.assertRaises(ValueError, df.to_hdf, path, 'df', format='fixed')
-
+
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
-
+
def _test_sort(obj):
if isinstance(obj, DataFrame):
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 642912805d06d..421e05f5a3bc7 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1185,6 +1185,30 @@ def test_slicing_and_getting_ops(self):
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
+ def test_slicing_doc_examples(self):
+
+ #GH 7918
+ cats = Categorical(["a","b","b","b","c","c","c"], levels=["a","b","c"])
+ idx = Index(["h","i","j","k","l","m","n",])
+ values= [1,2,2,2,3,4,5]
+ df = DataFrame({"cats":cats,"values":values}, index=idx)
+
+ result = df.iloc[2:4,:]
+ expected = DataFrame({"cats":Categorical(['b','b'],levels=['a','b','c']),"values":[2,2]}, index=['j','k'])
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[2:4,:].dtypes
+ expected = Series(['category','int64'],['cats','values'])
+ tm.assert_series_equal(result, expected)
+
+ result = df.loc["h":"j","cats"]
+ expected = Series(Categorical(['a','b','b'],levels=['a','b','c']),index=['h','i','j'])
+ tm.assert_series_equal(result, expected)
+
+ result = df.ix["h":"j",0:1]
+ expected = DataFrame({'cats' : Series(Categorical(['a','b','b'],levels=['a','b','c']),index=['h','i','j']) })
+ tm.assert_frame_equal(result, expected)
+
def test_assigning_ops(self):
# systematically test the assigning operations:
| Categoricals now raise NotImplementedError when writing to HDFStore with a Fixed type store
Slicing bug with a single-dtyped category and a possible view
closes #7918
| https://api.github.com/repos/pandas-dev/pandas/pulls/7924 | 2014-08-04T15:01:57Z | 2014-08-04T20:29:34Z | 2014-08-04T20:29:34Z | 2014-08-04T20:29:34Z |
BUG: Fix Grouper with multi-level index and frequency (GH7885) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index aa6d1dff2c547..f73c080b6e71d 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -480,7 +480,7 @@ Enhancements
-
+- Bug in ``DataFrame.groupby`` where ``Grouper`` does not recognize level when frequency is specified (:issue:`7885`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 41ff6a6964841..afebdb306c987 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -248,7 +248,7 @@ def _set_grouper(self, obj, sort=False):
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
- ax = Index(obj[key],name=key)
+ ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
@@ -258,18 +258,12 @@ def _set_grouper(self, obj, sort=False):
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
-
- if isinstance(level, compat.string_types):
- if obj.index.name != level:
- raise ValueError('level name %s is not the name of the '
- 'index' % level)
- elif level > 0:
- raise ValueError('level > 0 only valid with MultiIndex')
- ax = Index(ax.get_level_values(level), name=level)
+ level = ax._get_level_number(level)
+ ax = Index(ax.get_level_values(level), name=ax.names[level])
else:
- if not (level == 0 or level == ax.name):
- raise ValueError("The grouper level {0} is not valid".format(level))
+ if level not in (0, ax.name):
+ raise ValueError("The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 84aaed8194013..b44b948c9702c 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -351,6 +351,24 @@ def test_grouper_index_types(self):
df.index = list(reversed(df.index.tolist()))
df.groupby(list('abcde')).apply(lambda x: x)
+ def test_grouper_multilevel_freq(self):
+ # GH 7885
+ from datetime import date, timedelta
+ d0 = date.today() - timedelta(days=14)
+ dates = date_range(d0, date.today())
+ date_index = pd.MultiIndex.from_product([dates, dates], names=['foo', 'bar'])
+ df = pd.DataFrame(np.random.randint(0, 100, 225), index=date_index)
+ # Check string level
+ expected = df.reset_index().groupby([pd.Grouper(key='foo', freq='W'),
+ pd.Grouper(key='bar', freq='W')]).sum()
+ result = df.groupby([pd.Grouper(level='foo', freq='W'),
+ pd.Grouper(level='bar', freq='W')]).sum()
+ assert_frame_equal(result, expected)
+ # Check integer level
+ result = df.groupby([pd.Grouper(level=0, freq='W'),
+ pd.Grouper(level=1, freq='W')]).sum()
+ assert_frame_equal(result, expected)
+
def test_grouper_iter(self):
self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo'])
| close #7885
| https://api.github.com/repos/pandas-dev/pandas/pulls/7923 | 2014-08-04T15:00:30Z | 2014-09-07T14:30:53Z | 2014-09-07T14:30:53Z | 2014-09-08T04:26:23Z |
REGR: Regression in multi-index indexing with a non-scalar type object (GH7914) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 109ed8b286c22..315bd34de8815 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -296,7 +296,7 @@ Bug Fixes
- Bug in adding and subtracting ``PeriodIndex`` with ``PeriodIndex`` raise ``TypeError`` (:issue:`7741`)
- Bug in ``combine_first`` with ``PeriodIndex`` data raises ``TypeError`` (:issue:`3367`)
- Bug in multi-index slicing with missing indexers (:issue:`7866`)
-
+- Regression in multi-index indexing with a non-scalar type object (:issue:`7914`)
- Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 367a283958051..b02fe523df998 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -838,7 +838,7 @@ def _getitem_nested_tuple(self, tup):
axis += 1
# if we have a scalar, we are done
- if np.isscalar(obj):
+ if np.isscalar(obj) or not hasattr(obj,'ndim'):
break
# has the dim of the obj changed?
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 234056e553ec3..b8f51d0ca9950 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1974,6 +1974,15 @@ def f():
result = s.loc[idx[:,['foo','bah']]]
assert_series_equal(result,expected)
+ # regression from < 0.14.0
+ # GH 7914
+ df = DataFrame([[np.mean, np.median],['mean','median']],
+ columns=MultiIndex.from_tuples([('functs','mean'),
+ ('functs','median')]),
+ index=['function', 'name'])
+ result = df.loc['function',('functs','mean')]
+ self.assertEqual(result,np.mean)
+
def test_setitem_dtype_upcast(self):
# GH3216
| closes #7914
| https://api.github.com/repos/pandas-dev/pandas/pulls/7921 | 2014-08-04T13:40:12Z | 2014-08-04T15:17:33Z | 2014-08-04T15:17:33Z | 2014-08-04T15:17:33Z |
DOC: Fix release note for GH7798 | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 109ed8b286c22..c3311be66e310 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -233,8 +233,7 @@ Enhancements
-- Bug in ``tslib.tz_convert`` and ``tslib.tz_convert_single`` may return different results (:issue:`7798`)
-- Bug in ``DatetimeIndex.intersection`` of non-overlapping timestamps with tz raises ``IndexError`` (:issue:`7880`)
+
@@ -353,7 +352,8 @@ Bug Fixes
-
+- Bug in ``tslib.tz_convert`` and ``tslib.tz_convert_single`` may return different results (:issue:`7798`)
+- Bug in ``DatetimeIndex.intersection`` of non-overlapping timestamps with tz raises ``IndexError`` (:issue:`7880`)
| #7798 was added to incorrect section.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7920 | 2014-08-04T13:16:01Z | 2014-08-04T14:10:46Z | 2014-08-04T14:10:46Z | 2014-08-07T22:13:28Z |
DOC: small fixes categorical docs | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 32c0a78e394c5..93933140ab11c 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1574,8 +1574,8 @@ dtypes:
'float64': np.arange(4.0, 7.0),
'bool1': [True, False, True],
'bool2': [False, True, False],
- 'dates': pd.date_range('now', periods=3).values}),
- 'category': pd.Categorical(list("ABC))
+ 'dates': pd.date_range('now', periods=3).values,
+ 'category': pd.Categorical(list("ABC"))})
df['tdeltas'] = df.dates.diff()
df['uint64'] = np.arange(3, 6).astype('u8')
df['other_dates'] = pd.date_range('20130101', periods=3).values
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index c758dde16837b..d8b8168e05d8b 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -228,11 +228,11 @@ Appending levels can be done by assigning a levels list longer than the current
Adding levels in other positions can be done with ``.reorder_levels(<levels_including_new>)``.
Removing a level is also possible, but only the last level(s) can be removed by assigning a
-shorter list than current levels. Values which are omitted are replaced by `np.nan`.
+shorter list than current levels. Values which are omitted are replaced by ``np.nan``.
.. ipython:: python
- s.levels = [1,2]
+ s.cat.levels = [1,2]
s
.. note::
@@ -322,7 +322,7 @@ old levels:
.. ipython:: python
s3 = pd.Series(pd.Categorical(["a","b","d"]))
- s3.cat.reorder_levels(["a","b","c",d"])
+ s3.cat.reorder_levels(["a","b","c","d"])
s3
| This fixes some doc build errors introduced by the categorical docs.
There were also some other failures, but I will open an issue for those (don't directly see the solution for them).
| https://api.github.com/repos/pandas-dev/pandas/pulls/7917 | 2014-08-04T07:57:10Z | 2014-08-04T14:11:01Z | 2014-08-04T14:11:01Z | 2014-08-04T14:11:01Z |
WIP: Experimental changes in `rolling_var` related to #7900 | diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 1c1d32e1d2a20..22d6a5ba9af7b 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -1160,75 +1160,68 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1):
"""
Numerically stable implementation using Welford's method.
"""
- cdef double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta
- cdef Py_ssize_t i
+ cdef double val, prev, mean_x = 0, ssqdm_x = 0, delta, rep = NaN
+ cdef Py_ssize_t nobs = 0, nrep = 0, i
cdef Py_ssize_t N = len(input)
cdef ndarray[double_t] output = np.empty(N, dtype=float)
minp = _check_minp(win, minp, N)
- # Check for windows larger than array, addresses #7297
- win = min(win, N)
-
- # Over the first window, observations can only be added, never removed
- for i from 0 <= i < win:
+ for i from 0 <= i < N:
val = input[i]
+ prev = NaN if i < win else input[i - win]
+
+ # First, count the number of observations and consecutive repeats
+ if prev == prev:
+ # prev is not NaN, removing an observation...
+ if nobs == nrep:
+ # ...and removing a repeat
+ nrep -= 1
+ if nrep == 0:
+ rep = NaN
+ nobs -= 1
- # Not NaN
if val == val:
- nobs += 1
- delta = (val - mean_x)
- mean_x += delta / nobs
- ssqdm_x += delta * (val - mean_x)
-
- if nobs >= minp:
- #pathological case
- if nobs == 1:
- val = 0
+ # next is not NaN, adding an observation...
+ if val == prev:
+ # ...and adding a repeat
+ nrep += 1
else:
- val = ssqdm_x / (nobs - ddof)
- if val < 0:
- val = 0
- else:
- val = NaN
-
- output[i] = val
-
- # After the first window, observations can both be added and removed
- for i from win <= i < N:
- val = input[i]
- prev = input[i - win]
+ # ...and resetting repeats
+ nrep = 1
+ rep = val
+ nobs += 1
- if val == val:
+ # Then, compute the new mean and sum of squared differences
+ if nobs == nrep:
+ # All non-NaN values in window are identical...
+ ssqdm_x = 0
+ mean_x = rep if nobs > 0 else 0
+ elif val == val:
+ # Adding one observation...
if prev == prev:
- # Adding one observation and removing another one
+ # ...and removing another
delta = val - prev
prev -= mean_x
mean_x += delta / nobs
val -= mean_x
ssqdm_x += (val + prev) * delta
else:
- # Adding one observation and not removing any
- nobs += 1
+ # ...and not removing any
delta = (val - mean_x)
mean_x += delta / nobs
ssqdm_x += delta * (val - mean_x)
elif prev == prev:
# Adding no new observation, but removing one
- nobs -= 1
- if nobs:
- delta = (prev - mean_x)
- mean_x -= delta / nobs
- ssqdm_x -= delta * (prev - mean_x)
- else:
- mean_x = 0
- ssqdm_x = 0
+ delta = (prev - mean_x)
+ mean_x -= delta / nobs
+ ssqdm_x -= delta * (prev - mean_x)
# Variance is unchanged if no observation is added or removed
+ # Finally, compute and write the rolling variance to the output array
if nobs >= minp:
- #pathological case
- if nobs == 1:
+ if nobs <= ddof:
val = 0
else:
val = ssqdm_x / (nobs - ddof)
| Added logic to `rolling_var` to detect windows where all non-NaN
values are identical.
Need to assess both correctness and performance impact.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7916 | 2014-08-04T05:47:44Z | 2014-09-15T06:24:01Z | null | 2014-09-16T17:31:21Z |
ENH: New `level` argument for DataFrame.tz_localize and DataFrame.tz_convert (GH7846) | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 2520015581cc8..a9266c24df8ee 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -162,6 +162,9 @@ previously results in ``Exception`` or ``TypeError`` (:issue:`7812`)
didx
didx.tz_localize(None)
+- ``DataFrame.tz_localize`` and ``DataFrame.tz_convert`` now accepts an optional ``level`` argument
+ for localizing a specific level of a MultiIndex (:issue:`7846`)
+
.. _whatsnew_0150.refactoring:
Internal Refactoring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2815f05ce313b..90c3fa207e3bb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3467,7 +3467,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True):
return result
- def tz_convert(self, tz, axis=0, copy=True):
+ def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert the axis to target time zone. If it is time zone naive, it
will be localized to the passed time zone.
@@ -3475,6 +3475,10 @@ def tz_convert(self, tz, axis=0, copy=True):
Parameters
----------
tz : string or pytz.timezone object
+ axis : the axis to convert
+ level : int, str, default None
+ If axis ia a MultiIndex, convert a specific level. Otherwise
+ must be None
copy : boolean, default True
Also make a copy of the underlying data
@@ -3484,27 +3488,44 @@ def tz_convert(self, tz, axis=0, copy=True):
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
- if not hasattr(ax, 'tz_convert'):
- if len(ax) > 0:
- ax_name = self._get_axis_name(axis)
- raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
- ax_name)
+ def _tz_convert(ax, tz):
+ if not hasattr(ax, 'tz_convert'):
+ if len(ax) > 0:
+ ax_name = self._get_axis_name(axis)
+ raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
+ ax_name)
+ else:
+ ax = DatetimeIndex([],tz=tz)
else:
- ax = DatetimeIndex([],tz=tz)
+ ax = ax.tz_convert(tz)
+ return ax
+
+ # if a level is given it must be a MultiIndex level or
+ # equivalent to the axis name
+ if isinstance(ax, MultiIndex):
+ level = ax._get_level_number(level)
+ new_level = _tz_convert(ax.levels[level], tz)
+ ax = ax.set_levels(new_level, level=level)
else:
- ax = ax.tz_convert(tz)
+ if level not in (None, 0, ax.name):
+ raise ValueError("The level {0} is not valid".format(level))
+ ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result.set_axis(axis,ax)
return result.__finalize__(self)
- def tz_localize(self, tz, axis=0, copy=True, infer_dst=False):
+ def tz_localize(self, tz, axis=0, level=None, copy=True, infer_dst=False):
"""
Localize tz-naive TimeSeries to target time zone
Parameters
----------
tz : string or pytz.timezone object
+ axis : the axis to localize
+ level : int, str, default None
+ If axis ia a MultiIndex, localize a specific level. Otherwise
+ must be None
copy : boolean, default True
Also make a copy of the underlying data
infer_dst : boolean, default False
@@ -3516,15 +3537,28 @@ def tz_localize(self, tz, axis=0, copy=True, infer_dst=False):
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
- if not hasattr(ax, 'tz_localize'):
- if len(ax) > 0:
- ax_name = self._get_axis_name(axis)
- raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
- ax_name)
+ def _tz_localize(ax, tz, infer_dst):
+ if not hasattr(ax, 'tz_localize'):
+ if len(ax) > 0:
+ ax_name = self._get_axis_name(axis)
+ raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
+ ax_name)
+ else:
+ ax = DatetimeIndex([],tz=tz)
else:
- ax = DatetimeIndex([],tz=tz)
+ ax = ax.tz_localize(tz, infer_dst=infer_dst)
+ return ax
+
+ # if a level is given it must be a MultiIndex level or
+ # equivalent to the axis name
+ if isinstance(ax, MultiIndex):
+ level = ax._get_level_number(level)
+ new_level = _tz_localize(ax.levels[level], tz, infer_dst)
+ ax = ax.set_levels(new_level, level=level)
else:
- ax = ax.tz_localize(tz, infer_dst=infer_dst)
+ if level not in (None, 0, ax.name):
+ raise ValueError("The level {0} is not valid".format(level))
+ ax = _tz_localize(ax, tz, infer_dst)
result = self._constructor(self._data, copy=copy)
result.set_axis(axis,ax)
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 044d4054755ba..c607ccc3572b2 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -7,7 +7,7 @@
import pandas as pd
from pandas import (Index, Series, DataFrame, Panel,
- isnull, notnull,date_range)
+ isnull, notnull, date_range, period_range)
from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
@@ -1102,6 +1102,80 @@ def finalize(self, other, method=None, **kwargs):
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
+ def test_tz_convert_and_localize(self):
+ l0 = date_range('20140701', periods=5, freq='D')
+
+ # TODO: l1 should be a PeriodIndex for testing
+ # after GH2106 is addressed
+ with tm.assertRaises(NotImplementedError):
+ period_range('20140701', periods=1).tz_convert('UTC')
+ with tm.assertRaises(NotImplementedError):
+ period_range('20140701', periods=1).tz_localize('UTC')
+ # l1 = period_range('20140701', periods=5, freq='D')
+ l1 = date_range('20140701', periods=5, freq='D')
+
+ int_idx = Index(range(5))
+
+ for fn in ['tz_localize', 'tz_convert']:
+
+ if fn == 'tz_convert':
+ l0 = l0.tz_localize('UTC')
+ l1 = l1.tz_localize('UTC')
+
+ for idx in [l0, l1]:
+
+ l0_expected = getattr(idx, fn)('US/Pacific')
+ l1_expected = getattr(idx, fn)('US/Pacific')
+
+ df1 = DataFrame(np.ones(5), index=l0)
+ df1 = getattr(df1, fn)('US/Pacific')
+ self.assertTrue(df1.index.equals(l0_expected))
+
+ # MultiIndex
+ # GH7846
+ df2 = DataFrame(np.ones(5),
+ MultiIndex.from_arrays([l0, l1]))
+
+ df3 = getattr(df2, fn)('US/Pacific', level=0)
+ self.assertFalse(df3.index.levels[0].equals(l0))
+ self.assertTrue(df3.index.levels[0].equals(l0_expected))
+ self.assertTrue(df3.index.levels[1].equals(l1))
+ self.assertFalse(df3.index.levels[1].equals(l1_expected))
+
+ df3 = getattr(df2, fn)('US/Pacific', level=1)
+ self.assertTrue(df3.index.levels[0].equals(l0))
+ self.assertFalse(df3.index.levels[0].equals(l0_expected))
+ self.assertTrue(df3.index.levels[1].equals(l1_expected))
+ self.assertFalse(df3.index.levels[1].equals(l1))
+
+ df4 = DataFrame(np.ones(5),
+ MultiIndex.from_arrays([int_idx, l0]))
+
+ df5 = getattr(df4, fn)('US/Pacific', level=1)
+ self.assertTrue(df3.index.levels[0].equals(l0))
+ self.assertFalse(df3.index.levels[0].equals(l0_expected))
+ self.assertTrue(df3.index.levels[1].equals(l1_expected))
+ self.assertFalse(df3.index.levels[1].equals(l1))
+
+ # Bad Inputs
+ for fn in ['tz_localize', 'tz_convert']:
+ # Not DatetimeIndex / PeriodIndex
+ with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
+ df = DataFrame(index=int_idx)
+ df = getattr(df, fn)('US/Pacific')
+
+ # Not DatetimeIndex / PeriodIndex
+ with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
+ df = DataFrame(np.ones(5),
+ MultiIndex.from_arrays([int_idx, l0]))
+ df = getattr(df, fn)('US/Pacific', level=0)
+
+ # Invalid level
+ with tm.assertRaisesRegexp(ValueError, 'not valid'):
+ df = DataFrame(index=l0)
+ df = getattr(df, fn)('US/Pacific', level=1)
+
+
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index ddd1ee34f0798..e80fdf28c4089 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1174,6 +1174,52 @@ def __setstate__(self, state):
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
+
+ def tz_convert(self, tz):
+ """
+ Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)
+
+ Parameters
+ ----------
+ tz : string, pytz.timezone, dateutil.tz.tzfile or None
+ Time zone for time. Corresponding timestamps would be converted to
+ time zone of the TimeSeries.
+ None will remove timezone holding UTC time.
+
+ Returns
+ -------
+ normalized : DatetimeIndex
+
+ Note
+ ----
+ Not currently implemented for PeriodIndex
+ """
+ raise NotImplementedError("Not yet implemented for PeriodIndex")
+
+ def tz_localize(self, tz, infer_dst=False):
+ """
+ Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
+ or remove timezone from tz-aware DatetimeIndex
+
+ Parameters
+ ----------
+ tz : string, pytz.timezone, dateutil.tz.tzfile or None
+ Time zone for time. Corresponding timestamps would be converted to
+ time zone of the TimeSeries.
+ None will remove timezone holding local time.
+ infer_dst : boolean, default False
+ Attempt to infer fall dst-transition hours based on order
+
+ Returns
+ -------
+ localized : DatetimeIndex
+
+ Note
+ ----
+ Not currently implemented for PeriodIndex
+ """
+ raise NotImplementedError("Not yet implemented for PeriodIndex")
+
PeriodIndex._add_numeric_methods_disabled()
def _get_ordinal_range(start, end, periods, freq):
| Closes #7846
New `level` argument for `DataFrame.tz_localize()` and `DataFrame.tz_convert()`, needed for a DataFrame with MultiIndex:
```
tz_convert(self, tz, axis=0, level=None, copy=True)
tz_localize(self, tz, axis=0, level=None, copy=True, infer_dst=False)
```
@jreback Not sure if `test_generic.py` is the right place to add the tests.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7915 | 2014-08-04T05:34:58Z | 2014-08-07T20:47:32Z | 2014-08-07T20:47:32Z | 2014-08-07T20:47:42Z |
update to holiday to help with GH#7070 | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 148cf85d0b5ab..8ab66ebd2de18 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -299,6 +299,8 @@ Enhancements
- ``PeriodIndex`` supports ``resolution`` as the same as ``DatetimeIndex`` (:issue:`7708`)
+-``pandas.tseries.holiday`` has added support for additional holidays and ways to observe holidays (:issue: `7070`)
+-``pandas.tseries.holiday.Holiday`` now supports a list of offsets in Python3 (:issue: `7070`)
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 6291be340d651..f42ad174b8f0f 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -2,6 +2,7 @@
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
+from pandas.tseries.offsets import Easter, Day
def next_monday(dt):
"""
@@ -46,6 +47,20 @@ def sunday_to_monday(dt):
return dt + timedelta(1)
return dt
+
+def weekend_to_monday(dt):
+ """
+ If holiday falls on Sunday or Saturday,
+ use day thereafter (Monday) instead.
+ Needed for holidays such as Christmas observation in Europe
+ """
+ if dt.weekday() == 6:
+ return dt + timedelta(1)
+ elif dt.weekday() == 5:
+ return dt + timedelta(2)
+ return dt
+
+
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
@@ -57,6 +72,44 @@ def nearest_workday(dt):
return dt + timedelta(1)
return dt
+
+def next_workday(dt):
+ """
+ returns next weekday used for observances
+ """
+ dt += timedelta(days=1)
+ while dt.weekday() > 4:
+ # Mon-Fri are 0-4
+ dt += timedelta(days=1)
+ return dt
+
+
+def previous_workday(dt):
+ """
+ returns previous weekday used for observances
+ """
+ dt -= timedelta(days=1)
+ while dt.weekday() > 4:
+ # Mon-Fri are 0-4
+ dt -= timedelta(days=1)
+ return dt
+
+
+def before_nearest_workday(dt):
+ """
+ returns previous workday after nearest workday
+ """
+ return previous_workday(nearest_workday(dt))
+
+
+def after_nearest_workday(dt):
+ """
+ returns next workday after nearest workday
+ needed for Boxing day or multiple holidays in a series
+ """
+ return next_workday(nearest_workday(dt))
+
+
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
@@ -64,6 +117,17 @@ class Holiday(object):
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None):
+ """
+ Parameters
+ ----------
+ name : str
+ Name of the holiday , defaults to class name
+ offset : array of pandas.tseries.offsets or
+ class from pandas.tseries.offsets
+ computes offset from date
+ observance: function
+ computes when holiday is given a pandas Timestamp
+ """
self.name = name
self.year = year
self.month = month
@@ -149,7 +213,7 @@ def _apply_rule(self, dates):
offsets = self.offset
for offset in offsets:
- dates = map(lambda d: d + offset, dates)
+ dates = list(map(lambda d: d + offset, dates))
return dates
@@ -330,6 +394,11 @@ def merge(self, other, inplace=False):
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday('President''s Day', month=2, day=1,
offset=DateOffset(weekday=MO(3)))
+GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
+
+EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
+
+
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
diff --git a/pandas/tseries/tests/test_holiday.py b/pandas/tseries/tests/test_holiday.py
index 0d5cc11bea7da..adc2c0d237265 100644
--- a/pandas/tseries/tests/test_holiday.py
+++ b/pandas/tseries/tests/test_holiday.py
@@ -6,7 +6,10 @@
nearest_workday, next_monday_or_tuesday, next_monday,
previous_friday, sunday_to_monday, Holiday, DateOffset,
MO, Timestamp, AbstractHolidayCalendar, get_calendar,
- HolidayCalendarFactory)
+ HolidayCalendarFactory, next_workday, previous_workday,
+ before_nearest_workday, EasterMonday, GoodFriday,
+ after_nearest_workday, weekend_to_monday)
+import nose
class TestCalendar(tm.TestCase):
@@ -69,6 +72,37 @@ def test_usmemorialday(self):
]
self.assertEqual(list(holidays), holidayList)
+ def test_easter(self):
+ holidays = EasterMonday.dates(self.start_date,
+ self.end_date)
+ holidayList = [Timestamp('2011-04-25 00:00:00'),
+ Timestamp('2012-04-09 00:00:00'),
+ Timestamp('2013-04-01 00:00:00'),
+ Timestamp('2014-04-21 00:00:00'),
+ Timestamp('2015-04-06 00:00:00'),
+ Timestamp('2016-03-28 00:00:00'),
+ Timestamp('2017-04-17 00:00:00'),
+ Timestamp('2018-04-02 00:00:00'),
+ Timestamp('2019-04-22 00:00:00'),
+ Timestamp('2020-04-13 00:00:00')]
+
+
+ self.assertEqual(list(holidays), holidayList)
+ holidays = GoodFriday.dates(self.start_date,
+ self.end_date)
+ holidayList = [Timestamp('2011-04-22 00:00:00'),
+ Timestamp('2012-04-06 00:00:00'),
+ Timestamp('2013-03-29 00:00:00'),
+ Timestamp('2014-04-18 00:00:00'),
+ Timestamp('2015-04-03 00:00:00'),
+ Timestamp('2016-03-25 00:00:00'),
+ Timestamp('2017-04-14 00:00:00'),
+ Timestamp('2018-03-30 00:00:00'),
+ Timestamp('2019-04-19 00:00:00'),
+ Timestamp('2020-04-10 00:00:00')]
+ self.assertEqual(list(holidays), holidayList)
+
+
def test_usthanksgivingday(self):
holidays = USThanksgivingDay.dates(self.start_date,
self.end_date)
@@ -166,3 +200,33 @@ def test_nearest_workday(self):
self.assertEqual(nearest_workday(self.su), self.mo)
self.assertEqual(nearest_workday(self.mo), self.mo)
+ def test_weekend_to_monday(self):
+ self.assertEqual(weekend_to_monday(self.sa), self.mo)
+ self.assertEqual(weekend_to_monday(self.su), self.mo)
+ self.assertEqual(weekend_to_monday(self.mo), self.mo)
+
+ def test_next_workday(self):
+ self.assertEqual(next_workday(self.sa), self.mo)
+ self.assertEqual(next_workday(self.su), self.mo)
+ self.assertEqual(next_workday(self.mo), self.tu)
+
+ def test_previous_workday(self):
+ self.assertEqual(previous_workday(self.sa), self.fr)
+ self.assertEqual(previous_workday(self.su), self.fr)
+ self.assertEqual(previous_workday(self.tu), self.mo)
+
+ def test_before_nearest_workday(self):
+ self.assertEqual(before_nearest_workday(self.sa), self.th)
+ self.assertEqual(before_nearest_workday(self.su), self.fr)
+ self.assertEqual(before_nearest_workday(self.tu), self.mo)
+
+ def test_after_nearest_workday(self):
+ self.assertEqual(after_nearest_workday(self.sa), self.mo)
+ self.assertEqual(after_nearest_workday(self.su), self.tu)
+ self.assertEqual(after_nearest_workday(self.fr), self.mo)
+
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
+
| I am working on a financial calendar and wanted to add the following functions to pandas.tseries.holiday in order to properly handle half days.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7913 | 2014-08-04T03:09:11Z | 2014-08-11T12:57:15Z | 2014-08-11T12:57:15Z | 2014-08-19T19:33:03Z |
added support for selecting multiple nth values | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index eaccbfddc1f86..fb1004edca785 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -869,7 +869,7 @@ This shows the first or last n rows from each group.
Taking the nth row of each group
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-To select from a DataFrame or Series the nth item, use the nth method. This is a reduction method, and will return a single row (or no row) per group:
+To select from a DataFrame or Series the nth item, use the nth method. This is a reduction method, and will return a single row (or no row) per group if you pass an int for n:
.. ipython:: python
@@ -880,7 +880,7 @@ To select from a DataFrame or Series the nth item, use the nth method. This is a
g.nth(-1)
g.nth(1)
-If you want to select the nth not-null method, use the ``dropna`` kwarg. For a DataFrame this should be either ``'any'`` or ``'all'`` just like you would pass to dropna, for a Series this just needs to be truthy.
+If you want to select the nth not-null item, use the ``dropna`` kwarg. For a DataFrame this should be either ``'any'`` or ``'all'`` just like you would pass to dropna, for a Series this just needs to be truthy.
.. ipython:: python
@@ -904,6 +904,15 @@ As with other methods, passing ``as_index=False``, will achieve a filtration, wh
g.nth(0)
g.nth(-1)
+You can also select multiple rows from each group by specifying multiple nth values as a list of ints.
+
+.. ipython:: python
+
+ business_dates = date_range(start='4/1/2014', end='6/30/2014', freq='B')
+ df = DataFrame(1, index=business_dates, columns=['a', 'b'])
+ # get the first, 4th, and last date index for each month
+ df.groupby((df.index.year, df.index.month)).nth([0, 3, -1])
+
Enumerate group items
~~~~~~~~~~~~~~~~~~~~~
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 8cfa0e25b789f..18a16b3262236 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -756,12 +756,21 @@ def ohlc(self):
def nth(self, n, dropna=None):
"""
- Take the nth row from each group.
+ Take the nth row from each group if n is an int, or a subset of rows
+ if n is a list of ints.
- If dropna, will not show nth non-null row, dropna is either
+ If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
+ Parameters
+ ----------
+ n : int or list of ints
+ a single nth value for the row or a list of nth values
+ dropna : None or str, optional
+ apply the specified dropna operation before counting which row is
+ the nth row. Needs to be None, 'any' or 'all'
+
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
@@ -789,19 +798,36 @@ def nth(self, n, dropna=None):
5 NaN
"""
+ if isinstance(n, int):
+ nth_values = [n]
+ elif isinstance(n, (set, list, tuple)):
+ nth_values = list(set(n))
+ if dropna is not None:
+ raise ValueError("dropna option with a list of nth values is not supported")
+ else:
+ raise TypeError("n needs to be an int or a list/set/tuple of ints")
+
+ m = self.grouper._max_groupsize
+ # filter out values that are outside [-m, m)
+ pos_nth_values = [i for i in nth_values if i >= 0 and i < m]
+ neg_nth_values = [i for i in nth_values if i < 0 and i >= -m]
self._set_selection_from_grouper()
if not dropna: # good choice
- m = self.grouper._max_groupsize
- if n >= m or n < -m:
+ if not pos_nth_values and not neg_nth_values:
+ # no valid nth values
return self._selected_obj.loc[[]]
+
rng = np.zeros(m, dtype=bool)
- if n >= 0:
- rng[n] = True
- is_nth = self._cumcount_array(rng)
- else:
- rng[- n - 1] = True
- is_nth = self._cumcount_array(rng, ascending=False)
+ for i in pos_nth_values:
+ rng[i] = True
+ is_nth = self._cumcount_array(rng)
+
+ if neg_nth_values:
+ rng = np.zeros(m, dtype=bool)
+ for i in neg_nth_values:
+ rng[- i - 1] = True
+ is_nth |= self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f958d5481ad33..4c9caecfb99ed 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -312,6 +312,30 @@ def test_nth(self):
expected = g.B.first()
assert_series_equal(result,expected)
+ # test multiple nth values
+ df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
+ columns=['A', 'B'])
+ g = df.groupby('A')
+
+ assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
+ assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
+ assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
+ assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
+ assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
+ assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
+ assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
+ assert_frame_equal(g.nth([3, 4]), df.loc[[],['B']])
+
+ business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B')
+ df = DataFrame(1, index=business_dates, columns=['a', 'b'])
+ # get the first, fourth and last two business days for each month
+ result = df.groupby((df.index.year, df.index.month)).nth([0, 3, -2, -1])
+ expected_dates = pd.to_datetime(['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30',
+ '2014/5/1', '2014/5/6', '2014/5/29', '2014/5/30',
+ '2014/6/2', '2014/6/5', '2014/6/27', '2014/6/30'])
+ expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
+ assert_frame_equal(result, expected)
+
def test_grouper_index_types(self):
# related GH5375
# groupby misbehaving when using a Floatlike index
| I'm not 100% sure this is the best way to implement this, but I think it'd be nice for GroupBy.nth() to support selecting multiple values. For instance if we have
```
df = DataFrame(1, index=pd.date_range(start='1/1/2013', end='6/30/2014', freq='B'), columns=['a', 'b'])
```
and currently we can easily get the nth entry for each month by:
```
df.groupby((df.index.year, df.index.month)).nth(0)
```
however we can't easily get, for instance, the first 5 entries for each group without having a more complex loop in the user code. If nth() accepts a list of integers we would be able to simply do
```
df.groupby((df.index.year, df.index.month)).nth(range(5))
```
This PR makes the above work when there's no dropna. I can't quite figure out the section where dropna is handled, and therefore I'm currently requiring that no dropna be passed when n is a list of values. I'm happy to improve this and also enhance the tests. Please let me know your feedback. Thanks.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7910 | 2014-08-03T21:20:30Z | 2014-09-04T00:42:46Z | 2014-09-04T00:42:46Z | 2015-04-29T15:33:46Z |
Remove from start/end dates if tz is not None (#7901, #7835) | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 05fd82b2f448d..9dd7845864e59 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1364,6 +1364,12 @@ tz-aware data to another time zone:
is localized using one version and operated on with a different version.
See :ref:`here<io.hdf5-notes>` for how to handle such a situation.
+.. warning::
+
+ It is incorrect to pass a timezone directly into the ``datetime.datetime`` constructor (e.g.,
+ ``datetime.datetime(2011, 1, 1, tz=timezone('US/Eastern'))``. Instead, the datetime
+ needs to be localized using the the localize method on the timezone.
+
Under the hood, all timestamps are stored in UTC. Scalar values from a
``DatetimeIndex`` with a time zone will have their fields (day, hour, minute)
localized to the time zone. However, timestamps with the same UTC value are
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 109ed8b286c22..2e3aeaf69957b 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -358,7 +358,9 @@ Bug Fixes
- Bug in ``GroupBy.filter()`` where fast path vs. slow path made the filter
- return a non scalar value that appeared valid but wasnt' (:issue:`7870`).
+ return a non scalar value that appeared valid but wasn't (:issue:`7870`).
+- Bug in ``date_range()``/``DatetimeIndex()`` when the timezone was inferred from input dates yet incorrect
+ times were returned when crossing DST boundaries (:issue:`7835`, :issue:`7901`).
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 5f7c93d38653a..80f9e6cd8db7a 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -416,7 +416,7 @@ def _generate(cls, start, end, periods, name, offset,
else:
- if inferred_tz is None and tz is not None:
+ if tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 81cf34bbc269b..7b0bfa98690e2 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -354,25 +354,51 @@ def test_range_bug(self):
def test_range_tz_pytz(self):
# GH 2906
tm._skip_if_no_pytz()
- from pytz import timezone as tz
+ from pytz import timezone
- start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern'))
- end = datetime(2011, 1, 3, tzinfo=tz('US/Eastern'))
+ tz = timezone('US/Eastern')
+ start = tz.localize(datetime(2011, 1, 1))
+ end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
- self.assertEqual(dr.tz, tz('US/Eastern'))
+ self.assertEqual(dr.tz.zone, tz.zone)
self.assertEqual(dr[0], start)
self.assertEqual(dr[2], end)
dr = date_range(end=end, periods=3)
- self.assertEqual(dr.tz, tz('US/Eastern'))
+ self.assertEqual(dr.tz.zone, tz.zone)
self.assertEqual(dr[0], start)
self.assertEqual(dr[2], end)
dr = date_range(start=start, end=end)
- self.assertEqual(dr.tz, tz('US/Eastern'))
+ self.assertEqual(dr.tz.zone, tz.zone)
self.assertEqual(dr[0], start)
self.assertEqual(dr[2], end)
+
+ def test_range_tz_dst_straddle_pytz(self):
+
+ tm._skip_if_no_pytz()
+ from pytz import timezone
+ tz = timezone('US/Eastern')
+ dates = [(tz.localize(datetime(2014, 3, 6)),
+ tz.localize(datetime(2014, 3, 12))),
+ (tz.localize(datetime(2013, 11, 1)),
+ tz.localize(datetime(2013, 11, 6)))]
+ for (start, end) in dates:
+ dr = date_range(start, end, freq='D')
+ self.assertEqual(dr[0], start)
+ self.assertEqual(dr[-1], end)
+ self.assertEqual(np.all(dr.hour==0), True)
+
+ dr = date_range(start, end, freq='D', tz='US/Eastern')
+ self.assertEqual(dr[0], start)
+ self.assertEqual(dr[-1], end)
+ self.assertEqual(np.all(dr.hour==0), True)
+
+ dr = date_range(start.replace(tzinfo=None), end.replace(tzinfo=None), freq='D', tz='US/Eastern')
+ self.assertEqual(dr[0], start)
+ self.assertEqual(dr[-1], end)
+ self.assertEqual(np.all(dr.hour==0), True)
def test_range_tz_dateutil(self):
# GH 2906
| Below fixes date_range when input dates are localized. In that case inferred_freq is not None and so the dates do not have their tzinfo removed. This causes a fixed offset to be applied when the range is created. If tzinfo is removed, they will be correctly localized.
Fixes #7901
Fixes #7835.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7909 | 2014-08-03T19:40:39Z | 2014-08-05T17:14:01Z | 2014-08-05T17:14:00Z | 2014-08-05T17:14:05Z |
BUG: Timestamp cannot parse nanosecond from string | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 148cf85d0b5ab..7029438c13d67 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -166,6 +166,8 @@ previously results in ``Exception`` or ``TypeError`` (:issue:`7812`)
- ``DataFrame.tz_localize`` and ``DataFrame.tz_convert`` now accepts an optional ``level`` argument
for localizing a specific level of a MultiIndex (:issue:`7846`)
+- ``Timestamp.__repr__`` displays ``dateutil.tz.tzoffset`` info (:issue:`7907`)
+
.. _whatsnew_0150.dt:
.dt accessor
@@ -443,7 +445,8 @@ Bug Fixes
- Bug in ``Series.str.cat`` with an index which was filtered as to not include the first item (:issue:`7857`)
-
+- Bug in ``Timestamp`` cannot parse ``nanosecond`` from string (:issue:`7878`)
+- Bug in ``Timestamp`` with string offset and ``tz`` results incorrect (:issue:`7833`)
- Bug in ``tslib.tz_convert`` and ``tslib.tz_convert_single`` may return different results (:issue:`7798`)
- Bug in ``DatetimeIndex.intersection`` of non-overlapping timestamps with tz raises ``IndexError`` (:issue:`7880`)
diff --git a/pandas/src/datetime.pxd b/pandas/src/datetime.pxd
index abd3bc3333adb..0896965162698 100644
--- a/pandas/src/datetime.pxd
+++ b/pandas/src/datetime.pxd
@@ -109,7 +109,8 @@ cdef extern from "datetime/np_datetime_strings.h":
int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
NPY_CASTING casting, pandas_datetimestruct *out,
- npy_bool *out_local, PANDAS_DATETIMEUNIT *out_bestunit,
+ int *out_local, int *out_tzoffset,
+ PANDAS_DATETIMEUNIT *out_bestunit,
npy_bool *out_special)
int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
@@ -123,7 +124,8 @@ cdef extern from "datetime/np_datetime_strings.h":
-cdef inline _string_to_dts(object val, pandas_datetimestruct* dts):
+cdef inline _string_to_dts(object val, pandas_datetimestruct* dts,
+ int* out_local, int* out_tzoffset):
cdef int result
cdef char *tmp
@@ -131,21 +133,22 @@ cdef inline _string_to_dts(object val, pandas_datetimestruct* dts):
val = PyUnicode_AsASCIIString(val);
tmp = val
- result = _cstring_to_dts(tmp, len(val), dts)
+ result = _cstring_to_dts(tmp, len(val), dts, out_local, out_tzoffset)
if result == -1:
raise ValueError('Unable to parse %s' % str(val))
cdef inline int _cstring_to_dts(char *val, int length,
- pandas_datetimestruct* dts):
+ pandas_datetimestruct* dts,
+ int* out_local, int* out_tzoffset):
cdef:
- npy_bool islocal, special
+ npy_bool special
PANDAS_DATETIMEUNIT out_bestunit
int result
result = parse_iso_8601_datetime(val, length, PANDAS_FR_ns,
NPY_UNSAFE_CASTING,
- dts, &islocal, &out_bestunit, &special)
+ dts, out_local, out_tzoffset, &out_bestunit, &special)
return result
diff --git a/pandas/src/datetime/np_datetime_strings.c b/pandas/src/datetime/np_datetime_strings.c
index 9c78e995f4fe3..3f09de851e231 100644
--- a/pandas/src/datetime/np_datetime_strings.c
+++ b/pandas/src/datetime/np_datetime_strings.c
@@ -363,7 +363,9 @@ convert_datetimestruct_local_to_utc(pandas_datetimestruct *out_dts_utc,
* to be cast to the 'unit' parameter.
*
* 'out' gets filled with the parsed date-time.
- * 'out_local' gets set to 1 if the parsed time was in local time,
+ * 'out_local' gets whether returned value contains timezone. 0 for UTC, 1 for local time.
+ * 'out_tzoffset' gets set to timezone offset by minutes
+ * if the parsed time was in local time,
* to 0 otherwise. The values 'now' and 'today' don't get counted
* as local, and neither do UTC +/-#### timezone offsets, because
* they aren't using the computer's local timezone offset.
@@ -381,7 +383,8 @@ parse_iso_8601_datetime(char *str, int len,
PANDAS_DATETIMEUNIT unit,
NPY_CASTING casting,
pandas_datetimestruct *out,
- npy_bool *out_local,
+ int *out_local,
+ int *out_tzoffset,
PANDAS_DATETIMEUNIT *out_bestunit,
npy_bool *out_special)
{
@@ -778,19 +781,6 @@ parse_iso_8601_datetime(char *str, int len,
if (sublen == 0) {
// Unlike NumPy, treating no time zone as naive
goto finish;
-
-/*
- if (convert_datetimestruct_local_to_utc(out, out) < 0) {
- goto error;
- }
-
- // Since neither "Z" nor a time-zone was specified, it's local
- if (out_local != NULL) {
- *out_local = 1;
- }
-
- goto finish;
-*/
}
/* UTC specifier */
@@ -816,9 +806,6 @@ parse_iso_8601_datetime(char *str, int len,
* Since "local" means local with respect to the current
* machine, we say this is non-local.
*/
- if (out_local != NULL) {
- *out_local = 0;
- }
if (*substr == '-') {
offset_neg = 1;
@@ -872,7 +859,11 @@ parse_iso_8601_datetime(char *str, int len,
offset_hour = -offset_hour;
offset_minute = -offset_minute;
}
- add_minutes_to_datetimestruct(out, -60 * offset_hour - offset_minute);
+ if (out_local != NULL) {
+ *out_local = 1;
+ // Unlike NumPy, do not change internal value to local time
+ *out_tzoffset = 60 * offset_hour - offset_minute;
+ }
}
/* Skip trailing whitespace */
diff --git a/pandas/src/datetime/np_datetime_strings.h b/pandas/src/datetime/np_datetime_strings.h
index 9a2488fefaf56..0d9a0944310fb 100644
--- a/pandas/src/datetime/np_datetime_strings.h
+++ b/pandas/src/datetime/np_datetime_strings.h
@@ -27,7 +27,9 @@
* to be cast to the 'unit' parameter.
*
* 'out' gets filled with the parsed date-time.
- * 'out_local' gets set to 1 if the parsed time was in local time,
+ * 'out_local' gets whether returned value contains timezone. 0 for UTC, 1 for local time.
+ * 'out_tzoffset' gets set to timezone offset by minutes
+ * if the parsed time was in local time,
* to 0 otherwise. The values 'now' and 'today' don't get counted
* as local, and neither do UTC +/-#### timezone offsets, because
* they aren't using the computer's local timezone offset.
@@ -45,7 +47,8 @@ parse_iso_8601_datetime(char *str, int len,
PANDAS_DATETIMEUNIT unit,
NPY_CASTING casting,
pandas_datetimestruct *out,
- npy_bool *out_local,
+ int *out_local,
+ int *out_tzoffset,
PANDAS_DATETIMEUNIT *out_bestunit,
npy_bool *out_special);
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index e651370be7d6d..f94910d9dec89 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2173,10 +2173,31 @@ def test_constructor_coverage(self):
def test_constructor_datetime64_tzformat(self):
# GH 6572
tm._skip_if_no_pytz()
+ import pytz
+ # ISO 8601 format results in pytz.FixedOffset
+ for freq in ['AS', 'W-SUN']:
+ idx = date_range('2013-01-01T00:00:00-05:00', '2016-01-01T23:59:59-05:00', freq=freq)
+ expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
+ freq=freq, tz=pytz.FixedOffset(-300))
+ tm.assert_index_equal(idx, expected)
+ # Unable to use `US/Eastern` because of DST
+ expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
+ freq=freq, tz='America/Lima')
+ self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
+
+ idx = date_range('2013-01-01T00:00:00+09:00', '2016-01-01T23:59:59+09:00', freq=freq)
+ expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
+ freq=freq, tz=pytz.FixedOffset(540))
+ tm.assert_index_equal(idx, expected)
+ expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
+ freq=freq, tz='Asia/Tokyo')
+ self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
+
tm._skip_if_no_dateutil()
from dateutil.tz import tzoffset
+ # Non ISO 8601 format results in dateutil.tz.tzoffset
for freq in ['AS', 'W-SUN']:
- idx = date_range('2013-01-01T00:00:00-05:00', '2016-01-01T23:59:59-05:00', freq=freq)
+ idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=tzoffset(None, -18000))
tm.assert_index_equal(idx, expected)
@@ -2185,7 +2206,7 @@ def test_constructor_datetime64_tzformat(self):
freq=freq, tz='America/Lima')
self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
- idx = date_range('2013-01-01T00:00:00+09:00', '2016-01-01T23:59:59+09:00', freq=freq)
+ idx = date_range('2013/1/1 0:00:00+9:00', '2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=tzoffset(None, 32400))
tm.assert_index_equal(idx, expected)
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 563ab74ad975a..a700a617b0dee 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -15,40 +15,180 @@
from pandas.util.testing import assert_series_equal
class TestTimestamp(tm.TestCase):
+
+ def test_constructor(self):
+ base_str = '2014-07-01 09:00'
+ base_dt = datetime.datetime(2014, 7, 1, 9)
+ base_expected = 1404205200000000000
+
+ # confirm base representation is correct
+ import calendar
+ self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)
+
+ tests = [(base_str, base_dt, base_expected),
+ ('2014-07-01 10:00', datetime.datetime(2014, 7, 1, 10),
+ base_expected + 3600 * 1000000000),
+ ('2014-07-01 09:00:00.000008000',
+ datetime.datetime(2014, 7, 1, 9, 0, 0, 8), base_expected + 8000),
+ ('2014-07-01 09:00:00.000000005',
+ Timestamp('2014-07-01 09:00:00.000000005'), base_expected + 5)]
+
+ tm._skip_if_no_pytz()
+ tm._skip_if_no_dateutil()
+ import pytz
+ import dateutil
+ timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),
+ ('Asia/Tokyo', 9), ('US/Eastern', -4), ('dateutil/US/Pacific', -7),
+ (pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]
+
+ for date_str, date, expected in tests:
+ for result in [Timestamp(date_str), Timestamp(date)]:
+ # only with timestring
+ self.assertEqual(result.value, expected)
+ self.assertEqual(tslib.pydt_to_i8(result), expected)
+
+ # re-creation shouldn't affect to internal value
+ result = Timestamp(result)
+ self.assertEqual(result.value, expected)
+ self.assertEqual(tslib.pydt_to_i8(result), expected)
+
+ # with timezone
+ for tz, offset in timezones:
+ for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
+ expected_tz = expected - offset * 3600 * 1000000000
+ self.assertEqual(result.value, expected_tz)
+ self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
+
+ # should preserve tz
+ result = Timestamp(result)
+ self.assertEqual(result.value, expected_tz)
+ self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
+
+ # should convert to UTC
+ result = Timestamp(result, tz='UTC')
+ expected_utc = expected - offset * 3600 * 1000000000
+ self.assertEqual(result.value, expected_utc)
+ self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
+
+ def test_constructor_with_stringoffset(self):
+ # GH 7833
+ base_str = '2014-07-01 11:00:00+02:00'
+ base_dt = datetime.datetime(2014, 7, 1, 9)
+ base_expected = 1404205200000000000
+
+ # confirm base representation is correct
+ import calendar
+ self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)
+
+ tests = [(base_str, base_expected),
+ ('2014-07-01 12:00:00+02:00', base_expected + 3600 * 1000000000),
+ ('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
+ ('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
+
+ tm._skip_if_no_pytz()
+ tm._skip_if_no_dateutil()
+ import pytz
+ import dateutil
+ timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),
+ ('Asia/Tokyo', 9), ('US/Eastern', -4),
+ ('dateutil/US/Pacific', -7),
+ (pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]
+
+ for date_str, expected in tests:
+ for result in [Timestamp(date_str)]:
+ # only with timestring
+ self.assertEqual(result.value, expected)
+ self.assertEqual(tslib.pydt_to_i8(result), expected)
+
+ # re-creation shouldn't affect to internal value
+ result = Timestamp(result)
+ self.assertEqual(result.value, expected)
+ self.assertEqual(tslib.pydt_to_i8(result), expected)
+
+ # with timezone
+ for tz, offset in timezones:
+ result = Timestamp(date_str, tz=tz)
+ expected_tz = expected
+ self.assertEqual(result.value, expected_tz)
+ self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
+
+ # should preserve tz
+ result = Timestamp(result)
+ self.assertEqual(result.value, expected_tz)
+ self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
+
+ # should convert to UTC
+ result = Timestamp(result, tz='UTC')
+ expected_utc = expected
+ self.assertEqual(result.value, expected_utc)
+ self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
+
+ # This should be 2013-11-01 05:00 in UTC -> converted to Chicago tz
+ result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
+ self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
+ expected_repr = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')"
+ self.assertEqual(repr(result), expected_repr)
+ self.assertEqual(result, eval(repr(result)))
+
+ # This should be 2013-11-01 05:00 in UTC -> converted to Tokyo tz (+09:00)
+ result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
+ self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
+ expected_repr = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
+ self.assertEqual(repr(result), expected_repr)
+ self.assertEqual(result, eval(repr(result)))
+
def test_repr(self):
- date = '2014-03-07'
- tz = 'US/Eastern'
- freq = 'M'
-
- date_only = Timestamp(date)
- self.assertIn(date, repr(date_only))
- self.assertNotIn(tz, repr(date_only))
- self.assertNotIn(freq, repr(date_only))
- self.assertEqual(date_only, eval(repr(date_only)))
-
- date_tz = Timestamp(date, tz=tz)
- self.assertIn(date, repr(date_tz))
- self.assertIn(tz, repr(date_tz))
- self.assertNotIn(freq, repr(date_tz))
- self.assertEqual(date_tz, eval(repr(date_tz)))
-
- date_freq = Timestamp(date, offset=freq)
- self.assertIn(date, repr(date_freq))
- self.assertNotIn(tz, repr(date_freq))
- self.assertIn(freq, repr(date_freq))
- self.assertEqual(date_freq, eval(repr(date_freq)))
-
- date_tz_freq = Timestamp(date, tz=tz, offset=freq)
- self.assertIn(date, repr(date_tz_freq))
- self.assertIn(tz, repr(date_tz_freq))
- self.assertIn(freq, repr(date_tz_freq))
- self.assertEqual(date_tz_freq, eval(repr(date_tz_freq)))
+ dates = ['2014-03-07', '2014-01-01 09:00', '2014-01-01 00:00:00.000000001']
+ timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
+ if _np_version_under1p7:
+ freqs = ['D', 'M', 'S']
+ else:
+ freqs = ['D', 'M', 'S', 'N']
+
+ for date in dates:
+ for tz in timezones:
+ for freq in freqs:
+ # avoid to match with timezone name
+ freq_repr = "'{0}'".format(freq)
+ if tz.startswith('dateutil'):
+ tz_repr = tz.replace('dateutil', '')
+ else:
+ tz_repr = tz
+
+ date_only = Timestamp(date)
+ self.assertIn(date, repr(date_only))
+ self.assertNotIn(tz_repr, repr(date_only))
+ self.assertNotIn(freq_repr, repr(date_only))
+ self.assertEqual(date_only, eval(repr(date_only)))
+
+ date_tz = Timestamp(date, tz=tz)
+ self.assertIn(date, repr(date_tz))
+ self.assertIn(tz_repr, repr(date_tz))
+ self.assertNotIn(freq_repr, repr(date_tz))
+ self.assertEqual(date_tz, eval(repr(date_tz)))
+
+ date_freq = Timestamp(date, offset=freq)
+ self.assertIn(date, repr(date_freq))
+ self.assertNotIn(tz_repr, repr(date_freq))
+ self.assertIn(freq_repr, repr(date_freq))
+ self.assertEqual(date_freq, eval(repr(date_freq)))
+
+ date_tz_freq = Timestamp(date, tz=tz, offset=freq)
+ self.assertIn(date, repr(date_tz_freq))
+ self.assertIn(tz_repr, repr(date_tz_freq))
+ self.assertIn(freq_repr, repr(date_tz_freq))
+ self.assertEqual(date_tz_freq, eval(repr(date_tz_freq)))
# this can cause the tz field to be populated, but it's redundant to information in the datestring
+ tm._skip_if_no_pytz()
+ import pytz
date_with_utc_offset = Timestamp('2014-03-13 00:00:00-0400', tz=None)
self.assertIn('2014-03-13 00:00:00-0400', repr(date_with_utc_offset))
self.assertNotIn('tzoffset', repr(date_with_utc_offset))
- self.assertEqual(date_with_utc_offset, eval(repr(date_with_utc_offset)))
+ self.assertIn('pytz.FixedOffset(-240)', repr(date_with_utc_offset))
+ expr = repr(date_with_utc_offset).replace("'pytz.FixedOffset(-240)'",
+ 'pytz.FixedOffset(-240)')
+ self.assertEqual(date_with_utc_offset, eval(expr))
def test_bounds_with_different_units(self):
out_of_bounds_dates = (
@@ -314,8 +454,24 @@ def test_timedelta_ms_arithmetic(self):
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
- self.timestamp = Timestamp('2013-05-01 07:15:45.123456789')
- self.assertEqual(self.timestamp.value, 1367392545123456000)
+ ts = Timestamp('2013-05-01 07:15:45.123456789')
+ # GH 7878
+ expected_repr = '2013-05-01 07:15:45.123456789'
+ expected_value = 1367392545123456789
+ self.assertEqual(ts.value, expected_value)
+ self.assertIn(expected_repr, repr(ts))
+
+ ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
+ self.assertEqual(ts.value, expected_value - 9 * 3600 * 1000000000)
+ self.assertIn(expected_repr, repr(ts))
+
+ ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
+ self.assertEqual(ts.value, expected_value)
+ self.assertIn(expected_repr, repr(ts))
+
+ ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
+ self.assertEqual(ts.value, expected_value + 4 * 3600 * 1000000000)
+ self.assertIn(expected_repr, repr(ts))
def test_nanosecond_timestamp(self):
# GH 7610
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 3b1a969e17a18..7084184b7d423 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -217,12 +217,6 @@ class Timestamp(_Timestamp):
cdef _TSObject ts
cdef _Timestamp ts_base
- if util.is_string_object(ts_input):
- try:
- ts_input = parse_date(ts_input)
- except Exception:
- pass
-
ts = convert_to_tsobject(ts_input, tz, unit)
if ts.value == NPY_NAT:
@@ -263,7 +257,7 @@ class Timestamp(_Timestamp):
except:
pass
- tz = ", tz='{0}'".format(zone) if zone is not None and not isinstance(zone, tzoffset) else ""
+ tz = ", tz='{0}'".format(zone) if zone is not None else ""
offset = ", offset='{0}'".format(self.offset.freqstr) if self.offset is not None else ""
return "Timestamp('{stamp}'{tz}{offset})".format(stamp=stamp, tz=tz, offset=offset)
@@ -926,12 +920,41 @@ cdef convert_to_tsobject(object ts, object tz, object unit):
cdef:
_TSObject obj
bint utc_convert = 1
+ int out_local = 0, out_tzoffset = 0
if tz is not None:
tz = maybe_get_tz(tz)
obj = _TSObject()
+ if util.is_string_object(ts):
+ if ts in _nat_strings:
+ ts = NaT
+ else:
+ try:
+ _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset)
+ obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts)
+ _check_dts_bounds(&obj.dts)
+ if out_local == 1:
+ obj.tzinfo = pytz.FixedOffset(out_tzoffset)
+ obj.value = tz_convert_single(obj.value, obj.tzinfo, 'UTC')
+ if tz is None:
+ _check_dts_bounds(&obj.dts)
+ return obj
+ else:
+ # Keep the converter same as PyDateTime's
+ ts = Timestamp(obj.value, tz=obj.tzinfo)
+ else:
+ ts = obj.value
+ if tz is not None:
+ # shift for _localize_tso
+ ts = tz_convert_single(ts, tz, 'UTC')
+ except ValueError:
+ try:
+ ts = parse_datetime_string(ts)
+ except Exception:
+ raise ValueError
+
if ts is None or ts is NaT or ts is np_NaT:
obj.value = NPY_NAT
elif is_datetime64_object(ts):
@@ -954,12 +977,6 @@ cdef convert_to_tsobject(object ts, object tz, object unit):
ts = cast_from_unit(ts,unit)
obj.value = ts
pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
- elif util.is_string_object(ts):
- if ts in _nat_strings:
- obj.value = NPY_NAT
- else:
- _string_to_dts(ts, &obj.dts)
- obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts)
elif PyDateTime_Check(ts):
if tz is not None:
# sort of a temporary hack
@@ -970,6 +987,10 @@ cdef convert_to_tsobject(object ts, object tz, object unit):
obj.value = _pydatetime_to_dts(ts, &obj.dts)
obj.tzinfo = ts.tzinfo
else: #tzoffset
+ try:
+ tz = ts.astimezone(tz).tzinfo
+ except:
+ pass
obj.value = _pydatetime_to_dts(ts, &obj.dts)
ts_offset = _get_utcoffset(ts.tzinfo, ts)
obj.value -= _delta_to_nanoseconds(ts_offset)
@@ -979,10 +1000,7 @@ cdef convert_to_tsobject(object ts, object tz, object unit):
PANDAS_FR_ns, &obj.dts)
obj.tzinfo = tz
elif not _is_utc(tz):
- try:
- ts = tz.localize(ts)
- except AttributeError:
- ts = ts.replace(tzinfo=tz)
+ ts = _localize_pydatetime(ts, tz)
obj.value = _pydatetime_to_dts(ts, &obj.dts)
obj.tzinfo = ts.tzinfo
else:
@@ -1071,14 +1089,11 @@ def _localize_pydatetime(object dt, object tz):
return dt.tz_localize(tz)
elif tz == 'UTC' or tz is UTC:
return UTC.localize(dt)
-
- elif _treat_tz_as_pytz(tz):
- # datetime.replace may return incorrect result in pytz
+ try:
+ # datetime.replace with pytz may be incorrect result
return tz.localize(dt)
- elif _treat_tz_as_dateutil(tz):
+ except AttributeError:
return dt.replace(tzinfo=tz)
- else:
- raise ValueError(type(tz), tz)
def get_timezone(tz):
@@ -1239,6 +1254,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
bint utc_convert = bool(utc), seen_integer=0, seen_datetime=0
_TSObject _ts
int64_t m = cast_from_unit(None,unit)
+ int out_local = 0, out_tzoffset = 0
try:
result = np.empty(n, dtype='M8[ns]')
@@ -1321,9 +1337,12 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
iresult[i] = iNaT
continue
- _string_to_dts(val, &dts)
- iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns,
- &dts)
+ _string_to_dts(val, &dts, &out_local, &out_tzoffset)
+ value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ if out_local == 1:
+ tz = pytz.FixedOffset(out_tzoffset)
+ value = tz_convert_single(value, tz, 'UTC')
+ iresult[i] = value
_check_dts_bounds(&dts)
except ValueError:
try:
@@ -2867,14 +2886,6 @@ cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts):
return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts)
-cdef inline void m8_populate_tsobject(int64_t stamp, _TSObject tso, object tz):
- tso.value = stamp
- pandas_datetime_to_datetimestruct(tso.value, PANDAS_FR_ns, &tso.dts)
-
- if tz is not None:
- _localize_tso(tso, tz)
-
-
def dates_normalized(ndarray[int64_t] stamps, tz=None):
cdef:
Py_ssize_t i, n = len(stamps)
| Fixes 2 problems related to `Timestamp` string parsing:
- `Timestamp` parsing results incorrect if input contains offset string (Closes #7833).
NOTE: Also modified `Timestamp.__repr__` to display fixed timezone info, because this can be either `pytz` or `dateutil`.
```
# Result after the fix
# If string contains offset, it will be parsed using fixed timezone offset. Following results in 2013-11-01 05:00:00 in UTC (There is some existing tests checking this behaviour).
repr(pd.Timestamp('2013-11-01 00:00:00-0500'))
# Timestamp('2013-11-01 00:00:00-0500', tz='pytz.FixedOffset(-300)')
# If tz is specified simultaneously, it should convert the timezone.
repr(pd.Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago'))
# Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
repr(pd.Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo'))
# Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')
```
- `Timestamp` loses `nanosecond` info when parsing from `str`. (Closes #7878)
```
# Result after the fix
repr(pd.Timestamp('2001-01-01 00:00:00.000000005'))
# Timestamp('2001-01-01 00:00:00.000000005')
repr(pd.Timestamp('2001-01-01 00:00:00.000000005', tz='US/Eastern'))
# Timestamp('2001-01-01 00:00:00.000000005-0500', tz='US/Eastern')
repr(pd.Timestamp('2001-01-01 00:00:00.000001234+09:00'))
# Timestamp('2001-01-01 00:00:00.000001234+0900', tz='pytz.FixedOffset(540)')
repr(pd.Timestamp('2001-01-01 00:00:00.000001234+09:00', tz='Asia/Tokyo'))
# Timestamp('2001-01-01 00:00:00.000001234+0900', tz='Asia/Tokyo')
# Because non ISO 8601 format is parsed by dateutil, nanosecond will lost (no change)
repr(pd.Timestamp('01-01-01 00:00:00.000000001'))
# Timestamp('2001-01-01 00:00:00')
repr(pd.Timestamp('01-01-01 00:00:00.000000001+9:00'))
# Timestamp('2001-01-01 00:00:00+0900', tz='tzoffset(None, 32400)')
```
CC @cyber42 @ischwabacher @rockg @adamgreenhall
| https://api.github.com/repos/pandas-dev/pandas/pulls/7907 | 2014-08-03T12:45:12Z | 2014-08-11T12:17:58Z | 2014-08-11T12:17:58Z | 2014-08-12T22:11:41Z |
DOC: grammar fixes in docs | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 4f01fe4f4b278..717c651d5935f 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -293,7 +293,7 @@
'import numpy as np',
'import pandas as pd',
# This ensures correct rendering on system with console encoding != utf8
- # (windows). It forces pandas to encode it's output reprs using utf8
+ # (windows). It forces pandas to encode its output reprs using utf8
# whereever the docs are built. The docs' target is the browser, not
# the console, so this is fine.
'pd.options.display.encoding="utf8"'
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 9c360f7ca7697..c32796cf082d4 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -121,7 +121,7 @@
: boolean
Whether to print out the full DataFrame repr for wide DataFrames across
multiple lines, `max_columns` is still respected, but the output will
- wrap-around across multiple "pages" if it's width exceeds `display.width`.
+ wrap-around across multiple "pages" if its width exceeds `display.width`.
"""
pc_show_dimensions_doc = """
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4b8d13ce30355..57ddefcc106d3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -471,7 +471,7 @@ def _repr_html_(self):
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
- # qtconsole doesn't report it's line width, and also
+ # qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 93be135e9ff40..8cfa0e25b789f 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -971,7 +971,7 @@ def tail(self, n=5):
def _cumcount_array(self, arr=None, **kwargs):
"""
- arr is where cumcount gets it's values from
+ arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py
index e4c24b8c3bcbb..099761f38bb44 100755
--- a/scripts/find_commits_touching_func.py
+++ b/scripts/find_commits_touching_func.py
@@ -178,7 +178,7 @@ def main():
print("""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
WARNING: this script uses git clean -f, running it on a repo with untracked files.
-It's recommended that you make a fresh clone and run from it's root directory.
+It's recommended that you make a fresh clone and run from its root directory.
You must specify the -y argument to ignore this warning.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
""")
diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py
index 66e50269f00c6..8cf832ade2813 100755
--- a/vb_suite/test_perf.py
+++ b/vb_suite/test_perf.py
@@ -36,7 +36,7 @@
# in some cases then running vbench directly (think perf bisection).
#
# *please*, when you modify this script for whatever reason,
-# make sure you do not break it's functionality when running under older
+# make sure you do not break its functionality when running under older
# pandas versions.
# Note that depreaction warnings are turned off in main(), so there's
# no need to change the actual code to supress such warnings.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7906 | 2014-08-02T21:06:10Z | 2014-08-02T22:47:36Z | 2014-08-02T22:47:36Z | 2015-04-29T15:34:09Z | |
ENH: Add BusinessHour offset | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index ac3302ae40fa7..a78fcf5224fc2 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -4,7 +4,7 @@
.. ipython:: python
:suppress:
- from datetime import datetime, timedelta
+ from datetime import datetime, timedelta, time
import numpy as np
np.random.seed(123456)
from pandas import *
@@ -482,6 +482,7 @@ frequency increment. Specific offset logic like "month", "business day", or
BYearEnd, "business year end"
BYearBegin, "business year begin"
FY5253, "retail (aka 52-53 week) year"
+ BusinessHour, "business hour"
Hour, "one hour"
Minute, "one minute"
Second, "one second"
@@ -667,6 +668,102 @@ in the usual way.
have to change to fix the timezone issues, the behaviour of the
``CustomBusinessDay`` class may have to change in future versions.
+.. _timeseries.businesshour:
+
+Business Hour
+~~~~~~~~~~~~~
+
+The ``BusinessHour`` class provides a business hour representation on ``BusinessDay``,
+allowing to use specific start and end times.
+
+By default, ``BusinessHour`` uses 9:00 - 17:00 as business hours.
+Adding ``BusinessHour`` will increment ``Timestamp`` by hourly.
+If target ``Timestamp`` is out of business hours, move to the next business hour then increment it.
+If the result exceeds the business hours end, remaining is added to the next business day.
+
+.. ipython:: python
+
+ bh = BusinessHour()
+ bh
+
+ # 2014-08-01 is Friday
+ Timestamp('2014-08-01 10:00').weekday()
+ Timestamp('2014-08-01 10:00') + bh
+
+ # Below example is the same as Timestamp('2014-08-01 09:00') + bh
+ Timestamp('2014-08-01 08:00') + bh
+
+ # If the results is on the end time, move to the next business day
+ Timestamp('2014-08-01 16:00') + bh
+
+ # Remainings are added to the next day
+ Timestamp('2014-08-01 16:30') + bh
+
+ # Adding 2 business hours
+ Timestamp('2014-08-01 10:00') + BusinessHour(2)
+
+ # Subtracting 3 business hours
+ Timestamp('2014-08-01 10:00') + BusinessHour(-3)
+
+Also, you can specify ``start`` and ``end`` time by keywords.
+Argument must be ``str`` which has ``hour:minute`` representation or ``datetime.time`` instance.
+Specifying seconds, microseconds and nanoseconds as business hour results in ``ValueError``.
+
+.. ipython:: python
+
+ bh = BusinessHour(start='11:00', end=time(20, 0))
+ bh
+
+ Timestamp('2014-08-01 13:00') + bh
+ Timestamp('2014-08-01 09:00') + bh
+ Timestamp('2014-08-01 18:00') + bh
+
+Passing ``start`` time later than ``end`` represents midnight business hour.
+In this case, business hour exceeds midnight and overlap to the next day.
+Valid business hours are distinguished by whether it started from valid ``BusinessDay``.
+
+.. ipython:: python
+
+ bh = BusinessHour(start='17:00', end='09:00')
+ bh
+
+ Timestamp('2014-08-01 17:00') + bh
+ Timestamp('2014-08-01 23:00') + bh
+
+ # Although 2014-08-02 is Satuaday,
+ # it is valid because it starts from 08-01 (Friday).
+ Timestamp('2014-08-02 04:00') + bh
+
+ # Although 2014-08-04 is Monday,
+ # it is out of business hours because it starts from 08-03 (Sunday).
+ Timestamp('2014-08-04 04:00') + bh
+
+Applying ``BusinessHour.rollforward`` and ``rollback`` to out of business hours results in
+the next business hour start or previous day's end. Different from other offsets, ``BusinessHour.rollforward``
+may output different results from ``apply`` by definition.
+
+This is because one day's business hour end is equal to next day's business hour start. For example,
+under the default business hours (9:00 - 17:00), there is no gap (0 minutes) between ``2014-08-01 17:00`` and
+``2014-08-04 09:00``.
+
+.. ipython:: python
+
+ # This adjusts a Timestamp to business hour edge
+ BusinessHour().rollback(Timestamp('2014-08-02 15:00'))
+ BusinessHour().rollforward(Timestamp('2014-08-02 15:00'))
+
+ # It is the same as BusinessHour().apply(Timestamp('2014-08-01 17:00')).
+ # And it is the same as BusinessHour().apply(Timestamp('2014-08-04 09:00'))
+ BusinessHour().apply(Timestamp('2014-08-02 15:00'))
+
+ # BusinessDay results (for reference)
+ BusinessHour().rollforward(Timestamp('2014-08-02'))
+
+ # It is the same as BusinessDay().apply(Timestamp('2014-08-01'))
+ # The result is the same as rollworward because BusinessDay never overlap.
+ BusinessHour().apply(Timestamp('2014-08-02'))
+
+
Offset Aliases
~~~~~~~~~~~~~~
@@ -696,6 +793,7 @@ frequencies. We will refer to these aliases as *offset aliases*
"BA", "business year end frequency"
"AS", "year start frequency"
"BAS", "business year start frequency"
+ "BH", "business hour frequency"
"H", "hourly frequency"
"T", "minutely frequency"
"S", "secondly frequency"
diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt
index 3d5c95aee2e92..932a5ae86b219 100755
--- a/doc/source/whatsnew/v0.16.1.txt
+++ b/doc/source/whatsnew/v0.16.1.txt
@@ -11,16 +11,25 @@ Highlights include:
- Support for a ``CategoricalIndex``, a category based index, see :ref:`here <whatsnew_0161.enhancements.categoricalindex>`
+- ``BusinessHour`` offset is supported, see :ref:`here <timeseries.businesshour>`
+
.. contents:: What's new in v0.16.1
:local:
:backlinks: none
-
.. _whatsnew_0161.enhancements:
Enhancements
~~~~~~~~~~~~
+- ``BusinessHour`` offset is now supported, which represents business hours starting from 09:00 - 17:00 on ``BusinessDay`` by default. See :ref:`Here <timeseries.businesshour>` for details. (:issue:`7905`)
+
+ .. ipython:: python
+
+ Timestamp('2014-08-01 09:00') + BusinessHour()
+ Timestamp('2014-08-01 07:00') + BusinessHour()
+ Timestamp('2014-08-01 16:30') + BusinessHour()
+
- Added ``StringMethods.capitalize()`` and ``swapcase`` which behave as the same as standard ``str`` (:issue:`9766`)
- Added ``StringMethods`` (.str accessor) to ``Index`` (:issue:`9068`)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index b220e03fdb327..eff2a36e823d8 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -742,7 +742,7 @@ def __init__(self, index, warn=True):
@cache_readonly
def deltas(self):
return tslib.unique_deltas(self.values)
-
+
@cache_readonly
def deltas_asi8(self):
return tslib.unique_deltas(self.index.asi8)
@@ -750,7 +750,7 @@ def deltas_asi8(self):
@cache_readonly
def is_unique(self):
return len(self.deltas) == 1
-
+
@cache_readonly
def is_unique_asi8(self):
return len(self.deltas_asi8) == 1
@@ -763,10 +763,13 @@ def get_freq(self):
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
else:
- # Possibly intraday frequency. Here we use the
+ # Business hourly, maybe. 17: one day / 65: one weekend
+ if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
+ return 'BH'
+ # Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
- if not self.is_unique_asi8:
+ elif not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
@@ -792,6 +795,10 @@ def get_freq(self):
def day_deltas(self):
return [x / _ONE_DAY for x in self.deltas]
+ @cache_readonly
+ def hour_deltas(self):
+ return [x / _ONE_HOUR for x in self.deltas]
+
@cache_readonly
def fields(self):
return tslib.build_field_sarray(self.values)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index cb6bd2fb2b250..67e27bbffbf73 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -16,6 +16,7 @@
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd','CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
+ 'BusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
@@ -404,10 +405,6 @@ def __repr__(self):
if hasattr(self, '_named'):
return self._named
className = getattr(self, '_outputName', self.__class__.__name__)
- attrs = []
-
- if self.offset:
- attrs = ['offset=%s' % repr(self.offset)]
if abs(self.n) != 1:
plural = 's'
@@ -418,10 +415,17 @@ def __repr__(self):
if self.n != 1:
n_str = "%s * " % self.n
- out = '<%s' % n_str + className + plural
+ out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
+ return out
+
+ def _repr_attrs(self):
+ if self.offset:
+ attrs = ['offset=%s' % repr(self.offset)]
+ else:
+ attrs = None
+ out = ''
if attrs:
out += ': ' + ', '.join(attrs)
- out += '>'
return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
@@ -531,6 +535,234 @@ def onOffset(self, dt):
return dt.weekday() < 5
+class BusinessHour(BusinessMixin, SingleConstructorOffset):
+ """
+ DateOffset subclass representing possibly n business days
+ """
+ _prefix = 'BH'
+ _anchor = 0
+
+ def __init__(self, n=1, normalize=False, **kwds):
+ self.n = int(n)
+ self.normalize = normalize
+
+ # must be validated here to equality check
+ kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
+ kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
+ self.kwds = kwds
+ self.offset = kwds.get('offset', timedelta(0))
+ self.start = kwds.get('start', '09:00')
+ self.end = kwds.get('end', '17:00')
+
+ # used for moving to next businessday
+ if self.n >= 0:
+ self.next_bday = BusinessDay(n=1)
+ else:
+ self.next_bday = BusinessDay(n=-1)
+
+ def _validate_time(self, t_input):
+ from datetime import time as dt_time
+ import time
+ if isinstance(t_input, compat.string_types):
+ try:
+ t = time.strptime(t_input, '%H:%M')
+ return dt_time(hour=t.tm_hour, minute=t.tm_min)
+ except ValueError:
+ raise ValueError("time data must match '%H:%M' format")
+ elif isinstance(t_input, dt_time):
+ if t_input.second != 0 or t_input.microsecond != 0:
+ raise ValueError("time data must be specified only with hour and minute")
+ return t_input
+ else:
+ raise ValueError("time data must be string or datetime.time")
+
+ def _get_daytime_flag(self):
+ if self.start == self.end:
+ raise ValueError('start and end must not be the same')
+ elif self.start < self.end:
+ return True
+ else:
+ return False
+
+ def _repr_attrs(self):
+ out = super(BusinessHour, self)._repr_attrs()
+ attrs = ['BH=%s-%s' % (self.start.strftime('%H:%M'),
+ self.end.strftime('%H:%M'))]
+ out += ': ' + ', '.join(attrs)
+ return out
+
+ def _next_opening_time(self, other):
+ """
+ If n is positive, return tomorrow's business day opening time.
+ Otherwise yesterday's business day's opening time.
+
+ Opening time always locates on BusinessDay.
+ Otherwise, closing time may not if business hour extends over midnight.
+ """
+ if not self.next_bday.onOffset(other):
+ other = other + self.next_bday
+ else:
+ if self.n >= 0 and self.start < other.time():
+ other = other + self.next_bday
+ elif self.n < 0 and other.time() < self.start:
+ other = other + self.next_bday
+ return datetime(other.year, other.month, other.day,
+ self.start.hour, self.start.minute)
+
+ def _prev_opening_time(self, other):
+ """
+ If n is positive, return yesterday's business day opening time.
+ Otherwise yesterday business day's opening time.
+ """
+ if not self.next_bday.onOffset(other):
+ other = other - self.next_bday
+ else:
+ if self.n >= 0 and other.time() < self.start:
+ other = other - self.next_bday
+ elif self.n < 0 and other.time() > self.start:
+ other = other - self.next_bday
+ return datetime(other.year, other.month, other.day,
+ self.start.hour, self.start.minute)
+
+ def _get_business_hours_by_sec(self):
+ """
+ Return business hours in a day by seconds.
+ """
+ if self._get_daytime_flag():
+ # create dummy datetime to calcurate businesshours in a day
+ dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
+ until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
+ return tslib.tot_seconds(until - dtstart)
+ else:
+ self.daytime = False
+ dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
+ until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
+ return tslib.tot_seconds(until - dtstart)
+
+ @apply_wraps
+ def rollback(self, dt):
+ """Roll provided date backward to next offset only if not on offset"""
+ if not self.onOffset(dt):
+ businesshours = self._get_business_hours_by_sec()
+ if self.n >= 0:
+ dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
+ else:
+ dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
+ return dt
+
+ @apply_wraps
+ def rollforward(self, dt):
+ """Roll provided date forward to next offset only if not on offset"""
+ if not self.onOffset(dt):
+ if self.n >= 0:
+ return self._next_opening_time(dt)
+ else:
+ return self._prev_opening_time(dt)
+ return dt
+
+ @apply_wraps
+ def apply(self, other):
+ # calcurate here because offset is not immutable
+ daytime = self._get_daytime_flag()
+ businesshours = self._get_business_hours_by_sec()
+ bhdelta = timedelta(seconds=businesshours)
+
+ if isinstance(other, datetime):
+ # used for detecting edge condition
+ nanosecond = getattr(other, 'nanosecond', 0)
+ # reset timezone and nanosecond
+ # other may be a Timestamp, thus not use replace
+ other = datetime(other.year, other.month, other.day,
+ other.hour, other.minute,
+ other.second, other.microsecond)
+ n = self.n
+ if n >= 0:
+ if (other.time() == self.end or
+ not self._onOffset(other, businesshours)):
+ other = self._next_opening_time(other)
+ else:
+ if other.time() == self.start:
+ # adjustment to move to previous business day
+ other = other - timedelta(seconds=1)
+ if not self._onOffset(other, businesshours):
+ other = self._next_opening_time(other)
+ other = other + bhdelta
+
+ bd, r = divmod(abs(n * 60), businesshours // 60)
+ if n < 0:
+ bd, r = -bd, -r
+
+ if bd != 0:
+ skip_bd = BusinessDay(n=bd)
+ # midnight busienss hour may not on BusinessDay
+ if not self.next_bday.onOffset(other):
+ remain = other - self._prev_opening_time(other)
+ other = self._next_opening_time(other + skip_bd) + remain
+ else:
+ other = other + skip_bd
+
+ hours, minutes = divmod(r, 60)
+ result = other + timedelta(hours=hours, minutes=minutes)
+
+ # because of previous adjustment, time will be larger than start
+ if ((daytime and (result.time() < self.start or self.end < result.time())) or
+ not daytime and (self.end < result.time() < self.start)):
+ if n >= 0:
+ bday_edge = self._prev_opening_time(other)
+ bday_edge = bday_edge + bhdelta
+ # calcurate remainder
+ bday_remain = result - bday_edge
+ result = self._next_opening_time(other)
+ result += bday_remain
+ else:
+ bday_edge = self._next_opening_time(other)
+ bday_remain = result - bday_edge
+ result = self._next_opening_time(result) + bhdelta
+ result += bday_remain
+ # edge handling
+ if n >= 0:
+ if result.time() == self.end:
+ result = self._next_opening_time(result)
+ else:
+ if result.time() == self.start and nanosecond == 0:
+ # adjustment to move to previous business day
+ result = self._next_opening_time(result- timedelta(seconds=1)) +bhdelta
+
+ return result
+ else:
+ raise ApplyTypeError('Only know how to combine business hour with ')
+
+ def onOffset(self, dt):
+ if self.normalize and not _is_normalized(dt):
+ return False
+
+ if dt.tzinfo is not None:
+ dt = datetime(dt.year, dt.month, dt.day, dt.hour,
+ dt.minute, dt.second, dt.microsecond)
+ # Valid BH can be on the different BusinessDay during midnight
+ # Distinguish by the time spent from previous opening time
+ businesshours = self._get_business_hours_by_sec()
+ return self._onOffset(dt, businesshours)
+
+ def _onOffset(self, dt, businesshours):
+ """
+ Slight speedups using calcurated values
+ """
+ # if self.normalize and not _is_normalized(dt):
+ # return False
+ # Valid BH can be on the different BusinessDay during midnight
+ # Distinguish by the time spent from previous opening time
+ if self.n >= 0:
+ op = self._prev_opening_time(dt)
+ else:
+ op = self._next_opening_time(dt)
+ span = tslib.tot_seconds(dt - op)
+ if span <= businesshours:
+ return True
+ else:
+ return False
+
+
class CustomBusinessDay(BusinessDay):
"""
**EXPERIMENTAL** DateOffset subclass representing possibly n business days
@@ -2250,6 +2482,7 @@ def generate_range(start=None, end=None, periods=None,
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
+ BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index 965c198eb7c95..2f2d249539b81 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -196,6 +196,7 @@ def _check_tick(self, base_delta, code):
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
+
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
@@ -324,10 +325,40 @@ def test_infer_freq_tz_transition(self):
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
print(idx)
self.assertEqual(idx.inferred_freq, freq)
-
+
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
+ def test_infer_freq_businesshour(self):
+ # GH 7905
+ idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
+ '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
+ # hourly freq in a day must result in 'H'
+ self.assertEqual(idx.inferred_freq, 'H')
+
+ idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
+ '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
+ '2014-07-01 15:00', '2014-07-01 16:00',
+ '2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
+ self.assertEqual(idx.inferred_freq, 'BH')
+
+ idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
+ '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
+ '2014-07-04 15:00', '2014-07-04 16:00',
+ '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
+ self.assertEqual(idx.inferred_freq, 'BH')
+
+ idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
+ '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
+ '2014-07-04 15:00', '2014-07-04 16:00',
+ '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
+ '2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
+ '2014-07-07 15:00', '2014-07-07 16:00',
+ '2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
+ '2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
+ '2014-07-08 15:00', '2014-07-08 16:00'])
+ self.assertEqual(idx.inferred_freq, 'BH')
+
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 0793508b4912c..a051560617604 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -10,7 +10,7 @@
import numpy as np
from pandas.core.datetools import (
- bday, BDay, CDay, BQuarterEnd, BMonthEnd,
+ bday, BDay, CDay, BQuarterEnd, BMonthEnd, BusinessHour,
CBMonthEnd, CBMonthBegin,
BYearEnd, MonthEnd, MonthBegin, BYearBegin, CustomBusinessDay,
QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week,
@@ -23,7 +23,6 @@
from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache, date_range
from pandas.tseries.tools import parse_time_string, DateParseError
import pandas.tseries.offsets as offsets
-
from pandas.io.pickle import read_pickle
from pandas.tslib import NaT, Timestamp, Timedelta
import pandas.tslib as tslib
@@ -133,7 +132,11 @@ def test_apply_out_of_range(self):
# try to create an out-of-bounds result timestamp; if we can't create the offset
# skip
try:
- offset = self._get_offset(self._offset, value=10000)
+ if self._offset is BusinessHour:
+ # Using 10000 in BusinessHour fails in tz check because of DST difference
+ offset = self._get_offset(self._offset, value=100000)
+ else:
+ offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
self.assertIsInstance(result, datetime)
@@ -179,6 +182,7 @@ def setUp(self):
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
+ 'BusinessHour': Timestamp('2011-01-03 10:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
@@ -278,6 +282,8 @@ def test_rollforward(self):
for n in no_changes:
expecteds[n] = Timestamp('2011/01/01 09:00')
+ expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
+
# but be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
@@ -321,6 +327,7 @@ def test_rollback(self):
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
+ 'BusinessHour': Timestamp('2010-12-31 17:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
@@ -371,6 +378,10 @@ def test_onOffset(self):
offset_n = self._get_offset(offset, normalize=True)
self.assertFalse(offset_n.onOffset(dt))
+ if offset is BusinessHour:
+ # In default BusinessHour (9:00-17:00), normalized time
+ # cannot be in business hour range
+ continue
date = datetime(dt.year, dt.month, dt.day)
self.assertTrue(offset_n.onOffset(date))
@@ -642,6 +653,593 @@ def test_offsets_compare_equal(self):
self.assertFalse(offset1 != offset2)
+class TestBusinessHour(Base):
+ _multiprocess_can_split_ = True
+ _offset = BusinessHour
+
+ def setUp(self):
+ self.d = datetime(2014, 7, 1, 10, 00)
+
+ self.offset1 = BusinessHour()
+ self.offset2 = BusinessHour(n=3)
+
+ self.offset3 = BusinessHour(n=-1)
+ self.offset4 = BusinessHour(n=-4)
+
+ from datetime import time as dt_time
+ self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
+ self.offset6 = BusinessHour(start='20:00', end='05:00')
+ self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))
+
+ def test_constructor_errors(self):
+ from datetime import time as dt_time
+ with tm.assertRaises(ValueError):
+ BusinessHour(start=dt_time(11, 0, 5))
+ with tm.assertRaises(ValueError):
+ BusinessHour(start='AAA')
+ with tm.assertRaises(ValueError):
+ BusinessHour(start='14:00:05')
+
+ def test_different_normalize_equals(self):
+ # equivalent in this special case
+ offset = self._offset()
+ offset2 = self._offset()
+ offset2.normalize = True
+ self.assertEqual(offset, offset2)
+
+ def test_repr(self):
+ self.assertEqual(repr(self.offset1), '<BusinessHour: BH=09:00-17:00>')
+ self.assertEqual(repr(self.offset2), '<3 * BusinessHours: BH=09:00-17:00>')
+ self.assertEqual(repr(self.offset3), '<-1 * BusinessHour: BH=09:00-17:00>')
+ self.assertEqual(repr(self.offset4), '<-4 * BusinessHours: BH=09:00-17:00>')
+
+ self.assertEqual(repr(self.offset5), '<BusinessHour: BH=11:00-14:30>')
+ self.assertEqual(repr(self.offset6), '<BusinessHour: BH=20:00-05:00>')
+ self.assertEqual(repr(self.offset7), '<-2 * BusinessHours: BH=21:30-06:30>')
+
+ def test_with_offset(self):
+ expected = Timestamp('2014-07-01 13:00')
+
+ self.assertEqual(self.d + BusinessHour() * 3, expected)
+ self.assertEqual(self.d + BusinessHour(n=3), expected)
+
+ def testEQ(self):
+ for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
+ self.assertEqual(offset, offset)
+
+ self.assertNotEqual(BusinessHour(), BusinessHour(-1))
+ self.assertEqual(BusinessHour(start='09:00'), BusinessHour())
+ self.assertNotEqual(BusinessHour(start='09:00'), BusinessHour(start='09:01'))
+ self.assertNotEqual(BusinessHour(start='09:00', end='17:00'),
+ BusinessHour(start='17:00', end='09:01'))
+
+ def test_hash(self):
+ self.assertEqual(hash(self.offset2), hash(self.offset2))
+
+ def testCall(self):
+ self.assertEqual(self.offset1(self.d), datetime(2014, 7, 1, 11))
+ self.assertEqual(self.offset2(self.d), datetime(2014, 7, 1, 13))
+ self.assertEqual(self.offset3(self.d), datetime(2014, 6, 30, 17))
+ self.assertEqual(self.offset4(self.d), datetime(2014, 6, 30, 14))
+
+ def testRAdd(self):
+ self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
+
+ def testSub(self):
+ off = self.offset2
+ self.assertRaises(Exception, off.__sub__, self.d)
+ self.assertEqual(2 * off - off, off)
+
+ self.assertEqual(self.d - self.offset2, self.d + self._offset(-3))
+
+ def testRSub(self):
+ self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
+
+ def testMult1(self):
+ self.assertEqual(self.d + 5 * self.offset1, self.d + self._offset(5))
+
+ def testMult2(self):
+ self.assertEqual(self.d + (-3 * self._offset(-2)),
+ self.d + self._offset(6))
+
+ def testRollback1(self):
+ self.assertEqual(self.offset1.rollback(self.d), self.d)
+ self.assertEqual(self.offset2.rollback(self.d), self.d)
+ self.assertEqual(self.offset3.rollback(self.d), self.d)
+ self.assertEqual(self.offset4.rollback(self.d), self.d)
+ self.assertEqual(self.offset5.rollback(self.d), datetime(2014, 6, 30, 14, 30))
+ self.assertEqual(self.offset6.rollback(self.d), datetime(2014, 7, 1, 5, 0))
+ self.assertEqual(self.offset7.rollback(self.d), datetime(2014, 7, 1, 6, 30))
+
+ d = datetime(2014, 7, 1, 0)
+ self.assertEqual(self.offset1.rollback(d), datetime(2014, 6, 30, 17))
+ self.assertEqual(self.offset2.rollback(d), datetime(2014, 6, 30, 17))
+ self.assertEqual(self.offset3.rollback(d), datetime(2014, 6, 30, 17))
+ self.assertEqual(self.offset4.rollback(d), datetime(2014, 6, 30, 17))
+ self.assertEqual(self.offset5.rollback(d), datetime(2014, 6, 30, 14, 30))
+ self.assertEqual(self.offset6.rollback(d), d)
+ self.assertEqual(self.offset7.rollback(d), d)
+
+ self.assertEqual(self._offset(5).rollback(self.d), self.d)
+
+ def testRollback2(self):
+ self.assertEqual(self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)),
+ datetime(2014, 7, 4, 17, 0))
+
+ def testRollforward1(self):
+ self.assertEqual(self.offset1.rollforward(self.d), self.d)
+ self.assertEqual(self.offset2.rollforward(self.d), self.d)
+ self.assertEqual(self.offset3.rollforward(self.d), self.d)
+ self.assertEqual(self.offset4.rollforward(self.d), self.d)
+ self.assertEqual(self.offset5.rollforward(self.d), datetime(2014, 7, 1, 11, 0))
+ self.assertEqual(self.offset6.rollforward(self.d), datetime(2014, 7, 1, 20, 0))
+ self.assertEqual(self.offset7.rollforward(self.d), datetime(2014, 7, 1, 21, 30))
+
+ d = datetime(2014, 7, 1, 0)
+ self.assertEqual(self.offset1.rollforward(d), datetime(2014, 7, 1, 9))
+ self.assertEqual(self.offset2.rollforward(d), datetime(2014, 7, 1, 9))
+ self.assertEqual(self.offset3.rollforward(d), datetime(2014, 7, 1, 9))
+ self.assertEqual(self.offset4.rollforward(d), datetime(2014, 7, 1, 9))
+ self.assertEqual(self.offset5.rollforward(d), datetime(2014, 7, 1, 11))
+ self.assertEqual(self.offset6.rollforward(d), d)
+ self.assertEqual(self.offset7.rollforward(d), d)
+
+ self.assertEqual(self._offset(5).rollforward(self.d), self.d)
+
+ def testRollforward2(self):
+ self.assertEqual(self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)),
+ datetime(2014, 7, 7, 9))
+
+ def test_roll_date_object(self):
+ offset = BusinessHour()
+
+ dt = datetime(2014, 7, 6, 15, 0)
+
+ result = offset.rollback(dt)
+ self.assertEqual(result, datetime(2014, 7, 4, 17))
+
+ result = offset.rollforward(dt)
+ self.assertEqual(result, datetime(2014, 7, 7, 9))
+
+ def test_normalize(self):
+ tests = []
+
+ tests.append((BusinessHour(normalize=True),
+ {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
+ datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
+
+ tests.append((BusinessHour(-1, normalize=True),
+ {datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
+ datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
+ datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
+
+ tests.append((BusinessHour(1, normalize=True, start='17:00', end='04:00'),
+ {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
+ datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
+ datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
+ datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
+
+ for offset, cases in tests:
+ for dt, expected in compat.iteritems(cases):
+ self.assertEqual(offset.apply(dt), expected)
+
+ def test_onOffset(self):
+ tests = []
+
+ tests.append((BusinessHour(),
+ {datetime(2014, 7, 1, 9): True,
+ datetime(2014, 7, 1, 8, 59): False,
+ datetime(2014, 7, 1, 8): False,
+ datetime(2014, 7, 1, 17): True,
+ datetime(2014, 7, 1, 17, 1): False,
+ datetime(2014, 7, 1, 18): False,
+ datetime(2014, 7, 5, 9): False,
+ datetime(2014, 7, 6, 12): False}))
+
+ tests.append((BusinessHour(start='10:00', end='15:00'),
+ {datetime(2014, 7, 1, 9): False,
+ datetime(2014, 7, 1, 10): True,
+ datetime(2014, 7, 1, 15): True,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12): False,
+ datetime(2014, 7, 6, 12): False}))
+
+ tests.append((BusinessHour(start='19:00', end='05:00'),
+ {datetime(2014, 7, 1, 9, 0): False,
+ datetime(2014, 7, 1, 10, 0): False,
+ datetime(2014, 7, 1, 15): False,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12, 0): False,
+ datetime(2014, 7, 6, 12, 0): False,
+ datetime(2014, 7, 1, 19, 0): True,
+ datetime(2014, 7, 2, 0, 0): True,
+ datetime(2014, 7, 4, 23): True,
+ datetime(2014, 7, 5, 1): True,
+ datetime(2014, 7, 5, 5, 0): True,
+ datetime(2014, 7, 6, 23, 0): False,
+ datetime(2014, 7, 7, 3, 0): False}))
+
+ for offset, cases in tests:
+ for dt, expected in compat.iteritems(cases):
+ self.assertEqual(offset.onOffset(dt), expected)
+
+ def test_opening_time(self):
+ tests = []
+
+ # opening time should be affected by sign of n, not by n's value and end
+ tests.append(([BusinessHour(), BusinessHour(n=2), BusinessHour(n=4),
+ BusinessHour(end='10:00'), BusinessHour(n=2, end='4:00'),
+ BusinessHour(n=4, end='15:00')],
+ {datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)),
+ # if timestamp is on opening time, next opening time is as it is
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9), datetime(2014, 7, 2, 9)),
+ # 2014-07-05 is saturday
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9), datetime(2014, 7, 7, 9))}))
+
+ tests.append(([BusinessHour(start='11:15'), BusinessHour(n=2, start='11:15'),
+ BusinessHour(n=3, start='11:15'),
+ BusinessHour(start='11:15', end='10:00'),
+ BusinessHour(n=2, start='11:15', end='4:00'),
+ BusinessHour(n=3, start='11:15', end='15:00')],
+ {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15), datetime(2014, 6, 30, 11, 15)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 2, 11, 15)),
+ datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15), datetime(2014, 7, 2, 11, 15)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15), datetime(2014, 7, 3, 11, 15)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15))}))
+
+ tests.append(([BusinessHour(-1), BusinessHour(n=-2), BusinessHour(n=-4),
+ BusinessHour(n=-1, end='10:00'), BusinessHour(n=-2, end='4:00'),
+ BusinessHour(n=-4, end='15:00')],
+ {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9), datetime(2014, 7, 3, 9)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9), datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9), datetime(2014, 7, 8, 9))}))
+
+ tests.append(([BusinessHour(start='17:00', end='05:00'),
+ BusinessHour(n=3, start='17:00', end='03:00')],
+ {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17), datetime(2014, 6, 30, 17)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17), datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 3, 17)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17), datetime(2014, 7, 7, 17)),}))
+
+ tests.append(([BusinessHour(-1, start='17:00', end='05:00'),
+ BusinessHour(n=-2, start='17:00', end='03:00')],
+ {datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17), datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17), datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17), datetime(2014, 7, 8, 17))}))
+
+ for offsets, cases in tests:
+ for offset in offsets:
+ for dt, (exp_next, exp_prev) in compat.iteritems(cases):
+ self.assertEqual(offset._next_opening_time(dt), exp_next)
+ self.assertEqual(offset._prev_opening_time(dt), exp_prev)
+
+ def test_apply(self):
+ tests = []
+
+ tests.append((BusinessHour(),
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
+
+ tests.append((BusinessHour(4),
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
+
+ tests.append((BusinessHour(-1),
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
+
+ tests.append((BusinessHour(-4),
+ {datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
+
+ tests.append((BusinessHour(start='13:00', end='16:00'),
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
+
+ tests.append((BusinessHour(n=2, start='13:00', end='16:00'),
+ {datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
+ datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
+
+ tests.append((BusinessHour(n=-1, start='13:00', end='16:00'),
+ {datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
+ datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
+
+ tests.append((BusinessHour(n=-3, start='10:00', end='16:00'),
+ {datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
+ datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
+
+ tests.append((BusinessHour(start='19:00', end='05:00'),
+ {datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
+ datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
+ datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
+
+ tests.append((BusinessHour(n=-1, start='19:00', end='05:00'),
+ {datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
+ datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
+ datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
+ datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
+
+ for offset, cases in tests:
+ for base, expected in compat.iteritems(cases):
+ assertEq(offset, base, expected)
+
+ def test_apply_large_n(self):
+ tests = []
+
+ tests.append((BusinessHour(40), # A week later
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
+
+ tests.append((BusinessHour(-25), # 3 days and 1 hour before
+ {datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
+ datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
+ datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
+ datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
+ datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
+ datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
+
+ tests.append((BusinessHour(28, start='21:00', end='02:00'), # 5 days and 3 hours later
+ {datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
+ datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
+ datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
+ datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
+
+ for offset, cases in tests:
+ for base, expected in compat.iteritems(cases):
+ assertEq(offset, base, expected)
+
+ def test_apply_nanoseconds(self):
+ tests = []
+
+ tests.append((BusinessHour(),
+ {Timestamp('2014-07-04 15:00') + Nano(5): Timestamp('2014-07-04 16:00') + Nano(5),
+ Timestamp('2014-07-04 16:00') + Nano(5): Timestamp('2014-07-07 09:00') + Nano(5),
+ Timestamp('2014-07-04 16:00') - Nano(5): Timestamp('2014-07-04 17:00') - Nano(5)
+ }))
+
+ tests.append((BusinessHour(-1),
+ {Timestamp('2014-07-04 15:00') + Nano(5): Timestamp('2014-07-04 14:00') + Nano(5),
+ Timestamp('2014-07-04 10:00') + Nano(5): Timestamp('2014-07-04 09:00') + Nano(5),
+ Timestamp('2014-07-04 10:00') - Nano(5): Timestamp('2014-07-03 17:00') - Nano(5),
+ }))
+
+ for offset, cases in tests:
+ for base, expected in compat.iteritems(cases):
+ assertEq(offset, base, expected)
+
+ def test_offsets_compare_equal(self):
+ # root cause of #456
+ offset1 = self._offset()
+ offset2 = self._offset()
+ self.assertFalse(offset1 != offset2)
+
+ def test_datetimeindex(self):
+ idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00', freq='BH')
+ idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH')
+ idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH')
+ expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
+ '2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00',
+ '2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00',
+ '2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00'],
+ freq='BH')
+ for idx in [idx1, idx2, idx3]:
+ tm.assert_index_equal(idx, expected)
+
+ idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45', freq='BH')
+ idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH')
+ idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH')
+
+ expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45', '2014-07-07 09:45',
+ '2014-07-07 10:45', '2014-07-07 11:45', '2014-07-07 12:45',
+ '2014-07-07 13:45', '2014-07-07 14:45', '2014-07-07 15:45',
+ '2014-07-07 16:45', '2014-07-08 09:45', '2014-07-08 10:45'],
+ freq='BH')
+ expected = idx1
+ for idx in [idx1, idx2, idx3]:
+ tm.assert_index_equal(idx, expected)
+
+
class TestCustomBusinessDay(Base):
_multiprocess_can_split_ = True
_offset = CDay
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 964e8634bc1ef..0c4961d80a5f4 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -288,7 +288,7 @@ def test_indexing(self):
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
- freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
+ freqs = ['M', 'Q', 'A', 'D', 'B', 'BH', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
@@ -3347,6 +3347,29 @@ def test_date_range_bms_bug(self):
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
+ def test_date_range_businesshour(self):
+ idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
+ '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
+ '2014-07-04 15:00', '2014-07-04 16:00'], freq='BH')
+ rng = date_range('2014-07-04 09:00', '2014-07-04 16:00', freq='BH')
+ tm.assert_index_equal(idx, rng)
+
+ idx = DatetimeIndex(['2014-07-04 16:00', '2014-07-07 09:00'], freq='BH')
+ rng = date_range('2014-07-04 16:00', '2014-07-07 09:00', freq='BH')
+ tm.assert_index_equal(idx, rng)
+
+ idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
+ '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
+ '2014-07-04 15:00', '2014-07-04 16:00',
+ '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
+ '2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
+ '2014-07-07 15:00', '2014-07-07 16:00',
+ '2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
+ '2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
+ '2014-07-08 15:00', '2014-07-08 16:00'], freq='BH')
+ rng = date_range('2014-07-04 09:00', '2014-07-08 16:00', freq='BH')
+ tm.assert_index_equal(idx, rng)
+
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
| Closes #2469. Create `BusinessHour` offset to specify business hours on `BusinessDay`. Appreciated if any feedbacks regarding bahaviours, keywords, etc.
### Basic
By default, use 9:00 - 17:00 as business hours. Adding `BusinessHour` will increment timestamp by hourly on the days belong to `BusinessDay` offset.
```
bh = pd.offsets.BusinessHour()
repr(bh)
# <BusinessHour: BH=09:00-17:00>
pd.Timestamp('2014-08-01 10:00') + bh
#2014-08-01 11:00:00
pd.Timestamp('2014-08-01 08:00') + bh
#2014-08-01 10:00:00
# move to next Business Day
pd.Timestamp('2014-08-01 19:00') + bh
#2014-08-04 10:00:00
# if exceeds the closing time, remaining are added to next day
pd.Timestamp('2014-08-01 16:45') + bh
#2014-08-04 09:45:00
# if n != 1
pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(2)
#2014-08-01 12:00:00
pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(-3)
#2014-07-31 15:00:00
# Create Index
pd.date_range('2014-07-08 10:00', freq='BH', periods=40)
# [2014-07-08 10:00:00, ..., 2014-07-14 17:00:00]
# Length: 40, Freq: BH, Timezone: None
```
### Specify Opening/Closing Hour
Allow to specify opening/closing time using `start` and `end` keyword by `hour:minute` string or `datetime.time`.
```
bh = pd.offsets.BusinessHour(start='11:00', end=datetime.time(20, 0))
repr(bh)
# <BusinessHour: BH=11:00-20:00>
pd.Timestamp('2014-08-01 10:00') + bh
#2014-08-01 12:00:00
pd.Timestamp('2014-08-01 13:00') + bh
#2014-08-01 14:00:00
pd.Timestamp('2014-08-01 19:00') + bh
#2014-08-04 11:00:00
# if end < start, it will be midnight business hour
bh = pd.offsets.BusinessHour(start='17:00', end='9:00')
repr(bh)
# <BusinessHour: BH=17:00-09:00>
pd.Timestamp('2014-07-31 10:00') + bh
#2014-07-31 18:00:00
pd.Timestamp('2014-07-31 23:00') + bh
#2014-08-01 00:00:00
pd.Timestamp('2014-07-31 01:00') + bh
#2014-07-31 02:00:00
pd.Timestamp('2014-08-01 13:00') + bh
#2014-08-01 18:00:00
```
### Edge cases
`onOffset` should include both edges.
```
pd.offsets.BusinessHour().onOffset(pd.Timestamp('2014-08-01 09:00'))
# True
pd.offsets.BusinessHour().onOffset(pd.Timestamp('2014-08-01 17:00'))
# True
```
If result is on the end-edge of business hour, move to next
```
# not 2014-08-01 17:00
pd.Timestamp('2014-08-01 16:00') + pd.offsets.BusinessHour()
#2014-08-04 09:00
# not 2014-08-01 09:00
pd.Timestamp('2014-08-01 10:00') - pd.offsets.BusinessHour()
#2014-07:31 17:00
```
In case of midnight business hour, distinguish the date by its opening hour
```
#2014-08-02 is Saturday, but handled as valid because its business hour starts on 08-01
pd.Timestamp('2014-08-02 01:00') + pd.offsets.BusinessHour(start='17:00', end='9:00')
#2014-08:02 02:00
```
### Remainings:
- [x] doc
- impl & tests
- [x] Index creation
- [x] Confirm better conditions whether inferred_freq results in `BH` or `H`
- [x] `normalize`
- ~~tsplot~~ (This can't be done without #5148, skipped)
- [x] Correct edge handling for `rollback` and `rollforward`
- [x] handling `nanosecond` (minor issue, but current `apply_wraps` may add preserved `nanosecond` after the closing time)
- tests
- [x] `n` larger than business shours
- [x] Specifying `start` and `end`
- [x] midnight business hours
- [x] Error cases for initialization
- [x] Better fix for `test_apply_out_of_range` timezone check, which fails because of DST difference.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7905 | 2014-08-02T14:56:55Z | 2015-05-05T22:27:56Z | 2015-05-05T22:27:56Z | 2015-05-06T17:38:09Z |
TST/BUG: Rename html encoding test files. | diff --git a/pandas/io/tests/data/html_encoding/chinese_utf16.html b/pandas/io/tests/data/html_encoding/chinese_utf-16.html
similarity index 100%
rename from pandas/io/tests/data/html_encoding/chinese_utf16.html
rename to pandas/io/tests/data/html_encoding/chinese_utf-16.html
diff --git a/pandas/io/tests/data/html_encoding/chinese_utf32.html b/pandas/io/tests/data/html_encoding/chinese_utf-32.html
similarity index 100%
rename from pandas/io/tests/data/html_encoding/chinese_utf32.html
rename to pandas/io/tests/data/html_encoding/chinese_utf-32.html
diff --git a/pandas/io/tests/data/html_encoding/chinese_utf8.html b/pandas/io/tests/data/html_encoding/chinese_utf-8.html
similarity index 100%
rename from pandas/io/tests/data/html_encoding/chinese_utf8.html
rename to pandas/io/tests/data/html_encoding/chinese_utf-8.html
| html test_encode fails on OSX 10.9.4 due to missing dash in `utf32` encoding
string.
The test encoding string is derived from a split of the test file name.
Only utf32 is affected but this commit changes the utf8 and utf16 containing filenames as well for the sake of consistency.
``` ======================================================================
ERROR: test_encode (pandas.io.tests.test_html.TestReadHtmlEncodingLxml)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jmorris/Code/pandas/pandas/io/tests/test_html.py", line 624, in test_encode
from_string = self.read_string(f, encoding).pop()
File "/Users/jmorris/Code/pandas/pandas/io/tests/test_html.py", line 619, in read_string
return self.read_html(fobj.read(), encoding=encoding, index_col=0)
File "/Users/jmorris/Code/pandas/pandas/io/tests/test_html.py", line 607, in read_html
return read_html(*args, **kwargs)
File "/Users/jmorris/Code/pandas/pandas/io/html.py", line 851, in read_html
parse_dates, tupleize_cols, thousands, attrs, encoding)
File "/Users/jmorris/Code/pandas/pandas/io/html.py", line 714, in _parse
raise_with_traceback(retained)
File "/Users/jmorris/Code/pandas/pandas/io/html.py", line 708, in _parse
tables = p.parse_tables()
File "/Users/jmorris/Code/pandas/pandas/io/html.py", line 178, in parse_tables
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
File "/Users/jmorris/Code/pandas/pandas/io/html.py", line 527, in _build_doc
parser = HTMLParser(recover=False, encoding=self.encoding)
File "/Users/jmorris/anaconda/envs/py27/lib/python2.7/site-packages/lxml/html/__init__.py", line 1664, in __init__
super(HTMLParser, self).__init__(**kwargs)
File "parser.pxi", line 1598, in lxml.etree.HTMLParser.__init__ (src/lxml/lxml.etree.c:100669)
File "parser.pxi", line 792, in lxml.etree._BaseParser.__init__ (src/lxml/lxml.etree.c:93393)
LookupError: unknown encoding: 'utf32'
```
Test failure was not resolved by building the most recent lxml with static dependencies and most recent versions of libxml2 and libxslt.
I contacted the lxml mailing list.
http://mailman-mail5.webfaction.com/pipermail/lxml/2014-July/007239.html
It was suggested that problem may be in OSX libiconv though `iconv -l`doesn't show the bug on my system.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7903 | 2014-08-02T09:27:50Z | 2014-08-02T12:59:20Z | 2014-08-02T12:59:20Z | 2014-08-02T22:13:59Z |
BUG: do not assume 0 is in index of potential Series | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index aa6f1ce28a90d..34e9c7144607a 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -327,6 +327,7 @@ Bug Fixes
date-likes incorrectly (:issue:`7762`, :issue:`7032`).
+- Bug in ``Series.str.cat`` with an index which was filtered as to not include the first item (:issue:`7857`)
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3e730942ffc0e..2d8b8f8b2edff 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -12,7 +12,8 @@
def _get_array_list(arr, others):
- if len(others) and isinstance(others[0], (list, np.ndarray)):
+ if len(others) and isinstance(_values_from_object(others)[0],
+ (list, np.ndarray, Series)):
arrays = [arr] + list(others)
else:
arrays = [arr, others]
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 971d7acf73027..41594a1655d18 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -16,7 +16,7 @@
from pandas.compat import range, lrange, u
import pandas.compat as compat
from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull,
- bdate_range, date_range)
+ bdate_range, date_range, MultiIndex)
import pandas.core.common as com
from pandas.util.testing import assert_series_equal, assert_almost_equal
@@ -1193,6 +1193,23 @@ def test_encode_decode_errors(self):
tm.assert_series_equal(result, exp)
+ def test_cat_on_filtered_index(self):
+ df = DataFrame(index=MultiIndex.from_product([[2011, 2012], [1,2,3]],
+ names=['year', 'month']))
+
+ df = df.reset_index()
+ df = df[df.month > 1]
+
+ str_year = df.year.astype('str')
+ str_month = df.month.astype('str')
+ str_both = str_year.str.cat(str_month, sep=' ')
+
+ self.assertEqual(str_both.loc[1], '2011 2')
+
+ str_multiple = str_year.str.cat([str_month, str_month], sep=' ')
+
+ self.assertEqual(str_multiple.loc[1], '2011 2 2')
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #7857... this time it checks also for a Series (and it passes all tests).
| https://api.github.com/repos/pandas-dev/pandas/pulls/7902 | 2014-08-02T07:21:11Z | 2014-08-02T12:53:36Z | 2014-08-02T12:53:36Z | 2014-08-02T13:06:56Z |
NotImplementedError messages doc update #7872 | diff --git a/pandas/core/base.py b/pandas/core/base.py
index cc676b9682277..6c0a0c26b6457 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -19,7 +19,7 @@ class StringMixin(object):
# Formatting
def __unicode__(self):
- raise NotImplementedError
+ raise NotImplementedError("StringMixin __unicode__ format")
def __str__(self):
"""
@@ -382,7 +382,7 @@ def _box_func(self):
"""
box function to get object from internal representation
"""
- raise NotImplementedError
+ raise NotImplementedError("Box function to get object from internal representation")
def _box_values(self, values):
"""
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 92d60ae8d8847..bad9a03942277 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -39,6 +39,11 @@ class SettingWithCopyWarning(Warning):
class AmbiguousIndexError(PandasError, KeyError):
pass
+class AbstractMethodError(NotImplementedError):
+ def __init__(self,m):
+ self.message = m
+ def __str__(self):
+ return "This method must be defined on the concrete class - "+self.message
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
for t in ['O', 'int8',
@@ -134,7 +139,7 @@ def _isnull_new(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
- raise NotImplementedError("isnull is not defined for MultiIndex")
+ raise AbstractMethodError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
@@ -160,7 +165,7 @@ def _isnull_old(obj):
return lib.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
- raise NotImplementedError("isnull is not defined for MultiIndex")
+ raise AbstractMethodError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 756de479a471a..3d4a90ed82592 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -135,7 +135,7 @@ def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
@property
def _constructor(self):
- raise NotImplementedError
+ raise NotImplementedError("NDFrame _constructor")
def __unicode__(self):
# unicode representation based upon iterating over self
@@ -150,7 +150,7 @@ def _local_dir(self):
@property
def _constructor_sliced(self):
- raise NotImplementedError
+ raise NotImplementedError("NDFrame _constructor_sliced")
#----------------------------------------------------------------------
# Axis
@@ -1073,7 +1073,7 @@ def _iget_item_cache(self, item):
return lower
def _box_item_values(self, key, values):
- raise NotImplementedError
+ raise NotImplementedError("NDFrame _box_item_values")
def _maybe_cache_changed(self, item, value):
"""
@@ -1653,7 +1653,7 @@ def _needs_reindex_multi(self, axes, method, level):
method is None and level is None and not self._is_mixed_type)
def _reindex_multi(self, axes, copy, fill_value):
- return NotImplemented
+ raise NotImplementedError("NDFrame _reindex_multi")
_shared_docs['reindex_axis'] = (
"""Conform input object to new index with optional filling logic,
@@ -2179,7 +2179,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
raise ValueError('must specify a fill method or value')
if self._is_mixed_type and axis == 1:
if inplace:
- raise NotImplementedError()
+ raise NotImplementedError("fillna with inplace=True and _is_mixed_type=True and axis=1")
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
@@ -2880,7 +2880,7 @@ def first(self, offset):
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
- raise NotImplementedError
+ raise NotImplementedError("Currently implemented for DatatimeIndex instance only")
if len(self.index) == 0:
return self
@@ -2914,7 +2914,7 @@ def last(self, offset):
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
- raise NotImplementedError
+ raise NotImplementedError("Currently implemented for DatatimeIndex instance only")
if len(self.index) == 0:
return self
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index c2debb9bfe1c0..abba9f352da44 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -283,7 +283,7 @@ def _set_grouper(self, obj, sort=False):
return self.grouper
def _get_binner_for_grouping(self, obj):
- raise NotImplementedError
+ raise NotImplementedError("Binner for grouping")
@property
def groups(self):
@@ -644,7 +644,7 @@ def _python_apply_general(self, f):
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
- raise NotImplementedError
+ raise NotImplementedError("Groupby aggregrate")
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
@@ -654,7 +654,7 @@ def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
- raise NotImplementedError
+ raise NotImplementedError("Groupby transform")
def mean(self):
"""
@@ -1041,7 +1041,7 @@ def _python_agg_general(self, func, *args, **kwargs):
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
- raise NotImplementedError
+ raise NotImplementedError("Groupby wrap applied output")
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
@@ -1404,7 +1404,7 @@ def aggregate(self, values, how, axis=0):
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
- raise NotImplementedError
+ raise NotImplementedError("BaseGrouper aggregate for arity > 1")
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
@@ -1459,7 +1459,7 @@ def _aggregate(self, result, counts, values, how, is_numeric):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
- raise NotImplementedError
+ raise NotImplementedError("BaseGrouper aggregrate for > 3 dimensions")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
@@ -1695,7 +1695,7 @@ def _aggregate(self, result, counts, values, how, is_numeric=True):
if values.ndim > 3:
# punting for now
- raise NotImplementedError
+ raise NotImplementedError("BinGrouper aggregate for > 3 dimensions")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
@@ -2399,7 +2399,7 @@ def aggregate(self, arg, *args, **kwargs):
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
- raise NotImplementedError
+ raise NotImplementedError("aggregate not implemented for subset being a Dataframe")
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
@@ -2459,7 +2459,7 @@ def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
- raise NotImplementedError
+ raise NotImplementedError("Currently implemented for axis = 0")
obj = self._obj_with_exclusions
@@ -2509,7 +2509,7 @@ def _aggregate_generic(self, func, *args, **kwargs):
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
- raise NotImplementedError
+ raise NotImplementedError("NDFrameGroupBy wrap aggregated output")
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
@@ -3050,7 +3050,7 @@ def _iterate_slices(self):
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
- raise NotImplementedError
+ raise NotImplementedError("Currently implemented for axis = 0")
for val in slice_axis:
if val in self.exclusions:
@@ -3115,10 +3115,10 @@ def _aggregate_item_by_item(self, func, *args, **kwargs):
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
- raise NotImplementedError
+ raise NotImplementedError("Currently implemented for axis>0")
def _wrap_aggregated_output(self, output, names=None):
- raise NotImplementedError
+ raise NotImplementedError("PanelGroupBy _wrap_aggregated_output")
class NDArrayGroupBy(GroupBy):
@@ -3172,7 +3172,7 @@ def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
- raise NotImplementedError
+ raise NotImplementedError("DataSplitter apply function")
class ArraySplitter(DataSplitter):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 280c4073b0f94..860422c058d77 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -7,7 +7,7 @@
import pandas.core.common as com
from pandas.core.common import (_is_bool_indexer, is_integer_dtype,
_asarray_tuplesafe, is_list_like, isnull,
- ABCSeries, ABCDataFrame, ABCPanel, is_float)
+ ABCSeries, ABCDataFrame, ABCPanel, is_float, AbstractMethodError)
import pandas.lib as lib
import numpy as np
@@ -55,7 +55,7 @@ def __call__(self, *args, **kwargs):
return self
def __iter__(self):
- raise NotImplementedError('ix is not iterable')
+ raise AbstractMethodError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
@@ -120,7 +120,7 @@ def __setitem__(self, key, value):
self._setitem_with_indexer(indexer, value)
def _has_valid_type(self, k, axis):
- raise NotImplementedError()
+ raise AbstractMethodError("Valid type checking for _NDFrameIndexer is not implemented")
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
@@ -644,7 +644,7 @@ def _align_frame(self, indexer, df):
def _align_panel(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
- raise NotImplementedError("cannot set using an indexer with a Panel "
+ raise AbstractMethodError("cannot set using an indexer with a Panel "
"yet!")
def _getitem_tuple(self, tup):
@@ -1141,7 +1141,7 @@ def __getitem__(self, key):
return self._getitem_axis(key, axis=0)
def _getitem_axis(self, key, axis=0, validate_iterable=False):
- raise NotImplementedError()
+ raise AbstractMethodError("Get item along given axis in _LocationIndexer is not implemented")
def _getbool_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
@@ -1299,7 +1299,7 @@ def _has_valid_type(self, key, axis):
if com._is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
- raise NotImplementedError(
+ raise AbstractMethodError(
"iLocation based boolean indexing on an integer type "
"is not available"
)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 8100b98d6e42d..884dab11e8808 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -243,7 +243,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
- raise NotImplementedError
+ raise NotImplementedError("fillna function not implemented for more than 2 dimensions")
mask[mask.cumsum(self.ndim-1)>limit]=False
value = self._try_fill(value)
@@ -363,10 +363,10 @@ def convert(self, copy=True, **kwargs):
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
- raise NotImplementedError()
+ raise NotImplementedError("Block _can_hold_element function")
def _try_cast(self, value):
- raise NotImplementedError()
+ raise NotImplementedError("Block _try_cast")
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
@@ -1519,7 +1519,7 @@ def fillna(self, value, limit=None,
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
- raise NotImplementedError
+ raise NotImplementedError("fillna function not implemented for more than 2 dimensions")
mask[mask.cumsum(self.ndim-1)>limit]=False
np.putmask(values, mask, value)
@@ -1741,7 +1741,7 @@ def interpolate(self, method='pad', axis=0, inplace=False,
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
- raise NotImplementedError
+ raise NotImplementedError("fillna currently implemented only for limit=None")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index e9f8893355f2d..1cd867a5233ba 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -595,10 +595,10 @@ def conform(self, frame, axis='items'):
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
- raise NotImplementedError
+ raise NotImplementedError("Returns the head content of a Panel")
def tail(self, n=5):
- raise NotImplementedError
+ raise NotImplementedError("Returns the tail content of a Panel")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
index 3eebd51190e3d..73ba07f845e87 100644
--- a/pandas/core/panelnd.py
+++ b/pandas/core/panelnd.py
@@ -99,7 +99,7 @@ def _combine_with_constructor(self, other, func):
for f in ['to_frame', 'to_excel', 'to_sparse', 'groupby', 'join', 'filter',
'dropna', 'shift']:
def func(self, *args, **kwargs):
- raise NotImplementedError
+ raise NotImplementedError("%s is not implemented" % f)
setattr(klass, f, func)
# add the aggregate operations
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 78b049f4bf8d5..2ed3f9c854817 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -156,7 +156,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
- raise NotImplementedError
+ raise NotImplementedError("Series initialization currently is not implemented for MultiIndex")
elif isinstance(data, Index):
# need to copy to avoid aliasing issues
if name is None:
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index ad64d2bf6bdd9..0591131f986b6 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -680,7 +680,7 @@ def str_slice_replace(arr, start=None, stop=None, repl=None):
-------
replaced : array
"""
- raise NotImplementedError
+ raise NotImplementedError("String slice replace not implemented")
def str_strip(arr, to_strip=None):
@@ -985,7 +985,7 @@ def slice(self, start=None, stop=None, step=1):
@copy(str_slice)
def slice_replace(self, i=None, j=None):
- raise NotImplementedError
+ raise NotImplementedError("String slice replace is not implemented.")
@copy(str_decode)
def decode(self, encoding, errors="strict"):
diff --git a/pandas/index.pyx b/pandas/index.pyx
index 3dcdbf207fb3f..ab1bea2abb22c 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -234,10 +234,10 @@ cdef class IndexEngine:
self._ensure_mapping_populated()
def _call_monotonic(self, values):
- raise NotImplementedError
+ raise NotImplementedError("IndexEngine _call_monotonic is not implemented")
cdef _make_hash_table(self, n):
- raise NotImplementedError
+ raise NotImplementedError("IndexEngine _make_hash_table is not implemented")
cdef _check_type(self, object val):
hash(val)
diff --git a/pandas/io/ga.py b/pandas/io/ga.py
index f002994888932..9ee6a64c1affc 100644
--- a/pandas/io/ga.py
+++ b/pandas/io/ga.py
@@ -240,7 +240,7 @@ def get_profile(self, account_id=None, web_property_id=None, name=None,
return _get_match(profiles, name, id, **kwargs)
def create_query(self, *args, **kwargs):
- raise NotImplementedError()
+ raise NotImplementedError("Create query in GDataReader is not implemented")
@Substitution(extras='')
@Appender(_GA_READER_DOC)
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 5ea6ca36ac764..39da36f84a3aa 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -215,7 +215,7 @@ def _text_getter(self, obj):
text : str or unicode
The text from an individual DOM node.
"""
- raise NotImplementedError
+ raise NotImplementedError("Text from an individual DOM node")
def _parse_td(self, obj):
"""Return the td elements from a row element.
@@ -229,7 +229,7 @@ def _parse_td(self, obj):
columns : list of node-like
These are the elements of each row, i.e., the columns.
"""
- raise NotImplementedError
+ raise NotImplementedError("Elements of each row, i.e the columns")
def _parse_tables(self, doc, match, attrs):
"""Return all tables from the parsed DOM.
@@ -256,7 +256,7 @@ def _parse_tables(self, doc, match, attrs):
tables : list of node-like
A list of <table> elements to be parsed into raw data.
"""
- raise NotImplementedError
+ raise NotImplementedError("All tables from the parsed DOM")
def _parse_tr(self, table):
"""Return the list of row elements from the parsed table element.
@@ -271,7 +271,7 @@ def _parse_tr(self, table):
rows : list of node-like
A list row elements of a table, usually <tr> or <th> elements.
"""
- raise NotImplementedError
+ raise NotImplementedError("List of row elements from parsed table element")
def _parse_thead(self, table):
"""Return the header of a table.
@@ -286,7 +286,7 @@ def _parse_thead(self, table):
thead : node-like
A <thead>...</thead> element.
"""
- raise NotImplementedError
+ raise NotImplementedError("Header of HTML table")
def _parse_tbody(self, table):
"""Return the body of the table.
@@ -301,7 +301,7 @@ def _parse_tbody(self, table):
tbody : node-like
A <tbody>...</tbody> element.
"""
- raise NotImplementedError
+ raise NotImplementedError("Body of HTML table")
def _parse_tfoot(self, table):
"""Return the footer of the table if any.
@@ -316,7 +316,7 @@ def _parse_tfoot(self, table):
tfoot : node-like
A <tfoot>...</tfoot> element.
"""
- raise NotImplementedError
+ raise NotImplementedError("Footer of HTML table")
def _build_doc(self):
"""Return a tree-like object that can be used to iterate over the DOM.
@@ -325,7 +325,7 @@ def _build_doc(self):
-------
obj : tree-like
"""
- raise NotImplementedError
+ raise NotImplementedError("Tree-like object to iterate over the DOM")
def _build_table(self, table):
header = self._parse_raw_thead(table)
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 4ed325df9a747..a0d350bf9dfc1 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -33,7 +33,7 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
else:
- raise NotImplementedError
+ raise NotImplementedError("to_json is currently implemented for Series and Dataframe only.")
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
@@ -64,7 +64,7 @@ def __init__(self, obj, orient, date_format, double_precision,
self._format_axes()
def _format_axes(self):
- raise NotImplementedError
+ raise NotImplementedError("Writer _format_axes is not implemented")
def write(self):
return dumps(
@@ -282,7 +282,7 @@ def _convert_axes(self):
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
- raise NotImplementedError
+ raise NotImplementedError("Try to convert types in Parser is not implemented.")
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
@@ -395,7 +395,7 @@ def _try_convert_to_date(self, data):
return data, False
def _try_convert_dates(self):
- raise NotImplementedError
+ raise NotImplementedError("Try to convert dates in Parser is not implemented")
class SeriesParser(Parser):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 22fe3ef16e34d..9f4fcd863727e 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -679,7 +679,7 @@ def _make_engine(self, engine='c'):
self._engine = klass(self.f, **self.options)
def _failover_to_python(self):
- raise NotImplementedError
+ raise NotImplementedError("Failover to Python not implemented")
def read(self, nrows=None):
if nrows is not None:
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index bb6f9cee5766e..c22c5b448dd57 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -427,7 +427,7 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
- raise NotImplementedError
+ raise NotImplementedError("Currently to_sql() implemented only for Series and DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label)
@@ -983,7 +983,7 @@ def __init__(self, con, flavor, is_cursor=False):
if flavor is None:
flavor = 'sqlite'
if flavor not in ['sqlite', 'mysql']:
- raise NotImplementedError
+ raise NotImplementedError("Currently implemented for sqlite and mysql flavors only")
else:
self.flavor = flavor
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index c02a3172f4adc..feeb1cbee296d 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -52,10 +52,10 @@ class ParserTests(object):
"""
def read_csv(self, *args, **kwargs):
- raise NotImplementedError
+ raise NotImplementedError("Read CSV in Parser Tests is not implemented")
def read_table(self, *args, **kwargs):
- raise NotImplementedError
+ raise NotImplementedError("Read Table in Parser Tests is not implemented")
def setUp(self):
import warnings
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index a34f278fc5a96..da88d0fc1203b 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -727,10 +727,10 @@ def setup_import(self):
raise nose.SkipTest('SQLAlchemy not installed')
def setup_driver(self):
- raise NotImplementedError()
+ raise NotImplementedError("SQLAlchemy Setup Driver test not implemented")
def connect(self):
- raise NotImplementedError()
+ raise NotImplementedError("SQLAlchemy Connect test not implemented")
def setup_connect(self):
try:
@@ -740,7 +740,7 @@ def setup_connect(self):
raise nose.SkipTest("Can't connect to {0} server".format(self.flavor))
def tearDown(self):
- raise NotImplementedError()
+ raise NotImplementedError("SQLAlchemy Tear Down test not implemented")
def test_aread_sql(self):
self._read_sql_iris()
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index bd34c7e5f02b2..46c952c58ae21 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -250,7 +250,7 @@ def to_dense(self):
return DataFrame(data, index=self.index)
def astype(self, dtype):
- raise NotImplementedError
+ raise NotImplementedError("SparseDataFrame astype not implemented")
def copy(self, deep=True):
"""
@@ -419,7 +419,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None):
new_index, new_columns = this.index, this.columns
if level is not None:
- raise NotImplementedError
+ raise NotImplementedError("Combine Frames implemented only for level=None.")
if self.empty and other.empty:
return SparseDataFrame(index=new_index).__finalize__(self)
@@ -460,9 +460,9 @@ def _combine_match_index(self, other, func, level=None, fill_value=None):
new_data = {}
if fill_value is not None:
- raise NotImplementedError
+ raise NotImplementedError("Combine match index implemented for fill_value=None only")
if level is not None:
- raise NotImplementedError
+ raise NotImplementedError("Combine match index implemented for level=None only")
new_index = self.index.union(other.index)
this = self
@@ -495,9 +495,10 @@ def _combine_match_columns(self, other, func, level=None, fill_value=None):
# possible for this to happen, which is bothersome
if fill_value is not None:
- raise NotImplementedError
+ raise NotImplementedError("Combine match columns implemented for fill_value=None only")
if level is not None:
- raise NotImplementedError
+ raise NotImplementedError("Combine match columns implemented for level=None only")
+
new_data = {}
@@ -568,10 +569,10 @@ def _reindex_columns(self, columns, copy, level, fill_value, limit=None,
raise TypeError('Reindex by level not supported for sparse')
if com.notnull(fill_value):
- raise NotImplementedError
+ raise NotImplementedError("Currently reindexing for com.nonnull(fillvalue)=True is not implemented")
if limit:
- raise NotImplementedError
+ raise NotImplementedError("Currently limit=None is only implemented")
# TODO: fill value handling
sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns)
diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py
index bfc4ab9d3eb48..c75fbf2ee10fb 100644
--- a/pandas/sparse/list.py
+++ b/pandas/sparse/list.py
@@ -46,7 +46,7 @@ def __getitem__(self, i):
return self._chunks[j][i - passed]
def __setitem__(self, i, value):
- raise NotImplementedError
+ raise NotImplementedError("set item in SparseList is not implemented")
@property
def nchunks(self):
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index 20bbc58cc908f..b1b6889e8823c 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -32,7 +32,7 @@ def __set__(self, obj, value):
value = _ensure_index(value)
if isinstance(value, MultiIndex):
- raise NotImplementedError
+ raise NotImplementedError("Currently Sparse Panel Axis does not support MutliIndex")
for v in compat.itervalues(obj._frames):
setattr(v, self.frame_attr, value)
@@ -155,7 +155,7 @@ def _get_items(self):
def _set_items(self, new_items):
new_items = _ensure_index(new_items)
if isinstance(new_items, MultiIndex):
- raise NotImplementedError
+ raise NotImplementedError("Currently Sparse Panel Axis does not support MutliIndex")
# need to create new frames dict
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index b432ddd03d17f..d17b689fb1640 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -1619,7 +1619,7 @@ def group_ohlc_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
b = 0
if K > 1:
- raise NotImplementedError
+ raise NotImplementedError("group aggregation for K>1 is not implemented")
else:
for i in range(N):
while b < ngroups - 1 and i >= bins[b]:
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index 42ae043847ba1..6f2feb5c7679d 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -6505,7 +6505,7 @@ def group_ohlc_float64(ndarray[float64_t, ndim=2] out,
b = 0
if K > 1:
- raise NotImplementedError
+ raise NotImplementedError("group aggregation for K>1 is not implemented")
else:
for i in range(N):
while b < ngroups - 1 and i >= bins[b]:
@@ -6578,7 +6578,7 @@ def group_ohlc_float32(ndarray[float32_t, ndim=2] out,
b = 0
if K > 1:
- raise NotImplementedError
+ raise NotImplementedError("group aggregation for K>1 is not implemented")
else:
for i in range(N):
while b < ngroups - 1 and i >= bins[b]:
diff --git a/pandas/src/sparse.pyx b/pandas/src/sparse.pyx
index 579d473cae1b3..6a9855176d20b 100644
--- a/pandas/src/sparse.pyx
+++ b/pandas/src/sparse.pyx
@@ -27,7 +27,7 @@ cdef class SparseIndex:
Abstract superclass for sparse index types
'''
def __init__(self):
- raise NotImplementedError
+ raise NotImplementedError("Initialization of Supercall SparseIndex is not implemented")
cdef class IntIndex(SparseIndex):
@@ -556,7 +556,7 @@ cdef class BlockMerge(object):
self.result = self._make_merged_blocks()
cdef _make_merged_blocks(self):
- raise NotImplementedError
+ raise NotImplementedError("BlockMerge _make_merged_blocks not implemented")
cdef _set_current_indices(self, int32_t xi, int32_t yi, bint mode):
if mode == 0:
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 37a982acc0bbd..5b29fc8a7b7fc 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -127,7 +127,7 @@ def random_color(column):
colors = lmap(random_color, lrange(num_colors))
else:
- raise NotImplementedError
+ raise NotImplementedError("color_type = default/random are only implemented")
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
@@ -989,7 +989,7 @@ def _compute_plot_data(self):
self.data = numeric_data
def _make_plot(self):
- raise NotImplementedError
+ raise NotImplementedError("MPLPlot _make_plot not implemented")
def _add_table(self):
if self.table is False:
@@ -1828,7 +1828,7 @@ def f(ax, x, y, w, start=None, log=self.log, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, **kwds)
else:
- raise NotImplementedError
+ raise NotImplementedError("Plot with kind = 'bar'/'barh' are only implemented")
return f
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index ff585d80af830..7ed2541af06f7 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1717,14 +1717,14 @@ def indexer_at_time(self, time, asof=False):
from dateutil.parser import parse
if asof:
- raise NotImplementedError
+ raise NotImplementedError("Currently selection of values is implemented for asof=False")
if isinstance(time, compat.string_types):
time = parse(time).time()
if time.tzinfo:
# TODO
- raise NotImplementedError
+ raise NotImplementedError("Currently datetime.time or string is supported")
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
@@ -1756,7 +1756,7 @@ def indexer_between_time(self, start_time, end_time, include_start=True,
end_time = parse(end_time).time()
if start_time.tzinfo or end_time.tzinfo:
- raise NotImplementedError
+ raise NotImplementedError("Currently implemented for datetime.time and string")
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
@@ -1858,7 +1858,7 @@ def _generate_regular_range(start, end, periods, offset):
b = e - periods * stride
tz = end.tz
else:
- raise NotImplementedError
+ raise NotImplementedError("start and end are None, periods is not None is not currently implemented")
data = np.arange(b, e, stride, dtype=np.int64)
data = DatetimeIndex._simple_new(data, None, tz=tz)
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 059a6bfd06719..7e52683bca739 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -346,11 +346,11 @@ def _take_new_index(obj, indexer, new_index, axis=0):
return Series(new_values, index=new_index, name=obj.name)
elif isinstance(obj, DataFrame):
if axis == 1:
- raise NotImplementedError
+ raise NotImplementedError("Taking new index of a Dataframe not implemented for axis=1")
return DataFrame(obj._data.reindex_indexer(
new_axis=new_index, indexer=indexer, axis=1))
else:
- raise NotImplementedError
+ raise NotImplementedError("Taking new index implemented only for Series and DataFrame")
def _get_range_edges(axis, offset, closed='left', base=0):
@@ -429,7 +429,7 @@ def asfreq(obj, freq, method=None, how=None, normalize=False):
"""
if isinstance(obj.index, PeriodIndex):
if method is not None:
- raise NotImplementedError
+ raise NotImplementedError("Currently utility frequency conversion implemented for method=None")
if how is None:
how = 'E'
| closes #7872
I have updated the NonImplementedError messages #7872 in all places applicable.
It includes changes to statements of type
- raise NotImplementedError
- raise NotImplementedError()
Comment on the changes.
The one place I have doubt and not sure whether to add one or not would be https://github.com/pydata/pandas/blob/master/.coveragerc#L17
| https://api.github.com/repos/pandas-dev/pandas/pulls/7899 | 2014-08-01T19:10:43Z | 2015-04-14T14:21:12Z | null | 2015-04-14T14:21:28Z |
BUG: ewm*() interpretation of min_periods is off by one | diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index e5ba8efd25b02..b27a6ff9adbc1 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -68,7 +68,7 @@ API changes
rolling_min(s, window=10, min_periods=5)
-- :func:`ewma`, :func:`ewmastd`, :func:`ewmavar`, :func:`ewmacorr`, and :func:`ewmacov`
+- :func:`ewma`, :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcorr`, and :func:`ewmcov`
now have an optional ``ignore_na`` argument.
When ``ignore_na=False`` (the default), missing values are taken into account in the weights calculation.
When ``ignore_na=True`` (which reproduces the pre-0.15.0 behavior), missing values are ignored in the weights calculation.
@@ -80,6 +80,11 @@ API changes
ewma(Series([1., None, 100.]), com=2.5, ignore_na=True) # pre-0.15.0 behavior
ewma(Series([1., None, 100.]), com=2.5, ignore_na=False) # default
+- :func:`ewma`, :func:`ewmstd`, :func:`ewmvol`, :func:`ewmvar`, :func:`ewmcorr`, and :func:`ewmcov`
+ now set to ``NaN`` the first ``min_periods-1`` entries of the result (for ``min_periods>1``).
+ Previously the first ``min_periods`` entries of the result were set to ``NaN``.
+ The new behavior accords with the existing documentation. (:issue:`7884`)
+
- Bug in passing a ``DatetimeIndex`` with a timezone that was not being retained in DataFrame construction from a dict (:issue:`7822`)
In prior versions this would drop the timezone.
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 646e20acae3f9..44b8bbd0c9078 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -463,8 +463,9 @@ def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None,
def _ewma(v):
result = algos.ewma(v, com, int(adjust), int(ignore_na))
- first_index = _first_valid_index(v)
- result[first_index: first_index + min_periods] = NaN
+ if min_periods > 1:
+ first_index = _first_valid_index(v)
+ result[first_index: first_index + min_periods - 1] = NaN
return result
return_hook, values = _process_data_structure(arg)
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 9f2dae3d7d9a3..ce7f9c8a225a8 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -629,8 +629,37 @@ def _check_ew_ndarray(self, func, preserve_nan=False):
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
-
- # ??? check something
+ s = Series(arr)
+
+ # check min_periods
+ # GH 7898
+ result = func(s, 50, min_periods=2)
+ self.assertTrue(np.isnan(result.values[:11]).all())
+ self.assertFalse(np.isnan(result.values[11:]).any())
+
+ for min_periods in (0, 1):
+ result = func(s, 50, min_periods=min_periods)
+ if func == mom.ewma:
+ self.assertTrue(np.isnan(result.values[:10]).all())
+ self.assertFalse(np.isnan(result.values[10:]).any())
+ else:
+ # ewmstd, ewmvol, ewmvar *should* require at least two values,
+ # but currently require only one, for some reason
+ self.assertTrue(np.isnan(result.values[:10]).all())
+ self.assertFalse(np.isnan(result.values[10:]).any())
+
+ # check series of length 0
+ result = func(Series([]), 50, min_periods=min_periods)
+ assert_series_equal(result, Series([]))
+
+ # check series of length 1
+ result = func(Series([1.]), 50, min_periods=min_periods)
+ if func == mom.ewma:
+ assert_series_equal(result, Series([1.]))
+ else:
+ # ewmstd, ewmvol, ewmvar *should* require at least two values,
+ # so should return NaN, but currently require one, so return 0.
+ assert_series_equal(result, Series([0.]))
# pass in ints
result2 = func(np.arange(50), span=10)
@@ -752,9 +781,32 @@ def _check_binary_ew(self, func):
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
-
- self.assertTrue(np.isnan(result.values[:15]).all())
- self.assertFalse(np.isnan(result.values[15:]).any())
+ self.assertTrue(np.isnan(result.values[:14]).all())
+ self.assertFalse(np.isnan(result.values[14:]).any())
+
+ # GH 7898
+ for min_periods in (0, 1, 2):
+ result = func(A, B, 20, min_periods=min_periods)
+ # binary functions (ewmcov, ewmcorr) *should* require at least two values
+ if (func == mom.ewmcov) and (min_periods <= 1):
+ # currenty ewmcov requires only one value, for some reason.
+ self.assertTrue(np.isnan(result.values[:10]).all())
+ self.assertFalse(np.isnan(result.values[10:]).any())
+ else:
+ self.assertTrue(np.isnan(result.values[:11]).all())
+ self.assertFalse(np.isnan(result.values[11:]).any())
+
+ # check series of length 0
+ result = func(Series([]), Series([]), 50, min_periods=min_periods)
+ assert_series_equal(result, Series([]))
+
+ # check series of length 1
+ result = func(Series([1.]), Series([1.]), 50, min_periods=min_periods)
+ if (func == mom.ewmcov) and (min_periods <= 1):
+ # currenty ewmcov requires only one value, for some reason.
+ assert_series_equal(result, Series([0.]))
+ else:
+ assert_series_equal(result, Series([np.NaN]))
self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
| Closes https://github.com/pydata/pandas/issues/7884.
This addresses only the `ewm*()` functions' off-by-one interpretation of `min_periods`.
It doesn't address the inconsistency between the `ewm*()` functions and the `rolling_*()` functions in the meaning of `min_periods`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7898 | 2014-08-01T19:00:22Z | 2014-08-02T13:46:41Z | 2014-08-02T13:46:41Z | 2014-09-10T00:12:07Z |
BUG: fixing ewma() for adjust=False and ignore_na=False | diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 54d71d79ae7c3..1c1d32e1d2a20 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -1018,7 +1018,10 @@ def ewma(ndarray[double_t] input, double_t com, int adjust, int ignore_na):
if cur == cur:
old_wt *= old_wt_factor
weighted_avg = ((old_wt * weighted_avg) + (new_wt * cur)) / (old_wt + new_wt)
- old_wt += new_wt
+ if adjust:
+ old_wt += new_wt
+ else:
+ old_wt = 1.
elif not ignore_na:
old_wt *= old_wt_factor
else:
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index a62d8178385cc..646e20acae3f9 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -107,6 +107,23 @@
:math:`c = (s - 1) / 2`
So a "20-day EWMA" would have center 9.5.
+
+When adjust is True (default), weighted averages are calculated using weights
+ (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
+
+When adjust is False, weighted averages are calculated recursively as:
+ weighted_average[0] = arg[0];
+ weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
+
+When ignore_na is False (default), weights are based on absolute positions.
+For example, the weights of x and y used in calculating the final weighted
+average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
+(1-alpha)**2 and alpha (if adjust is False).
+
+When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
+relative positions. For example, the weights of x and y used in calculating
+the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
+True), and 1-alpha and alpha (if adjust is False).
"""
_expanding_kw = """min_periods : int, default None
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 4b5bb042e1fc7..9f2dae3d7d9a3 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -551,6 +551,7 @@ def test_ewma_nan_handling(self):
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
+ s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
@@ -558,18 +559,22 @@ def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
for (s, adjust, ignore_na, w) in [
- (s0, True, False, [np.nan, (1.0 - alpha), 1.]),
- (s0, True, True, [np.nan, (1.0 - alpha), 1.]),
- (s0, False, False, [np.nan, (1.0 - alpha), alpha]),
- (s0, False, True, [np.nan, (1.0 - alpha), alpha]),
- (s1, True, False, [(1.0 - alpha)**2, np.nan, 1.]),
- (s1, True, True, [(1.0 - alpha), np.nan, 1.]),
- (s1, False, False, [(1.0 - alpha)**2, np.nan, alpha]),
- (s1, False, True, [(1.0 - alpha), np.nan, alpha]),
- (s2, True, False, [np.nan, (1.0 - alpha)**3, np.nan, np.nan, 1., np.nan]),
- (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1., np.nan]),
- (s2, False, False, [np.nan, (1.0 - alpha)**3, np.nan, np.nan, alpha, np.nan]),
- (s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]),
+ (s0, True, False, [np.nan, (1. - alpha), 1.]),
+ (s0, True, True, [np.nan, (1. - alpha), 1.]),
+ (s0, False, False, [np.nan, (1. - alpha), alpha]),
+ (s0, False, True, [np.nan, (1. - alpha), alpha]),
+ (s1, True, False, [(1. - alpha)**2, np.nan, 1.]),
+ (s1, True, True, [(1. - alpha), np.nan, 1.]),
+ (s1, False, False, [(1. - alpha)**2, np.nan, alpha]),
+ (s1, False, True, [(1. - alpha), np.nan, alpha]),
+ (s2, True, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, 1., np.nan]),
+ (s2, True, True, [np.nan, (1. - alpha), np.nan, np.nan, 1., np.nan]),
+ (s2, False, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, alpha, np.nan]),
+ (s2, False, True, [np.nan, (1. - alpha), np.nan, np.nan, alpha, np.nan]),
+ (s3, True, False, [(1. - alpha)**3, np.nan, (1. - alpha), 1.]),
+ (s3, True, True, [(1. - alpha)**2, np.nan, (1. - alpha), 1.]),
+ (s3, False, False, [(1. - alpha)**3, np.nan, (1. - alpha) * alpha, alpha * ((1. - alpha)**2 + alpha)]),
+ (s3, False, True, [(1. - alpha)**2, np.nan, (1. - alpha) * alpha, alpha]),
]:
expected = simple_wma(s, Series(w))
result = mom.ewma(s, com=com, adjust=adjust, ignore_na=ignore_na)
| The previous code in https://github.com/pydata/pandas/pull/7603 wasn't quite right for the case that both `adjust=False` and `ignore_na=False`. This PR fixes the handling of that case. Note that `ignore_na=False` behavior is new in v0.15.0, so this bug and its fix don't affect any prior behavior.
| https://api.github.com/repos/pandas-dev/pandas/pulls/7896 | 2014-08-01T16:37:34Z | 2014-08-01T18:30:44Z | 2014-08-01T18:30:44Z | 2014-09-10T00:12:18Z |
API: add 'level' kwarg to 'Index.isin' method | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 023c200e271ab..39635cb0e612f 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -582,7 +582,7 @@ and :ref:`Advanced Indexing <indexing.advanced>` you may select along more than
.. _indexing.basics.indexing_isin:
Indexing with isin
-~~~~~~~~~~~~~~~~~~
+------------------
Consider the ``isin`` method of Series, which returns a boolean vector that is
true wherever the Series elements exist in the passed list. This allows you to
@@ -591,13 +591,30 @@ select rows where one or more columns have values you want:
.. ipython:: python
s = Series(np.arange(5),index=np.arange(5)[::-1],dtype='int64')
-
s
+ s.isin([2, 4, 6])
+ s[s.isin([2, 4, 6])]
+
+The same method is available for ``Index`` objects and is useful for the cases
+when you don't know which of the sought labels are in fact present:
- s.isin([2, 4])
+.. ipython:: python
+
+ s[s.index.isin([2, 4, 6])]
- s[s.isin([2, 4])]
+ # compare it to the following
+ s[[2, 4, 6]]
+In addition to that, ``MultiIndex`` allows selecting a separate level to use
+in the membership check:
+
+.. ipython:: python
+
+ s_mi = Series(np.arange(6),
+ index=pd.MultiIndex.from_product([[0, 1], ['a', 'b', 'c']]))
+ s_mi
+ s_mi.iloc[s_mi.index.isin([(1, 'a'), (2, 'b'), (0, 'c')])]
+ s_mi.iloc[s_mi.index.isin(['a', 'c', 'e'], level=1)]
DataFrame also has an ``isin`` method. When calling ``isin``, pass a set of
values as either an array or dict. If values is an array, ``isin`` returns
@@ -1622,12 +1639,6 @@ with duplicates dropped.
idx1.sym_diff(idx2)
idx1 ^ idx2
-The ``isin`` method of Index objects
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-One additional operation is the ``isin`` method that works analogously to the
-``Series.isin`` method found :ref:`here <indexing.boolean>`.
-
.. _indexing.hierarchical:
Hierarchical indexing (MultiIndex)
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt
index 109ed8b286c22..0f9633bdb908b 100644
--- a/doc/source/v0.15.0.txt
+++ b/doc/source/v0.15.0.txt
@@ -129,6 +129,19 @@ API changes
strings must contain 244 or fewer characters. Attempting to write Stata
dta files with strings longer than 244 characters raises a ``ValueError``. (:issue:`7858`)
+- ``Index.isin`` now supports a ``level`` argument to specify which index level
+ to use for membership tests (:issue:`7892`, :issue:`7890`)
+
+ .. code-block:: python
+
+ In [1]: idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b', 'c']])
+
+ In [2]: idx.values
+ Out[2]: array([(0, 'a'), (0, 'b'), (0, 'c'), (1, 'a'), (1, 'b'), (1, 'c')], dtype=object)
+
+ In [3]: idx.isin(['a', 'c', 'e'], level=1)
+ Out[3]: array([ True, False, True, True, False, True], dtype=bool)
+
.. _whatsnew_0150.cat:
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 263e6db8c486a..94bc48d0f4342 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -12,7 +12,7 @@
import pandas.index as _index
from pandas.lib import Timestamp, is_datetime_array
from pandas.core.base import FrozenList, FrozenNDArray, IndexOpsMixin
-from pandas.util.decorators import cache_readonly, deprecate
+from pandas.util.decorators import cache_readonly, deprecate, Appender
from pandas.core.common import isnull, array_equivalent
import pandas.core.common as com
from pandas.core.common import (_values_from_object, is_float, is_integer,
@@ -687,13 +687,29 @@ def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self.values, len(self))
+ def _validate_index_level(self, level):
+ """
+ Validate index level.
+
+ For single-level Index getting level number is a no-op, but some
+ verification must be done like in MultiIndex.
+
+ """
+ if isinstance(level, int):
+ if level < 0 and level != -1:
+ raise IndexError("Too many levels: Index has only 1 level,"
+ " %d is not a valid level number" % (level,))
+ elif level > 0:
+ raise IndexError("Too many levels:"
+ " Index has only 1 level, not %d" %
+ (level + 1))
+ elif level != self.name:
+ raise KeyError('Level %s must be same as name (%s)'
+ % (level, self.name))
+
def _get_level_number(self, level):
- if not isinstance(level, int):
- if level != self.name:
- raise AssertionError('Level %s must be same as name (%s)'
- % (level, self.name))
- level = 0
- return level
+ self._validate_index_level(level)
+ return 0
@cache_readonly
def inferred_type(self):
@@ -1271,7 +1287,7 @@ def get_level_values(self, level):
values : ndarray
"""
# checks that level number is actually just 1
- self._get_level_number(level)
+ self._validate_index_level(level)
return self
def get_indexer(self, target, method=None, limit=None):
@@ -1370,7 +1386,7 @@ def groupby(self, to_groupby):
def map(self, mapper):
return self._arrmap(self.values, mapper)
- def isin(self, values):
+ def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values
@@ -1378,12 +1394,26 @@ def isin(self, values):
Parameters
----------
values : set or sequence of values
+ Sought values.
+ level : str or int, optional
+ Name or position of the index level to use (if the index is a
+ MultiIndex).
+
+ Notes
+ -----
+ If `level` is specified:
+
+ - if it is the name of one *and only one* index level, use that level;
+ - otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
+
"""
value_set = set(values)
+ if level is not None:
+ self._validate_index_level(level)
return lib.ismember(self._array_values(), value_set)
def _array_values(self):
@@ -2149,20 +2179,11 @@ def hasnans(self):
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
- def isin(self, values):
- """
- Compute boolean array of whether each index value is found in the
- passed set of values
-
- Parameters
- ----------
- values : set or sequence of values
-
- Returns
- -------
- is_contained : ndarray (boolean dtype)
- """
+ @Appender(Index.isin.__doc__)
+ def isin(self, values, level=None):
value_set = set(values)
+ if level is not None:
+ self._validate_index_level(level)
return lib.ismember_nans(self._array_values(), value_set,
isnull(list(value_set)).any())
@@ -4052,6 +4073,21 @@ def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
+ @Appender(Index.isin.__doc__)
+ def isin(self, values, level=None):
+ if level is None:
+ return lib.ismember(self._array_values(), set(values))
+ else:
+ num = self._get_level_number(level)
+ levs = self.levels[num]
+ labs = self.labels[num]
+
+ sought_labels = levs.isin(values).nonzero()[0]
+ if levs.size == 0:
+ return np.zeros(len(labs), dtype=np.bool_)
+ else:
+ return np.lib.arraysetops.in1d(labs, sought_labels)
+
# For utility purposes
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 8b1f6ce3e7f45..c32c7ddc55ced 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -840,7 +840,7 @@ def test_get_set_value(self):
self.assertEqual(values[67], 10)
def test_isin(self):
- values = ['foo', 'bar']
+ values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
@@ -853,6 +853,49 @@ def test_isin(self):
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
+ def test_isin_nan(self):
+ self.assert_numpy_array_equal(
+ Index(['a', np.nan]).isin([np.nan]), [False, True])
+ self.assert_numpy_array_equal(
+ Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
+ self.assert_numpy_array_equal(
+ Index(['a', np.nan]).isin([float('nan')]), [False, False])
+ self.assert_numpy_array_equal(
+ Index(['a', np.nan]).isin([pd.NaT]), [False, False])
+ # Float64Index overrides isin, so must be checked separately
+ self.assert_numpy_array_equal(
+ Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
+ self.assert_numpy_array_equal(
+ Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
+ self.assert_numpy_array_equal(
+ Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
+
+ def test_isin_level_kwarg(self):
+ def check_idx(idx):
+ values = idx.tolist()[-2:] + ['nonexisting']
+
+ expected = np.array([False, False, True, True])
+ self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
+ self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
+
+ self.assertRaises(IndexError, idx.isin, values, level=1)
+ self.assertRaises(IndexError, idx.isin, values, level=10)
+ self.assertRaises(IndexError, idx.isin, values, level=-2)
+
+ self.assertRaises(KeyError, idx.isin, values, level=1.0)
+ self.assertRaises(KeyError, idx.isin, values, level='foobar')
+
+ idx.name = 'foobar'
+ self.assert_numpy_array_equal(expected,
+ idx.isin(values, level='foobar'))
+
+ self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
+ self.assertRaises(KeyError, idx.isin, values, level=np.nan)
+
+ check_idx(Index(['qux', 'baz', 'foo', 'bar']))
+ # Float64Index overrides isin, so must be checked separately
+ check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
+
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
@@ -2948,6 +2991,55 @@ def test_level_setting_resets_attributes(self):
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
+ def test_isin(self):
+ values = [('foo', 2), ('bar', 3), ('quux', 4)]
+
+ idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'],
+ np.arange(4)])
+ result = idx.isin(values)
+ expected = np.array([False, False, True, True])
+ self.assert_numpy_array_equal(result, expected)
+
+ # empty, return dtype bool
+ idx = MultiIndex.from_arrays([[], []])
+ result = idx.isin(values)
+ self.assertEqual(len(result), 0)
+ self.assertEqual(result.dtype, np.bool_)
+
+ def test_isin_nan(self):
+ idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
+ self.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
+ [False, False])
+ self.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
+ [False, False])
+
+ def test_isin_level_kwarg(self):
+ idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'],
+ np.arange(4)])
+
+ vals_0 = ['foo', 'bar', 'quux']
+ vals_1 = [2, 3, 10]
+
+ expected = np.array([False, False, True, True])
+ self.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
+ self.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
+
+ self.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
+ self.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
+
+ self.assertRaises(IndexError, idx.isin, vals_0, level=5)
+ self.assertRaises(IndexError, idx.isin, vals_0, level=-5)
+
+ self.assertRaises(KeyError, idx.isin, vals_0, level=1.0)
+ self.assertRaises(KeyError, idx.isin, vals_1, level=-1.0)
+ self.assertRaises(KeyError, idx.isin, vals_1, level='A')
+
+ idx.names = ['A', 'B']
+ self.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))
+ self.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))
+
+ self.assertRaises(KeyError, idx.isin, vals_1, level='C')
+
def test_get_combined_index():
from pandas.core.index import _get_combined_index
| closes #7890
This PR adds `level` kwarg for `Index` objects as discussed (briefly in #7890):
``` python
Index.isin(iterable, level=None)
```
### Summary
- for non-`MultiIndex` classes:
- no functional changes in the actual membership test
- valid `level` values are `None`, `0`, `-1` and `self.name`
- if `level` doesn't match the name, it's a `KeyError` (was `AssertionError` before)
- for `MultiIndex` classes:
- if `level=None`, elements of `self.values` (tuples) are used
- otherwise elements of corresponding level are used
- valid `level` values are the same as in `MultiIndex.get_level_values` method, `-self.nlevels..(self.nlevels - 1)` plus all unique index names
### TODO
- [x] add to documentation
- [x] optimize MultiIndex case
| https://api.github.com/repos/pandas-dev/pandas/pulls/7892 | 2014-07-31T20:01:01Z | 2014-08-04T20:26:24Z | 2014-08-04T20:26:24Z | 2014-11-02T21:35:14Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.