title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
COMPAT: Warnings
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index 196f4b2679576..f44fa347cb053 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -3,6 +3,7 @@ """Top level ``eval`` module. """ +import warnings import tokenize from pandas.io.formats.printing import pprint_thing from pandas.core.computation.scope import _ensure_scope @@ -303,7 +304,8 @@ def eval(expr, parser='pandas', engine=None, truediv=True, "if there is no assignment") # assign if needed - if env.target is not None and parsed_expr.assigner is not None: + assigner = parsed_expr.assigner + if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment @@ -317,22 +319,25 @@ def eval(expr, parser='pandas', engine=None, truediv=True, # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. + # we will ignore numpy warnings here; e.g. 
if trying + # to use a non-numeric indexer try: - target[parsed_expr.assigner] = ret + with warnings.catch_warnings(record=True): + target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: - resolvers = ({parsed_expr.assigner: ret},) + resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: - if parsed_expr.assigner in resolver: - resolver[parsed_expr.assigner] = ret + if assigner in resolver: + resolver[assigner] = ret break else: - resolvers += ({parsed_expr.assigner: ret},) + resolvers += ({assigner: ret},) ret = None first_expr = False diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 8db75accc84e5..bf2b598d4ff9e 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1913,7 +1913,10 @@ def size(self): """ ids, _, ngroup = self.group_info ids = _ensure_platform_int(ids) - out = np.bincount(ids[ids != -1], minlength=ngroup or None) + if ngroup: + out = np.bincount(ids[ids != -1], minlength=ngroup) + else: + out = ids return Series(out, index=self.result_index, dtype='int64') diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index be39f4baba0fb..32bab09a0c4ac 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -764,7 +764,7 @@ def _parse_numpy(self): if orient == "columns": args = loads(json, dtype=None, numpy=True, labelled=True, precise_float=self.precise_float) - if args: + if len(args): args = (args[0].T, args[2], args[1]) self.obj = DataFrame(*args) elif orient == "split": diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 1d57093585ef2..26e39f0df8b29 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -596,6 +596,7 @@ def read(self, nrows=None): nrows = self.row_count if len(self.column_types) == 0: + self.close() raise EmptyDataError("No columns to parse from file") if 
self._current_row_in_file_index >= self.row_count:
closes #18180
https://api.github.com/repos/pandas-dev/pandas/pulls/18247
2017-11-12T21:02:52Z
2017-11-13T01:18:53Z
2017-11-13T01:18:53Z
2017-11-13T01:18:53Z
TST: xfail dateutil > 2.6.1 tests
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 307184cb34e27..f53688fd5c84a 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -995,6 +995,8 @@ def test_dayfirst(self, cache): class TestGuessDatetimeFormat(object): + + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") def test_guess_datetime_format_for_array(self): tm._skip_if_not_us_locale() expected_format = '%Y-%m-%d %H:%M:%S.%f' diff --git a/pandas/tests/scalar/test_parsing.py b/pandas/tests/scalar/test_parsing.py index 6908fecbd4e05..54ae84b678a94 100644 --- a/pandas/tests/scalar/test_parsing.py +++ b/pandas/tests/scalar/test_parsing.py @@ -67,37 +67,52 @@ def test_parsers_monthfreq(self): class TestGuessDatetimeFormat(object): - def test_guess_datetime_format_with_parseable_formats(self): + + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") + @pytest.mark.parametrize( + "string, format", + [ + ('20111230', '%Y%m%d'), + ('2011-12-30', '%Y-%m-%d'), + ('30-12-2011', '%d-%m-%Y'), + ('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'), + ('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'), + ('2011-12-30 00:00:00.000000', + '%Y-%m-%d %H:%M:%S.%f')]) + def test_guess_datetime_format_with_parseable_formats( + self, string, format): tm._skip_if_not_us_locale() - dt_string_to_format = (('20111230', '%Y%m%d'), - ('2011-12-30', '%Y-%m-%d'), - ('30-12-2011', '%d-%m-%Y'), - ('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'), - ('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'), - ('2011-12-30 00:00:00.000000', - '%Y-%m-%d %H:%M:%S.%f'), ) - - for dt_string, dt_format in dt_string_to_format: - assert parsing._guess_datetime_format(dt_string) == dt_format - - def test_guess_datetime_format_with_dayfirst(self): - ambiguous_string = '01/01/2011' - assert parsing._guess_datetime_format( - ambiguous_string, dayfirst=True) == '%d/%m/%Y' - assert parsing._guess_datetime_format( - ambiguous_string, 
dayfirst=False) == '%m/%d/%Y' - def test_guess_datetime_format_with_locale_specific_formats(self): + result = parsing._guess_datetime_format(string) + assert result == format + + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") + @pytest.mark.parametrize( + "dayfirst, expected", + [ + (True, "%d/%m/%Y"), + (False, "%m/%d/%Y")]) + def test_guess_datetime_format_with_dayfirst(self, dayfirst, expected): + ambiguous_string = '01/01/2011' + result = parsing._guess_datetime_format( + ambiguous_string, dayfirst=dayfirst) + assert result == expected + + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") + @pytest.mark.parametrize( + "string, format", + [ + ('30/Dec/2011', '%d/%b/%Y'), + ('30/December/2011', '%d/%B/%Y'), + ('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S')]) + def test_guess_datetime_format_with_locale_specific_formats( + self, string, format): # The month names will vary depending on the locale, in which # case these wont be parsed properly (dateutil can't parse them) tm._skip_if_has_locale() - dt_string_to_format = (('30/Dec/2011', '%d/%b/%Y'), - ('30/December/2011', '%d/%B/%Y'), - ('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S'), ) - - for dt_string, dt_format in dt_string_to_format: - assert parsing._guess_datetime_format(dt_string) == dt_format + result = parsing._guess_datetime_format(string) + assert result == format def test_guess_datetime_format_invalid_inputs(self): # A datetime string must include a year, month and a day for it @@ -117,17 +132,20 @@ def test_guess_datetime_format_invalid_inputs(self): for invalid_dt in invalid_dts: assert parsing._guess_datetime_format(invalid_dt) is None - def test_guess_datetime_format_nopadding(self): + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") + @pytest.mark.parametrize( + "string, format", + [ + ('2011-1-1', '%Y-%m-%d'), + ('30-1-2011', '%d-%m-%Y'), + ('1/1/2011', '%m/%d/%Y'), + ('2011-1-1 00:00:00', '%Y-%m-%d %H:%M:%S'), + ('2011-1-1 0:0:0', '%Y-%m-%d %H:%M:%S'), + 
('2011-1-3T00:00:0', '%Y-%m-%dT%H:%M:%S')]) + def test_guess_datetime_format_nopadding(self, string, format): # GH 11142 - dt_string_to_format = (('2011-1-1', '%Y-%m-%d'), - ('30-1-2011', '%d-%m-%Y'), - ('1/1/2011', '%m/%d/%Y'), - ('2011-1-1 00:00:00', '%Y-%m-%d %H:%M:%S'), - ('2011-1-1 0:0:0', '%Y-%m-%d %H:%M:%S'), - ('2011-1-3T00:00:0', '%Y-%m-%dT%H:%M:%S')) - - for dt_string, dt_format in dt_string_to_format: - assert parsing._guess_datetime_format(dt_string) == dt_format + result = parsing._guess_datetime_format(string) + assert result == format class TestArrayToDatetime(object):
xref #18141
https://api.github.com/repos/pandas-dev/pandas/pulls/18240
2017-11-12T14:36:51Z
2017-11-12T16:57:52Z
2017-11-12T16:57:52Z
2017-12-11T20:23:44Z
CI: slightly more robust xfvb starting
diff --git a/.travis.yml b/.travis.yml index fe1a2950dbf08..42b4ef0396fc8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -102,8 +102,6 @@ before_install: - uname -a - git --version - git tag - - ci/before_install_travis.sh - - export DISPLAY=":99.0" install: - echo "install start" @@ -114,6 +112,8 @@ install: before_script: - ci/install_db_travis.sh + - export DISPLAY=":99.0" + - ci/before_script_travis.sh script: - echo "script start" diff --git a/ci/before_install_travis.sh b/ci/before_script_travis.sh similarity index 93% rename from ci/before_install_travis.sh rename to ci/before_script_travis.sh index 2d0b4da6120dc..0b3939b1906a2 100755 --- a/ci/before_install_travis.sh +++ b/ci/before_script_travis.sh @@ -4,6 +4,7 @@ echo "inside $0" if [ "${TRAVIS_OS_NAME}" == "linux" ]; then sh -e /etc/init.d/xvfb start + sleep 3 fi # Never fail because bad things happened here. diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 940a331a9de84..b5d1435c29cb7 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -18,7 +18,7 @@ try: DataFrame({'A': [1, 2]}).to_clipboard() _DEPS_INSTALLED = 1 -except PyperclipException: +except (PyperclipException, RuntimeError): _DEPS_INSTALLED = 0
https://api.github.com/repos/pandas-dev/pandas/pulls/18239
2017-11-12T14:10:10Z
2017-11-12T16:15:03Z
2017-11-12T16:15:03Z
2017-12-11T20:24:20Z
BUG: Fix filter method so that accepts byte and unicode column names
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 7c5dc66ce4587..86dcc9dcefa09 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -115,7 +115,7 @@ Reshaping - Error message in ``pd.merge_asof()`` for key datatype mismatch now includes datatype of left and right key (:issue:`18068`) - Bug in ``pd.concat`` when empty and non-empty DataFrames or Series are concatenated (:issue:`18178` :issue:`18187`) -- +- Bug in ``DataFrame.filter(...)`` when :class:`unicode` is passed as a condition in Python 2 (:issue:`13101`) - Numeric diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 8a6a979ddd7c3..a615e098135a9 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -257,6 +257,16 @@ def u(s): def u_safe(s): return s + def to_str(s): + """ + Convert bytes and non-string into Python 3 str + """ + if isinstance(s, binary_type): + s = bytes_to_str(s) + elif not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): # encoding is for compat with PY2 return len(data) @@ -302,6 +312,14 @@ def u_safe(s): except: return s + def to_str(s): + """ + Convert unicode and non-string into Python 2 str + """ + if not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): try: data = data.decode(encoding) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d93fe52d5ca9c..0a10058677fb9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -50,7 +50,7 @@ from pandas.tseries.frequencies import to_offset from pandas import compat from pandas.compat.numpy import function as nv -from pandas.compat import (map, zip, lzip, lrange, string_types, +from pandas.compat import (map, zip, lzip, lrange, string_types, to_str, isidentifier, set_function_name, cPickle as pkl) from pandas.core.ops import _align_method_FRAME import pandas.core.nanops as nanops @@ -3218,14 +3218,14 @@ def filter(self, items=None, 
like=None, regex=None, axis=None): **{name: [r for r in items if r in labels]}) elif like: def f(x): - if not isinstance(x, string_types): - x = str(x) - return like in x + return like in to_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: + def f(x): + return matcher.search(to_str(x)) is not None matcher = re.compile(regex) - values = labels.map(lambda x: matcher.search(str(x)) is not None) + values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or `regex`') diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 1e2f630401c89..343e235fb741c 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -884,6 +884,27 @@ def test_filter_regex_search(self): exp = df[[x for x in df.columns if 'BB' in x]] assert_frame_equal(result, exp) + @pytest.mark.parametrize('name,expected', [ + ('a', DataFrame({u'a': [1, 2]})), + (u'a', DataFrame({u'a': [1, 2]})), + (u'あ', DataFrame({u'あ': [3, 4]})) + ]) + def test_filter_unicode(self, name, expected): + # GH13101 + df = DataFrame({u'a': [1, 2], u'あ': [3, 4]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + + @pytest.mark.parametrize('name', ['a', u'a']) + def test_filter_bytestring(self, name): + # GH13101 + df = DataFrame({b'a': [1, 2], b'b': [3, 4]}) + expected = DataFrame({b'a': [1, 2]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + def test_filter_corner(self): empty = DataFrame()
- [x] closes #13101 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18238
2017-11-12T13:41:27Z
2017-11-22T02:03:52Z
2017-11-22T02:03:51Z
2017-12-11T20:23:22Z
Implement _get_lastbday
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 87be9fa910101..c088baecffaeb 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -139,11 +139,22 @@ def apply_index_wraps(func): # --------------------------------------------------------------------- # Business Helpers -cpdef int _get_firstbday(int wkday): +cpdef int _get_lastbday(int wkday, int days_in_month): """ - wkday is the result of monthrange(year, month) + (wkday, days_in_month) is the result of monthrange(year, month) + + """ + return days_in_month - max(((wkday + days_in_month - 1) % 7) - 4, 0) + + +cpdef int _get_firstbday(int wkday, int days_in_month=0): + """ + (wkday, days_in_month) is the result of monthrange(year, month) If it's a saturday or sunday, increment first business day to reflect this + + Note: `days_in_month` argument is only included to match the signature + of _get_lastbday. """ first = 1 if wkday == 5: # on Saturday diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 5d1ea71d5cef5..fb8250bb16ff2 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -18,7 +18,8 @@ from pandas._libs.tslibs.offsets import ( ApplyTypeError, as_datetime, _is_normalized, - _get_firstbday, _get_calendar, _to_dt64, _validate_business_time, + _get_firstbday, _get_lastbday, + _get_calendar, _to_dt64, _validate_business_time, _int_to_weekday, _weekday_to_int, _determine_offset, apply_index_wraps, @@ -1184,12 +1185,11 @@ class BusinessMonthEnd(MonthOffset): def apply(self, other): n = self.n wkday, days_in_month = tslib.monthrange(other.year, other.month) - lastBDay = days_in_month - max(((wkday + days_in_month - 1) - % 7) - 4, 0) + last_bday = _get_lastbday(wkday, days_in_month) - if n > 0 and not other.day >= lastBDay: + if n > 0 and not other.day >= last_bday: n = n - 1 - elif n <= 0 and other.day > lastBDay: + elif n <= 0 and other.day > last_bday: n = n + 1 other = other + relativedelta(months=n, 
day=31) @@ -1197,6 +1197,12 @@ def apply(self, other): other = other - BDay() return other + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False + wkday, days_in_month = tslib.monthrange(dt.year, dt.month) + return dt.day == _get_lastbday(wkday, days_in_month) + class BusinessMonthBegin(MonthOffset): """DateOffset of one business month at beginning""" @@ -1226,13 +1232,8 @@ def apply(self, other): def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False - first_weekday, _ = tslib.monthrange(dt.year, dt.month) - if first_weekday == 5: - return dt.day == 3 - elif first_weekday == 6: - return dt.day == 2 - else: - return dt.day == 1 + wkday, days_in_month = tslib.monthrange(dt.year, dt.month) + return dt.day == _get_firstbday(wkday, days_in_month) class CustomBusinessMonthEnd(BusinessMixin, MonthOffset): @@ -1704,16 +1705,15 @@ def apply(self, other): other.microsecond) wkday, days_in_month = tslib.monthrange(other.year, other.month) - lastBDay = days_in_month - max(((wkday + days_in_month - 1) - % 7) - 4, 0) + last_bday = _get_lastbday(wkday, days_in_month) monthsToGo = 3 - ((other.month - self.startingMonth) % 3) if monthsToGo == 3: monthsToGo = 0 - if n > 0 and not (other.day >= lastBDay and monthsToGo == 0): + if n > 0 and not (other.day >= last_bday and monthsToGo == 0): n = n - 1 - elif n <= 0 and other.day > lastBDay and monthsToGo == 0: + elif n <= 0 and other.day > last_bday and monthsToGo == 0: n = n + 1 other = other + relativedelta(months=monthsToGo + 3 * n, day=31) @@ -1876,17 +1876,16 @@ class BYearEnd(YearOffset): def apply(self, other): n = self.n wkday, days_in_month = tslib.monthrange(other.year, self.month) - lastBDay = (days_in_month - - max(((wkday + days_in_month - 1) % 7) - 4, 0)) + last_bday = _get_lastbday(wkday, days_in_month) years = n if n > 0: if (other.month < self.month or - (other.month == self.month and other.day < lastBDay)): + (other.month == self.month and other.day < 
last_bday)): years -= 1 elif n <= 0: if (other.month > self.month or - (other.month == self.month and other.day > lastBDay)): + (other.month == self.month and other.day > last_bday)): years += 1 other = other + relativedelta(years=years)
Orthogonal to other PRs. Implement `_get_lastbday` mirroring `_get_firstbday`. Add a dummy argument to get_firstbday so that the signatures match. Following this and #18218, we'll be able to define e.g. ``` class BQuarterBegin(...): _get_bday = _get_firstbday class BMonthEnd(...): _get_bday = _get_lastbday ``` etc. From there we can get rid of _bunch_ of duplicated logic.
https://api.github.com/repos/pandas-dev/pandas/pulls/18234
2017-11-12T04:59:40Z
2017-11-12T06:34:24Z
null
2017-12-08T19:38:49Z
Separate tick tests, use pytest parametrize
diff --git a/pandas/tests/tseries/conftest.py b/pandas/tests/tseries/conftest.py index 25446c24b28c0..fc1ecf21c5446 100644 --- a/pandas/tests/tseries/conftest.py +++ b/pandas/tests/tseries/conftest.py @@ -1,10 +1,4 @@ import pytest -import pandas.tseries.offsets as offsets - - -@pytest.fixture(params=[getattr(offsets, o) for o in offsets.__all__]) -def offset_types(request): - return request.param @pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', 'US/Eastern', diff --git a/pandas/tests/tseries/offsets/__init__.py b/pandas/tests/tseries/offsets/__init__.py new file mode 100644 index 0000000000000..40a96afc6ff09 --- /dev/null +++ b/pandas/tests/tseries/offsets/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py new file mode 100644 index 0000000000000..2e8eb224bca7f --- /dev/null +++ b/pandas/tests/tseries/offsets/common.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" +Assertion helpers for offsets tests +""" + + +def assert_offset_equal(offset, base, expected): + actual = offset + base + actual_swapped = base + offset + actual_apply = offset.apply(base) + try: + assert actual == expected + assert actual_swapped == expected + assert actual_apply == expected + except AssertionError: + raise AssertionError("\nExpected: %s\nActual: %s\nFor Offset: %s)" + "\nAt Date: %s" % + (expected, actual, offset, base)) + + +def assert_onOffset(offset, date, expected): + actual = offset.onOffset(date) + assert actual == expected, ("\nExpected: %s\nActual: %s\nFor Offset: %s)" + "\nAt Date: %s" % + (expected, actual, offset, date)) diff --git a/pandas/tests/tseries/offsets/conftest.py b/pandas/tests/tseries/offsets/conftest.py new file mode 100644 index 0000000000000..25446c24b28c0 --- /dev/null +++ b/pandas/tests/tseries/offsets/conftest.py @@ -0,0 +1,13 @@ +import pytest +import pandas.tseries.offsets as offsets + + +@pytest.fixture(params=[getattr(offsets, o) for o in offsets.__all__]) 
+def offset_types(request): + return request.param + + +@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', 'US/Eastern', + 'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']) +def tz(request): + return request.param diff --git a/pandas/tests/tseries/data/cday-0.14.1.pickle b/pandas/tests/tseries/offsets/data/cday-0.14.1.pickle similarity index 100% rename from pandas/tests/tseries/data/cday-0.14.1.pickle rename to pandas/tests/tseries/offsets/data/cday-0.14.1.pickle diff --git a/pandas/tests/tseries/data/dateoffset_0_15_2.pickle b/pandas/tests/tseries/offsets/data/dateoffset_0_15_2.pickle similarity index 100% rename from pandas/tests/tseries/data/dateoffset_0_15_2.pickle rename to pandas/tests/tseries/offsets/data/dateoffset_0_15_2.pickle diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py similarity index 79% rename from pandas/tests/tseries/test_offsets.py rename to pandas/tests/tseries/offsets/test_offsets.py index 4fd3bba01602f..b123fa127e29c 100644 --- a/pandas/tests/tseries/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -4,7 +4,7 @@ from dateutil.relativedelta import relativedelta import pytest -from pandas.compat import range, iteritems +from pandas.compat import range from pandas import compat import numpy as np @@ -25,9 +25,9 @@ MonthBegin, SemiMonthBegin, SemiMonthEnd, BYearBegin, QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week, YearBegin, - YearEnd, Hour, Minute, Second, Day, Micro, + YearEnd, Day, QuarterEnd, BusinessMonthEnd, FY5253, - Milli, Nano, Easter, FY5253Quarter, + Nano, Easter, FY5253Quarter, LastWeekOfMonth) from pandas.core.tools.datetimes import ( format, ole2datetime, parse_time_string, @@ -35,11 +35,13 @@ import pandas.tseries.offsets as offsets from pandas.io.pickle import read_pickle from pandas._libs.tslibs import timezones -from pandas._libs.tslib import normalize_date, NaT, Timestamp, Timedelta +from pandas._libs.tslib import normalize_date, NaT, Timestamp 
import pandas._libs.tslib as tslib import pandas.util.testing as tm from pandas.tseries.holiday import USFederalHolidayCalendar +from .common import assert_offset_equal, assert_onOffset + def test_monthrange(): import calendar @@ -162,51 +164,44 @@ def test_apply_out_of_range(self, tz): class TestCommon(Base): - - def setup_method(self, method): - # exected value created by Base._get_offset - # are applied to 2011/01/01 09:00 (Saturday) - # used for .apply and .rollforward - self.expecteds = {'Day': Timestamp('2011-01-02 09:00:00'), - 'DateOffset': Timestamp('2011-01-02 09:00:00'), - 'BusinessDay': Timestamp('2011-01-03 09:00:00'), - 'CustomBusinessDay': - Timestamp('2011-01-03 09:00:00'), - 'CustomBusinessMonthEnd': - Timestamp('2011-01-31 09:00:00'), - 'CustomBusinessMonthBegin': - Timestamp('2011-01-03 09:00:00'), - 'MonthBegin': Timestamp('2011-02-01 09:00:00'), - 'BusinessMonthBegin': - Timestamp('2011-01-03 09:00:00'), - 'MonthEnd': Timestamp('2011-01-31 09:00:00'), - 'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'), - 'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'), - 'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'), - 'YearBegin': Timestamp('2012-01-01 09:00:00'), - 'BYearBegin': Timestamp('2011-01-03 09:00:00'), - 'YearEnd': Timestamp('2011-12-31 09:00:00'), - 'BYearEnd': Timestamp('2011-12-30 09:00:00'), - 'QuarterBegin': Timestamp('2011-03-01 09:00:00'), - 'BQuarterBegin': Timestamp('2011-03-01 09:00:00'), - 'QuarterEnd': Timestamp('2011-03-31 09:00:00'), - 'BQuarterEnd': Timestamp('2011-03-31 09:00:00'), - 'BusinessHour': Timestamp('2011-01-03 10:00:00'), - 'CustomBusinessHour': - Timestamp('2011-01-03 10:00:00'), - 'WeekOfMonth': Timestamp('2011-01-08 09:00:00'), - 'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'), - 'FY5253Quarter': Timestamp('2011-01-25 09:00:00'), - 'FY5253': Timestamp('2011-01-25 09:00:00'), - 'Week': Timestamp('2011-01-08 09:00:00'), - 'Easter': Timestamp('2011-04-24 09:00:00'), - 'Hour': Timestamp('2011-01-01 10:00:00'), 
- 'Minute': Timestamp('2011-01-01 09:01:00'), - 'Second': Timestamp('2011-01-01 09:00:01'), - 'Milli': Timestamp('2011-01-01 09:00:00.001000'), - 'Micro': Timestamp('2011-01-01 09:00:00.000001'), - 'Nano': Timestamp(np_datetime64_compat( - '2011-01-01T09:00:00.000000001Z'))} + # exected value created by Base._get_offset + # are applied to 2011/01/01 09:00 (Saturday) + # used for .apply and .rollforward + expecteds = {'Day': Timestamp('2011-01-02 09:00:00'), + 'DateOffset': Timestamp('2011-01-02 09:00:00'), + 'BusinessDay': Timestamp('2011-01-03 09:00:00'), + 'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'), + 'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'), + 'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'), + 'MonthBegin': Timestamp('2011-02-01 09:00:00'), + 'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'), + 'MonthEnd': Timestamp('2011-01-31 09:00:00'), + 'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'), + 'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'), + 'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'), + 'YearBegin': Timestamp('2012-01-01 09:00:00'), + 'BYearBegin': Timestamp('2011-01-03 09:00:00'), + 'YearEnd': Timestamp('2011-12-31 09:00:00'), + 'BYearEnd': Timestamp('2011-12-30 09:00:00'), + 'QuarterBegin': Timestamp('2011-03-01 09:00:00'), + 'BQuarterBegin': Timestamp('2011-03-01 09:00:00'), + 'QuarterEnd': Timestamp('2011-03-31 09:00:00'), + 'BQuarterEnd': Timestamp('2011-03-31 09:00:00'), + 'BusinessHour': Timestamp('2011-01-03 10:00:00'), + 'CustomBusinessHour': Timestamp('2011-01-03 10:00:00'), + 'WeekOfMonth': Timestamp('2011-01-08 09:00:00'), + 'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'), + 'FY5253Quarter': Timestamp('2011-01-25 09:00:00'), + 'FY5253': Timestamp('2011-01-25 09:00:00'), + 'Week': Timestamp('2011-01-08 09:00:00'), + 'Easter': Timestamp('2011-04-24 09:00:00'), + 'Hour': Timestamp('2011-01-01 10:00:00'), + 'Minute': Timestamp('2011-01-01 09:01:00'), + 'Second': Timestamp('2011-01-01 
09:00:01'), + 'Milli': Timestamp('2011-01-01 09:00:00.001000'), + 'Micro': Timestamp('2011-01-01 09:00:00.000001'), + 'Nano': Timestamp(np_datetime64_compat( + '2011-01-01T09:00:00.000000001Z'))} def test_return_type(self, offset_types): offset = self._get_offset(offset_types) @@ -623,7 +618,7 @@ def test_onOffset(self): (BDay(), datetime(2008, 1, 5), False)] for offset, d, expected in tests: - assertOnOffset(offset, d, expected) + assert_onOffset(offset, d, expected) def test_apply(self): tests = [] @@ -668,7 +663,7 @@ def test_apply(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_large_n(self): dt = datetime(2012, 10, 23) @@ -1272,7 +1267,7 @@ def test_apply(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_large_n(self): tests = [] @@ -1331,7 +1326,7 @@ def test_apply_large_n(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_nanoseconds(self): tests = [] @@ -1354,7 +1349,7 @@ def test_apply_nanoseconds(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_offsets_compare_equal(self): # root cause of #456 @@ -1628,7 +1623,7 @@ def test_apply(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_nanoseconds(self): tests = [] @@ -1651,7 +1646,7 @@ def test_apply_nanoseconds(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) class 
TestCustomBusinessDay(Base): @@ -1752,7 +1747,7 @@ def test_onOffset(self): (CDay(), datetime(2008, 1, 5), False)] for offset, d, expected in tests: - assertOnOffset(offset, d, expected) + assert_onOffset(offset, d, expected) def test_apply(self): tests = [] @@ -1798,7 +1793,7 @@ def test_apply(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_large_n(self): dt = datetime(2012, 10, 23) @@ -1870,7 +1865,7 @@ def test_weekmask_and_holidays(self): def test_calendar(self): calendar = USFederalHolidayCalendar() dt = datetime(2014, 1, 17) - assertEq(CDay(calendar=calendar), dt, datetime(2014, 1, 21)) + assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21)) def test_roundtrip_pickle(self): def _check_roundtrip(obj): @@ -1997,7 +1992,7 @@ def test_onOffset(self): (CBMonthEnd(), datetime(2008, 1, 1), False)] for offset, d, expected in tests: - assertOnOffset(offset, d, expected) + assert_onOffset(offset, d, expected) def test_apply(self): cbm = CBMonthEnd() @@ -2022,7 +2017,7 @@ def test_apply(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_large_n(self): dt = datetime(2012, 10, 23) @@ -2111,7 +2106,7 @@ def test_onOffset(self): (CBMonthBegin(), datetime(2008, 1, 31), False)] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) def test_apply(self): cbm = CBMonthBegin() @@ -2135,7 +2130,7 @@ def test_apply(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_large_n(self): dt = datetime(2012, 10, 23) @@ -2174,13 +2169,6 @@ def test_datetimeindex(self): freq=cbmb).tolist()[0] == datetime(2012, 1, 3)) -def 
assertOnOffset(offset, date, expected): - actual = offset.onOffset(date) - assert actual == expected, ("\nExpected: %s\nActual: %s\nFor Offset: %s)" - "\nAt Date: %s" % - (expected, actual, offset, date)) - - class TestWeek(Base): _offset = Week @@ -2231,7 +2219,7 @@ def test_offset(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_onOffset(self): for weekday in range(7): @@ -2244,7 +2232,7 @@ def test_onOffset(self): expected = True else: expected = False - assertOnOffset(offset, date, expected) + assert_onOffset(offset, date, expected) def test_offsets_compare_equal(self): # root cause of #456 @@ -2316,7 +2304,7 @@ def test_offset(self): for n, week, weekday, dt, expected in test_cases: offset = WeekOfMonth(n, week=week, weekday=weekday) - assertEq(offset, dt, expected) + assert_offset_equal(offset, dt, expected) # try subtracting result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2) @@ -2457,7 +2445,7 @@ def test_offset(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_onOffset(self): @@ -2467,7 +2455,7 @@ def test_onOffset(self): (BMonthBegin(), datetime(2008, 3, 3), True)] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) def test_offsets_compare_equal(self): # root cause of #456 @@ -2515,7 +2503,7 @@ def test_offset(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_normalize(self): dt = datetime(2007, 1, 1, 3) @@ -2530,7 +2518,7 @@ def test_onOffset(self): (BMonthEnd(), datetime(2008, 1, 1), False)] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) def 
test_offsets_compare_equal(self): # root cause of #456 @@ -2577,7 +2565,7 @@ def test_offset(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) class TestMonthEnd(Base): @@ -2619,7 +2607,7 @@ def test_offset(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_day_of_month(self): dt = datetime(2007, 1, 1) @@ -2644,7 +2632,7 @@ def test_onOffset(self): (MonthEnd(), datetime(2008, 1, 1), False)] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) class TestSemiMonthEnd(Base): @@ -2759,7 +2747,7 @@ def test_offset_whole_year(self): datetime(2008, 12, 31)) for base, exp_date in zip(dates[:-1], dates[1:]): - assertEq(SemiMonthEnd(), base, exp_date) + assert_offset_equal(SemiMonthEnd(), base, exp_date) # ensure .apply_index works as expected s = DatetimeIndex(dates[:-1]) @@ -2775,7 +2763,7 @@ def test_offset_whole_year(self): def test_offset(self): for offset, cases in self._get_tests(): for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_index(self): for offset, cases in self._get_tests(): @@ -2793,30 +2781,30 @@ def test_onOffset(self): (datetime(2008, 2, 29), True)] for dt, expected in tests: - assertOnOffset(SemiMonthEnd(), dt, expected) - - def test_vectorized_offset_addition(self): - for klass, assert_func in zip([Series, DatetimeIndex], - [tm.assert_series_equal, - tm.assert_index_equal]): - s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), - Timestamp('2000-02-15', tz='US/Central')], name='a') - - result = s + SemiMonthEnd() - result2 = SemiMonthEnd() + s - exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'), - Timestamp('2000-02-29', tz='US/Central')], name='a') 
- assert_func(result, exp) - assert_func(result2, exp) - - s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'), - Timestamp('2000-02-01', tz='US/Central')], name='a') - result = s + SemiMonthEnd() - result2 = SemiMonthEnd() + s - exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), - Timestamp('2000-02-15', tz='US/Central')], name='a') - assert_func(result, exp) - assert_func(result2, exp) + assert_onOffset(SemiMonthEnd(), dt, expected) + + @pytest.mark.parametrize('klass,assert_func', + [(Series, tm.assert_series_equal), + (DatetimeIndex, tm.assert_index_equal)]) + def test_vectorized_offset_addition(self, klass, assert_func): + s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), + Timestamp('2000-02-15', tz='US/Central')], name='a') + + result = s + SemiMonthEnd() + result2 = SemiMonthEnd() + s + exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'), + Timestamp('2000-02-29', tz='US/Central')], name='a') + assert_func(result, exp) + assert_func(result2, exp) + + s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'), + Timestamp('2000-02-01', tz='US/Central')], name='a') + result = s + SemiMonthEnd() + result2 = SemiMonthEnd() + s + exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), + Timestamp('2000-02-15', tz='US/Central')], name='a') + assert_func(result, exp) + assert_func(result2, exp) class TestSemiMonthBegin(Base): @@ -2935,7 +2923,7 @@ def test_offset_whole_year(self): datetime(2008, 12, 15)) for base, exp_date in zip(dates[:-1], dates[1:]): - assertEq(SemiMonthBegin(), base, exp_date) + assert_offset_equal(SemiMonthBegin(), base, exp_date) # ensure .apply_index works as expected s = DatetimeIndex(dates[:-1]) @@ -2951,7 +2939,7 @@ def test_offset_whole_year(self): def test_offset(self): for offset, cases in self._get_tests(): for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) def test_apply_index(self): for offset, cases 
in self._get_tests(): @@ -2968,30 +2956,29 @@ def test_onOffset(self): (datetime(2008, 2, 15), True)] for dt, expected in tests: - assertOnOffset(SemiMonthBegin(), dt, expected) - - def test_vectorized_offset_addition(self): - for klass, assert_func in zip([Series, DatetimeIndex], - [tm.assert_series_equal, - tm.assert_index_equal]): - - s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), - Timestamp('2000-02-15', tz='US/Central')], name='a') - result = s + SemiMonthBegin() - result2 = SemiMonthBegin() + s - exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'), - Timestamp('2000-03-01', tz='US/Central')], name='a') - assert_func(result, exp) - assert_func(result2, exp) - - s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'), - Timestamp('2000-02-01', tz='US/Central')], name='a') - result = s + SemiMonthBegin() - result2 = SemiMonthBegin() + s - exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), - Timestamp('2000-02-15', tz='US/Central')], name='a') - assert_func(result, exp) - assert_func(result2, exp) + assert_onOffset(SemiMonthBegin(), dt, expected) + + @pytest.mark.parametrize('klass,assert_func', + [(Series, tm.assert_series_equal), + (DatetimeIndex, tm.assert_index_equal)]) + def test_vectorized_offset_addition(self, klass, assert_func): + s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), + Timestamp('2000-02-15', tz='US/Central')], name='a') + result = s + SemiMonthBegin() + result2 = SemiMonthBegin() + s + exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'), + Timestamp('2000-03-01', tz='US/Central')], name='a') + assert_func(result, exp) + assert_func(result2, exp) + + s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'), + Timestamp('2000-02-01', tz='US/Central')], name='a') + result = s + SemiMonthBegin() + result2 = SemiMonthBegin() + s + exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), + Timestamp('2000-02-15', tz='US/Central')], name='a') + assert_func(result, exp) 
+ assert_func(result2, exp) class TestBQuarterBegin(Base): @@ -3081,7 +3068,7 @@ def test_offset(self): for offset, cases in tests: for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + assert_offset_equal(offset, base, expected) # corner offset = BQuarterBegin(n=-1, startingMonth=1) @@ -3104,100 +3091,100 @@ def test_isAnchored(self): assert BQuarterEnd().isAnchored() assert not BQuarterEnd(2, startingMonth=1).isAnchored() - def test_offset(self): - tests = [] - - tests.append((BQuarterEnd(startingMonth=1), - {datetime(2008, 1, 1): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 4, 30), - datetime(2008, 2, 15): datetime(2008, 4, 30), - datetime(2008, 2, 29): datetime(2008, 4, 30), - datetime(2008, 3, 15): datetime(2008, 4, 30), - datetime(2008, 3, 31): datetime(2008, 4, 30), - datetime(2008, 4, 15): datetime(2008, 4, 30), - datetime(2008, 4, 30): datetime(2008, 7, 31), })) - - tests.append((BQuarterEnd(startingMonth=2), - {datetime(2008, 1, 1): datetime(2008, 2, 29), - datetime(2008, 1, 31): datetime(2008, 2, 29), - datetime(2008, 2, 15): datetime(2008, 2, 29), - datetime(2008, 2, 29): datetime(2008, 5, 30), - datetime(2008, 3, 15): datetime(2008, 5, 30), - datetime(2008, 3, 31): datetime(2008, 5, 30), - datetime(2008, 4, 15): datetime(2008, 5, 30), - datetime(2008, 4, 30): datetime(2008, 5, 30), })) - - tests.append((BQuarterEnd(startingMonth=1, n=0), - {datetime(2008, 1, 1): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 1, 31), - datetime(2008, 2, 15): datetime(2008, 4, 30), - datetime(2008, 2, 29): datetime(2008, 4, 30), - datetime(2008, 3, 15): datetime(2008, 4, 30), - datetime(2008, 3, 31): datetime(2008, 4, 30), - datetime(2008, 4, 15): datetime(2008, 4, 30), - datetime(2008, 4, 30): datetime(2008, 4, 30), })) - - tests.append((BQuarterEnd(startingMonth=1, n=-1), - {datetime(2008, 1, 1): datetime(2007, 10, 31), - datetime(2008, 1, 31): datetime(2007, 10, 31), - datetime(2008, 2, 15): 
datetime(2008, 1, 31), - datetime(2008, 2, 29): datetime(2008, 1, 31), - datetime(2008, 3, 15): datetime(2008, 1, 31), - datetime(2008, 3, 31): datetime(2008, 1, 31), - datetime(2008, 4, 15): datetime(2008, 1, 31), - datetime(2008, 4, 30): datetime(2008, 1, 31), })) - - tests.append((BQuarterEnd(startingMonth=1, n=2), - {datetime(2008, 1, 31): datetime(2008, 7, 31), - datetime(2008, 2, 15): datetime(2008, 7, 31), - datetime(2008, 2, 29): datetime(2008, 7, 31), - datetime(2008, 3, 15): datetime(2008, 7, 31), - datetime(2008, 3, 31): datetime(2008, 7, 31), - datetime(2008, 4, 15): datetime(2008, 7, 31), - datetime(2008, 4, 30): datetime(2008, 10, 31), })) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) - + offset_cases = [] + offset_cases.append((BQuarterEnd(startingMonth=1), + {datetime(2008, 1, 1): datetime(2008, 1, 31), + datetime(2008, 1, 31): datetime(2008, 4, 30), + datetime(2008, 2, 15): datetime(2008, 4, 30), + datetime(2008, 2, 29): datetime(2008, 4, 30), + datetime(2008, 3, 15): datetime(2008, 4, 30), + datetime(2008, 3, 31): datetime(2008, 4, 30), + datetime(2008, 4, 15): datetime(2008, 4, 30), + datetime(2008, 4, 30): datetime(2008, 7, 31), })) + + offset_cases.append((BQuarterEnd(startingMonth=2), + {datetime(2008, 1, 1): datetime(2008, 2, 29), + datetime(2008, 1, 31): datetime(2008, 2, 29), + datetime(2008, 2, 15): datetime(2008, 2, 29), + datetime(2008, 2, 29): datetime(2008, 5, 30), + datetime(2008, 3, 15): datetime(2008, 5, 30), + datetime(2008, 3, 31): datetime(2008, 5, 30), + datetime(2008, 4, 15): datetime(2008, 5, 30), + datetime(2008, 4, 30): datetime(2008, 5, 30), })) + + offset_cases.append((BQuarterEnd(startingMonth=1, n=0), + {datetime(2008, 1, 1): datetime(2008, 1, 31), + datetime(2008, 1, 31): datetime(2008, 1, 31), + datetime(2008, 2, 15): datetime(2008, 4, 30), + datetime(2008, 2, 29): datetime(2008, 4, 30), + datetime(2008, 3, 15): datetime(2008, 4, 30), + 
datetime(2008, 3, 31): datetime(2008, 4, 30), + datetime(2008, 4, 15): datetime(2008, 4, 30), + datetime(2008, 4, 30): datetime(2008, 4, 30), })) + + offset_cases.append((BQuarterEnd(startingMonth=1, n=-1), + {datetime(2008, 1, 1): datetime(2007, 10, 31), + datetime(2008, 1, 31): datetime(2007, 10, 31), + datetime(2008, 2, 15): datetime(2008, 1, 31), + datetime(2008, 2, 29): datetime(2008, 1, 31), + datetime(2008, 3, 15): datetime(2008, 1, 31), + datetime(2008, 3, 31): datetime(2008, 1, 31), + datetime(2008, 4, 15): datetime(2008, 1, 31), + datetime(2008, 4, 30): datetime(2008, 1, 31), })) + + offset_cases.append((BQuarterEnd(startingMonth=1, n=2), + {datetime(2008, 1, 31): datetime(2008, 7, 31), + datetime(2008, 2, 15): datetime(2008, 7, 31), + datetime(2008, 2, 29): datetime(2008, 7, 31), + datetime(2008, 3, 15): datetime(2008, 7, 31), + datetime(2008, 3, 31): datetime(2008, 7, 31), + datetime(2008, 4, 15): datetime(2008, 7, 31), + datetime(2008, 4, 30): datetime(2008, 10, 31), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) + + def test_offset_corner_case(self): # corner offset = BQuarterEnd(n=-1, startingMonth=1) assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29) - def test_onOffset(self): - - tests = [ - (BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True), - (BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False), - (BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False), - (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False), - (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False), - (BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True), - (BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False), - (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False), - (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False), - 
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False), - (BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False), - (BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True), - (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False), - (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False), - (BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False), - (BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True), - (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False), - (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False), - (BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False), - (BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True), - (BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False), - (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True), - (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False), - (BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False), - (BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False), - (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True), - (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False), - ] - - for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + on_offset_cases = [ + (BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True), + (BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False), + (BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False), + (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False), + (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False), + (BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True), + (BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False), + (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False), + (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False), + (BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False), + (BQuarterEnd(1, 
startingMonth=2), datetime(2007, 12, 31), False), + (BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True), + (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False), + (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False), + (BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False), + (BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True), + (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False), + (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False), + (BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False), + (BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True), + (BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False), + (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True), + (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False), + (BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False), + (BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False), + (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True), + (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False)] + + @pytest.mark.parametrize('case', on_offset_cases) + def test_onOffset(self, case): + offset, dt, expected = case + assert_onOffset(offset, dt, expected) def makeFY5253LastOfMonthQuarter(*args, **kwds): @@ -3268,7 +3255,7 @@ def test_onOffset(self): ] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) def test_apply(self): offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8, @@ -3410,7 +3397,7 @@ def test_onOffset(self): ] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) def test_apply(self): date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1), @@ -3515,27 +3502,28 @@ def test_offset(self): datetime(2012, 9, 29), datetime(2012, 12, 29), datetime(2013, 3, 30), datetime(2013, 6, 29)] - assertEq(offset, base=GMCR[0], 
expected=GMCR[1]) - assertEq(offset, base=GMCR[0] + relativedelta(days=-1), - expected=GMCR[0]) - assertEq(offset, base=GMCR[1], expected=GMCR[2]) + assert_offset_equal(offset, base=GMCR[0], expected=GMCR[1]) + assert_offset_equal(offset, base=GMCR[0] + relativedelta(days=-1), + expected=GMCR[0]) + assert_offset_equal(offset, base=GMCR[1], expected=GMCR[2]) - assertEq(offset2, base=GMCR[0], expected=GMCR[2]) - assertEq(offset4, base=GMCR[0], expected=GMCR[4]) + assert_offset_equal(offset2, base=GMCR[0], expected=GMCR[2]) + assert_offset_equal(offset4, base=GMCR[0], expected=GMCR[4]) - assertEq(offset_neg1, base=GMCR[-1], expected=GMCR[-2]) - assertEq(offset_neg1, base=GMCR[-1] + relativedelta(days=+1), - expected=GMCR[-1]) - assertEq(offset_neg2, base=GMCR[-1], expected=GMCR[-3]) + assert_offset_equal(offset_neg1, base=GMCR[-1], expected=GMCR[-2]) + assert_offset_equal(offset_neg1, + base=GMCR[-1] + relativedelta(days=+1), + expected=GMCR[-1]) + assert_offset_equal(offset_neg2, base=GMCR[-1], expected=GMCR[-3]) date = GMCR[0] + relativedelta(days=-1) for expected in GMCR: - assertEq(offset, date, expected) + assert_offset_equal(offset, date, expected) date = date + offset date = GMCR[-1] + relativedelta(days=+1) for expected in reversed(GMCR): - assertEq(offset_neg1, date, expected) + assert_offset_equal(offset_neg1, date, expected) date = date + offset_neg1 def test_onOffset(self): @@ -3609,7 +3597,7 @@ def test_onOffset(self): ] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) def test_year_has_extra_week(self): # End of long Q1 @@ -3722,29 +3710,35 @@ def test_onOffset(self): ] for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + assert_onOffset(offset, dt, expected) def test_offset(self): offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4) - MU = [datetime(2012, 5, 31), datetime(2012, 8, 30), datetime(2012, 11, - 29), + 
MU = [datetime(2012, 5, 31), + datetime(2012, 8, 30), datetime(2012, 11, 29), datetime(2013, 2, 28), datetime(2013, 5, 30)] date = MU[0] + relativedelta(days=-1) for expected in MU: - assertEq(offset, date, expected) + assert_offset_equal(offset, date, expected) date = date + offset - assertEq(offset, datetime(2012, 5, 31), datetime(2012, 8, 30)) - assertEq(offset, datetime(2012, 5, 30), datetime(2012, 5, 31)) + assert_offset_equal(offset, + datetime(2012, 5, 31), + datetime(2012, 8, 30)) + assert_offset_equal(offset, + datetime(2012, 5, 30), + datetime(2012, 5, 31)) offset2 = FY5253Quarter(weekday=5, startingMonth=12, variation="last", qtr_with_extra_week=4) - assertEq(offset2, datetime(2013, 1, 15), datetime(2013, 3, 30)) + assert_offset_equal(offset2, + datetime(2013, 1, 15), + datetime(2013, 3, 30)) class TestQuarterBegin(Base): @@ -3762,64 +3756,65 @@ def test_isAnchored(self): assert QuarterBegin().isAnchored() assert not QuarterBegin(2, startingMonth=1).isAnchored() - def test_offset(self): - tests = [] - - tests.append((QuarterBegin(startingMonth=1), - {datetime(2007, 12, 1): datetime(2008, 1, 1), - datetime(2008, 1, 1): datetime(2008, 4, 1), - datetime(2008, 2, 15): datetime(2008, 4, 1), - datetime(2008, 2, 29): datetime(2008, 4, 1), - datetime(2008, 3, 15): datetime(2008, 4, 1), - datetime(2008, 3, 31): datetime(2008, 4, 1), - datetime(2008, 4, 15): datetime(2008, 7, 1), - datetime(2008, 4, 1): datetime(2008, 7, 1), })) - - tests.append((QuarterBegin(startingMonth=2), - {datetime(2008, 1, 1): datetime(2008, 2, 1), - datetime(2008, 1, 31): datetime(2008, 2, 1), - datetime(2008, 1, 15): datetime(2008, 2, 1), - datetime(2008, 2, 29): datetime(2008, 5, 1), - datetime(2008, 3, 15): datetime(2008, 5, 1), - datetime(2008, 3, 31): datetime(2008, 5, 1), - datetime(2008, 4, 15): datetime(2008, 5, 1), - datetime(2008, 4, 30): datetime(2008, 5, 1), })) - - tests.append((QuarterBegin(startingMonth=1, n=0), - {datetime(2008, 1, 1): datetime(2008, 1, 1), - 
datetime(2008, 12, 1): datetime(2009, 1, 1), - datetime(2008, 1, 1): datetime(2008, 1, 1), - datetime(2008, 2, 15): datetime(2008, 4, 1), - datetime(2008, 2, 29): datetime(2008, 4, 1), - datetime(2008, 3, 15): datetime(2008, 4, 1), - datetime(2008, 3, 31): datetime(2008, 4, 1), - datetime(2008, 4, 15): datetime(2008, 7, 1), - datetime(2008, 4, 30): datetime(2008, 7, 1), })) - - tests.append((QuarterBegin(startingMonth=1, n=-1), - {datetime(2008, 1, 1): datetime(2007, 10, 1), - datetime(2008, 1, 31): datetime(2008, 1, 1), - datetime(2008, 2, 15): datetime(2008, 1, 1), - datetime(2008, 2, 29): datetime(2008, 1, 1), - datetime(2008, 3, 15): datetime(2008, 1, 1), - datetime(2008, 3, 31): datetime(2008, 1, 1), - datetime(2008, 4, 15): datetime(2008, 4, 1), - datetime(2008, 4, 30): datetime(2008, 4, 1), - datetime(2008, 7, 1): datetime(2008, 4, 1)})) - - tests.append((QuarterBegin(startingMonth=1, n=2), - {datetime(2008, 1, 1): datetime(2008, 7, 1), - datetime(2008, 2, 15): datetime(2008, 7, 1), - datetime(2008, 2, 29): datetime(2008, 7, 1), - datetime(2008, 3, 15): datetime(2008, 7, 1), - datetime(2008, 3, 31): datetime(2008, 7, 1), - datetime(2008, 4, 15): datetime(2008, 10, 1), - datetime(2008, 4, 1): datetime(2008, 10, 1), })) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) - + offset_cases = [] + offset_cases.append((QuarterBegin(startingMonth=1), + {datetime(2007, 12, 1): datetime(2008, 1, 1), + datetime(2008, 1, 1): datetime(2008, 4, 1), + datetime(2008, 2, 15): datetime(2008, 4, 1), + datetime(2008, 2, 29): datetime(2008, 4, 1), + datetime(2008, 3, 15): datetime(2008, 4, 1), + datetime(2008, 3, 31): datetime(2008, 4, 1), + datetime(2008, 4, 15): datetime(2008, 7, 1), + datetime(2008, 4, 1): datetime(2008, 7, 1), })) + + offset_cases.append((QuarterBegin(startingMonth=2), + {datetime(2008, 1, 1): datetime(2008, 2, 1), + datetime(2008, 1, 31): datetime(2008, 2, 1), + datetime(2008, 1, 15): 
datetime(2008, 2, 1), + datetime(2008, 2, 29): datetime(2008, 5, 1), + datetime(2008, 3, 15): datetime(2008, 5, 1), + datetime(2008, 3, 31): datetime(2008, 5, 1), + datetime(2008, 4, 15): datetime(2008, 5, 1), + datetime(2008, 4, 30): datetime(2008, 5, 1), })) + + offset_cases.append((QuarterBegin(startingMonth=1, n=0), + {datetime(2008, 1, 1): datetime(2008, 1, 1), + datetime(2008, 12, 1): datetime(2009, 1, 1), + datetime(2008, 1, 1): datetime(2008, 1, 1), + datetime(2008, 2, 15): datetime(2008, 4, 1), + datetime(2008, 2, 29): datetime(2008, 4, 1), + datetime(2008, 3, 15): datetime(2008, 4, 1), + datetime(2008, 3, 31): datetime(2008, 4, 1), + datetime(2008, 4, 15): datetime(2008, 7, 1), + datetime(2008, 4, 30): datetime(2008, 7, 1), })) + + offset_cases.append((QuarterBegin(startingMonth=1, n=-1), + {datetime(2008, 1, 1): datetime(2007, 10, 1), + datetime(2008, 1, 31): datetime(2008, 1, 1), + datetime(2008, 2, 15): datetime(2008, 1, 1), + datetime(2008, 2, 29): datetime(2008, 1, 1), + datetime(2008, 3, 15): datetime(2008, 1, 1), + datetime(2008, 3, 31): datetime(2008, 1, 1), + datetime(2008, 4, 15): datetime(2008, 4, 1), + datetime(2008, 4, 30): datetime(2008, 4, 1), + datetime(2008, 7, 1): datetime(2008, 4, 1)})) + + offset_cases.append((QuarterBegin(startingMonth=1, n=2), + {datetime(2008, 1, 1): datetime(2008, 7, 1), + datetime(2008, 2, 15): datetime(2008, 7, 1), + datetime(2008, 2, 29): datetime(2008, 7, 1), + datetime(2008, 3, 15): datetime(2008, 7, 1), + datetime(2008, 3, 31): datetime(2008, 7, 1), + datetime(2008, 4, 15): datetime(2008, 10, 1), + datetime(2008, 4, 1): datetime(2008, 10, 1), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) + + def test_offset_corner_case(self): # corner offset = QuarterBegin(n=-1, startingMonth=1) assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1) @@ -3841,127 
+3836,104 @@ def test_isAnchored(self): assert QuarterEnd().isAnchored() assert not QuarterEnd(2, startingMonth=1).isAnchored() - def test_offset(self): - tests = [] - - tests.append((QuarterEnd(startingMonth=1), - {datetime(2008, 1, 1): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 4, 30), - datetime(2008, 2, 15): datetime(2008, 4, 30), - datetime(2008, 2, 29): datetime(2008, 4, 30), - datetime(2008, 3, 15): datetime(2008, 4, 30), - datetime(2008, 3, 31): datetime(2008, 4, 30), - datetime(2008, 4, 15): datetime(2008, 4, 30), - datetime(2008, 4, 30): datetime(2008, 7, 31), })) - - tests.append((QuarterEnd(startingMonth=2), - {datetime(2008, 1, 1): datetime(2008, 2, 29), - datetime(2008, 1, 31): datetime(2008, 2, 29), - datetime(2008, 2, 15): datetime(2008, 2, 29), - datetime(2008, 2, 29): datetime(2008, 5, 31), - datetime(2008, 3, 15): datetime(2008, 5, 31), - datetime(2008, 3, 31): datetime(2008, 5, 31), - datetime(2008, 4, 15): datetime(2008, 5, 31), - datetime(2008, 4, 30): datetime(2008, 5, 31), })) - - tests.append((QuarterEnd(startingMonth=1, n=0), - {datetime(2008, 1, 1): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 1, 31), - datetime(2008, 2, 15): datetime(2008, 4, 30), - datetime(2008, 2, 29): datetime(2008, 4, 30), - datetime(2008, 3, 15): datetime(2008, 4, 30), - datetime(2008, 3, 31): datetime(2008, 4, 30), - datetime(2008, 4, 15): datetime(2008, 4, 30), - datetime(2008, 4, 30): datetime(2008, 4, 30), })) - - tests.append((QuarterEnd(startingMonth=1, n=-1), - {datetime(2008, 1, 1): datetime(2007, 10, 31), - datetime(2008, 1, 31): datetime(2007, 10, 31), - datetime(2008, 2, 15): datetime(2008, 1, 31), - datetime(2008, 2, 29): datetime(2008, 1, 31), - datetime(2008, 3, 15): datetime(2008, 1, 31), - datetime(2008, 3, 31): datetime(2008, 1, 31), - datetime(2008, 4, 15): datetime(2008, 1, 31), - datetime(2008, 4, 30): datetime(2008, 1, 31), - datetime(2008, 7, 1): datetime(2008, 4, 30)})) - - 
tests.append((QuarterEnd(startingMonth=1, n=2), - {datetime(2008, 1, 31): datetime(2008, 7, 31), - datetime(2008, 2, 15): datetime(2008, 7, 31), - datetime(2008, 2, 29): datetime(2008, 7, 31), - datetime(2008, 3, 15): datetime(2008, 7, 31), - datetime(2008, 3, 31): datetime(2008, 7, 31), - datetime(2008, 4, 15): datetime(2008, 7, 31), - datetime(2008, 4, 30): datetime(2008, 10, 31), })) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) - + offset_cases = [] + offset_cases.append((QuarterEnd(startingMonth=1), + {datetime(2008, 1, 1): datetime(2008, 1, 31), + datetime(2008, 1, 31): datetime(2008, 4, 30), + datetime(2008, 2, 15): datetime(2008, 4, 30), + datetime(2008, 2, 29): datetime(2008, 4, 30), + datetime(2008, 3, 15): datetime(2008, 4, 30), + datetime(2008, 3, 31): datetime(2008, 4, 30), + datetime(2008, 4, 15): datetime(2008, 4, 30), + datetime(2008, 4, 30): datetime(2008, 7, 31), })) + + offset_cases.append((QuarterEnd(startingMonth=2), + {datetime(2008, 1, 1): datetime(2008, 2, 29), + datetime(2008, 1, 31): datetime(2008, 2, 29), + datetime(2008, 2, 15): datetime(2008, 2, 29), + datetime(2008, 2, 29): datetime(2008, 5, 31), + datetime(2008, 3, 15): datetime(2008, 5, 31), + datetime(2008, 3, 31): datetime(2008, 5, 31), + datetime(2008, 4, 15): datetime(2008, 5, 31), + datetime(2008, 4, 30): datetime(2008, 5, 31), })) + + offset_cases.append((QuarterEnd(startingMonth=1, n=0), + {datetime(2008, 1, 1): datetime(2008, 1, 31), + datetime(2008, 1, 31): datetime(2008, 1, 31), + datetime(2008, 2, 15): datetime(2008, 4, 30), + datetime(2008, 2, 29): datetime(2008, 4, 30), + datetime(2008, 3, 15): datetime(2008, 4, 30), + datetime(2008, 3, 31): datetime(2008, 4, 30), + datetime(2008, 4, 15): datetime(2008, 4, 30), + datetime(2008, 4, 30): datetime(2008, 4, 30), })) + + offset_cases.append((QuarterEnd(startingMonth=1, n=-1), + {datetime(2008, 1, 1): datetime(2007, 10, 31), + datetime(2008, 1, 31): 
datetime(2007, 10, 31), + datetime(2008, 2, 15): datetime(2008, 1, 31), + datetime(2008, 2, 29): datetime(2008, 1, 31), + datetime(2008, 3, 15): datetime(2008, 1, 31), + datetime(2008, 3, 31): datetime(2008, 1, 31), + datetime(2008, 4, 15): datetime(2008, 1, 31), + datetime(2008, 4, 30): datetime(2008, 1, 31), + datetime(2008, 7, 1): datetime(2008, 4, 30)})) + + offset_cases.append((QuarterEnd(startingMonth=1, n=2), + {datetime(2008, 1, 31): datetime(2008, 7, 31), + datetime(2008, 2, 15): datetime(2008, 7, 31), + datetime(2008, 2, 29): datetime(2008, 7, 31), + datetime(2008, 3, 15): datetime(2008, 7, 31), + datetime(2008, 3, 31): datetime(2008, 7, 31), + datetime(2008, 4, 15): datetime(2008, 7, 31), + datetime(2008, 4, 30): datetime(2008, 10, 31), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) + + def test_offset_corner_case(self): # corner offset = QuarterEnd(n=-1, startingMonth=1) assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31) - def test_onOffset(self): - - tests = [(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True), - (QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), - False), - (QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), - False), - (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), - False), - (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), - False), - (QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True), - (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), - False), - (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), - False), - (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), - False), - (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), - False), - (QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), - False), - (QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), - False), - (QuarterEnd(1, startingMonth=2), 
datetime(2008, 2, 29), True), - (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), - False), - (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), - False), - (QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), - False), - (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), - False), - (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True), - (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), - False), - (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), - False), - (QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), - False), - (QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), - True), - (QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), - False), - (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), - False), - (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True), - (QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), - False), - (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), - False), - (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), - False), - (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), - False), - (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), - True), ] - - for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + on_offset_cases = [ + (QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True), + (QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False), + (QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False), + (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False), + (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False), + (QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True), + (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False), + (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False), + (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False), + (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False), + (QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), 
False), + (QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False), + (QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True), + (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False), + (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False), + (QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False), + (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False), + (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True), + (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False), + (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False), + (QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False), + (QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True), + (QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False), + (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False), + (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True), + (QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False), + (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False), + (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False), + (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False), + (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True)] + + @pytest.mark.parametrize('case', on_offset_cases) + def test_onOffset(self, case): + offset, dt, expected = case + assert_onOffset(offset, dt, expected) class TestBYearBegin(Base): @@ -3971,43 +3943,43 @@ def test_misspecified(self): pytest.raises(ValueError, BYearBegin, month=13) pytest.raises(ValueError, BYearEnd, month=13) - def test_offset(self): - tests = [] - - tests.append((BYearBegin(), - {datetime(2008, 1, 1): datetime(2009, 1, 1), - datetime(2008, 6, 30): datetime(2009, 1, 1), - datetime(2008, 12, 31): datetime(2009, 1, 1), - datetime(2011, 1, 1): datetime(2011, 1, 3), - datetime(2011, 1, 3): datetime(2012, 1, 2), - datetime(2005, 12, 30): datetime(2006, 1, 2), - datetime(2005, 12, 31): datetime(2006, 1, 2)})) - - 
tests.append((BYearBegin(0), - {datetime(2008, 1, 1): datetime(2008, 1, 1), - datetime(2008, 6, 30): datetime(2009, 1, 1), - datetime(2008, 12, 31): datetime(2009, 1, 1), - datetime(2005, 12, 30): datetime(2006, 1, 2), - datetime(2005, 12, 31): datetime(2006, 1, 2), })) - - tests.append((BYearBegin(-1), - {datetime(2007, 1, 1): datetime(2006, 1, 2), - datetime(2009, 1, 4): datetime(2009, 1, 1), - datetime(2009, 1, 1): datetime(2008, 1, 1), - datetime(2008, 6, 30): datetime(2008, 1, 1), - datetime(2008, 12, 31): datetime(2008, 1, 1), - datetime(2006, 12, 29): datetime(2006, 1, 2), - datetime(2006, 12, 30): datetime(2006, 1, 2), - datetime(2006, 1, 1): datetime(2005, 1, 3), })) - - tests.append((BYearBegin(-2), - {datetime(2007, 1, 1): datetime(2005, 1, 3), - datetime(2007, 6, 30): datetime(2006, 1, 2), - datetime(2008, 12, 31): datetime(2007, 1, 1), })) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) + offset_cases = [] + offset_cases.append((BYearBegin(), + {datetime(2008, 1, 1): datetime(2009, 1, 1), + datetime(2008, 6, 30): datetime(2009, 1, 1), + datetime(2008, 12, 31): datetime(2009, 1, 1), + datetime(2011, 1, 1): datetime(2011, 1, 3), + datetime(2011, 1, 3): datetime(2012, 1, 2), + datetime(2005, 12, 30): datetime(2006, 1, 2), + datetime(2005, 12, 31): datetime(2006, 1, 2)})) + + offset_cases.append((BYearBegin(0), + {datetime(2008, 1, 1): datetime(2008, 1, 1), + datetime(2008, 6, 30): datetime(2009, 1, 1), + datetime(2008, 12, 31): datetime(2009, 1, 1), + datetime(2005, 12, 30): datetime(2006, 1, 2), + datetime(2005, 12, 31): datetime(2006, 1, 2), })) + + offset_cases.append((BYearBegin(-1), + {datetime(2007, 1, 1): datetime(2006, 1, 2), + datetime(2009, 1, 4): datetime(2009, 1, 1), + datetime(2009, 1, 1): datetime(2008, 1, 1), + datetime(2008, 6, 30): datetime(2008, 1, 1), + datetime(2008, 12, 31): datetime(2008, 1, 1), + datetime(2006, 12, 29): datetime(2006, 1, 2), + datetime(2006, 12, 
30): datetime(2006, 1, 2), + datetime(2006, 1, 1): datetime(2005, 1, 3), })) + + offset_cases.append((BYearBegin(-2), + {datetime(2007, 1, 1): datetime(2005, 1, 3), + datetime(2007, 6, 30): datetime(2006, 1, 2), + datetime(2008, 12, 31): datetime(2007, 1, 1), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) class TestYearBegin(Base): @@ -4016,91 +3988,89 @@ class TestYearBegin(Base): def test_misspecified(self): pytest.raises(ValueError, YearBegin, month=13) - def test_offset(self): - tests = [] - - tests.append((YearBegin(), - {datetime(2008, 1, 1): datetime(2009, 1, 1), - datetime(2008, 6, 30): datetime(2009, 1, 1), - datetime(2008, 12, 31): datetime(2009, 1, 1), - datetime(2005, 12, 30): datetime(2006, 1, 1), - datetime(2005, 12, 31): datetime(2006, 1, 1), })) - - tests.append((YearBegin(0), - {datetime(2008, 1, 1): datetime(2008, 1, 1), - datetime(2008, 6, 30): datetime(2009, 1, 1), - datetime(2008, 12, 31): datetime(2009, 1, 1), - datetime(2005, 12, 30): datetime(2006, 1, 1), - datetime(2005, 12, 31): datetime(2006, 1, 1), })) - - tests.append((YearBegin(3), - {datetime(2008, 1, 1): datetime(2011, 1, 1), - datetime(2008, 6, 30): datetime(2011, 1, 1), - datetime(2008, 12, 31): datetime(2011, 1, 1), - datetime(2005, 12, 30): datetime(2008, 1, 1), - datetime(2005, 12, 31): datetime(2008, 1, 1), })) - - tests.append((YearBegin(-1), - {datetime(2007, 1, 1): datetime(2006, 1, 1), - datetime(2007, 1, 15): datetime(2007, 1, 1), - datetime(2008, 6, 30): datetime(2008, 1, 1), - datetime(2008, 12, 31): datetime(2008, 1, 1), - datetime(2006, 12, 29): datetime(2006, 1, 1), - datetime(2006, 12, 30): datetime(2006, 1, 1), - datetime(2007, 1, 1): datetime(2006, 1, 1), })) - - tests.append((YearBegin(-2), - {datetime(2007, 1, 1): datetime(2005, 1, 1), - datetime(2008, 6, 30): datetime(2007, 1, 1), - datetime(2008, 12, 31): 
datetime(2007, 1, 1), })) - - tests.append((YearBegin(month=4), - {datetime(2007, 4, 1): datetime(2008, 4, 1), - datetime(2007, 4, 15): datetime(2008, 4, 1), - datetime(2007, 3, 1): datetime(2007, 4, 1), - datetime(2007, 12, 15): datetime(2008, 4, 1), - datetime(2012, 1, 31): datetime(2012, 4, 1), })) - - tests.append((YearBegin(0, month=4), - {datetime(2007, 4, 1): datetime(2007, 4, 1), - datetime(2007, 3, 1): datetime(2007, 4, 1), - datetime(2007, 12, 15): datetime(2008, 4, 1), - datetime(2012, 1, 31): datetime(2012, 4, 1), })) - - tests.append((YearBegin(4, month=4), - {datetime(2007, 4, 1): datetime(2011, 4, 1), - datetime(2007, 4, 15): datetime(2011, 4, 1), - datetime(2007, 3, 1): datetime(2010, 4, 1), - datetime(2007, 12, 15): datetime(2011, 4, 1), - datetime(2012, 1, 31): datetime(2015, 4, 1), })) - - tests.append((YearBegin(-1, month=4), - {datetime(2007, 4, 1): datetime(2006, 4, 1), - datetime(2007, 3, 1): datetime(2006, 4, 1), - datetime(2007, 12, 15): datetime(2007, 4, 1), - datetime(2012, 1, 31): datetime(2011, 4, 1), })) - - tests.append((YearBegin(-3, month=4), - {datetime(2007, 4, 1): datetime(2004, 4, 1), - datetime(2007, 3, 1): datetime(2004, 4, 1), - datetime(2007, 12, 15): datetime(2005, 4, 1), - datetime(2012, 1, 31): datetime(2009, 4, 1), })) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) - - def test_onOffset(self): - - tests = [ - (YearBegin(), datetime(2007, 1, 3), False), - (YearBegin(), datetime(2008, 1, 1), True), - (YearBegin(), datetime(2006, 12, 31), False), - (YearBegin(), datetime(2006, 1, 2), False), - ] - - for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + offset_cases = [] + offset_cases.append((YearBegin(), + {datetime(2008, 1, 1): datetime(2009, 1, 1), + datetime(2008, 6, 30): datetime(2009, 1, 1), + datetime(2008, 12, 31): datetime(2009, 1, 1), + datetime(2005, 12, 30): datetime(2006, 1, 1), + datetime(2005, 12, 31): datetime(2006, 1, 
1), })) + + offset_cases.append((YearBegin(0), + {datetime(2008, 1, 1): datetime(2008, 1, 1), + datetime(2008, 6, 30): datetime(2009, 1, 1), + datetime(2008, 12, 31): datetime(2009, 1, 1), + datetime(2005, 12, 30): datetime(2006, 1, 1), + datetime(2005, 12, 31): datetime(2006, 1, 1), })) + + offset_cases.append((YearBegin(3), + {datetime(2008, 1, 1): datetime(2011, 1, 1), + datetime(2008, 6, 30): datetime(2011, 1, 1), + datetime(2008, 12, 31): datetime(2011, 1, 1), + datetime(2005, 12, 30): datetime(2008, 1, 1), + datetime(2005, 12, 31): datetime(2008, 1, 1), })) + + offset_cases.append((YearBegin(-1), + {datetime(2007, 1, 1): datetime(2006, 1, 1), + datetime(2007, 1, 15): datetime(2007, 1, 1), + datetime(2008, 6, 30): datetime(2008, 1, 1), + datetime(2008, 12, 31): datetime(2008, 1, 1), + datetime(2006, 12, 29): datetime(2006, 1, 1), + datetime(2006, 12, 30): datetime(2006, 1, 1), + datetime(2007, 1, 1): datetime(2006, 1, 1), })) + + offset_cases.append((YearBegin(-2), + {datetime(2007, 1, 1): datetime(2005, 1, 1), + datetime(2008, 6, 30): datetime(2007, 1, 1), + datetime(2008, 12, 31): datetime(2007, 1, 1), })) + + offset_cases.append((YearBegin(month=4), + {datetime(2007, 4, 1): datetime(2008, 4, 1), + datetime(2007, 4, 15): datetime(2008, 4, 1), + datetime(2007, 3, 1): datetime(2007, 4, 1), + datetime(2007, 12, 15): datetime(2008, 4, 1), + datetime(2012, 1, 31): datetime(2012, 4, 1), })) + + offset_cases.append((YearBegin(0, month=4), + {datetime(2007, 4, 1): datetime(2007, 4, 1), + datetime(2007, 3, 1): datetime(2007, 4, 1), + datetime(2007, 12, 15): datetime(2008, 4, 1), + datetime(2012, 1, 31): datetime(2012, 4, 1), })) + + offset_cases.append((YearBegin(4, month=4), + {datetime(2007, 4, 1): datetime(2011, 4, 1), + datetime(2007, 4, 15): datetime(2011, 4, 1), + datetime(2007, 3, 1): datetime(2010, 4, 1), + datetime(2007, 12, 15): datetime(2011, 4, 1), + datetime(2012, 1, 31): datetime(2015, 4, 1), })) + + offset_cases.append((YearBegin(-1, month=4), + 
{datetime(2007, 4, 1): datetime(2006, 4, 1), + datetime(2007, 3, 1): datetime(2006, 4, 1), + datetime(2007, 12, 15): datetime(2007, 4, 1), + datetime(2012, 1, 31): datetime(2011, 4, 1), })) + + offset_cases.append((YearBegin(-3, month=4), + {datetime(2007, 4, 1): datetime(2004, 4, 1), + datetime(2007, 3, 1): datetime(2004, 4, 1), + datetime(2007, 12, 15): datetime(2005, 4, 1), + datetime(2012, 1, 31): datetime(2009, 4, 1), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) + + on_offset_cases = [(YearBegin(), datetime(2007, 1, 3), False), + (YearBegin(), datetime(2008, 1, 1), True), + (YearBegin(), datetime(2006, 12, 31), False), + (YearBegin(), datetime(2006, 1, 2), False)] + + @pytest.mark.parametrize('case', on_offset_cases) + def test_onOffset(self, case): + offset, dt, expected = case + assert_onOffset(offset, dt, expected) class TestBYearEndLagged(Base): @@ -4109,20 +4079,20 @@ def test_bad_month_fail(self): pytest.raises(Exception, BYearEnd, month=13) pytest.raises(Exception, BYearEnd, month=0) - def test_offset(self): - tests = [] + offset_cases = [] + offset_cases.append((BYearEnd(month=6), + {datetime(2008, 1, 1): datetime(2008, 6, 30), + datetime(2007, 6, 30): datetime(2008, 6, 30)}, )) - tests.append((BYearEnd(month=6), - {datetime(2008, 1, 1): datetime(2008, 6, 30), - datetime(2007, 6, 30): datetime(2008, 6, 30)}, )) + offset_cases.append((BYearEnd(n=-1, month=6), + {datetime(2008, 1, 1): datetime(2007, 6, 29), + datetime(2007, 6, 30): datetime(2007, 6, 29)}, )) - tests.append((BYearEnd(n=-1, month=6), - {datetime(2008, 1, 1): datetime(2007, 6, 29), - datetime(2007, 6, 30): datetime(2007, 6, 29)}, )) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assert base + offset == expected + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, 
cases = case + for base, expected in compat.iteritems(cases): + assert base + offset == expected def test_roll(self): offset = BYearEnd(month=6) @@ -4131,64 +4101,60 @@ def test_roll(self): assert offset.rollforward(date) == datetime(2010, 6, 30) assert offset.rollback(date) == datetime(2009, 6, 30) - def test_onOffset(self): - - tests = [ - (BYearEnd(month=2), datetime(2007, 2, 28), True), - (BYearEnd(month=6), datetime(2007, 6, 30), False), - ] + on_offset_cases = [(BYearEnd(month=2), datetime(2007, 2, 28), True), + (BYearEnd(month=6), datetime(2007, 6, 30), False)] - for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + @pytest.mark.parametrize('case', on_offset_cases) + def test_onOffset(self, case): + offset, dt, expected = case + assert_onOffset(offset, dt, expected) class TestBYearEnd(Base): _offset = BYearEnd - def test_offset(self): - tests = [] - - tests.append((BYearEnd(), - {datetime(2008, 1, 1): datetime(2008, 12, 31), - datetime(2008, 6, 30): datetime(2008, 12, 31), - datetime(2008, 12, 31): datetime(2009, 12, 31), - datetime(2005, 12, 30): datetime(2006, 12, 29), - datetime(2005, 12, 31): datetime(2006, 12, 29), })) - - tests.append((BYearEnd(0), - {datetime(2008, 1, 1): datetime(2008, 12, 31), - datetime(2008, 6, 30): datetime(2008, 12, 31), - datetime(2008, 12, 31): datetime(2008, 12, 31), - datetime(2005, 12, 31): datetime(2006, 12, 29), })) - - tests.append((BYearEnd(-1), - {datetime(2007, 1, 1): datetime(2006, 12, 29), - datetime(2008, 6, 30): datetime(2007, 12, 31), - datetime(2008, 12, 31): datetime(2007, 12, 31), - datetime(2006, 12, 29): datetime(2005, 12, 30), - datetime(2006, 12, 30): datetime(2006, 12, 29), - datetime(2007, 1, 1): datetime(2006, 12, 29), })) - - tests.append((BYearEnd(-2), - {datetime(2007, 1, 1): datetime(2005, 12, 30), - datetime(2008, 6, 30): datetime(2006, 12, 29), - datetime(2008, 12, 31): datetime(2006, 12, 29), })) - - for offset, cases in tests: - for base, expected in 
compat.iteritems(cases): - assertEq(offset, base, expected) - - def test_onOffset(self): - - tests = [ - (BYearEnd(), datetime(2007, 12, 31), True), - (BYearEnd(), datetime(2008, 1, 1), False), - (BYearEnd(), datetime(2006, 12, 31), False), - (BYearEnd(), datetime(2006, 12, 29), True), - ] - - for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + offset_cases = [] + offset_cases.append((BYearEnd(), + {datetime(2008, 1, 1): datetime(2008, 12, 31), + datetime(2008, 6, 30): datetime(2008, 12, 31), + datetime(2008, 12, 31): datetime(2009, 12, 31), + datetime(2005, 12, 30): datetime(2006, 12, 29), + datetime(2005, 12, 31): datetime(2006, 12, 29), })) + + offset_cases.append((BYearEnd(0), + {datetime(2008, 1, 1): datetime(2008, 12, 31), + datetime(2008, 6, 30): datetime(2008, 12, 31), + datetime(2008, 12, 31): datetime(2008, 12, 31), + datetime(2005, 12, 31): datetime(2006, 12, 29), })) + + offset_cases.append((BYearEnd(-1), + {datetime(2007, 1, 1): datetime(2006, 12, 29), + datetime(2008, 6, 30): datetime(2007, 12, 31), + datetime(2008, 12, 31): datetime(2007, 12, 31), + datetime(2006, 12, 29): datetime(2005, 12, 30), + datetime(2006, 12, 30): datetime(2006, 12, 29), + datetime(2007, 1, 1): datetime(2006, 12, 29), })) + + offset_cases.append((BYearEnd(-2), + {datetime(2007, 1, 1): datetime(2005, 12, 30), + datetime(2008, 6, 30): datetime(2006, 12, 29), + datetime(2008, 12, 31): datetime(2006, 12, 29), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) + + on_offset_cases = [(BYearEnd(), datetime(2007, 12, 31), True), + (BYearEnd(), datetime(2008, 1, 1), False), + (BYearEnd(), datetime(2006, 12, 31), False), + (BYearEnd(), datetime(2006, 12, 29), True)] + + @pytest.mark.parametrize('case', on_offset_cases) + def test_onOffset(self, case): + offset, dt, expected = case + assert_onOffset(offset, dt, 
expected) class TestYearEnd(Base): @@ -4197,286 +4163,115 @@ class TestYearEnd(Base): def test_misspecified(self): pytest.raises(ValueError, YearEnd, month=13) - def test_offset(self): - tests = [] - - tests.append((YearEnd(), - {datetime(2008, 1, 1): datetime(2008, 12, 31), - datetime(2008, 6, 30): datetime(2008, 12, 31), - datetime(2008, 12, 31): datetime(2009, 12, 31), - datetime(2005, 12, 30): datetime(2005, 12, 31), - datetime(2005, 12, 31): datetime(2006, 12, 31), })) - - tests.append((YearEnd(0), - {datetime(2008, 1, 1): datetime(2008, 12, 31), - datetime(2008, 6, 30): datetime(2008, 12, 31), - datetime(2008, 12, 31): datetime(2008, 12, 31), - datetime(2005, 12, 30): datetime(2005, 12, 31), })) - - tests.append((YearEnd(-1), - {datetime(2007, 1, 1): datetime(2006, 12, 31), - datetime(2008, 6, 30): datetime(2007, 12, 31), - datetime(2008, 12, 31): datetime(2007, 12, 31), - datetime(2006, 12, 29): datetime(2005, 12, 31), - datetime(2006, 12, 30): datetime(2005, 12, 31), - datetime(2007, 1, 1): datetime(2006, 12, 31), })) - - tests.append((YearEnd(-2), - {datetime(2007, 1, 1): datetime(2005, 12, 31), - datetime(2008, 6, 30): datetime(2006, 12, 31), - datetime(2008, 12, 31): datetime(2006, 12, 31), })) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) - - def test_onOffset(self): - - tests = [ - (YearEnd(), datetime(2007, 12, 31), True), - (YearEnd(), datetime(2008, 1, 1), False), - (YearEnd(), datetime(2006, 12, 31), True), - (YearEnd(), datetime(2006, 12, 29), False), - ] - - for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) + offset_cases = [] + offset_cases.append((YearEnd(), + {datetime(2008, 1, 1): datetime(2008, 12, 31), + datetime(2008, 6, 30): datetime(2008, 12, 31), + datetime(2008, 12, 31): datetime(2009, 12, 31), + datetime(2005, 12, 30): datetime(2005, 12, 31), + datetime(2005, 12, 31): datetime(2006, 12, 31), })) + + offset_cases.append((YearEnd(0), + 
{datetime(2008, 1, 1): datetime(2008, 12, 31), + datetime(2008, 6, 30): datetime(2008, 12, 31), + datetime(2008, 12, 31): datetime(2008, 12, 31), + datetime(2005, 12, 30): datetime(2005, 12, 31), })) + + offset_cases.append((YearEnd(-1), + {datetime(2007, 1, 1): datetime(2006, 12, 31), + datetime(2008, 6, 30): datetime(2007, 12, 31), + datetime(2008, 12, 31): datetime(2007, 12, 31), + datetime(2006, 12, 29): datetime(2005, 12, 31), + datetime(2006, 12, 30): datetime(2005, 12, 31), + datetime(2007, 1, 1): datetime(2006, 12, 31), })) + + offset_cases.append((YearEnd(-2), + {datetime(2007, 1, 1): datetime(2005, 12, 31), + datetime(2008, 6, 30): datetime(2006, 12, 31), + datetime(2008, 12, 31): datetime(2006, 12, 31), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) + + on_offset_cases = [(YearEnd(), datetime(2007, 12, 31), True), + (YearEnd(), datetime(2008, 1, 1), False), + (YearEnd(), datetime(2006, 12, 31), True), + (YearEnd(), datetime(2006, 12, 29), False)] + + @pytest.mark.parametrize('case', on_offset_cases) + def test_onOffset(self, case): + offset, dt, expected = case + assert_onOffset(offset, dt, expected) class TestYearEndDiffMonth(Base): - def test_offset(self): - tests = [] - - tests.append((YearEnd(month=3), - {datetime(2008, 1, 1): datetime(2008, 3, 31), - datetime(2008, 2, 15): datetime(2008, 3, 31), - datetime(2008, 3, 31): datetime(2009, 3, 31), - datetime(2008, 3, 30): datetime(2008, 3, 31), - datetime(2005, 3, 31): datetime(2006, 3, 31), - datetime(2006, 7, 30): datetime(2007, 3, 31)})) - - tests.append((YearEnd(0, month=3), - {datetime(2008, 1, 1): datetime(2008, 3, 31), - datetime(2008, 2, 28): datetime(2008, 3, 31), - datetime(2008, 3, 31): datetime(2008, 3, 31), - datetime(2005, 3, 30): datetime(2005, 3, 31), })) - - tests.append((YearEnd(-1, month=3), - {datetime(2007, 1, 1): datetime(2006, 
3, 31), - datetime(2008, 2, 28): datetime(2007, 3, 31), - datetime(2008, 3, 31): datetime(2007, 3, 31), - datetime(2006, 3, 29): datetime(2005, 3, 31), - datetime(2006, 3, 30): datetime(2005, 3, 31), - datetime(2007, 3, 1): datetime(2006, 3, 31), })) - - tests.append((YearEnd(-2, month=3), - {datetime(2007, 1, 1): datetime(2005, 3, 31), - datetime(2008, 6, 30): datetime(2007, 3, 31), - datetime(2008, 3, 31): datetime(2006, 3, 31), })) - - for offset, cases in tests: - for base, expected in compat.iteritems(cases): - assertEq(offset, base, expected) - - def test_onOffset(self): - - tests = [ - (YearEnd(month=3), datetime(2007, 3, 31), True), - (YearEnd(month=3), datetime(2008, 1, 1), False), - (YearEnd(month=3), datetime(2006, 3, 31), True), - (YearEnd(month=3), datetime(2006, 3, 29), False), - ] - - for offset, dt, expected in tests: - assertOnOffset(offset, dt, expected) - - -def assertEq(offset, base, expected): - actual = offset + base - actual_swapped = base + offset - actual_apply = offset.apply(base) - try: - assert actual == expected - assert actual_swapped == expected - assert actual_apply == expected - except AssertionError: - raise AssertionError("\nExpected: %s\nActual: %s\nFor Offset: %s)" - "\nAt Date: %s" % - (expected, actual, offset, base)) + offset_cases = [] + offset_cases.append((YearEnd(month=3), + {datetime(2008, 1, 1): datetime(2008, 3, 31), + datetime(2008, 2, 15): datetime(2008, 3, 31), + datetime(2008, 3, 31): datetime(2009, 3, 31), + datetime(2008, 3, 30): datetime(2008, 3, 31), + datetime(2005, 3, 31): datetime(2006, 3, 31), + datetime(2006, 7, 30): datetime(2007, 3, 31)})) + + offset_cases.append((YearEnd(0, month=3), + {datetime(2008, 1, 1): datetime(2008, 3, 31), + datetime(2008, 2, 28): datetime(2008, 3, 31), + datetime(2008, 3, 31): datetime(2008, 3, 31), + datetime(2005, 3, 30): datetime(2005, 3, 31), })) + + offset_cases.append((YearEnd(-1, month=3), + {datetime(2007, 1, 1): datetime(2006, 3, 31), + datetime(2008, 2, 28): 
datetime(2007, 3, 31), + datetime(2008, 3, 31): datetime(2007, 3, 31), + datetime(2006, 3, 29): datetime(2005, 3, 31), + datetime(2006, 3, 30): datetime(2005, 3, 31), + datetime(2007, 3, 1): datetime(2006, 3, 31), })) + + offset_cases.append((YearEnd(-2, month=3), + {datetime(2007, 1, 1): datetime(2005, 3, 31), + datetime(2008, 6, 30): datetime(2007, 3, 31), + datetime(2008, 3, 31): datetime(2006, 3, 31), })) + + @pytest.mark.parametrize('case', offset_cases) + def test_offset(self, case): + offset, cases = case + for base, expected in compat.iteritems(cases): + assert_offset_equal(offset, base, expected) + + on_offset_cases = [(YearEnd(month=3), datetime(2007, 3, 31), True), + (YearEnd(month=3), datetime(2008, 1, 1), False), + (YearEnd(month=3), datetime(2006, 3, 31), True), + (YearEnd(month=3), datetime(2006, 3, 29), False)] + + @pytest.mark.parametrize('case', on_offset_cases) + def test_onOffset(self, case): + offset, dt, expected = case + assert_onOffset(offset, dt, expected) def test_Easter(): - assertEq(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4)) - assertEq(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24)) - assertEq(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24)) - - assertEq(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24)) - assertEq(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8)) + assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4)) + assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24)) + assert_offset_equal(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24)) - assertEq(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4)) - assertEq(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4)) - assertEq(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12)) + assert_offset_equal(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24)) + assert_offset_equal(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8)) - assertEq(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12)) 
- assertEq(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23)) + assert_offset_equal(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4)) + assert_offset_equal(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4)) + assert_offset_equal(-Easter(2), + datetime(2011, 1, 1), + datetime(2009, 4, 12)) - -class TestTicks(object): - - ticks = [Hour, Minute, Second, Milli, Micro, Nano] - - def test_ticks(self): - offsets = [(Hour, Timedelta(hours=5)), - (Minute, Timedelta(hours=2, minutes=3)), - (Second, Timedelta(hours=2, seconds=3)), - (Milli, Timedelta(hours=2, milliseconds=3)), - (Micro, Timedelta(hours=2, microseconds=3)), - (Nano, Timedelta(hours=2, nanoseconds=3))] - - for kls, expected in offsets: - offset = kls(3) - result = offset + Timedelta(hours=2) - assert isinstance(result, Timedelta) - assert result == expected - - def test_Hour(self): - assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1)) - assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1)) - assertEq(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2)) - assertEq(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1)) - - assert Hour(3) + Hour(2) == Hour(5) - assert Hour(3) - Hour(2) == Hour() - - assert Hour(4) != Hour(1) - - def test_Minute(self): - assertEq(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1)) - assertEq(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1)) - assertEq(2 * Minute(), datetime(2010, 1, 1), - datetime(2010, 1, 1, 0, 2)) - assertEq(-1 * Minute(), datetime(2010, 1, 1, 0, 1), - datetime(2010, 1, 1)) - - assert Minute(3) + Minute(2) == Minute(5) - assert Minute(3) - Minute(2) == Minute() - assert Minute(5) != Minute() - - def test_Second(self): - assertEq(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1)) - assertEq(Second(-1), datetime(2010, 1, 1, - 0, 0, 1), datetime(2010, 1, 1)) - assertEq(2 * Second(), datetime(2010, 1, 1), - datetime(2010, 1, 1, 0, 0, 2)) - assertEq(-1 * Second(), datetime(2010, 1, 1, 0, 
0, 1), - datetime(2010, 1, 1)) - - assert Second(3) + Second(2) == Second(5) - assert Second(3) - Second(2) == Second() - - def test_Millisecond(self): - assertEq(Milli(), datetime(2010, 1, 1), - datetime(2010, 1, 1, 0, 0, 0, 1000)) - assertEq(Milli(-1), datetime(2010, 1, 1, 0, - 0, 0, 1000), datetime(2010, 1, 1)) - assertEq(Milli(2), datetime(2010, 1, 1), - datetime(2010, 1, 1, 0, 0, 0, 2000)) - assertEq(2 * Milli(), datetime(2010, 1, 1), - datetime(2010, 1, 1, 0, 0, 0, 2000)) - assertEq(-1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), - datetime(2010, 1, 1)) - - assert Milli(3) + Milli(2) == Milli(5) - assert Milli(3) - Milli(2) == Milli() - - def test_MillisecondTimestampArithmetic(self): - assertEq(Milli(), Timestamp('2010-01-01'), - Timestamp('2010-01-01 00:00:00.001')) - assertEq(Milli(-1), Timestamp('2010-01-01 00:00:00.001'), - Timestamp('2010-01-01')) - - def test_Microsecond(self): - assertEq(Micro(), datetime(2010, 1, 1), - datetime(2010, 1, 1, 0, 0, 0, 1)) - assertEq(Micro(-1), datetime(2010, 1, 1, - 0, 0, 0, 1), datetime(2010, 1, 1)) - assertEq(2 * Micro(), datetime(2010, 1, 1), - datetime(2010, 1, 1, 0, 0, 0, 2)) - assertEq(-1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), - datetime(2010, 1, 1)) - - assert Micro(3) + Micro(2) == Micro(5) - assert Micro(3) - Micro(2) == Micro() - - def test_NanosecondGeneric(self): - timestamp = Timestamp(datetime(2010, 1, 1)) - assert timestamp.nanosecond == 0 - - result = timestamp + Nano(10) - assert result.nanosecond == 10 - - reverse_result = Nano(10) + timestamp - assert reverse_result.nanosecond == 10 - - def test_Nanosecond(self): - timestamp = Timestamp(datetime(2010, 1, 1)) - assertEq(Nano(), timestamp, timestamp + np.timedelta64(1, 'ns')) - assertEq(Nano(-1), timestamp + np.timedelta64(1, 'ns'), timestamp) - assertEq(2 * Nano(), timestamp, timestamp + np.timedelta64(2, 'ns')) - assertEq(-1 * Nano(), timestamp + np.timedelta64(1, 'ns'), timestamp) - - assert Nano(3) + Nano(2) == Nano(5) - assert Nano(3) - 
Nano(2) == Nano() - - # GH9284 - assert Nano(1) + Nano(10) == Nano(11) - assert Nano(5) + Micro(1) == Nano(1005) - assert Micro(5) + Nano(1) == Nano(5001) - - def test_tick_zero(self): - for t1 in self.ticks: - for t2 in self.ticks: - assert t1(0) == t2(0) - assert t1(0) + t2(0) == t1(0) - - if t1 is not Nano: - assert t1(2) + t2(0) == t1(2) - if t1 is Nano: - assert t1(2) + Nano(0) == t1(2) - - def test_tick_equalities(self): - for t in self.ticks: - assert t(3) == t(3) - assert t() == t(1) - - # not equals - assert t(3) != t(2) - assert t(3) != t(-3) - - def test_tick_operators(self): - for t in self.ticks: - assert t(3) + t(2) == t(5) - assert t(3) - t(2) == t(1) - assert t(800) + t(300) == t(1100) - assert t(1000) - t(5) == t(995) - - def test_tick_offset(self): - for t in self.ticks: - assert not t().isAnchored() - - def test_compare_ticks(self): - for kls in self.ticks: - three = kls(3) - four = kls(4) - - for _ in range(10): - assert three < kls(4) - assert kls(3) < four - assert four > kls(3) - assert kls(4) > three - assert kls(3) == kls(3) - assert kls(3) != kls(4) + assert_offset_equal(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12)) + assert_offset_equal(-Easter(2), + datetime(2010, 4, 4), + datetime(2008, 3, 23)) class TestOffsetNames(object): @@ -4641,19 +4436,6 @@ def test_rule_code(self): assert k == _get_freq_str(code) -def test_apply_ticks(): - result = offsets.Hour(3).apply(offsets.Hour(4)) - exp = offsets.Hour(7) - assert (result == exp) - - -def test_delta_to_tick(): - delta = timedelta(3) - - tick = offsets._delta_to_tick(delta) - assert (tick == offsets.Day(3)) - - def test_dateoffset_misc(): oset = offsets.DateOffset(months=2, days=4) # it works @@ -4875,27 +4657,29 @@ def test_springforward_singular(self): self._test_all_offsets(n=1, tstart=self._make_timestamp( self.ts_pre_springfwd, hrs_pre, tz), expected_utc_offset=None) - def test_all_offset_classes(self): - tests = {MonthBegin: ['11/2/2012', '12/1/2012'], - MonthEnd: 
['11/2/2012', '11/30/2012'], - BMonthBegin: ['11/2/2012', '12/3/2012'], - BMonthEnd: ['11/2/2012', '11/30/2012'], - CBMonthBegin: ['11/2/2012', '12/3/2012'], - CBMonthEnd: ['11/2/2012', '11/30/2012'], - SemiMonthBegin: ['11/2/2012', '11/15/2012'], - SemiMonthEnd: ['11/2/2012', '11/15/2012'], - Week: ['11/2/2012', '11/9/2012'], - YearBegin: ['11/2/2012', '1/1/2013'], - YearEnd: ['11/2/2012', '12/31/2012'], - BYearBegin: ['11/2/2012', '1/1/2013'], - BYearEnd: ['11/2/2012', '12/31/2012'], - QuarterBegin: ['11/2/2012', '12/1/2012'], - QuarterEnd: ['11/2/2012', '12/31/2012'], - BQuarterBegin: ['11/2/2012', '12/3/2012'], - BQuarterEnd: ['11/2/2012', '12/31/2012'], - Day: ['11/4/2012', '11/4/2012 23:00']} - - for offset, test_values in iteritems(tests): - first = Timestamp(test_values[0], tz='US/Eastern') + offset() - second = Timestamp(test_values[1], tz='US/Eastern') - assert first == second + offset_classes = {MonthBegin: ['11/2/2012', '12/1/2012'], + MonthEnd: ['11/2/2012', '11/30/2012'], + BMonthBegin: ['11/2/2012', '12/3/2012'], + BMonthEnd: ['11/2/2012', '11/30/2012'], + CBMonthBegin: ['11/2/2012', '12/3/2012'], + CBMonthEnd: ['11/2/2012', '11/30/2012'], + SemiMonthBegin: ['11/2/2012', '11/15/2012'], + SemiMonthEnd: ['11/2/2012', '11/15/2012'], + Week: ['11/2/2012', '11/9/2012'], + YearBegin: ['11/2/2012', '1/1/2013'], + YearEnd: ['11/2/2012', '12/31/2012'], + BYearBegin: ['11/2/2012', '1/1/2013'], + BYearEnd: ['11/2/2012', '12/31/2012'], + QuarterBegin: ['11/2/2012', '12/1/2012'], + QuarterEnd: ['11/2/2012', '12/31/2012'], + BQuarterBegin: ['11/2/2012', '12/3/2012'], + BQuarterEnd: ['11/2/2012', '12/31/2012'], + Day: ['11/4/2012', '11/4/2012 23:00']}.items() + + @pytest.mark.parametrize('tup', offset_classes) + def test_all_offset_classes(self, tup): + offset, test_values = tup + + first = Timestamp(test_values[0], tz='US/Eastern') + offset() + second = Timestamp(test_values[1], tz='US/Eastern') + assert first == second diff --git 
a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py new file mode 100644 index 0000000000000..24033d4ff6cbd --- /dev/null +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -0,0 +1,236 @@ +# -*- coding: utf-8 -*- +""" +Tests for offsets.Tick and subclasses +""" +from datetime import datetime, timedelta + +import pytest +import numpy as np + +from pandas import Timedelta, Timestamp +from pandas.tseries import offsets +from pandas.tseries.offsets import Hour, Minute, Second, Milli, Micro, Nano + +from .common import assert_offset_equal + +# --------------------------------------------------------------------- +# Test Helpers + +tick_classes = [Hour, Minute, Second, Milli, Micro, Nano] + + +# --------------------------------------------------------------------- + + +def test_apply_ticks(): + result = offsets.Hour(3).apply(offsets.Hour(4)) + exp = offsets.Hour(7) + assert (result == exp) + + +def test_delta_to_tick(): + delta = timedelta(3) + + tick = offsets._delta_to_tick(delta) + assert (tick == offsets.Day(3)) + + +# --------------------------------------------------------------------- + + +def test_Hour(): + assert_offset_equal(Hour(), + datetime(2010, 1, 1), datetime(2010, 1, 1, 1)) + assert_offset_equal(Hour(-1), + datetime(2010, 1, 1, 1), datetime(2010, 1, 1)) + assert_offset_equal(2 * Hour(), + datetime(2010, 1, 1), datetime(2010, 1, 1, 2)) + assert_offset_equal(-1 * Hour(), + datetime(2010, 1, 1, 1), datetime(2010, 1, 1)) + + assert Hour(3) + Hour(2) == Hour(5) + assert Hour(3) - Hour(2) == Hour() + + assert Hour(4) != Hour(1) + + +def test_Minute(): + assert_offset_equal(Minute(), + datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1)) + assert_offset_equal(Minute(-1), + datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1)) + assert_offset_equal(2 * Minute(), + datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2)) + assert_offset_equal(-1 * Minute(), + datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1)) + + assert Minute(3) + 
Minute(2) == Minute(5) + assert Minute(3) - Minute(2) == Minute() + assert Minute(5) != Minute() + + +def test_Second(): + assert_offset_equal(Second(), + datetime(2010, 1, 1), + datetime(2010, 1, 1, 0, 0, 1)) + assert_offset_equal(Second(-1), + datetime(2010, 1, 1, 0, 0, 1), + datetime(2010, 1, 1)) + assert_offset_equal(2 * Second(), + datetime(2010, 1, 1), + datetime(2010, 1, 1, 0, 0, 2)) + assert_offset_equal(-1 * Second(), + datetime(2010, 1, 1, 0, 0, 1), + datetime(2010, 1, 1)) + + assert Second(3) + Second(2) == Second(5) + assert Second(3) - Second(2) == Second() + + +def test_Millisecond(): + assert_offset_equal(Milli(), + datetime(2010, 1, 1), + datetime(2010, 1, 1, 0, 0, 0, 1000)) + assert_offset_equal(Milli(-1), + datetime(2010, 1, 1, 0, 0, 0, 1000), + datetime(2010, 1, 1)) + assert_offset_equal(Milli(2), + datetime(2010, 1, 1), + datetime(2010, 1, 1, 0, 0, 0, 2000)) + assert_offset_equal(2 * Milli(), + datetime(2010, 1, 1), + datetime(2010, 1, 1, 0, 0, 0, 2000)) + assert_offset_equal(-1 * Milli(), + datetime(2010, 1, 1, 0, 0, 0, 1000), + datetime(2010, 1, 1)) + + assert Milli(3) + Milli(2) == Milli(5) + assert Milli(3) - Milli(2) == Milli() + + +def test_MillisecondTimestampArithmetic(): + assert_offset_equal(Milli(), + Timestamp('2010-01-01'), + Timestamp('2010-01-01 00:00:00.001')) + assert_offset_equal(Milli(-1), + Timestamp('2010-01-01 00:00:00.001'), + Timestamp('2010-01-01')) + + +def test_Microsecond(): + assert_offset_equal(Micro(), + datetime(2010, 1, 1), + datetime(2010, 1, 1, 0, 0, 0, 1)) + assert_offset_equal(Micro(-1), + datetime(2010, 1, 1, 0, 0, 0, 1), + datetime(2010, 1, 1)) + + assert_offset_equal(2 * Micro(), + datetime(2010, 1, 1), + datetime(2010, 1, 1, 0, 0, 0, 2)) + assert_offset_equal(-1 * Micro(), + datetime(2010, 1, 1, 0, 0, 0, 1), + datetime(2010, 1, 1)) + + assert Micro(3) + Micro(2) == Micro(5) + assert Micro(3) - Micro(2) == Micro() + + +def test_NanosecondGeneric(): + timestamp = Timestamp(datetime(2010, 1, 1)) + assert 
timestamp.nanosecond == 0 + + result = timestamp + Nano(10) + assert result.nanosecond == 10 + + reverse_result = Nano(10) + timestamp + assert reverse_result.nanosecond == 10 + + +def test_Nanosecond(): + timestamp = Timestamp(datetime(2010, 1, 1)) + assert_offset_equal(Nano(), + timestamp, + timestamp + np.timedelta64(1, 'ns')) + assert_offset_equal(Nano(-1), + timestamp + np.timedelta64(1, 'ns'), + timestamp) + assert_offset_equal(2 * Nano(), + timestamp, + timestamp + np.timedelta64(2, 'ns')) + assert_offset_equal(-1 * Nano(), + timestamp + np.timedelta64(1, 'ns'), + timestamp) + + assert Nano(3) + Nano(2) == Nano(5) + assert Nano(3) - Nano(2) == Nano() + + # GH9284 + assert Nano(1) + Nano(10) == Nano(11) + assert Nano(5) + Micro(1) == Nano(1005) + assert Micro(5) + Nano(1) == Nano(5001) + + +@pytest.mark.parametrize('kls, expected', + [(Hour, Timedelta(hours=5)), + (Minute, Timedelta(hours=2, minutes=3)), + (Second, Timedelta(hours=2, seconds=3)), + (Milli, Timedelta(hours=2, milliseconds=3)), + (Micro, Timedelta(hours=2, microseconds=3)), + (Nano, Timedelta(hours=2, nanoseconds=3))]) +def test_tick_addition(kls, expected): + offset = kls(3) + result = offset + Timedelta(hours=2) + assert isinstance(result, Timedelta) + assert result == expected + + +@pytest.mark.parametrize('cls1', tick_classes) +@pytest.mark.parametrize('cls2', tick_classes) +def test_tick_zero(cls1, cls2): + assert cls1(0) == cls2(0) + assert cls1(0) + cls2(0) == cls1(0) + + if cls1 is not Nano: + assert cls1(2) + cls2(0) == cls1(2) + + if cls1 is Nano: + assert cls1(2) + Nano(0) == cls1(2) + + +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_equalities(cls): + assert cls(3) == cls(3) + assert cls() == cls(1) + + # not equals + assert cls(3) != cls(2) + assert cls(3) != cls(-3) + + +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_operators(cls): + assert cls(3) + cls(2) == cls(5) + assert cls(3) - cls(2) == cls(1) + assert cls(800) + cls(300) == cls(1100) + assert 
cls(1000) - cls(5) == cls(995) + + +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_offset(cls): + assert not cls().isAnchored() + + +@pytest.mark.parametrize('cls', tick_classes) +def test_compare_ticks(cls): + three = cls(3) + four = cls(4) + + # TODO: WTF? What is this range(10) supposed to do? + for _ in range(10): + assert three < cls(4) + assert cls(3) < four + assert four > cls(3) + assert cls(4) > three + assert cls(3) == cls(3) + assert cls(3) != cls(4) diff --git a/setup.py b/setup.py index dd24c5c14ee69..bbfb8f4c6ebc5 100755 --- a/setup.py +++ b/setup.py @@ -761,6 +761,7 @@ def pxd(name): 'pandas.tests.series', 'pandas.tests.scalar', 'pandas.tests.tseries', + 'pandas.tests.tseries.offsets', 'pandas.tests.plotting', 'pandas.tests.tools', 'pandas.tests.util', @@ -796,7 +797,7 @@ def pxd(name): 'pandas.tests.io.formats': ['data/*.csv'], 'pandas.tests.io.msgpack': ['data/*.mp'], 'pandas.tests.reshape': ['data/*.csv'], - 'pandas.tests.tseries': ['data/*.pickle'], + 'pandas.tests.tseries.offsets': ['data/*.pickle'], 'pandas.io.formats': ['templates/*.tpl'] }, ext_modules=extensions,
The test output for offsets would be more useful of the tests were parametrized. So this PR implements that for about a third of the tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/18233
2017-11-12T00:01:18Z
2017-11-13T13:09:21Z
2017-11-13T13:09:21Z
2017-12-08T19:38:46Z
TST: clean up some tests issues & style
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index f3b11e52cdd7a..eae283e9bc00d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -136,7 +136,7 @@ def trans(x): # noqa try: if np.allclose(new_result, result, rtol=0): return new_result - except: + except Exception: # comparison of an object dtype with a number type could # hit here @@ -151,14 +151,14 @@ def trans(x): # noqa elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']: try: result = result.astype(dtype) - except: + except Exception: if dtype.tz: # convert to datetime and change timezone from pandas import to_datetime result = to_datetime(result).tz_localize('utc') result = result.tz_convert(dtype.tz) - except: + except Exception: pass return result @@ -210,7 +210,7 @@ def changeit(): new_result[mask] = om_at result[:] = new_result return result, False - except: + except Exception: pass # we are forced to change the dtype of the result as the input @@ -243,7 +243,7 @@ def changeit(): try: np.place(result, mask, other) - except: + except Exception: return changeit() return result, False @@ -274,14 +274,14 @@ def maybe_promote(dtype, fill_value=np.nan): if issubclass(dtype.type, np.datetime64): try: fill_value = tslib.Timestamp(fill_value).value - except: + except Exception: # the proper thing to do here would probably be to upcast # to object (but numpy 1.6.1 doesn't do this properly) fill_value = iNaT elif issubclass(dtype.type, np.timedelta64): try: fill_value = lib.Timedelta(fill_value).value - except: + except Exception: # as for datetimes, cannot upcast to object fill_value = iNaT else: @@ -592,12 +592,12 @@ def maybe_convert_scalar(values): def coerce_indexer_dtype(indexer, categories): """ coerce the indexer input array to the smallest dtype possible """ - l = len(categories) - if l < _int8_max: + length = len(categories) + if length < _int8_max: return _ensure_int8(indexer) - elif l < _int16_max: + elif length < _int16_max: return 
_ensure_int16(indexer) - elif l < _int32_max: + elif length < _int32_max: return _ensure_int32(indexer) return _ensure_int64(indexer) @@ -629,7 +629,7 @@ def conv(r, dtype): r = float(r) elif dtype.kind == 'i': r = int(r) - except: + except Exception: pass return r @@ -756,7 +756,7 @@ def maybe_convert_objects(values, convert_dates=True, convert_numeric=True, if not isna(new_values).all(): values = new_values - except: + except Exception: pass else: # soft-conversion @@ -817,7 +817,7 @@ def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True, # If all NaNs, then do not-alter values = converted if not isna(converted).all() else values values = values.copy() if copy else values - except: + except Exception: pass return values @@ -888,10 +888,10 @@ def try_datetime(v): try: from pandas import to_datetime return to_datetime(v) - except: + except Exception: pass - except: + except Exception: pass return v.reshape(shape) @@ -903,7 +903,7 @@ def try_timedelta(v): from pandas import to_timedelta try: return to_timedelta(v)._values.reshape(shape) - except: + except Exception: return v.reshape(shape) inferred_type = lib.infer_datetimelike_array(_ensure_object(v)) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 57d2d07294a53..eb96cbad70099 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2032,7 +2032,7 @@ def equals(self, other): try: return array_equivalent(_values_from_object(self), _values_from_object(other)) - except: + except Exception: return False def identical(self, other): @@ -2315,7 +2315,7 @@ def intersection(self, other): try: indexer = Index(other._values).get_indexer(self._values) indexer = indexer.take((indexer != -1).nonzero()[0]) - except: + except Exception: # duplicates indexer = algos.unique1d( Index(other._values).get_indexer_non_unique(self._values)[0]) @@ -3022,13 +3022,13 @@ def _reindex_non_unique(self, target): new_indexer = None if len(missing): - l = 
np.arange(len(indexer)) + length = np.arange(len(indexer)) missing = _ensure_platform_int(missing) missing_labels = target.take(missing) - missing_indexer = _ensure_int64(l[~check]) + missing_indexer = _ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values - cur_indexer = _ensure_int64(l[check]) + cur_indexer = _ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 78869de318dce..aa99e8920d9b5 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -449,7 +449,7 @@ def _generate(cls, start, end, periods, name, offset, try: inferred_tz = timezones.infer_tzinfo(start, end) - except: + except Exception: raise TypeError('Start and end cannot both be tz-aware with ' 'different timezones') @@ -1176,12 +1176,12 @@ def __iter__(self): # convert in chunks of 10k for efficiency data = self.asi8 - l = len(self) + length = len(self) chunksize = 10000 - chunks = int(l / chunksize) + 1 + chunks = int(length / chunksize) + 1 for i in range(chunks): start_i = i * chunksize - end_i = min((i + 1) * chunksize, l) + end_i = min((i + 1) * chunksize, length) converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, box=True) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a4a5f7df9aa0f..604af1cfd678a 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -841,7 +841,7 @@ def insert(self, loc, item): if _is_convertible_to_td(item): try: item = Timedelta(item) - except: + except Exception: pass freq = None diff --git a/pandas/core/series.py b/pandas/core/series.py index 1c92c4b8850ee..c9a72bb688270 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -597,7 +597,7 @@ def _ixs(self, i, axis=0): return values[i] except IndexError: raise - except: + except Exception: if 
isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) @@ -675,7 +675,7 @@ def _get_with(self, key): if isinstance(key, tuple): try: return self._get_values_tuple(key) - except: + except Exception: if len(key) == 1: key = key[0] if isinstance(key, slice): @@ -818,7 +818,7 @@ def _set_with(self, key, value): if not isinstance(key, (list, Series, np.ndarray, Series)): try: key = list(key) - except: + except Exception: key = [key] if isinstance(key, Index): diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 956f3c68eeb41..0b268dcca90e8 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -973,6 +973,7 @@ def test_importcheck_thread_safety(): def test_parse_failure_unseekable(): # Issue #17975 _skip_if_no('lxml') + _skip_if_no('bs4') class UnseekableStringIO(StringIO): def seekable(self): @@ -996,6 +997,7 @@ def seekable(self): def test_parse_failure_rewinds(): # Issue #17975 _skip_if_no('lxml') + _skip_if_no('bs4') class MockFile(object): def __init__(self, data): diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 4cd9a2fadeb32..1d1eeb9da2364 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -1101,13 +1101,18 @@ def test_timestamp(self): tsc = Timestamp('2014-10-11 11:00:01.12345678', tz='US/Central') utsc = tsc.tz_convert('UTC') + # utsc is a different representation of the same time assert tsc.timestamp() == utsc.timestamp() if PY3: - # should agree with datetime.timestamp method - dt = ts.to_pydatetime() - assert dt.timestamp() == ts.timestamp() + + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + + # should agree with datetime.timestamp method + dt = ts.to_pydatetime() + assert dt.timestamp() == ts.timestamp() class TestTimestampNsOperations(object): diff --git a/pandas/tests/tseries/test_timezones.py 
b/pandas/tests/tseries/test_timezones.py index ddcf1bb7d8b7b..724628649796d 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas.tseries.offsets as offsets -from pandas.compat import lrange, zip +from pandas.compat import lrange, zip, PY3 from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import tslib @@ -1278,16 +1278,22 @@ def test_replace_tzinfo(self): result_dt = dt.replace(tzinfo=tzinfo) result_pd = Timestamp(dt).replace(tzinfo=tzinfo) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None) result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime()
closes #18037
https://api.github.com/repos/pandas-dev/pandas/pulls/18232
2017-11-11T21:06:10Z
2017-11-11T23:38:28Z
2017-11-11T23:38:27Z
2017-12-11T20:25:01Z
Reduce copying of input data on Series construction
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 404d9f5d972b6..9c8c9ec1611ad 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -38,11 +38,18 @@ Backwards incompatible API changes - - + + + + + .. _whatsnew_0220.api: Other API Changes ^^^^^^^^^^^^^^^^^ +- :func:`Series.astype` and :func:`Index.astype` with an incompatible dtype will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`18231`) +- ``Series`` construction with an ``object`` dtyped tz-aware datetime and ``dtype=object`` specified, will now return an ``object`` dtyped ``Series``, previously this would infer the datetime dtype (:issue:`18231`) - ``NaT`` division with :class:`datetime.timedelta` will now return ``NaN`` instead of raising (:issue:`17876`) - All-NaN levels in a ``MultiIndex`` are now assigned ``float`` rather than ``object`` dtype, promoting consistency with ``Index`` (:issue:`17929`). - :class:`Timestamp` will no longer silently ignore unused or invalid ``tz`` or ``tzinfo`` keyword arguments (:issue:`17690`) @@ -80,6 +87,7 @@ Performance Improvements - :class`DateOffset` arithmetic performance is improved (:issue:`18218`) - Converting a ``Series`` of ``Timedelta`` objects to days, seconds, etc... sped up through vectorization of underlying methods (:issue:`18092`) - The overriden ``Timedelta`` properties of days, seconds and microseconds have been removed, leveraging their built-in Python versions instead (:issue:`18242`) +- ``Series`` construction will reduce the number of copies made of the input data in certain cases (:issue:`17449`) .. 
_whatsnew_0220.docs: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index eae283e9bc00d..4b99914758d55 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -13,7 +13,9 @@ is_datetimelike, is_extension_type, is_object_dtype, is_datetime64tz_dtype, is_datetime64_dtype, - is_timedelta64_dtype, is_dtype_equal, + is_datetime64_ns_dtype, + is_timedelta64_dtype, is_timedelta64_ns_dtype, + is_dtype_equal, is_float_dtype, is_complex_dtype, is_integer_dtype, is_datetime_or_timedelta_dtype, @@ -829,8 +831,10 @@ def maybe_castable(arr): # check datetime64[ns]/timedelta64[ns] are valid # otherwise try to coerce kind = arr.dtype.kind - if kind == 'M' or kind == 'm': - return is_datetime64_dtype(arr.dtype) + if kind == 'M': + return is_datetime64_ns_dtype(arr.dtype) + elif kind == 'm': + return is_timedelta64_ns_dtype(arr.dtype) return arr.dtype.name not in _POSSIBLY_CAST_DTYPES diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 1359a938e652d..50f03aca97447 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1027,13 +1027,16 @@ def to_frame(self, index=True): result.index = self return result - def _to_embed(self, keep_tz=False): + def _to_embed(self, keep_tz=False, dtype=None): """ *this is an internal non-public method* return an array repr of this object, potentially casting to object """ + if dtype is not None: + return self.astype(dtype)._to_embed(keep_tz=keep_tz) + return self.values.copy() _index_shared_docs['astype'] = """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 64b5b9f958880..3a11c80ecba64 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -917,7 +917,7 @@ def astype(self, dtype, copy=True): return Index(self.format(), name=self.name, dtype=object) elif is_period_dtype(dtype): return self.to_period(freq=dtype.freq) - raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype) + raise 
TypeError('Cannot cast DatetimeIndex to dtype %s' % dtype) def _get_time_micros(self): values = self.asi8 @@ -957,12 +957,15 @@ def to_series(self, keep_tz=False): index=self._shallow_copy(), name=self.name) - def _to_embed(self, keep_tz=False): + def _to_embed(self, keep_tz=False, dtype=None): """ return an array repr of this object, potentially casting to object This is for internal compat """ + if dtype is not None: + return self.astype(dtype)._to_embed(keep_tz=keep_tz) + if keep_tz and self.tz is not None: # preserve the tz & copy diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 85e3300913000..76004994ae38a 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -464,10 +464,14 @@ def __array_wrap__(self, result, context=None): def _box_func(self): return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq) - def _to_embed(self, keep_tz=False): + def _to_embed(self, keep_tz=False, dtype=None): """ return an array repr of this object, potentially casting to object """ + + if dtype is not None: + return self.astype(dtype)._to_embed(keep_tz=keep_tz) + return self.asobject.values @property @@ -510,7 +514,7 @@ def astype(self, dtype, copy=True, how='start'): return self.to_timestamp(how=how).tz_localize(dtype.tz) elif is_period_dtype(dtype): return self.asfreq(freq=dtype.freq) - raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype) + raise TypeError('Cannot cast PeriodIndex to dtype %s' % dtype) @Substitution(klass='PeriodIndex') @Appender(_shared_docs['searchsorted']) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index c592aa9608d97..eb4a9ce7e1439 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -495,7 +495,7 @@ def astype(self, dtype, copy=True): elif is_integer_dtype(dtype): return Index(self.values.astype('i8', copy=copy), dtype='i8', name=self.name) - raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % 
dtype) + raise TypeError('Cannot cast TimedeltaIndex to dtype %s' % dtype) def union(self, other): """ diff --git a/pandas/core/series.py b/pandas/core/series.py index 6142ccdd2f2ac..be1de4c6814ba 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -29,7 +29,8 @@ _is_unorderable_exception, _ensure_platform_int, pandas_dtype) -from pandas.core.dtypes.generic import ABCSparseArray, ABCDataFrame +from pandas.core.dtypes.generic import ( + ABCSparseArray, ABCDataFrame, ABCIndexClass) from pandas.core.dtypes.cast import ( maybe_upcast, infer_dtype_from_scalar, maybe_convert_platform, @@ -184,8 +185,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None, if name is None: name = data.name - data = data._to_embed(keep_tz=True) - copy = True + data = data._to_embed(keep_tz=True, dtype=dtype) + copy = False elif isinstance(data, np.ndarray): pass elif isinstance(data, Series): @@ -3139,7 +3140,9 @@ def _sanitize_index(data, index, copy=False): if len(data) != len(index): raise ValueError('Length of values does not match length of ' 'index') - if isinstance(data, PeriodIndex): + if isinstance(data, ABCIndexClass) and not copy: + pass + elif isinstance(data, PeriodIndex): data = data.asobject elif isinstance(data, DatetimeIndex): data = data._to_embed(keep_tz=True) @@ -3209,12 +3212,11 @@ def _try_cast(arr, take_fast_path): # e.g. 
indexes can have different conversions (so don't fast path # them) # GH 6140 - subarr = _sanitize_index(data, index, copy=True) + subarr = _sanitize_index(data, index, copy=copy) else: - subarr = _try_cast(data, True) - if copy: - subarr = data.copy() + # we will try to copy be-definition here + subarr = _try_cast(data, True) elif isinstance(data, Categorical): subarr = data diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 3ca185cf158a7..c29821ba51284 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -233,10 +233,6 @@ def test_construction_with_conversions(self): # convert from a numpy array of non-ns timedelta64 arr = np.array([1, 2, 3], dtype='timedelta64[s]') - s = Series(arr) - expected = Series(pd.timedelta_range('00:00:01', periods=3, freq='s')) - assert_series_equal(s, expected) - df = DataFrame(index=range(3)) df['A'] = arr expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3, @@ -244,21 +240,6 @@ def test_construction_with_conversions(self): index=range(3)) assert_frame_equal(df, expected) - # convert from a numpy array of non-ns datetime64 - # note that creating a numpy datetime64 is in LOCAL time!!!! - # seems to work for M8[D], but not for M8[s] - - s = Series(np.array(['2013-01-01', '2013-01-02', - '2013-01-03'], dtype='datetime64[D]')) - assert_series_equal(s, Series(date_range('20130101', periods=3, - freq='D'))) - - # s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 - # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')) - - # assert_series_equal(s,date_range('20130101 - # 00:00:01',period=3,freq='s')) - expected = DataFrame({ 'dt1': Timestamp('20130101'), 'dt2': date_range('20130101', periods=3), @@ -467,7 +448,7 @@ def test_convert_objects(self): self.mixed_frame['I'] = '1' # add in some items that will be nan - l = len(self.mixed_frame) + length = len(self.mixed_frame) self.mixed_frame['J'] = '1.' 
self.mixed_frame['K'] = '1' self.mixed_frame.loc[0:5, ['J', 'K']] = 'garbled' @@ -476,8 +457,8 @@ def test_convert_objects(self): assert converted['I'].dtype == 'int64' assert converted['J'].dtype == 'float64' assert converted['K'].dtype == 'float64' - assert len(converted['J'].dropna()) == l - 5 - assert len(converted['K'].dropna()) == l - 5 + assert len(converted['J'].dropna()) == length - 5 + assert len(converted['K'].dropna()) == length - 5 # via astype converted = self.mixed_frame.copy() diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index 0197fc4c52617..e211807b6a3e4 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -130,11 +130,11 @@ def test_astype_raises(self): # GH 13149, GH 13209 idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) - pytest.raises(ValueError, idx.astype, float) - pytest.raises(ValueError, idx.astype, 'timedelta64') - pytest.raises(ValueError, idx.astype, 'timedelta64[ns]') - pytest.raises(ValueError, idx.astype, 'datetime64') - pytest.raises(ValueError, idx.astype, 'datetime64[D]') + pytest.raises(TypeError, idx.astype, float) + pytest.raises(TypeError, idx.astype, 'timedelta64') + pytest.raises(TypeError, idx.astype, 'timedelta64[ns]') + pytest.raises(TypeError, idx.astype, 'datetime64') + pytest.raises(TypeError, idx.astype, 'datetime64[D]') def test_index_convert_to_datetime_array(self): def _check_rng(rng): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index e5ee078d3558d..7fefcc859d447 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -47,10 +47,10 @@ def test_astype_raises(self): # GH 13149, GH 13209 idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') - pytest.raises(ValueError, idx.astype, str) - pytest.raises(ValueError, idx.astype, float) - pytest.raises(ValueError, idx.astype, 
'timedelta64') - pytest.raises(ValueError, idx.astype, 'timedelta64[ns]') + pytest.raises(TypeError, idx.astype, str) + pytest.raises(TypeError, idx.astype, float) + pytest.raises(TypeError, idx.astype, 'timedelta64') + pytest.raises(TypeError, idx.astype, 'timedelta64[ns]') def test_pickle_compat_construction(self): pass diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index 7a761cfe30c62..0fa0e036096d0 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -66,10 +66,10 @@ def test_astype_raises(self): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) - pytest.raises(ValueError, idx.astype, float) - pytest.raises(ValueError, idx.astype, str) - pytest.raises(ValueError, idx.astype, 'datetime64') - pytest.raises(ValueError, idx.astype, 'datetime64[ns]') + pytest.raises(TypeError, idx.astype, float) + pytest.raises(TypeError, idx.astype, str) + pytest.raises(TypeError, idx.astype, 'datetime64') + pytest.raises(TypeError, idx.astype, 'datetime64[ns]') def test_pickle_compat_construction(self): pass diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index d296086021349..e62b19294a07b 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -14,9 +14,9 @@ from pandas.core.dtypes.common import ( is_categorical_dtype, is_datetime64tz_dtype) -from pandas import (Index, Series, isna, date_range, - NaT, period_range, MultiIndex, IntervalIndex) -from pandas.core.indexes.datetimes import Timestamp, DatetimeIndex +from pandas import (Index, Series, isna, date_range, Timestamp, + NaT, period_range, timedelta_range, MultiIndex, + IntervalIndex) from pandas._libs import lib from pandas._libs.tslib import iNaT @@ -289,6 +289,25 @@ def test_constructor_copy(self): assert x[0] == 2. assert y[0] == 1. 
+ @pytest.mark.parametrize( + "index", + [ + pd.date_range('20170101', periods=3, tz='US/Eastern'), + pd.date_range('20170101', periods=3), + pd.timedelta_range('1 day', periods=3), + pd.period_range('2012Q1', periods=3, freq='Q'), + pd.Index(list('abc')), + pd.Int64Index([1, 2, 3]), + pd.RangeIndex(0, 3)], + ids=lambda x: type(x).__name__) + def test_constructor_limit_copies(self, index): + # GH 17449 + # limit copies of input + s = pd.Series(index) + + # we make 1 copy; this is just a smoke test here + assert s._data.blocks[0].values is not index + def test_constructor_pass_none(self): s = Series(None, index=lrange(5)) assert s.dtype == np.float64 @@ -524,25 +543,6 @@ def test_constructor_with_datetime_tz(self): result = pd.concat([s.iloc[0:1], s.iloc[1:]]) assert_series_equal(result, s) - # astype - result = s.astype(object) - expected = Series(DatetimeIndex(s._values).asobject) - assert_series_equal(result, expected) - - result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz) - assert_series_equal(result, s) - - # astype - datetime64[ns, tz] - result = Series(s.values).astype('datetime64[ns, US/Eastern]') - assert_series_equal(result, s) - - result = Series(s.values).astype(s.dtype) - assert_series_equal(result, s) - - result = s.astype('datetime64[ns, CET]') - expected = Series(date_range('20130101 06:00:00', periods=3, tz='CET')) - assert_series_equal(result, expected) - # short str assert 'datetime64[ns, US/Eastern]' in str(s) @@ -807,17 +807,67 @@ def test_auto_conversion(self): series = Series(list(date_range('1/1/2000', periods=10))) assert series.dtype == 'M8[ns]' - def test_constructor_cant_cast_datetime64(self): - msg = "Cannot cast datetime64 to " - with tm.assert_raises_regex(TypeError, msg): - Series(date_range('1/1/2000', periods=10), dtype=float) + def test_convert_non_ns(self): + # convert from a numpy array of non-ns timedelta64 + arr = np.array([1, 2, 3], dtype='timedelta64[s]') + s = Series(arr) + expected = 
Series(pd.timedelta_range('00:00:01', periods=3, freq='s')) + assert_series_equal(s, expected) + + # convert from a numpy array of non-ns datetime64 + # note that creating a numpy datetime64 is in LOCAL time!!!! + # seems to work for M8[D], but not for M8[s] + + s = Series(np.array(['2013-01-01', '2013-01-02', + '2013-01-03'], dtype='datetime64[D]')) + assert_series_equal(s, Series(date_range('20130101', periods=3, + freq='D'))) + + # s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 + # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')) + + # assert_series_equal(s,date_range('20130101 + # 00:00:01',period=3,freq='s')) + + @pytest.mark.parametrize( + "index", + [ + date_range('1/1/2000', periods=10), + timedelta_range('1 day', periods=10), + period_range('2000-Q1', periods=10, freq='Q')], + ids=lambda x: type(x).__name__) + def test_constructor_cant_cast_datetimelike(self, index): + # floats are not ok + msg = "Cannot cast {} to ".format(type(index).__name__) with tm.assert_raises_regex(TypeError, msg): - Series(date_range('1/1/2000', periods=10), dtype=int) + Series(index, dtype=float) + + # ints are ok + # we test with np.int64 to get similar results on + # windows / 32-bit platforms + result = Series(index, dtype=np.int64) + expected = Series(index.astype(np.int64)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "index", + [ + date_range('1/1/2000', periods=10), + timedelta_range('1 day', periods=10), + period_range('2000-Q1', periods=10, freq='Q')], + ids=lambda x: type(x).__name__) + def test_constructor_cast_object(self, index): + s = Series(index, dtype=object) + exp = Series(index).astype(object) + tm.assert_series_equal(s, exp) + + s = Series(pd.Index(index, dtype=object), dtype=object) + exp = Series(index).astype(object) + tm.assert_series_equal(s, exp) - def test_constructor_cast_object(self): - s = Series(date_range('1/1/2000', periods=10), dtype=object) - exp = Series(date_range('1/1/2000', periods=10)) + s = 
Series(index.astype(object), dtype=object) + exp = Series(index).astype(object) tm.assert_series_equal(s, exp) def test_constructor_generic_timestamp_deprecated(self): diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index b20c1817e5671..ad6d019b5287e 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -17,6 +17,7 @@ Categorical, Index ) from pandas.api.types import CategoricalDtype +import pandas._libs.tslib as tslib from pandas.compat import lrange, range, u from pandas import compat @@ -69,8 +70,7 @@ def test_astype_cast_object_int(self): tm.assert_series_equal(result, Series(np.arange(1, 5))) - def test_astype_datetimes(self): - import pandas._libs.tslib as tslib + def test_astype_datetime(self): s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5)) s = s.astype('O') @@ -89,6 +89,33 @@ def test_astype_datetimes(self): s = s.astype('O') assert s.dtype == np.object_ + def test_astype_datetime64tz(self): + s = Series(date_range('20130101', periods=3, tz='US/Eastern')) + + # astype + result = s.astype(object) + expected = Series(s.astype(object), dtype=object) + tm.assert_series_equal(result, expected) + + result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz) + tm.assert_series_equal(result, s) + + # astype - object, preserves on construction + result = Series(s.astype(object)) + expected = s.astype(object) + tm.assert_series_equal(result, expected) + + # astype - datetime64[ns, tz] + result = Series(s.values).astype('datetime64[ns, US/Eastern]') + tm.assert_series_equal(result, s) + + result = Series(s.values).astype(s.dtype) + tm.assert_series_equal(result, s) + + result = s.astype('datetime64[ns, CET]') + expected = Series(date_range('20130101 06:00:00', periods=3, tz='CET')) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("dtype", [compat.text_type, np.str_]) @pytest.mark.parametrize("series", [Series([string.digits * 10, tm.rands(63),
closes #17449
https://api.github.com/repos/pandas-dev/pandas/pulls/18231
2017-11-11T20:29:19Z
2017-11-21T00:30:37Z
2017-11-21T00:30:37Z
2017-11-21T00:31:30Z
BUG: MultiIndex not raising AttributeError with a million records
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 185f08514641f..793e9bf17bac9 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -73,7 +73,7 @@ Conversion Indexing ^^^^^^^^ -- +- Bug where a ``MultiIndex`` with more than a million records was not raising ``AttributeError`` when trying to access a missing attribute (:issue:`18165`) - - diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4cc59f5297058..f603a0eef36a5 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -446,6 +446,17 @@ def _shallow_copy_with_infer(self, values=None, **kwargs): **kwargs) return self._shallow_copy(values, **kwargs) + @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) + def __contains__(self, key): + hash(key) + try: + self.get_loc(key) + return True + except (LookupError, TypeError): + return False + + contains = __contains__ + @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is not None: @@ -1370,17 +1381,6 @@ def nlevels(self): def levshape(self): return tuple(len(x) for x in self.levels) - @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) - def __contains__(self, key): - hash(key) - try: - self.get_loc(key) - return True - except LookupError: - return False - - contains = __contains__ - def __reduce__(self): """Necessary for making this object picklable""" d = dict(levels=[lev for lev in self.levels], diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index ded5de9253eaf..b69b958d4e4ba 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -2981,3 +2981,13 @@ def test_nan_stays_float(self): assert pd.isna(df0.index.get_level_values(1)).all() # the following failed in 0.14.1 assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + def test_million_record_attribute_error(self): + # GH 18165 + r = 
list(range(1000000)) + df = pd.DataFrame({'a': r, 'b': r}, + index=pd.MultiIndex.from_tuples([(x, x) for x in r])) + + with tm.assert_raises_regex(AttributeError, + "'Series' object has no attribute 'foo'"): + df['a'].foo()
- [X] closes #18165 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18229
2017-11-11T18:23:22Z
2017-11-12T16:14:07Z
2017-11-12T16:14:07Z
2017-12-11T20:23:49Z
Assert at least one tz arg is always UTC
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 61efc865112a9..3775ab3417b63 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -401,7 +401,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): """ Convert the val (in i8) from timezone1 to timezone2 - This is a single timezone versoin of tz_convert + This is a single timezone version of tz_convert Parameters ---------- @@ -422,6 +422,9 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): pandas_datetimestruct dts datetime dt + # See GH#17734 We should always be converting either from UTC or to UTC + assert (is_utc(tz1) or tz1 == 'UTC') or (is_utc(tz2) or tz2 == 'UTC') + if val == NPY_NAT: return val @@ -444,8 +447,8 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): if get_timezone(tz2) == 'UTC': return utc_date - if is_tzlocal(tz2): - dt64_to_dtstruct(val, &dts) + elif is_tzlocal(tz2): + dt64_to_dtstruct(utc_date, &dts) dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz2) delta = int(get_utcoffset(tz2, dt).total_seconds()) * 1000000000
closes #17734 - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18228
2017-11-11T16:37:06Z
2017-11-12T19:34:00Z
2017-11-12T19:34:00Z
2017-12-08T19:38:48Z
Prevent passing invalid kwds to DateOffset constructors
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 7229bd38fffa9..0ffc02aa72456 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -79,7 +79,7 @@ Other API Changes - :func:`Dataframe.unstack` will now default to filling with ``np.nan`` for ``object`` columns. (:issue:`12815`) - :class:`IntervalIndex` constructor will raise if the ``closed`` parameter conflicts with how the input data is inferred to be closed (:issue:`18421`) - Inserting missing values into indexes will work for all types of indexes and automatically insert the correct type of missing value (``NaN``, ``NaT``, etc.) regardless of the type passed in (:issue:`18295`) - +- Restricted ``DateOffset`` keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`, :issue:`18226`). .. _whatsnew_0220.deprecations: diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index b03d48bba1649..4ed4d4a9b7b99 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -261,7 +261,7 @@ def _validate_business_time(t_input): # --------------------------------------------------------------------- # Constructor Helpers -_rd_kwds = set([ +relativedelta_kwds = set([ 'years', 'months', 'weeks', 'days', 'year', 'month', 'week', 'day', 'weekday', 'hour', 'minute', 'second', 'microsecond', @@ -406,6 +406,33 @@ class _BaseOffset(object): # will raise NotImplementedError. return get_day_of_month(other, self._day_opt) + def _validate_n(self, n): + """ + Require that `n` be a nonzero integer. 
+ + Parameters + ---------- + n : int + + Returns + ------- + nint : int + + Raises + ------ + TypeError if `int(n)` raises + ValueError if n != int(n) + """ + try: + nint = int(n) + except (ValueError, TypeError): + raise TypeError('`n` argument must be an integer, ' + 'got {ntype}'.format(ntype=type(n))) + if n != nint: + raise ValueError('`n` argument must be an integer, ' + 'got {n}'.format(n=n)) + return nint + class BaseOffset(_BaseOffset): # Here we add __rfoo__ methods that don't play well with cdef classes diff --git a/pandas/tests/tseries/offsets/conftest.py b/pandas/tests/tseries/offsets/conftest.py index 25446c24b28c0..76f24123ea0e1 100644 --- a/pandas/tests/tseries/offsets/conftest.py +++ b/pandas/tests/tseries/offsets/conftest.py @@ -7,6 +7,19 @@ def offset_types(request): return request.param +@pytest.fixture(params=[getattr(offsets, o) for o in offsets.__all__ if + issubclass(getattr(offsets, o), offsets.MonthOffset) + and o != 'MonthOffset']) +def month_classes(request): + return request.param + + +@pytest.fixture(params=[getattr(offsets, o) for o in offsets.__all__ if + issubclass(getattr(offsets, o), offsets.Tick)]) +def tick_classes(request): + return request.param + + @pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']) def tz(request): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 6821017c89c3a..357c95282e78d 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -17,6 +17,7 @@ get_offset, get_standard_freq) from pandas.core.indexes.datetimes import ( _to_m8, DatetimeIndex, _daterange_cache) +import pandas._libs.tslibs.offsets as liboffsets from pandas._libs.tslibs.offsets import WeekDay, CacheableOffset from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd, BusinessHour, WeekOfMonth, CBMonthEnd, @@ -4682,9 +4683,45 @@ def 
test_all_offset_classes(self, tup): assert first == second +# --------------------------------------------------------------------- def test_get_offset_day_error(): # subclass of _BaseOffset must override _day_opt attribute, or we should # get a NotImplementedError with pytest.raises(NotImplementedError): DateOffset()._get_offset_day(datetime.now()) + + +@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds))) +def test_valid_month_attributes(kwd, month_classes): + # GH#18226 + cls = month_classes + # check that we cannot create e.g. MonthEnd(weeks=3) + with pytest.raises(TypeError): + cls(**{kwd: 3}) + + +@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds))) +def test_valid_tick_attributes(kwd, tick_classes): + # GH#18226 + cls = tick_classes + # check that we cannot create e.g. Hour(weeks=3) + with pytest.raises(TypeError): + cls(**{kwd: 3}) + + +def test_validate_n_error(): + with pytest.raises(TypeError): + DateOffset(n='Doh!') + + with pytest.raises(TypeError): + MonthBegin(n=timedelta(1)) + + with pytest.raises(TypeError): + BDay(n=np.array([1, 2], dtype=np.int64)) + + +def test_require_integers(offset_types): + cls = offset_types + with pytest.raises(ValueError): + cls(n=1.5) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 90496729554f8..7b699349c3f07 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- +from datetime import date, datetime, timedelta import functools import operator -from datetime import date, datetime, timedelta from pandas.compat import range from pandas import compat import numpy as np @@ -166,7 +166,7 @@ def __add__(date): normalize = False def __init__(self, n=1, normalize=False, **kwds): - self.n = int(n) + self.n = self._validate_n(n) self.normalize = normalize self.kwds = kwds @@ -473,7 +473,7 @@ class BusinessDay(BusinessMixin, SingleConstructorOffset): _adjust_dst = True def __init__(self, n=1, 
normalize=False, offset=timedelta(0)): - self.n = int(n) + self.n = self._validate_n(n) self.normalize = normalize self.kwds = {'offset': offset} self._offset = offset @@ -782,7 +782,7 @@ class BusinessHour(BusinessHourMixin, SingleConstructorOffset): def __init__(self, n=1, normalize=False, start='09:00', end='17:00', offset=timedelta(0)): - self.n = int(n) + self.n = self._validate_n(n) self.normalize = normalize super(BusinessHour, self).__init__(start=start, end=end, offset=offset) @@ -819,7 +819,7 @@ class CustomBusinessDay(BusinessDay): def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, offset=timedelta(0)): - self.n = int(n) + self.n = self._validate_n(n) self.normalize = normalize self._offset = offset self.kwds = {} @@ -887,7 +887,7 @@ class CustomBusinessHour(BusinessHourMixin, SingleConstructorOffset): def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, start='09:00', end='17:00', offset=timedelta(0)): - self.n = int(n) + self.n = self._validate_n(n) self.normalize = normalize super(CustomBusinessHour, self).__init__(start=start, end=end, offset=offset) @@ -919,6 +919,11 @@ def next_bday(self): class MonthOffset(SingleConstructorOffset): _adjust_dst = True + def __init__(self, n=1, normalize=False): + self.n = self._validate_n(n) + self.normalize = normalize + self.kwds = {} + @property def name(self): if self.isAnchored: @@ -994,7 +999,8 @@ def __init__(self, n=1, normalize=False, day_of_month=None): msg = 'day_of_month must be {min}<=day_of_month<=27, got {day}' raise ValueError(msg.format(min=self._min_day_of_month, day=self.day_of_month)) - self.n = int(n) + + self.n = self._validate_n(n) self.normalize = normalize self.kwds = {'day_of_month': self.day_of_month} @@ -1205,7 +1211,7 @@ class CustomBusinessMonthEnd(BusinessMixin, MonthOffset): def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, 
offset=timedelta(0)): - self.n = int(n) + self.n = self._validate_n(n) self.normalize = normalize self._offset = offset self.kwds = {} @@ -1278,7 +1284,7 @@ class CustomBusinessMonthBegin(BusinessMixin, MonthOffset): def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, offset=timedelta(0)): - self.n = int(n) + self.n = self._validate_n(n) self.normalize = normalize self._offset = offset self.kwds = {} @@ -1345,7 +1351,7 @@ class Week(EndMixin, DateOffset): _prefix = 'W' def __init__(self, n=1, normalize=False, weekday=None): - self.n = n + self.n = self._validate_n(n) self.normalize = normalize self.weekday = weekday @@ -1424,7 +1430,7 @@ class WeekOfMonth(DateOffset): _adjust_dst = True def __init__(self, n=1, normalize=False, week=None, weekday=None): - self.n = n + self.n = self._validate_n(n) self.normalize = normalize self.weekday = weekday self.week = week @@ -1509,7 +1515,7 @@ class LastWeekOfMonth(DateOffset): _prefix = 'LWOM' def __init__(self, n=1, normalize=False, weekday=None): - self.n = n + self.n = self._validate_n(n) self.normalize = normalize self.weekday = weekday @@ -1575,7 +1581,7 @@ class QuarterOffset(DateOffset): # point def __init__(self, n=1, normalize=False, startingMonth=None): - self.n = n + self.n = self._validate_n(n) self.normalize = normalize if startingMonth is None: startingMonth = self._default_startingMonth @@ -1820,7 +1826,7 @@ class FY5253(DateOffset): def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, variation="nearest"): - self.n = n + self.n = self._validate_n(n) self.normalize = normalize self.startingMonth = startingMonth self.weekday = weekday @@ -2032,7 +2038,7 @@ class FY5253Quarter(DateOffset): def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, qtr_with_extra_week=1, variation="nearest"): - self.n = n + self.n = self._validate_n(n) self.normalize = normalize self.weekday = weekday @@ -2158,6 +2164,11 @@ class Easter(DateOffset): """ 
_adjust_dst = True + def __init__(self, n=1, normalize=False): + self.n = self._validate_n(n) + self.normalize = normalize + self.kwds = {} + @apply_wraps def apply(self, other): current_easter = easter(other.year) @@ -2199,6 +2210,12 @@ class Tick(SingleConstructorOffset): _inc = Timedelta(microseconds=1000) _prefix = 'undefined' + def __init__(self, n=1, normalize=False): + # TODO: do Tick classes with normalize=True make sense? + self.n = self._validate_n(n) + self.normalize = normalize + self.kwds = {} + __gt__ = _tick_comp(operator.gt) __ge__ = _tick_comp(operator.ge) __lt__ = _tick_comp(operator.lt) @@ -2257,6 +2274,7 @@ def delta(self): def nanos(self): return delta_to_nanoseconds(self.delta) + # TODO: Should Tick have its own apply_index? def apply(self, other): # Timestamp can handle tz and nano sec, thus no need to use apply_wraps if isinstance(other, Timestamp):
- [ ] closes #xxxx - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18226
2017-11-11T04:09:16Z
2017-11-25T20:58:01Z
2017-11-25T20:58:01Z
2017-11-25T21:13:49Z
PERF: Vectorized Timedelta property access (#18092)
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index c112d1ef72eb8..0f8c8458628b1 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -40,3 +40,46 @@ def setup(self): def test_add_td_ts(self): self.td + self.ts + + +class TimedeltaProperties(object): + goal_time = 0.2 + + def setup(self): + self.td = Timedelta(days=365, minutes=35, seconds=25, milliseconds=35) + + def time_timedelta_days(self): + self.td.days + + def time_timedelta_seconds(self): + self.td.seconds + + def time_timedelta_microseconds(self): + self.td.microseconds + + def time_timedelta_nanoseconds(self): + self.td.nanoseconds + + +class DatetimeAccessor(object): + goal_time = 0.2 + + def setup(self): + self.N = 100000 + self.series = pd.Series( + pd.timedelta_range('1 days', periods=self.N, freq='h') + ) + def time_dt_accessor(self): + self.series.dt + + def time_timedelta_dt_accessor_days(self): + self.series.dt.days + + def time_timedelta_dt_accessor_seconds(self): + self.series.dt.seconds + + def time_timedelta_dt_accessor_microseconds(self): + self.series.dt.microseconds + + def time_timedelta_dt_accessor_nanoseconds(self): + self.series.dt.nanoseconds diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 50f10efb07484..dd5b849b42a08 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -72,6 +72,7 @@ Performance Improvements - Indexers on ``Series`` or ``DataFrame`` no longer create a reference cycle (:issue:`17956`) - Added a keyword argument, ``cache``, to :func:`to_datetime` that improved the performance of converting duplicate datetime arguments (:issue:`11665`) - :class`DateOffset` arithmetic performance is improved (:issue:`18218`) +- Converting a ``Series`` of ``Timedelta`` objects to days, seconds, etc... sped up through vectorization of underlying methods (:issue:`18092`) - .. 
_whatsnew_0220.docs: diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index f8254ed9d8418..7278cbaff86ca 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -562,6 +562,17 @@ void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, convert_datetime_to_datetimestruct(&meta, val, result); } +void pandas_timedelta_to_timedeltastruct(npy_timedelta val, + PANDAS_DATETIMEUNIT fr, + pandas_timedeltastruct *result) { + pandas_datetime_metadata meta; + + meta.base = fr; + meta.num - 1; + + convert_timedelta_to_timedeltastruct(&meta, val, result); +} + PANDAS_DATETIMEUNIT get_datetime64_unit(PyObject *obj) { return (PANDAS_DATETIMEUNIT)((PyDatetimeScalarObject *)obj)->obmeta.base; } @@ -980,3 +991,107 @@ int convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta, return 0; } + +/* + * Converts a timedelta from a timedeltastruct to a timedelta based + * on some metadata. The timedelta is assumed to be valid. + * + * Returns 0 on success, -1 on failure. 
+ */ +int convert_timedelta_to_timedeltastruct(pandas_timedelta_metadata *meta, + npy_timedelta td, + pandas_timedeltastruct *out) { + npy_int64 perday; + npy_int64 frac; + npy_int64 sfrac; + npy_int64 ifrac; + int sign; + npy_int64 DAY_NS = 86400000000000LL; + + /* Initialize the output to all zeros */ + memset(out, 0, sizeof(pandas_timedeltastruct)); + + switch (meta->base) { + case PANDAS_FR_ns: + + // put frac in seconds + if (td < 0 && td % (1000LL * 1000LL * 1000LL) != 0) + frac = td / (1000LL * 1000LL * 1000LL) - 1; + else + frac = td / (1000LL * 1000LL * 1000LL); + + if (frac < 0) { + sign = -1; + + // even fraction + if ((-frac % 86400LL) != 0) { + out->days = -frac / 86400LL + 1; + frac += 86400LL * out->days; + } else { + frac = -frac; + } + } else { + sign = 1; + out->days = 0; + } + + if (frac >= 86400) { + out->days += frac / 86400LL; + frac -= out->days * 86400LL; + } + + if (frac >= 3600) { + out->hrs = frac / 3600LL; + frac -= out->hrs * 3600LL; + } else { + out->hrs = 0; + } + + if (frac >= 60) { + out->min = frac / 60LL; + frac -= out->min * 60LL; + } else { + out->min = 0; + } + + if (frac >= 0) { + out->sec = frac; + frac -= out->sec; + } else { + out->sec = 0; + } + + sfrac = (out->hrs * 3600LL + out->min * 60LL + + out->sec) * (1000LL * 1000LL * 1000LL); + + if (sign < 0) + out->days = -out->days; + + ifrac = td - (out->days * DAY_NS + sfrac); + + if (ifrac != 0) { + out->ms = ifrac / (1000LL * 1000LL); + ifrac -= out->ms * 1000LL * 1000LL; + out->us = ifrac / 1000LL; + ifrac -= out->us * 1000LL; + out->ns = ifrac; + } else { + out->ms = 0; + out->us = 0; + out->ns = 0; + } + + out->seconds = out->hrs * 3600 + out->min * 60 + out->sec; + out->microseconds = out->ms * 1000 + out->us; + out->nanoseconds = out->ns; + break; + + default: + PyErr_SetString(PyExc_RuntimeError, + "NumPy datetime metadata is corrupted with invalid " + "base unit"); + return -1; + } + + return 0; +} diff --git a/pandas/_libs/src/datetime/np_datetime.h 
b/pandas/_libs/src/datetime/np_datetime.h index af3d2e0f01c1b..c51a4bddac82f 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -49,11 +49,18 @@ typedef struct { npy_int32 month, day, hour, min, sec, us, ps, as; } pandas_datetimestruct; +typedef struct { + npy_int64 days; + npy_int32 hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds; +} pandas_timedeltastruct; + typedef struct { PANDAS_DATETIMEUNIT base; int num; } pandas_datetime_metadata; +typedef pandas_datetime_metadata pandas_timedelta_metadata; + extern const pandas_datetimestruct _NS_MIN_DTS; extern const pandas_datetimestruct _NS_MAX_DTS; @@ -71,6 +78,10 @@ npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, pandas_datetimestruct *result); +void pandas_timedelta_to_timedeltastruct(npy_timedelta val, + PANDAS_DATETIMEUNIT fr, + pandas_timedeltastruct *result); + int dayofweek(int y, int m, int d); extern const int days_per_month_table[2][12]; @@ -131,6 +142,11 @@ convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta, npy_datetime dt, pandas_datetimestruct *out); +int +convert_timedelta_to_timedeltastruct(pandas_timedelta_metadata *meta, + npy_timedelta td, + pandas_timedeltastruct *out); + PANDAS_DATETIMEUNIT get_datetime64_unit(PyObject *obj); diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index b40646295cce5..3ab84853dfc4a 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -17,7 +17,8 @@ from numpy cimport ndarray, int64_t, int32_t, int8_t np.import_array() -from np_datetime cimport pandas_datetimestruct, dt64_to_dtstruct +from np_datetime cimport (pandas_datetimestruct, pandas_timedeltastruct, + dt64_to_dtstruct, td64_to_tdstruct) from datetime cimport ( days_per_month_table, @@ -545,6 +546,123 @@ def get_date_field(ndarray[int64_t] dtindex, object field): raise 
ValueError("Field %s not supported" % field) +@cython.wraparound(False) +@cython.boundscheck(False) +def get_timedelta_field(ndarray[int64_t] tdindex, object field): + """ + Given a int64-based timedelta index, extract the days, hrs, sec., + field and return an array of these values. + """ + cdef: + Py_ssize_t i, count = 0 + ndarray[int32_t] out + pandas_timedeltastruct tds + + count = len(tdindex) + out = np.empty(count, dtype='i4') + + if field == 'days': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.days + return out + + elif field == 'h': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.hrs + return out + + elif field == 's': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.sec + return out + + elif field == 'seconds': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.seconds + return out + + elif field == 'ms': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.ms + return out + + elif field == 'microseconds': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.microseconds + return out + + elif field == 'us': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.us + return out + + elif field == 'ns': + with nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.ns + return out + + elif field == 'nanoseconds': + with 
nogil: + for i in range(count): + if tdindex[i] == NPY_NAT: + out[i] = -1 + continue + + td64_to_tdstruct(tdindex[i], &tds) + out[i] = tds.nanoseconds + return out + + raise ValueError("Field %s not supported" % field) + + cdef inline int days_in_month(pandas_datetimestruct dts) nogil: return days_per_month_table[is_leapyear(dts.year)][dts.month - 1] diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 1ae0499f90c0d..3692822ada135 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -30,6 +30,10 @@ cdef extern from "../src/datetime/np_datetime.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + ctypedef struct pandas_timedeltastruct: + int64_t days + int32_t hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds + ctypedef enum PANDAS_DATETIMEUNIT: PANDAS_FR_Y PANDAS_FR_M @@ -54,6 +58,7 @@ cdef check_dts_bounds(pandas_datetimestruct *dts) cdef int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil cdef void dt64_to_dtstruct(int64_t dt64, pandas_datetimestruct* out) nogil +cdef void td64_to_tdstruct(int64_t td64, pandas_timedeltastruct* out) nogil cdef int64_t pydatetime_to_dt64(datetime val, pandas_datetimestruct *dts) cdef int64_t pydate_to_dt64(date val, pandas_datetimestruct *dts) diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index abd6c59ea6244..72c028161a937 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -26,6 +26,11 @@ cdef extern from "../src/datetime/np_datetime.h": PANDAS_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil + void pandas_timedelta_to_timedeltastruct(npy_timedelta val, + PANDAS_DATETIMEUNIT fr, + pandas_timedeltastruct *result + ) nogil + pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS # ---------------------------------------------------------------------- @@ -127,6 +132,13 @@ cdef inline void dt64_to_dtstruct(int64_t dt64, 
pandas_datetime_to_datetimestruct(dt64, PANDAS_FR_ns, out) return +cdef inline void td64_to_tdstruct(int64_t td64, + pandas_timedeltastruct* out) nogil: + """Convenience function to call pandas_timedelta_to_timedeltastruct + with the by-far-most-common frequency PANDAS_FR_ns""" + pandas_timedelta_to_timedeltastruct(td64, PANDAS_FR_ns, out) + return + cdef inline int64_t pydatetime_to_dt64(datetime val, pandas_datetimestruct *dts): diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 869ff5ee77bda..aba213122ea31 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -26,7 +26,8 @@ from util cimport (is_timedelta64_object, is_datetime64_object, is_integer_object, is_float_object, is_string_object) -from np_datetime cimport cmp_scalar, reverse_ops +from np_datetime cimport (cmp_scalar, reverse_ops, td64_to_tdstruct, + pandas_timedeltastruct) from nattype import nat_strings, NaT from nattype cimport _checknull_with_nat @@ -584,65 +585,26 @@ cdef class _Timedelta(timedelta): """ compute the components """ - cdef int64_t sfrac, ifrac, frac, ivalue = self.value - if self.is_populated: return - # put frac in seconds - frac = ivalue / (1000 * 1000 * 1000) - if frac < 0: - self._sign = -1 + cdef: + pandas_timedeltastruct tds - # even fraction - if (-frac % 86400) != 0: - self._d = -frac / 86400 + 1 - frac += 86400 * self._d - else: - frac = -frac + td64_to_tdstruct(self.value, &tds) + self._d = tds.days + if self._d < 0: + self._sign = -1 else: self._sign = 1 - self._d = 0 - - if frac >= 86400: - self._d += frac / 86400 - frac -= self._d * 86400 - - if frac >= 3600: - self._h = frac / 3600 - frac -= self._h * 3600 - else: - self._h = 0 - - if frac >= 60: - self._m = frac / 60 - frac -= self._m * 60 - else: - self._m = 0 - - if frac >= 0: - self._s = frac - frac -= self._s - else: - self._s = 0 - - sfrac = (self._h * 3600 + self._m * 60 - + self._s) * (1000 * 1000 * 1000) - if self._sign < 0: - ifrac = 
ivalue + self._d * DAY_NS - sfrac - else: - ifrac = ivalue - (self._d * DAY_NS + sfrac) - - if ifrac != 0: - self._ms = ifrac / (1000 * 1000) - ifrac -= self._ms * 1000 * 1000 - self._us = ifrac / 1000 - ifrac -= self._us * 1000 - self._ns = ifrac - else: - self._ms = 0 - self._us = 0 - self._ns = 0 + self._h = tds.hrs + self._m = tds.min + self._s = tds.sec + self._ms = tds.ms + self._us = tds.us + self._ns = tds.ns + self._seconds = tds.seconds + self._microseconds = tds.microseconds self.is_populated = 1 @@ -671,10 +633,6 @@ cdef class _Timedelta(timedelta): def components(self): """ Return a Components NamedTuple-like """ self._ensure_components() - if self._sign < 0: - return Components(-self._d, self._h, self._m, self._s, - self._ms, self._us, self._ns) - # return the named tuple return Components(self._d, self._h, self._m, self._s, self._ms, self._us, self._ns) @@ -717,8 +675,6 @@ cdef class _Timedelta(timedelta): .components will return the shown components """ self._ensure_components() - if self._sign < 0: - return -1 * self._d return self._d @property @@ -729,7 +685,7 @@ cdef class _Timedelta(timedelta): .components will return the shown components """ self._ensure_components() - return self._h * 3600 + self._m * 60 + self._s + return self._seconds @property def microseconds(self): @@ -739,7 +695,7 @@ cdef class _Timedelta(timedelta): .components will return the shown components """ self._ensure_components() - return self._ms * 1000 + self._us + return self._microseconds @property def nanoseconds(self): @@ -778,9 +734,9 @@ cdef class _Timedelta(timedelta): if format == 'all': seconds_pretty = "%02d.%03d%03d%03d" % ( self._s, self._ms, self._us, self._ns) - return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d, - sign2_pretty, self._h, - self._m, seconds_pretty) + return "%d days%s%02d:%02d:%s" % (self._d, + sign2_pretty, self._h, + self._m, seconds_pretty) # by default not showing nano if self._ms or self._us or self._ns: @@ -794,7 +750,7 @@ cdef class 
_Timedelta(timedelta): if format == 'even_day': if not subs: - return "%s%d days" % (sign_pretty, self._d) + return "%d days" % (self._d) elif format == 'sub_day': if not self._d: @@ -806,10 +762,10 @@ cdef class _Timedelta(timedelta): self._h, self._m, seconds_pretty) if subs or format=='long': - return "%s%d days%s%02d:%02d:%s" % (sign_pretty, self._d, - sign2_pretty, self._h, - self._m, seconds_pretty) - return "%s%d days" % (sign_pretty, self._d) + return "%d days%s%02d:%02d:%s" % (self._d, + sign2_pretty, self._h, + self._m, seconds_pretty) + return "%d days" % (self._d) def __repr__(self): return "Timedelta('{0}')".format(self._repr_base(format='long')) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 604af1cfd678a..e4bc46fb7bdbe 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -35,20 +35,15 @@ from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timedelta, NaT, iNaT) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas._libs.tslibs.fields import get_timedelta_field def _field_accessor(name, alias, docstring=None): def f(self): + values = self.asi8 + result = get_timedelta_field(values, alias) if self.hasnans: - result = np.empty(len(self), dtype='float64') - mask = self._isnan - imask = ~mask - result.flat[imask] = np.array([getattr(Timedelta(val), alias) - for val in self.asi8[imask]]) - result[mask] = np.nan - else: - result = np.array([getattr(Timedelta(val), alias) - for val in self.asi8], dtype='int64') + result = self._maybe_mask_results(result, convert='float64') return Index(result, name=self.name)
closes #18092 - [X] closes #xxxx - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18225
2017-11-11T03:26:04Z
2017-11-12T18:02:09Z
2017-11-12T18:02:09Z
2017-11-15T22:46:43Z
Tslibs offsets immutable
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 87be9fa910101..c7f43d6c10a7a 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -10,6 +10,7 @@ from dateutil.relativedelta import relativedelta import numpy as np cimport numpy as np +from numpy cimport int64_t np.import_array() @@ -315,8 +316,8 @@ class EndMixin(object): # --------------------------------------------------------------------- # Base Classes - -class _BaseOffset(object): +@cython.auto_pickle(False) +cdef class _BaseOffset(object): """ Base class for DateOffset methods that are not overriden by subclasses and will (after pickle errors are resolved) go into a cdef class. @@ -325,6 +326,14 @@ class _BaseOffset(object): _normalize_cache = True _cacheable = False + cdef readonly: + int64_t n + bint normalize + + def __init__(self, n=1, normalize=False): + self.n = n + self.normalize = normalize + def __call__(self, other): return self.apply(other) @@ -361,6 +370,58 @@ class _BaseOffset(object): out = '<%s' % n_str + className + plural + self._repr_attrs() + '>' return out + def __setstate__(self, state): + """Reconstruct an instance from a pickled state""" + # Note: __setstate__ needs to be defined in the cython class otherwise + # trying to set self.n and self.normalize below will + # raise an AttributeError. 
+ if 'normalize' not in state: + # default for prior pickles + # See GH #7748, #7789 + state['normalize'] = False + if '_use_relativedelta' not in state: + state['_use_relativedelta'] = False + + if 'offset' in state: + # Older versions Business offsets have offset attribute + # instead of _offset + if '_offset' in state: # pragma: no cover + raise ValueError('Unexpected key `_offset`') + state['_offset'] = state.pop('offset') + state['kwds']['offset'] = state['_offset'] + + self.n = state.pop('n', 1) + self.normalize = state.pop('normalize', False) + self.__dict__ = state + + if 'weekmask' in state and 'holidays' in state: + # Business subclasses + calendar, holidays = _get_calendar(weekmask=self.weekmask, + holidays=self.holidays, + calendar=None) + self.kwds['calendar'] = self.calendar = calendar + self.kwds['holidays'] = self.holidays = holidays + self.kwds['weekmask'] = state['weekmask'] + + def __getstate__(self): + """Return a pickleable state""" + state = self.__dict__.copy() + + # Add attributes from the C base class that aren't in self.__dict__ + state['n'] = self.n + state['normalize'] = self.normalize + + # we don't want to actually pickle the calendar object + # as its a np.busyday; we recreate on deserilization + if 'calendar' in state: + del state['calendar'] + try: + state['kwds'].pop('calendar') + except KeyError: + pass + + return state + class BaseOffset(_BaseOffset): # Here we add __rfoo__ methods that don't play well with cdef classes diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 8015642919611..dbe137de1ce2b 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -2,6 +2,7 @@ Support pre-0.12 series pickle compatibility. """ +import inspect import sys import pandas # noqa import copy @@ -22,7 +23,6 @@ def load_reduce(self): stack[-1] = func(*args) return except Exception as e: - # If we have a deprecated function, # try to replace and try again. 
@@ -47,6 +47,22 @@ def load_reduce(self): except: pass + if (len(args) and inspect.isclass(args[0]) and + getattr(args[0], '_typ', None) == 'dateoffset' and + args[1] is object): + # See GH#17313 + from pandas.tseries import offsets + args = (args[0], offsets.BaseOffset,) + args[2:] + if len(args) == 3 and args[2] is None: + args = args[:2] + (1,) + # kludge + try: + stack[-1] = func(*args) + return + except: + pass + + # unknown exception, re-raise if getattr(self, 'is_verbose', None): print(sys.exc_info()) diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py index 4fd3bba01602f..30ae113637435 100644 --- a/pandas/tests/tseries/test_offsets.py +++ b/pandas/tests/tseries/test_offsets.py @@ -539,8 +539,7 @@ def setup_method(self, method): def test_different_normalize_equals(self): # equivalent in this special case offset = BDay() - offset2 = BDay() - offset2.normalize = True + offset2 = BDay(normalize=True) assert offset == offset2 def test_repr(self): @@ -734,8 +733,7 @@ def test_constructor_errors(self): def test_different_normalize_equals(self): # equivalent in this special case offset = self._offset() - offset2 = self._offset() - offset2.normalize = True + offset2 = self._offset(normalize=True) assert offset == offset2 def test_repr(self): @@ -1426,8 +1424,7 @@ def test_constructor_errors(self): def test_different_normalize_equals(self): # equivalent in this special case offset = self._offset() - offset2 = self._offset() - offset2.normalize = True + offset2 = self._offset(normalize=True) assert offset == offset2 def test_repr(self): @@ -1667,8 +1664,7 @@ def setup_method(self, method): def test_different_normalize_equals(self): # equivalent in this special case offset = CDay() - offset2 = CDay() - offset2.normalize = True + offset2 = CDay(normalize=True) assert offset == offset2 def test_repr(self): @@ -1953,8 +1949,7 @@ class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base): def test_different_normalize_equals(self): 
# equivalent in this special case offset = CBMonthEnd() - offset2 = CBMonthEnd() - offset2.normalize = True + offset2 = CBMonthEnd(normalize=True) assert offset == offset2 def test_repr(self): @@ -2067,8 +2062,7 @@ class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base): def test_different_normalize_equals(self): # equivalent in this special case offset = CBMonthBegin() - offset2 = CBMonthBegin() - offset2.normalize = True + offset2 = CBMonthBegin(normalize=True) assert offset == offset2 def test_repr(self): @@ -4899,3 +4893,15 @@ def test_all_offset_classes(self): first = Timestamp(test_values[0], tz='US/Eastern') + offset() second = Timestamp(test_values[1], tz='US/Eastern') assert first == second + + +def test_date_offset_immutable(): + offset = offsets.MonthBegin(n=2, normalize=True) + with pytest.raises(AttributeError): + offset.n = 1 + + # Check that it didn't get changed + assert offset.n == 2 + + with pytest.raises(AttributeError): + offset.normalize = False diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 5843aaa23be57..80dbd95f51621 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -156,15 +156,10 @@ def __add__(date): Since 0 is a bit weird, we suggest avoiding its use. 
""" - _use_relativedelta = False _adjust_dst = False - # default for prior pickles - normalize = False - def __init__(self, n=1, normalize=False, **kwds): - self.n = int(n) - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self.kwds = kwds self._offset, self._use_relativedelta = _determine_offset(kwds) @@ -256,6 +251,11 @@ def isAnchored(self): def _params(self): all_paras = dict(list(vars(self).items()) + list(self.kwds.items())) + + # Add in C-class attributes not present in self.__dict__ + all_paras['n'] = self.n + all_paras['normalize'] = self.normalize + if 'holidays' in all_paras and not all_paras['holidays']: all_paras.pop('holidays') exclude = ['kwds', 'name', 'normalize', 'calendar'] @@ -427,38 +427,6 @@ def _repr_attrs(self): out += ': ' + ', '.join(attrs) return out - def __getstate__(self): - """Return a pickleable state""" - state = self.__dict__.copy() - - # we don't want to actually pickle the calendar object - # as its a np.busyday; we recreate on deserilization - if 'calendar' in state: - del state['calendar'] - try: - state['kwds'].pop('calendar') - except KeyError: - pass - - return state - - def __setstate__(self, state): - """Reconstruct an instance from a pickled state""" - if 'offset' in state: - # Older versions have offset attribute instead of _offset - if '_offset' in state: # pragma: no cover - raise ValueError('Unexpected key `_offset`') - state['_offset'] = state.pop('offset') - state['kwds']['offset'] = state['_offset'] - self.__dict__ = state - if 'weekmask' in state and 'holidays' in state: - calendar, holidays = _get_calendar(weekmask=self.weekmask, - holidays=self.holidays, - calendar=None) - self.kwds['calendar'] = self.calendar = calendar - self.kwds['holidays'] = self.holidays = holidays - self.kwds['weekmask'] = state['weekmask'] - class BusinessDay(BusinessMixin, SingleConstructorOffset): """ @@ -468,8 +436,7 @@ class BusinessDay(BusinessMixin, SingleConstructorOffset): _adjust_dst = True def 
__init__(self, n=1, normalize=False, offset=timedelta(0)): - self.n = int(n) - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self.kwds = {'offset': offset} self._offset = offset @@ -776,8 +743,7 @@ class BusinessHour(BusinessHourMixin, SingleConstructorOffset): def __init__(self, n=1, normalize=False, start='09:00', end='17:00', offset=timedelta(0)): - self.n = int(n) - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) super(BusinessHour, self).__init__(start=start, end=end, offset=offset) @cache_readonly @@ -813,8 +779,7 @@ class CustomBusinessDay(BusinessDay): def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, offset=timedelta(0)): - self.n = int(n) - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self._offset = offset self.kwds = {} @@ -881,8 +846,7 @@ class CustomBusinessHour(BusinessHourMixin, SingleConstructorOffset): def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, start='09:00', end='17:00', offset=timedelta(0)): - self.n = int(n) - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) super(CustomBusinessHour, self).__init__(start=start, end=end, offset=offset) @@ -975,6 +939,7 @@ class SemiMonthOffset(DateOffset): _min_day_of_month = 2 def __init__(self, n=1, normalize=False, day_of_month=None): + BaseOffset.__init__(self, n, normalize) if day_of_month is None: self.day_of_month = self._default_day_of_month else: @@ -983,8 +948,8 @@ def __init__(self, n=1, normalize=False, day_of_month=None): msg = 'day_of_month must be {min}<=day_of_month<=27, got {day}' raise ValueError(msg.format(min=self._min_day_of_month, day=self.day_of_month)) - self.n = int(n) - self.normalize = normalize + # self.n = int(n) + # self.normalize = normalize self.kwds = {'day_of_month': self.day_of_month} @classmethod @@ -1259,8 +1224,7 @@ class CustomBusinessMonthEnd(BusinessMixin, MonthOffset): def 
__init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, offset=timedelta(0)): - self.n = int(n) - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self._offset = offset self.kwds = {} @@ -1330,8 +1294,7 @@ class CustomBusinessMonthBegin(BusinessMixin, MonthOffset): def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, offset=timedelta(0)): - self.n = int(n) - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self._offset = offset self.kwds = {} @@ -1394,8 +1357,7 @@ class Week(EndMixin, DateOffset): _prefix = 'W' def __init__(self, n=1, normalize=False, weekday=None): - self.n = n - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self.weekday = weekday if self.weekday is not None: @@ -1485,8 +1447,7 @@ class WeekOfMonth(DateOffset): _adjust_dst = True def __init__(self, n=1, normalize=False, week=None, weekday=None): - self.n = n - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self.weekday = weekday self.week = week @@ -1582,8 +1543,7 @@ class LastWeekOfMonth(DateOffset): _prefix = 'LWOM' def __init__(self, n=1, normalize=False, weekday=None): - self.n = n - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self.weekday = weekday if self.n == 0: @@ -1656,8 +1616,7 @@ class QuarterOffset(DateOffset): # point def __init__(self, n=1, normalize=False, startingMonth=None): - self.n = n - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) if startingMonth is None: startingMonth = self._default_startingMonth self.startingMonth = startingMonth @@ -2092,8 +2051,7 @@ class FY5253(DateOffset): def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, variation="nearest"): - self.n = n - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self.startingMonth = startingMonth self.weekday = weekday @@ -2342,8 +2300,7 @@ class 
FY5253Quarter(DateOffset): def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, qtr_with_extra_week=1, variation="nearest"): - self.n = n - self.normalize = normalize + BaseOffset.__init__(self, n, normalize) self.weekday = weekday self.startingMonth = startingMonth
This is the big hurdle towards making DateOffsets immutable. - Make tslibs.offsets._BaseOffset a cython class, define `n` and `normalize` to be readonly - Update tests that try to alter offsets inplace - Poke at things until pickle tests stop failing <-- the part with the kludge I'm not wild about the hack in pickle_compat, but haven't found any other way to avoid `TestCommon.test_pickle_v0_15_2` failing with: ``` cls = <class 'pandas.tseries.offsets.YearBegin'>, base = <type 'object'>, state = None def _reconstructor(cls, base, state): if base is object: > obj = object.__new__(cls) E TypeError: object.__new__(YearBegin) is not safe, use pandas._libs.tslibs.offsets._BaseOffset.__new__() ``` After this there will then be a) a bunch of other attributes to make readonly and b) the optimizations that are available once DateOffsets are immutable. But first we need to either OK the pickle_compat hack or find another way around.
https://api.github.com/repos/pandas-dev/pandas/pulls/18224
2017-11-10T23:57:47Z
2017-11-12T06:36:33Z
null
2018-06-19T22:58:04Z
DOC: small whatsnew updates
diff --git a/doc/source/api.rst b/doc/source/api.rst index b5cf593ac0d1f..ce88aed91823c 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1870,8 +1870,52 @@ Methods Timedelta.to_timedelta64 Timedelta.total_seconds +.. _api.frequencies: + +Frequencies +----------- + +.. currentmodule:: pandas.tseries.frequencies + + +.. autosummary:: + :toctree: generated/ + + to_offset + +.. _api.offsets: + +Offsets +------- + +.. currentmodule:: pandas.tseries.offsets + +.. autosummary:: + :toctree: generated/ + + DateOffset + Week + Day + Hour + Minute + Second + Milli + Micro + Nano + +.. autosummary:: + :toctree: generated/ + + MonthBegin + MonthEnd + QuarterBegin + QuarterEnd + YearBegin + YearEnd + Window ------ + .. currentmodule:: pandas.core.window Rolling objects are returned by ``.rolling`` calls: :func:`pandas.DataFrame.rolling`, :func:`pandas.Series.rolling`, etc. diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 51cce713ff930..61679b14a8592 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -22,8 +22,8 @@ New features Other Enhancements ^^^^^^^^^^^^^^^^^^ -- Better support for ``Dataframe.style.to_excel()`` output with the ``xlsxwriter`` engine. (:issue:`16149`) -- :func:`pd.tseries.frequencies.to_offset()` now accepts leading '+' signs e.g. '+1h'. (:issue:`18171`) +- Better support for :func:`Dataframe.style.to_excel` output with the ``xlsxwriter`` engine. (:issue:`16149`) +- :func:`pandas.tseries.frequencies.to_offset` now accepts leading '+' signs e.g. '+1h'. (:issue:`18171`) - .. _whatsnew_0220.api_breaking: @@ -41,10 +41,10 @@ Other API Changes ^^^^^^^^^^^^^^^^^ - ``NaT`` division with :class:`datetime.timedelta` will now return ``NaN`` instead of raising (:issue:`17876`) -- All-NaN levels in ``MultiIndex`` are now assigned float rather than object dtype, coherently with flat indexes (:issue:`17929`). 
-- :class:`Timestamp` will no longer silently ignore unused or invalid `tz` or `tzinfo` keyword arguments (:issue:`17690`) -- :class:`Timestamp` will no longer silently ignore invalid `freq` arguments (:issue:`5168`) -- :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the `tseries.offsets` module (:issue:`17830`) +- All-NaN levels in a ``MultiIndex`` are now assigned ``float`` rather than ``object`` dtype, promoting consistency with ``Index`` (:issue:`17929`). +- :class:`Timestamp` will no longer silently ignore unused or invalid ``tz`` or ``tzinfo`` keyword arguments (:issue:`17690`) +- :class:`Timestamp` will no longer silently ignore invalid ``freq`` arguments (:issue:`5168`) +- :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the ``pandas.tseries.offsets`` module (:issue:`17830`) .. _whatsnew_0220.deprecations: @@ -69,7 +69,7 @@ Removal of prior version deprecations/changes Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- Indexers on Series or DataFrame no longer create a reference cycle (:issue:`17956`) +- Indexers on ``Series`` or ``DataFrame`` no longer create a reference cycle (:issue:`17956`) - - @@ -98,8 +98,8 @@ Conversion Indexing ^^^^^^^^ -- Bug in :func:`PeriodIndex.truncate` which raises ``TypeError`` when ``PeriodIndex`` is monotonic (:issue:`17717`) -- Bug in ``DataFrame.groupby`` where key as tuple in a ``MultiIndex`` were interpreted as a list of keys (:issue:`17979`) +- Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`) +- Bug in :func:`DataFrame.groupby` where key as tuple in a ``MultiIndex`` were interpreted as a list of keys (:issue:`17979`) - - @@ -107,9 +107,9 @@ I/O ^^^ - :func:`read_html` now rewinds seekable IO objects after parse failure, before attempting to parse with a new parser. 
If a parser errors and the object is non-seekable, an informative error is raised suggesting the use of a different parser (:issue:`17975`) -- Bug in ``pd.read_msgpack()`` with a non existent file is passed in Python 2 (:issue:`15296`) -- Bug in :func:`pd.read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) -- Bug in :func:`pd.read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`) +- Bug in :func:`read_msgpack` with a non existent file is passed in Python 2 (:issue:`15296`) +- Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) +- Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`) - -
https://api.github.com/repos/pandas-dev/pandas/pulls/18219
2017-11-10T19:50:01Z
2017-11-11T12:52:14Z
2017-11-11T12:52:14Z
2017-11-11T22:07:05Z
Implement scalar shift_month mirroring tslib.shift_months
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 712119caae6f2..13fa33d5c3412 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -71,6 +71,7 @@ Performance Improvements - Indexers on ``Series`` or ``DataFrame`` no longer create a reference cycle (:issue:`17956`) - Added a keyword argument, ``cache``, to :func:`to_datetime` that improved the performance of converting duplicate datetime arguments (:issue:`11665`) +- :class`DateOffset` arithmetic performance is improved (:issue:`18218`) - .. _whatsnew_0220.docs: diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 87be9fa910101..c64b6568a0495 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -4,7 +4,7 @@ cimport cython import time -from cpython.datetime cimport timedelta, time as dt_time +from cpython.datetime cimport datetime, timedelta, time as dt_time from dateutil.relativedelta import relativedelta @@ -13,9 +13,9 @@ cimport numpy as np np.import_array() -from util cimport is_string_object +from util cimport is_string_object, is_integer_object -from pandas._libs.tslib import pydt_to_i8 +from pandas._libs.tslib import pydt_to_i8, monthrange from frequencies cimport get_freq_code from conversion cimport tz_convert_single @@ -375,3 +375,56 @@ class BaseOffset(_BaseOffset): # i.e. isinstance(other, (ABCDatetimeIndex, ABCSeries)) return other - self return -self + other + + +# ---------------------------------------------------------------------- +# RelativeDelta Arithmetic + + +cpdef datetime shift_month(datetime stamp, int months, object day_opt=None): + """ + Given a datetime (or Timestamp) `stamp`, an integer `months` and an + option `day_opt`, return a new datetimelike that many months later, + with day determined by `day_opt` using relativedelta semantics. 
+ + Scalar analogue of tslib.shift_months + + Parameters + ---------- + stamp : datetime or Timestamp + months : int + day_opt : None, 'start', 'end', or an integer + None: returned datetimelike has the same day as the input, or the + last day of the month if the new month is too short + 'start': returned datetimelike has day=1 + 'end': returned datetimelike has day on the last day of the month + int: returned datetimelike has day equal to day_opt + + Returns + ------- + shifted : datetime or Timestamp (same as input `stamp`) + """ + cdef: + int year, month, day + int dim, dy + + dy = (stamp.month + months) // 12 + month = (stamp.month + months) % 12 + + if month == 0: + month = 12 + dy -= 1 + year = stamp.year + dy + + dim = monthrange(year, month)[1] + if day_opt is None: + day = min(stamp.day, dim) + elif day_opt == 'start': + day = 1 + elif day_opt == 'end': + day = dim + elif is_integer_object(day_opt): + day = min(day_opt, dim) + else: + raise ValueError(day_opt) + return stamp.replace(year=year, month=month, day=day) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 5d1ea71d5cef5..4dc26f4dd69e2 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -22,6 +22,7 @@ _int_to_weekday, _weekday_to_int, _determine_offset, apply_index_wraps, + shift_month, BeginMixin, EndMixin, BaseOffset) @@ -252,6 +253,8 @@ def apply_index(self, i): "applied vectorized".format(kwd=kwd)) def isAnchored(self): + # TODO: Does this make sense for the general case? It would help + # if there were a canonical docstring for what isAnchored means. 
return (self.n == 1) def _params(self): @@ -721,6 +724,7 @@ def apply(self, other): return result else: + # TODO: Figure out the end of this sente raise ApplyTypeError( 'Only know how to combine business hour with ') @@ -927,10 +931,10 @@ def apply(self, other): n = self.n _, days_in_month = tslib.monthrange(other.year, other.month) if other.day != days_in_month: - other = other + relativedelta(months=-1, day=31) + other = shift_month(other, -1, 'end') if n <= 0: n = n + 1 - other = other + relativedelta(months=n, day=31) + other = shift_month(other, n, 'end') return other @apply_index_wraps @@ -956,7 +960,7 @@ def apply(self, other): if other.day > 1 and n <= 0: # then roll forward if n<=0 n += 1 - return other + relativedelta(months=n, day=1) + return shift_month(other, n, 'start') @apply_index_wraps def apply_index(self, i): @@ -1002,12 +1006,12 @@ def apply(self, other): if not self.onOffset(other): _, days_in_month = tslib.monthrange(other.year, other.month) if 1 < other.day < self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n > 0: # rollforward so subtract 1 n -= 1 elif self.day_of_month < other.day < days_in_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n < 0: # rollforward in the negative direction so add 1 n += 1 @@ -1084,11 +1088,11 @@ def onOffset(self, dt): def _apply(self, n, other): # if other.day is not day_of_month move to day_of_month and update n if other.day < self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n > 0: n -= 1 elif other.day > self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n == 0: n = 1 else: @@ -1096,7 +1100,7 @@ def _apply(self, n, other): months = n // 2 day = 31 if n % 2 else self.day_of_month - return other + relativedelta(months=months, day=day) + return 
shift_month(other, months, day) def _get_roll(self, i, before_day_of_month, after_day_of_month): n = self.n @@ -1141,13 +1145,13 @@ def onOffset(self, dt): def _apply(self, n, other): # if other.day is not day_of_month move to day_of_month and update n if other.day < self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n == 0: n = -1 else: n -= 1 elif other.day > self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n == 0: n = 1 elif n < 0: @@ -1155,7 +1159,7 @@ def _apply(self, n, other): months = n // 2 + n % 2 day = 1 if n % 2 else self.day_of_month - return other + relativedelta(months=months, day=day) + return shift_month(other, months, day) def _get_roll(self, i, before_day_of_month, after_day_of_month): n = self.n @@ -1191,7 +1195,7 @@ def apply(self, other): n = n - 1 elif n <= 0 and other.day > lastBDay: n = n + 1 - other = other + relativedelta(months=n, day=31) + other = shift_month(other, n, 'end') if other.weekday() > 4: other = other - BDay() @@ -1215,7 +1219,7 @@ def apply(self, other): other = other + timedelta(days=first - other.day) n -= 1 - other = other + relativedelta(months=n) + other = shift_month(other, n, None) wkday, _ = tslib.monthrange(other.year, other.month) first = _get_firstbday(wkday) result = datetime(other.year, other.month, first, @@ -1520,8 +1524,7 @@ def apply(self, other): else: months = self.n + 1 - other = self.getOffsetOfMonth( - other + relativedelta(months=months, day=1)) + other = self.getOffsetOfMonth(shift_month(other, months, 'start')) other = datetime(other.year, other.month, other.day, base.hour, base.minute, base.second, base.microsecond) return other @@ -1612,8 +1615,7 @@ def apply(self, other): else: months = self.n + 1 - return self.getOffsetOfMonth( - other + relativedelta(months=months, day=1)) + return self.getOffsetOfMonth(shift_month(other, months, 'start')) def 
getOffsetOfMonth(self, dt): m = MonthEnd() @@ -1716,7 +1718,7 @@ def apply(self, other): elif n <= 0 and other.day > lastBDay and monthsToGo == 0: n = n + 1 - other = other + relativedelta(months=monthsToGo + 3 * n, day=31) + other = shift_month(other, monthsToGo + 3 * n, 'end') other = tslib._localize_pydatetime(other, base.tzinfo) if other.weekday() > 4: other = other - BDay() @@ -1761,7 +1763,7 @@ def apply(self, other): n = n - 1 # get the first bday for result - other = other + relativedelta(months=3 * n - monthsSince) + other = shift_month(other, 3 * n - monthsSince, None) wkday, _ = tslib.monthrange(other.year, other.month) first = _get_firstbday(wkday) result = datetime(other.year, other.month, first, @@ -1795,7 +1797,7 @@ def apply(self, other): if n > 0 and not (other.day >= days_in_month and monthsToGo == 0): n = n - 1 - other = other + relativedelta(months=monthsToGo + 3 * n, day=31) + other = shift_month(other, monthsToGo + 3 * n, 'end') return other @apply_index_wraps @@ -1830,7 +1832,7 @@ def apply(self, other): # after start, so come back an extra period as if rolled forward n = n + 1 - other = other + relativedelta(months=3 * n - monthsSince, day=1) + other = shift_month(other, 3 * n - monthsSince, 'start') return other @apply_index_wraps @@ -1889,7 +1891,7 @@ def apply(self, other): (other.month == self.month and other.day > lastBDay)): years += 1 - other = other + relativedelta(years=years) + other = shift_month(other, 12 * years, None) _, days_in_month = tslib.monthrange(other.year, self.month) result = datetime(other.year, self.month, days_in_month, @@ -1927,7 +1929,7 @@ def apply(self, other): years += 1 # set first bday for result - other = other + relativedelta(years=years) + other = shift_month(other, years * 12, None) wkday, days_in_month = tslib.monthrange(other.year, self.month) first = _get_firstbday(wkday) return datetime(other.year, self.month, first, other.hour, @@ -2145,8 +2147,8 @@ def onOffset(self, dt): if self.variation == 
"nearest": # We have to check the year end of "this" cal year AND the previous - return year_end == dt or \ - self.get_year_end(dt - relativedelta(months=1)) == dt + return (year_end == dt or + self.get_year_end(shift_month(dt, -1, None)) == dt) else: return year_end == dt @@ -2226,8 +2228,8 @@ def get_year_end(self, dt): def get_target_month_end(self, dt): target_month = datetime( dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo) - next_month_first_of = target_month + relativedelta(months=+1) - return next_month_first_of + relativedelta(days=-1) + next_month_first_of = shift_month(target_month, 1, None) + return next_month_first_of + timedelta(days=-1) def _get_year_end_nearest(self, dt): target_date = self.get_target_month_end(dt) @@ -2382,7 +2384,7 @@ def apply(self, other): qtr_lens = self.get_weeks(other + self._offset) for weeks in qtr_lens: - start += relativedelta(weeks=weeks) + start += timedelta(weeks=weeks) if start > other: other = start n -= 1 @@ -2399,7 +2401,7 @@ def apply(self, other): qtr_lens = self.get_weeks(other) for weeks in reversed(qtr_lens): - end -= relativedelta(weeks=weeks) + end -= timedelta(weeks=weeks) if end < other: other = end n -= 1 @@ -2442,7 +2444,7 @@ def onOffset(self, dt): current = next_year_end for qtr_len in qtr_lens[0:4]: - current += relativedelta(weeks=qtr_len) + current += timedelta(weeks=qtr_len) if dt == current: return True return False
replace relativedelta usage in relevant cases. This should be orthogonal to other ongoing offsets PRs. Ran asv repeatedly overnight, posting results below.
https://api.github.com/repos/pandas-dev/pandas/pulls/18218
2017-11-10T16:47:14Z
2017-11-12T16:01:16Z
2017-11-12T16:01:16Z
2017-12-08T19:38:50Z
Pass kwargs from read_parquet() to the underlying engines.
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 185f08514641f..68ddce145403f 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -85,6 +85,7 @@ I/O - Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) - :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) +- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) Plotting ^^^^^^^^ diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index ef95e32cc241e..4a13d2c9db944 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -76,9 +76,10 @@ def write(self, df, path, compression='snappy', table, path, compression=compression, coerce_timestamps=coerce_timestamps, **kwargs) - def read(self, path, columns=None): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.parquet.read_table(path, columns=columns).to_pandas() + return self.api.parquet.read_table(path, columns=columns, + **kwargs).to_pandas() class FastParquetImpl(object): @@ -115,9 +116,9 @@ def write(self, df, path, compression='snappy', **kwargs): self.api.write(path, df, compression=compression, **kwargs) - def read(self, path, columns=None): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.ParquetFile(path).to_pandas(columns=columns) + return self.api.ParquetFile(path).to_pandas(columns=columns, **kwargs) def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): @@ -175,7 +176,7 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): if df.columns.inferred_type not in valid_types: raise ValueError("parquet must have string 
column names") - return impl.write(df, path, compression=compression) + return impl.write(df, path, compression=compression, **kwargs) def read_parquet(path, engine='auto', columns=None, **kwargs): @@ -205,4 +206,4 @@ def read_parquet(path, engine='auto', columns=None, **kwargs): """ impl = get_engine(engine) - return impl.read(path, columns=columns) + return impl.read(path, columns=columns, **kwargs) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 9a4edf38e2ef4..e7bcff22371b7 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -105,7 +105,7 @@ def test_options_py(df_compat, pa): with pd.option_context('io.parquet.engine', 'pyarrow'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -118,7 +118,7 @@ def test_options_fp(df_compat, fp): with pd.option_context('io.parquet.engine', 'fastparquet'): df.to_parquet(path, compression=None) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -130,7 +130,7 @@ def test_options_auto(df_compat, fp, pa): with pd.option_context('io.parquet.engine', 'auto'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -162,7 +162,7 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=pa, compression=None) - result = read_parquet(path, engine=fp, compression=None) + result = read_parquet(path, engine=fp) tm.assert_frame_equal(result, df) @@ -174,7 +174,7 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=fp, compression=None) - result = read_parquet(path, engine=pa, compression=None) + result = read_parquet(path, engine=pa) tm.assert_frame_equal(result, df) @@ -188,19 +188,23 @@ def check_error_on_write(self, df, 
engine, exc): with tm.ensure_clean() as path: to_parquet(df, path, engine, compression=None) - def check_round_trip(self, df, engine, expected=None, **kwargs): - + def check_round_trip(self, df, engine, expected=None, + write_kwargs=None, read_kwargs=None): + if write_kwargs is None: + write_kwargs = {} + if read_kwargs is None: + read_kwargs = {} with tm.ensure_clean() as path: - df.to_parquet(path, engine, **kwargs) - result = read_parquet(path, engine, **kwargs) + df.to_parquet(path, engine, **write_kwargs) + result = read_parquet(path, engine, **read_kwargs) if expected is None: expected = df tm.assert_frame_equal(result, expected) # repeat - to_parquet(df, path, engine, **kwargs) - result = pd.read_parquet(path, engine, **kwargs) + to_parquet(df, path, engine, **write_kwargs) + result = pd.read_parquet(path, engine, **read_kwargs) if expected is None: expected = df @@ -222,7 +226,7 @@ def test_columns_dtypes(self, engine): # unicode df.columns = [u'foo', u'bar'] - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) def test_columns_dtypes_invalid(self, engine): @@ -246,7 +250,7 @@ def test_columns_dtypes_invalid(self, engine): def test_write_with_index(self, engine): df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) # non-default index for index in [[2, 3, 4], @@ -280,7 +284,8 @@ def test_compression(self, engine, compression): pytest.importorskip('brotli') df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=compression) + self.check_round_trip(df, engine, + write_kwargs={'compression': compression}) def test_read_columns(self, engine): # GH18154 @@ -289,7 +294,8 @@ def test_read_columns(self, engine): expected = pd.DataFrame({'string': list('abc')}) self.check_round_trip(df, engine, expected=expected, - compression=None, columns=["string"]) + 
write_kwargs={'compression': None}, + read_kwargs={'columns': ['string']}) class TestParquetPyArrow(Base): @@ -377,7 +383,7 @@ def test_basic(self, fp): 'timedelta': pd.timedelta_range('1 day', periods=3), }) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) @pytest.mark.skip(reason="not supported") def test_duplicate_columns(self, fp): @@ -390,7 +396,8 @@ def test_duplicate_columns(self, fp): def test_bool_with_none(self, fp): df = pd.DataFrame({'a': [True, None, False]}) expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16') - self.check_round_trip(df, fp, expected=expected, compression=None) + self.check_round_trip(df, fp, expected=expected, + write_kwargs={'compression': None}) def test_unsupported(self, fp): @@ -406,7 +413,7 @@ def test_categorical(self, fp): if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"): pytest.skip("CategoricalDtype not supported for older fp") df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) def test_datetime_tz(self, fp): # doesn't preserve tz @@ -416,4 +423,13 @@ def test_datetime_tz(self, fp): # warns on the coercion with catch_warnings(record=True): self.check_round_trip(df, fp, df.astype('datetime64[ns]'), - compression=None) + write_kwargs={'compression': None}) + + def test_filter_row_groups(self, fp): + d = {'a': list(range(0, 3))} + df = pd.DataFrame(d) + with tm.ensure_clean() as path: + df.to_parquet(path, fp, compression=None, + row_group_offsets=1) + result = read_parquet(path, fp, filters=[('a', '==', 0)]) + assert len(result) == 1
This allows e.g. to specify filters for predicate pushdown to fastparquet. This is a followup to #18155/#18154 - [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18216
2017-11-10T15:48:43Z
2017-11-14T15:40:08Z
2017-11-14T15:40:07Z
2017-11-14T15:43:26Z
DOC/DEPR: ensure that @deprecated functions have correct docstring
diff --git a/pandas/core/series.py b/pandas/core/series.py index c9a72bb688270..9d56edd98d9f8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1361,13 +1361,13 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): # ndarray compat argmin = deprecate('argmin', idxmin, - msg="'argmin' is deprecated. Use 'idxmin' instead. " + msg="'argmin' is deprecated, use 'idxmin' instead. " "The behavior of 'argmin' will be corrected to " "return the positional minimum in the future. " "Use 'series.values.argmin' to get the position of " "the minimum now.") argmax = deprecate('argmax', idxmax, - msg="'argmax' is deprecated. Use 'idxmax' instead. " + msg="'argmax' is deprecated, use 'idxmax' instead. " "The behavior of 'argmax' will be corrected to " "return the positional maximum in the future. " "Use 'series.values.argmax' to get the position of " diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 7c9250e52d482..6be6152b09fc8 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -3,7 +3,7 @@ import inspect import types import warnings -from textwrap import dedent +from textwrap import dedent, wrap from functools import wraps, update_wrapper @@ -29,11 +29,16 @@ def deprecate(name, alternative, alt_name=None, klass=None, alt_name = alt_name or alternative.__name__ klass = klass or FutureWarning - msg = msg or "{} is deprecated. Use {} instead".format(name, alt_name) + msg = msg or "{} is deprecated, use {} instead".format(name, alt_name) + @wraps(alternative) def wrapper(*args, **kwargs): warnings.warn(msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) + + if getattr(wrapper, '__doc__', None) is not None: + wrapper.__doc__ = ('\n'.join(wrap(msg, 70)) + '\n' + + dedent(wrapper.__doc__)) return wrapper
Follow-up on https://github.com/pandas-dev/pandas/pull/16955, the Series.argmin/argmax now have a docstring. Currently: ``` In [1]: pd.Series.argmax? Signature: pd.Series.argmax(*args, **kwargs) Docstring: <no docstring> File: ~/scipy/pandas/pandas/util/_decorators.py Type: function ``` With this PR: ``` In [1]: pd.Series.argmax? Signature: pd.Series.argmax(self, axis=None, skipna=True, *args, **kwargs) Docstring: 'argmax' is deprecated, use 'idxmax' instead. The behavior of 'argmax' will be corrected to return the positional maximum in the future. Use 'series.values.argmax' to get the position of the maximum now. Index *label* of the first occurrence of maximum of values. Parameters ---------- skipna : boolean, default True Exclude NA/null values Returns ------- idxmax : Index of maximum of values Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. See Also -------- DataFrame.idxmax numpy.ndarray.argmax File: ~/scipy/pandas/pandas/core/series.py Type: function ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18215
2017-11-10T15:42:00Z
2017-11-14T09:13:56Z
2017-11-14T09:13:56Z
2017-11-14T09:38:30Z
_concat_rangeindex_... now returns an empty RangeIndex for empty ranges
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index dc4d819383dfb..23884869a4d9f 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -571,6 +571,7 @@ def _concat_rangeindex_same_dtype(indexes): indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) """ + from pandas import Int64Index, RangeIndex start = step = next = None @@ -587,14 +588,12 @@ def _concat_rangeindex_same_dtype(indexes): elif step is None: # First non-empty index had only one element if obj._start == start: - from pandas import Int64Index return _concat_index_same_dtype(indexes, klass=Int64Index) step = obj._start - start non_consecutive = ((step != obj._step and len(obj) > 1) or (next is not None and obj._start != next)) if non_consecutive: - from pandas import Int64Index return _concat_index_same_dtype(indexes, klass=Int64Index) if step is not None: @@ -604,12 +603,8 @@ def _concat_rangeindex_same_dtype(indexes): # Get the stop value from "next" or alternatively # from the last non-empty index stop = non_empty_indexes[-1]._stop if next is None else next - else: - # Here all "indexes" had 0 length, i.e. were empty. - # Simply take start, stop, and step from the last empty index. - obj = indexes[-1] - start = obj._start - step = obj._step - stop = obj._stop - - return indexes[0].__class__(start, stop, step) + return RangeIndex(start, stop, step) + + # Here all "indexes" had 0 length, i.e. were empty. + # In this case return an empty range index. 
+ return RangeIndex(0, 0) diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 9fe10885186de..7d88b547746f6 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -971,8 +971,8 @@ def test_append(self): ([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)), ([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)), ([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)), - ([RI(-4, -8), RI(-8, -12)], RI(-8, -12)), - ([RI(-4, -8), RI(3, -4)], RI(3, -8)), + ([RI(-4, -8), RI(-8, -12)], RI(0, 0)), + ([RI(-4, -8), RI(3, -4)], RI(0, 0)), ([RI(-4, -8), RI(3, 5)], RI(3, 5)), ([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])), ([RI(-2,), RI(3, 5)], RI(3, 5)),
This is a follow up to PR #18191. Overall the code is more readable by adding the constructors at the beginning and explicitly calling `RangeIndex` constructor. One range index test was changed to accommodate for the new behavior of the index concatenation function. - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] 1 test changed
https://api.github.com/repos/pandas-dev/pandas/pulls/18214
2017-11-10T14:32:19Z
2017-11-11T13:03:50Z
2017-11-11T13:03:50Z
2017-11-11T13:03:53Z
Add requirement for a 1-dimensional ndarray in the `pd.qcut` docstring
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index fda339aa30461..2adf17a227a59 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -148,7 +148,7 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): Parameters ---------- - x : ndarray or Series + x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
- [x] closes #18173 `pd.qcut` only accepts 1-dimensional numpy arrays, which is not mentioned in the docstring. This PR updates the docstring accordingly.
https://api.github.com/repos/pandas-dev/pandas/pulls/18211
2017-11-10T10:38:07Z
2017-11-10T13:54:56Z
2017-11-10T13:54:56Z
2017-12-11T20:24:33Z
Tslibs offsets validation
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 526595e3a2eda..26fea593cc703 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -251,13 +251,66 @@ def _validate_business_time(t_input): raise ValueError("time data must match '%H:%M' format") elif isinstance(t_input, dt_time): if t_input.second != 0 or t_input.microsecond != 0: - raise ValueError( - "time data must be specified only with hour and minute") + raise ValueError("time data must be specified only with " + "hour and minute") return t_input else: raise ValueError("time data must be string or datetime.time") +def _validate_n(n): + """ + Require that `n` be a nonzero integer. + + Parameters + ---------- + n : int + + Raises + ------ + ValueError + """ + if n == 0 or not isinstance(n, int): + raise ValueError("N cannot be 0") + + +def _validate_month(month): + """ + Require that `month` be an integer between 1 and 12 inclusive. + + Parameters + ---------- + month : int + + Raises + ------ + ValueError + """ + if not isinstance(month, int) or not 1 <= month <= 12: + raise ValueError("Month must go from 1 to 12") + + +def _validate_weekday(weekday, allow_none=False): + """ + Require that `weekday` be an integer between 0 and 6, inclusive, or that + None be explicitly allowed. 
+ + Parameters + ---------- + weekday : int (or None) + allow_none : bool, default False + + Raises + ------ + ValueError + """ + if allow_none and weekday is None: + pass + elif not isinstance(weekday, int) or not 0 <= weekday <= 6: + raise ValueError("weekday must be 0<=weekday<=6, got " + "{day}".format(day=weekday)) + + # --------------------------------------------------------------------- # Constructor Helpers diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 6821017c89c3a..8bf4dd92b1ece 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -2180,7 +2180,7 @@ def test_repr(self): def test_corner(self): pytest.raises(ValueError, Week, weekday=7) tm.assert_raises_regex( - ValueError, "Day must be", Week, weekday=-1) + ValueError, "weekday must be", Week, weekday=-1) def test_isAnchored(self): assert Week(weekday=0).isAnchored() @@ -2251,9 +2251,9 @@ def test_constructor(self): n=1, week=4, weekday=0) tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth, n=1, week=-1, weekday=0) - tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth, + tm.assert_raises_regex(ValueError, "^weekday", WeekOfMonth, n=1, week=0, weekday=-1) - tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth, + tm.assert_raises_regex(ValueError, "^weekday", WeekOfMonth, n=1, week=0, weekday=7) def test_repr(self): @@ -2335,10 +2335,10 @@ def test_constructor(self): tm.assert_raises_regex(ValueError, "^N cannot be 0", LastWeekOfMonth, n=0, weekday=1) - tm.assert_raises_regex(ValueError, "^Day", LastWeekOfMonth, n=1, + tm.assert_raises_regex(ValueError, "^weekday", LastWeekOfMonth, n=1, weekday=-1) tm.assert_raises_regex( - ValueError, "^Day", LastWeekOfMonth, n=1, weekday=7) + ValueError, "^weekday", LastWeekOfMonth, n=1, weekday=7) def test_offset(self): # Saturday diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 021d636042954..dea6f6427a410 100644 
--- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -22,7 +22,8 @@ from pandas._libs.tslibs.offsets import ( ApplyTypeError, as_datetime, _is_normalized, - _get_calendar, _to_dt64, _validate_business_time, + _get_calendar, _to_dt64, + _validate_business_time, _validate_n, _validate_month, _validate_weekday, _int_to_weekday, _weekday_to_int, _determine_offset, apply_index_wraps, @@ -1350,10 +1351,7 @@ def __init__(self, n=1, normalize=False, weekday=None): self.normalize = normalize self.weekday = weekday - if self.weekday is not None: - if self.weekday < 0 or self.weekday > 6: - raise ValueError('Day must be 0<=day<=6, got {day}' - .format(day=self.weekday)) + _validate_weekday(weekday, allow_none=True) self.kwds = {'weekday': weekday} @@ -1430,12 +1428,8 @@ def __init__(self, n=1, normalize=False, week=None, weekday=None): self.weekday = weekday self.week = week - if self.n == 0: - raise ValueError('N cannot be 0') - - if self.weekday < 0 or self.weekday > 6: - raise ValueError('Day must be 0<=day<=6, got {day}' - .format(day=self.weekday)) + _validate_n(n) + _validate_weekday(weekday) if self.week < 0 or self.week > 3: raise ValueError('Week must be 0<=week<=3, got {week}' .format(week=self.week)) @@ -1514,12 +1508,8 @@ def __init__(self, n=1, normalize=False, weekday=None): self.normalize = normalize self.weekday = weekday - if self.n == 0: - raise ValueError('N cannot be 0') - - if self.weekday < 0 or self.weekday > 6: - raise ValueError('Day must be 0<=day<=6, got {day}' - .format(day=self.weekday)) + _validate_n(n) + _validate_weekday(weekday) self.kwds = {'weekday': weekday} @@ -1717,8 +1707,7 @@ def __init__(self, n=1, normalize=False, month=None): month = month if month is not None else self._default_month self.month = month - if self.month < 1 or self.month > 12: - raise ValueError('Month must go from 1 to 12') + _validate_month(month) DateOffset.__init__(self, n=n, normalize=normalize, month=month) @@ -1834,8 +1823,7 @@ def __init__(self, 
n=1, normalize=False, weekday=0, startingMonth=1, self.kwds = {'weekday': weekday, 'startingMonth': startingMonth, 'variation': variation} - if self.n == 0: - raise ValueError('N cannot be 0') + _validate_n(n) if self.variation not in ["nearest", "last"]: raise ValueError('{variation} is not a valid variation' @@ -2086,8 +2074,7 @@ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, 'qtr_with_extra_week': qtr_with_extra_week, 'variation': variation} - if self.n == 0: - raise ValueError('N cannot be 0') + _validate_n(n) @cache_readonly def _offset(self):
Lots to do in offsets, trying to push breaking changes to the very end. This just implements validation for DateOffset args/kwargs, fixes a poorly-worded error message and the associated tm.assert_raises_regex tests. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18210
2017-11-10T04:42:29Z
2017-12-05T06:44:43Z
null
2017-12-08T19:40:10Z
DOC: clarify idxmax behaviour issue #18206
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 70f1ff0a5380d..2dd1292fd9da7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5781,7 +5781,12 @@ def idxmin(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be NA + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- @@ -5812,7 +5817,12 @@ def idxmax(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be first index. + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- diff --git a/pandas/core/series.py b/pandas/core/series.py index 1c92c4b8850ee..3b3f30c903afb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1306,7 +1306,13 @@ def idxmin(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. If the entire Series is NA, the result + will be NA. + + Raises + ------ + ValueError + * If the Series is empty Returns ------- @@ -1336,7 +1342,13 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. If the entire Series is NA, the result + will be NA. + + Raises + ------ + ValueError + * If the Series is empty Returns -------
- [ ] closes #18206 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18209
2017-11-10T01:30:48Z
2017-11-14T16:03:19Z
2017-11-14T16:03:19Z
2017-11-14T16:03:26Z
Handle unsortable Periods correctly in set_index, MultiIndex
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 27252b9616a44..a0cc196728e4b 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -431,11 +431,14 @@ def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False): def sort_mixed(values): # order ints before strings, safe in py3 + num_pos = np.array([isinstance(x, (float, int, long)) for x in values], + dtype=bool) str_pos = np.array([isinstance(x, string_types) for x in values], dtype=bool) - nums = np.sort(values[~str_pos]) + nums = np.sort(values[num_pos]) strs = np.sort(values[str_pos]) - return np.concatenate([nums, np.asarray(strs, dtype=object)]) + others = values[~(str_pos | num_pos)] # We don't bother sorting these + return np.concatenate([nums, np.asarray(strs, dtype=object), others]) sorter = None if PY3 and lib.infer_dtype(values) == 'mixed-integer': @@ -445,7 +448,9 @@ def sort_mixed(values): try: sorter = values.argsort() ordered = values.take(sorter) - except TypeError: + except (ValueError, TypeError): + # Period comparison may raise IncompatibleFrequency, which + # subclasses ValueError instead of TypeError # try this anyway ordered = sort_mixed(values) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index e5ee078d3558d..eb7f47736c96f 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -11,6 +11,36 @@ from ..datetimelike import DatetimeLike +class TestPeriodLevelMultiIndex(object): + # TODO: Is there a more appropriate place for these? 
+ def test_set_index(self): + # GH#17112 + index = Index(['PCE'] * 4, name='Variable') + data = [Period('2018Q2'), + Period('2021', freq='5A-Dec'), + Period('2026', freq='10A-Dec'), + Period('2017Q2')] + ser = Series(data, index=index, name='Period') + df = ser.to_frame() + + res = df.set_index('Period', append=True) + # If the doesn't raise then that's a good start + assert res.index.names == ['Variable', 'Period'] + + def test_from_arrays_period_level(self): + # GH#17112 + index = Index(['PCE'] * 4, name='Variable') + data = [Period('2018Q2'), + Period('2021', freq='5A-Dec'), + Period('2026', freq='10A-Dec'), + Period('2017Q2')] + ser = Series(data, index=index, name='Period') + + mi = pd.MultiIndex.from_arrays([ser.index, ser]) + assert mi.names == ['Variable', 'Period'] + assert mi.get_level_values('Variable').equals(index) + + class TestPeriodIndex(DatetimeLike): _holder = PeriodIndex _multiprocess_can_split_ = True
3rd of 3 to address bugs in #17112 `set_index` goes through MultiIndex.from_arrays, which calls `factorize_from_iterables`... which tries to sort the inputs. In cases like `Period`, sometimes the inputs can't be sorted. But for the purposes of `set_index`, we don't actually _care_ about the order. So we impose a reasonable fallback. New tests are likely not in the correct place. Pls advise.
https://api.github.com/repos/pandas-dev/pandas/pulls/18208
2017-11-10T00:42:48Z
2017-11-12T06:35:39Z
null
2020-04-05T17:40:10Z
ERR: raise if values passed to Categorical is a DataFrame
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 1e3c8f89c0e05..b55c02a166754 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -308,6 +308,9 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, elif isinstance(values, (ABCIndexClass, ABCSeries)): # we'll do inference later pass + elif getattr(values, 'ndim', 0) > 1: + raise NotImplementedError("> 1 ndim Categorical are not " + "supported at this time") else: @@ -2331,6 +2334,8 @@ def _factorize_from_iterable(values): categories=values.categories, ordered=values.ordered) codes = values.codes + elif getattr(values, 'ndim', 0) > 1: + raise NotImplementedError('Factorizing DataFrame is not supported.') else: cat = Categorical(values, ordered=True) categories = cat.categories diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index b77e2d1dcda8a..d6221cc48bc1e 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -173,6 +173,12 @@ def test_constructor_unsortable(self): pytest.raises( TypeError, lambda: Categorical(arr, ordered=True)) + def test_constructor_dataframe_error(self): + # GH#17112 + df = pd.DataFrame(np.random.randn(3, 2), columns=['A', 'B']) + with pytest.raises(NotImplementedError): + Categorical(df) + def test_constructor_interval(self): result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True)
2nd of 3 to address #17112. This raises instead of making the following mistake: ``` >>> df = pd.DataFrame(np.random.randn(3,2), columns=['A', 'B']) >>> pd.Categorical(df) [A, B] Categories (2, object): [A, B] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18207
2017-11-10T00:42:44Z
2017-11-12T18:56:24Z
null
2020-04-05T17:40:04Z
Catch SystemError in py3 Period.__richcmp__
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index 72523a19b9595..5c5c10684551d 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -693,7 +693,12 @@ cdef class _Period(object): def __richcmp__(self, other, op): if is_period_object(other): - if other.freq != self.freq: + try: + if other.freq != self.freq: + msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr) + raise IncompatibleFrequency(msg) + except SystemError: + # See GH#17112 in python3 msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr) raise IncompatibleFrequency(msg) return PyObject_RichCompareBool(self.ordinal, other.ordinal, op) diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py index 28d85c52604d9..2824d2aa266ff 100644 --- a/pandas/tests/scalar/test_period.py +++ b/pandas/tests/scalar/test_period.py @@ -1418,3 +1418,12 @@ def test_period_immutable(): freq = per.freq with pytest.raises(AttributeError): per.freq = 2 * freq + + +def test_comparison_catches_system_error(): + # see GH#17112 in py3 this comparison could raise SystemError instead + # of IncompatibleFrequency + per1 = pd.Period('2014Q1') + per2 = pd.Period('2015', freq='A') + with pytest.raises(period.IncompatibleFrequency): + per1 < per2
First of ~3 to fix bugs reported in #17112. This just catches a py3-specific error in `Period.__richcmp__` and raises the correct error instead. With a test that fails under the status quo.
https://api.github.com/repos/pandas-dev/pandas/pulls/18205
2017-11-10T00:30:35Z
2017-11-13T02:18:26Z
null
2020-04-05T17:40:10Z
DOC: clarfiy sum of empty Series case in 0.21.0 whatsnew
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6c3e7f847b485..a3289b1144863 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -52,7 +52,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. -- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. +- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. - Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). 
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 4c460eeb85b82..89e2d3006696c 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -12,7 +12,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. -- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. +- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. - Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). @@ -369,11 +369,11 @@ Additionally, support has been dropped for Python 3.4 (:issue:`15251`). .. 
_whatsnew_0210.api_breaking.bottleneck: -Sum/Prod of all-NaN Series/DataFrames is now consistently NaN -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Sum/Prod of all-NaN or empty Series/DataFrames is now consistently NaN +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames no longer depends on -whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed. (:issue:`9422`, :issue:`15507`). +whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and return value of ``sum`` and ``prod`` on an empty Series has changed (:issue:`9422`, :issue:`15507`). Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a ``DataFrame``, will result in ``NaN``. See the :ref:`docs <missing_data.numeric_sum>`. @@ -381,35 +381,35 @@ Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of s = Series([np.nan]) -Previously NO ``bottleneck`` +Previously WITHOUT ``bottleneck`` installed: .. code-block:: ipython In [2]: s.sum() Out[2]: np.nan -Previously WITH ``bottleneck`` +Previously WITH ``bottleneck``: .. code-block:: ipython In [2]: s.sum() Out[2]: 0.0 -New Behavior, without regard to the bottleneck installation. +New Behavior, without regard to the bottleneck installation: .. ipython:: python s.sum() -Note that this also changes the sum of an empty ``Series`` - -Previously regardless of ``bottlenck`` +Note that this also changes the sum of an empty ``Series``. Previously this always returned 0 regardless of a ``bottlenck`` installation: .. code-block:: ipython In [1]: pd.Series([]).sum() Out[1]: 0 +but for consistency with the all-NaN case, this was changed to return NaN as well: + .. ipython:: python pd.Series([]).sum() @@ -877,6 +877,28 @@ New Behavior: pd.interval_range(start=0, end=4) +.. 
_whatsnew_0210.api.mpl_converters: + +No Automatic Matplotlib Converters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas no longer registers our ``date``, ``time``, ``datetime``, +``datetime64``, and ``Period`` converters with matplotlib when pandas is +imported. Matplotlib plot methods (``plt.plot``, ``ax.plot``, ...), will not +nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You +must explicitly register these methods: + +.. ipython:: python + + from pandas.tseries import converter + converter.register() + + fig, ax = plt.subplots() + plt.plot(pd.date_range('2017', periods=6), range(6)) + +Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these +converters on first-use (:issue:17710). + .. _whatsnew_0210.api: Other API Changes @@ -900,8 +922,6 @@ Other API Changes - Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`) - Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`) - Restricted DateOffset keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`). -- Pandas no longer registers matplotlib converters on import. The converters - will be registered and used when the first plot is draw (:issue:`17710`) .. _whatsnew_0210.deprecations:
Attempt to better highlight the change of empty Series, instead of mainly focussing now on all-NaN (given the recent issues that were raised were often about the empty case)
https://api.github.com/repos/pandas-dev/pandas/pulls/18204
2017-11-09T23:16:46Z
2017-11-11T21:41:12Z
2017-11-11T21:41:12Z
2017-11-11T21:41:16Z
Added repr string for Grouper and TimeGrouper
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 4c6cdb9846305..4389dbcff280d 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -22,7 +22,7 @@ Other Enhancements ^^^^^^^^^^^^^^^^^^ - :meth:`Timestamp.timestamp` is now available in Python 2.7. (:issue:`17329`) -- +- :class:`Grouper` and :class:`TimeGrouper` now have a friendly repr output (:issue:`18203`). - .. _whatsnew_0211.deprecations: diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 78b8ca8d5a480..f7552a6da7eb8 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -100,7 +100,7 @@ Removal of prior version deprecations/changes - The ``levels`` and ``labels`` attributes of a ``MultiIndex`` can no longer be set directly (:issue:`4039`). - ``pd.tseries.util.pivot_annual`` has been removed (deprecated since v0.19). Use ``pivot_table`` instead (:issue:`18370`) - ``pd.tseries.util.isleapyear`` has been removed (deprecated since v0.19). Use ``.is_leap_year`` property in Datetime-likes instead (:issue:`18370`) -- ``pd.ordered_merge`` has been removed (deprecated since v0.19). Use ``pd..merge_ordered`` instead (:issue:`18459`) +- ``pd.ordered_merge`` has been removed (deprecated since v0.19). Use ``pd.merge_ordered`` instead (:issue:`18459`) .. 
_whatsnew_0220.performance: diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index ba180cc98cb08..5327f49b9a76b 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -206,12 +206,13 @@ class Grouper(object): sort : boolean, default to False whether to sort the resulting labels - additional kwargs to control time-like groupers (when freq is passed) + additional kwargs to control time-like groupers (when ``freq`` is passed) - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If grouper is PeriodIndex + base, loffset Returns ------- @@ -233,6 +234,7 @@ class Grouper(object): >>> df.groupby(Grouper(level='date', freq='60s', axis=1)) """ + _attributes = ('key', 'level', 'freq', 'axis', 'sort') def __new__(cls, *args, **kwargs): if kwargs.get('freq') is not None: @@ -333,6 +335,14 @@ def _set_grouper(self, obj, sort=False): def groups(self): return self.grouper.groups + def __repr__(self): + attrs_list = ["{}={!r}".format(attr_name, getattr(self, attr_name)) + for attr_name in self._attributes + if getattr(self, attr_name) is not None] + attrs = ", ".join(attrs_list) + cls_name = self.__class__.__name__ + return "{}({})".format(cls_name, attrs) + class GroupByPlot(PandasObject): """ diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 6988528af415f..bd441a8248841 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1014,22 +1014,18 @@ class TimeGrouper(Grouper): Parameters ---------- freq : pandas date offset or offset alias for identifying bin edges - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right - nperiods : optional, integer + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 
'left' or 'right' convention : {'start', 'end', 'e', 's'} If axis is PeriodIndex - - Notes - ----- - Use begin, end, nperiods to generate intervals that cannot be derived - directly from the associated object """ + _attributes = Grouper._attributes + ('closed', 'label', 'how', + 'loffset', 'kind', 'convention', + 'base') def __init__(self, freq='Min', closed=None, label=None, how='mean', - nperiods=None, axis=0, - fill_method=None, limit=None, loffset=None, kind=None, - convention=None, base=0, **kwargs): + axis=0, fill_method=None, limit=None, loffset=None, + kind=None, convention=None, base=0, **kwargs): freq = to_offset(freq) end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) @@ -1048,7 +1044,6 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', self.closed = closed self.label = label - self.nperiods = nperiods self.kind = kind self.convention = convention or 'E' diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 81153e83471cd..3436dd9169081 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -25,6 +25,15 @@ from .common import MixIn +class TestGrouper(object): + + def test_repr(self): + # GH18203 + result = repr(pd.Grouper(key='A', level='B')) + expected = "Grouper(key='A', level='B', axis=0, sort=False)" + assert result == expected + + class TestGroupBy(MixIn): def test_basic(self): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index c9e40074c06ad..bf1cac3112c46 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -3416,3 +3416,11 @@ def test_aggregate_with_nat(self): # if NaT is included, 'var', 'std', 'mean', 'first','last' # and 'nth' doesn't work yet + + def test_repr(self): + # GH18203 + result = repr(TimeGrouper(key='A', freq='H')) + expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, " + "closed='left', label='left', how='mean', " + "convention='e', base=0)") + assert 
result == expected
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is a new implementation of #17727. This is much simpler than the previous attempts. The outputs are now: ```python >>> pd.Grouper(key='A') Grouper(key='A') >>> pd.Grouper(key='A', freq='2T') TimeGrouper(key='A', freq=<2 * Minutes>, sort=True, closed='left', label='left', convention='e') ``` Tests still need to be written. I can do them when/if this is ok.
https://api.github.com/repos/pandas-dev/pandas/pulls/18203
2017-11-09T22:29:21Z
2017-11-27T11:28:14Z
2017-11-27T11:28:14Z
2017-12-11T20:21:32Z
DOC: document subclasses in API docs with selection of specific methods/attributes
diff --git a/doc/source/api.rst b/doc/source/api.rst index ce88aed91823c..f3405fcdee608 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -281,6 +281,15 @@ Attributes Series.base Series.T Series.memory_usage + Series.hasnans + Series.flags + Series.empty + Series.dtypes + Series.ftypes + Series.data + Series.is_copy + Series.name + Series.put Conversion ~~~~~~~~~~ @@ -289,9 +298,14 @@ Conversion Series.astype Series.infer_objects + Series.convert_objects Series.copy - Series.isna - Series.notna + Series.bool + Series.to_period + Series.to_timestamp + Series.tolist + Series.get_values + Indexing, iteration ~~~~~~~~~~~~~~~~~~~ @@ -305,6 +319,11 @@ Indexing, iteration Series.iloc Series.__iter__ Series.iteritems + Series.items + Series.keys + Series.pop + Series.item + Series.xs For more information on ``.at``, ``.iat``, ``.loc``, and ``.iloc``, see the :ref:`indexing documentation <indexing>`. @@ -339,6 +358,8 @@ Binary operator functions Series.ge Series.ne Series.eq + Series.product + Series.dot Function application, GroupBy & Window ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -346,6 +367,7 @@ Function application, GroupBy & Window :toctree: generated/ Series.apply + Series.agg Series.aggregate Series.transform Series.map @@ -353,6 +375,7 @@ Function application, GroupBy & Window Series.rolling Series.expanding Series.ewm + Series.pipe .. 
_api.series.stats: @@ -397,6 +420,7 @@ Computations / Descriptive Stats Series.std Series.sum Series.var + Series.kurtosis Series.unique Series.nunique Series.is_unique @@ -404,6 +428,10 @@ Computations / Descriptive Stats Series.is_monotonic_increasing Series.is_monotonic_decreasing Series.value_counts + Series.compound + Series.nonzero + Series.ptp + Reindexing / Selection / Label manipulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -434,12 +462,17 @@ Reindexing / Selection / Label manipulation Series.truncate Series.where Series.mask + Series.add_prefix + Series.add_suffix + Series.filter Missing data handling ~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: :toctree: generated/ + Series.isna + Series.notna Series.dropna Series.fillna Series.interpolate @@ -450,12 +483,20 @@ Reshaping, sorting :toctree: generated/ Series.argsort + Series.argmin + Series.argmax Series.reorder_levels Series.sort_values Series.sort_index Series.swaplevel Series.unstack Series.searchsorted + Series.ravel + Series.repeat + Series.squeeze + Series.view + Series.sortlevel + Combining / joining / merging ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -479,6 +520,10 @@ Time series-related Series.resample Series.tz_convert Series.tz_localize + Series.at_time + Series.between_time + Series.tshift + Series.slice_shift Datetimelike Properties ~~~~~~~~~~~~~~~~~~~~~~~ @@ -635,17 +680,15 @@ strings and apply several methods to it. These can be accessed like Series.cat Series.dt Index.str - MultiIndex.str - DatetimeIndex.str - TimedeltaIndex.str - .. _api.categorical: Categorical ~~~~~~~~~~~ -The dtype of a ``Categorical`` can be described by a :class:`pandas.api.types.CategoricalDtype`. +Pandas defines a custom data type for representing data that can take only a +limited, fixed set of values. The dtype of a ``Categorical`` can be described by +a :class:`pandas.api.types.CategoricalDtype`. .. 
autosummary:: :toctree: generated/ @@ -653,46 +696,38 @@ The dtype of a ``Categorical`` can be described by a :class:`pandas.api.types.Ca api.types.CategoricalDtype -If the Series is of dtype ``CategoricalDtype``, ``Series.cat`` can be used to change the categorical -data. This accessor is similar to the ``Series.dt`` or ``Series.str`` and has the -following usable methods and properties: - .. autosummary:: :toctree: generated/ - :template: autosummary/accessor_attribute.rst - Series.cat.categories - Series.cat.ordered - Series.cat.codes + api.types.CategoricalDtype.categories + api.types.CategoricalDtype.ordered + +Categorical data can be stored in a :class:`pandas.Categorical` .. autosummary:: :toctree: generated/ - :template: autosummary/accessor_method.rst + :template: autosummary/class_without_autosummary.rst - Series.cat.rename_categories - Series.cat.reorder_categories - Series.cat.add_categories - Series.cat.remove_categories - Series.cat.remove_unused_categories - Series.cat.set_categories - Series.cat.as_ordered - Series.cat.as_unordered + Categorical -To create a Series of dtype ``category``, use ``cat = s.astype("category")``. -The following two ``Categorical`` constructors are considered API but should only be used when -adding ordering information or special categories is need at creation time of the categorical data: +The alternative :meth:`Categorical.from_codes` constructor can be used when you +have the categories and integer codes already: .. autosummary:: :toctree: generated/ - :template: autosummary/class_without_autosummary.rst - Categorical + Categorical.from_codes + +The dtype information is available on the ``Categorical`` .. autosummary:: :toctree: generated/ - Categorical.from_codes + Categorical.dtype + Categorical.categories + Categorical.ordered + Categorical.codes ``np.asarray(categorical)`` works by implementing the array interface. 
Be aware, that this converts the Categorical back to a numpy array, so categories and order information is not preserved! @@ -702,6 +737,38 @@ the Categorical back to a numpy array, so categories and order information is no Categorical.__array__ +A ``Categorical`` can be stored in a ``Series`` or ``DataFrame``. +To create a Series of dtype ``category``, use ``cat = s.astype(dtype)`` or +``Series(..., dtype=dtype)`` where ``dtype`` is either + +* the string ``'category'`` +* an instance of :class:`~pandas.api.types.CategoricalDtype`. + +If the Series is of dtype ``CategoricalDtype``, ``Series.cat`` can be used to change the categorical +data. This accessor is similar to the ``Series.dt`` or ``Series.str`` and has the +following usable methods and properties: + +.. autosummary:: + :toctree: generated/ + :template: autosummary/accessor_attribute.rst + + Series.cat.categories + Series.cat.ordered + Series.cat.codes + +.. autosummary:: + :toctree: generated/ + :template: autosummary/accessor_method.rst + + Series.cat.rename_categories + Series.cat.reorder_categories + Series.cat.add_categories + Series.cat.remove_categories + Series.cat.remove_unused_categories + Series.cat.set_categories + Series.cat.as_ordered + Series.cat.as_unordered + Plotting ~~~~~~~~ @@ -738,7 +805,6 @@ Serialization / IO / Conversion .. 
autosummary:: :toctree: generated/ - Series.from_csv Series.to_pickle Series.to_csv Series.to_dict @@ -792,11 +858,14 @@ Attributes and underlying data DataFrame.get_ftype_counts DataFrame.select_dtypes DataFrame.values + DataFrame.get_values DataFrame.axes DataFrame.ndim DataFrame.size DataFrame.shape DataFrame.memory_usage + DataFrame.empty + DataFrame.is_copy Conversion ~~~~~~~~~~ @@ -809,6 +878,7 @@ Conversion DataFrame.copy DataFrame.isna DataFrame.notna + DataFrame.bool Indexing, iteration ~~~~~~~~~~~~~~~~~~~ @@ -821,7 +891,10 @@ Indexing, iteration DataFrame.loc DataFrame.iloc DataFrame.insert + DataFrame.insert DataFrame.__iter__ + DataFrame.items + DataFrame.keys DataFrame.iteritems DataFrame.iterrows DataFrame.itertuples @@ -829,6 +902,7 @@ Indexing, iteration DataFrame.pop DataFrame.tail DataFrame.xs + DataFrame.get DataFrame.isin DataFrame.where DataFrame.mask @@ -851,6 +925,7 @@ Binary operator functions DataFrame.floordiv DataFrame.mod DataFrame.pow + DataFrame.dot DataFrame.radd DataFrame.rsub DataFrame.rmul @@ -875,6 +950,8 @@ Function application, GroupBy & Window DataFrame.apply DataFrame.applymap + DataFrame.pipe + DataFrame.agg DataFrame.aggregate DataFrame.transform DataFrame.groupby @@ -895,6 +972,7 @@ Computations / Descriptive Stats DataFrame.clip DataFrame.clip_lower DataFrame.clip_upper + DataFrame.compound DataFrame.corr DataFrame.corrwith DataFrame.count @@ -907,6 +985,7 @@ Computations / Descriptive Stats DataFrame.diff DataFrame.eval DataFrame.kurt + DataFrame.kurtosis DataFrame.mad DataFrame.max DataFrame.mean @@ -915,6 +994,7 @@ Computations / Descriptive Stats DataFrame.mode DataFrame.pct_change DataFrame.prod + DataFrame.product DataFrame.quantile DataFrame.rank DataFrame.round @@ -923,6 +1003,7 @@ Computations / Descriptive Stats DataFrame.sum DataFrame.std DataFrame.var + DataFrame.nunique Reindexing / Selection / Label manipulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -932,6 +1013,8 @@ Reindexing / Selection / Label 
manipulation DataFrame.add_prefix DataFrame.add_suffix DataFrame.align + DataFrame.at_time + DataFrame.between_time DataFrame.drop DataFrame.drop_duplicates DataFrame.duplicated @@ -950,6 +1033,7 @@ Reindexing / Selection / Label manipulation DataFrame.reset_index DataFrame.sample DataFrame.select + DataFrame.set_axis DataFrame.set_index DataFrame.tail DataFrame.take @@ -965,6 +1049,7 @@ Missing data handling DataFrame.dropna DataFrame.fillna DataFrame.replace + DataFrame.interpolate Reshaping, sorting, transposing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -972,6 +1057,7 @@ Reshaping, sorting, transposing :toctree: generated/ DataFrame.pivot + DataFrame.pivot_table DataFrame.reorder_levels DataFrame.sort_values DataFrame.sort_index @@ -980,10 +1066,12 @@ Reshaping, sorting, transposing DataFrame.swaplevel DataFrame.stack DataFrame.unstack + DataFrame.swapaxes DataFrame.melt - DataFrame.T + DataFrame.squeeze DataFrame.to_panel DataFrame.to_xarray + DataFrame.T DataFrame.transpose Combining / joining / merging @@ -1005,6 +1093,8 @@ Time series-related DataFrame.asfreq DataFrame.asof DataFrame.shift + DataFrame.slice_shift + DataFrame.tshift DataFrame.first_valid_index DataFrame.last_valid_index DataFrame.resample @@ -1059,6 +1149,7 @@ Serialization / IO / Conversion DataFrame.from_items DataFrame.from_records DataFrame.info + DataFrame.to_parquet DataFrame.to_pickle DataFrame.to_csv DataFrame.to_hdf @@ -1077,6 +1168,7 @@ Serialization / IO / Conversion DataFrame.to_dense DataFrame.to_string DataFrame.to_clipboard + DataFrame.style Sparse ~~~~~~ @@ -1219,6 +1311,7 @@ Computations / Descriptive Stats Panel.std Panel.var + Reindexing / Selection / Label manipulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: @@ -1240,13 +1333,13 @@ Reindexing / Selection / Label manipulation Panel.take Panel.truncate + Missing data handling ~~~~~~~~~~~~~~~~~~~~~ .. 
autosummary:: :toctree: generated/ Panel.dropna - Panel.fillna Reshaping, sorting, transposing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1289,7 +1382,6 @@ Serialization / IO / Conversion Panel.to_hdf Panel.to_sparse Panel.to_frame - Panel.to_xarray Panel.to_clipboard .. _api.index: @@ -1318,10 +1410,14 @@ Attributes Index.is_monotonic_decreasing Index.is_unique Index.has_duplicates + Index.hasnans Index.dtype + Index.dtype_str Index.inferred_type Index.is_all_dates Index.shape + Index.name + Index.names Index.nbytes Index.ndim Index.size @@ -1350,9 +1446,20 @@ Modifying and Computations Index.factorize Index.identical Index.insert + Index.is_ + Index.is_boolean + Index.is_categorical + Index.is_floating + Index.is_integer + Index.is_interval + Index.is_lexsorted_for_tuple + Index.is_mixed + Index.is_numeric + Index.is_object Index.min Index.max Index.reindex + Index.rename Index.repeat Index.where Index.take @@ -1378,10 +1485,15 @@ Conversion :toctree: generated/ Index.astype + Index.item + Index.map + Index.ravel Index.tolist Index.to_datetime + Index.to_native_types Index.to_series Index.to_frame + Index.view Sorting ~~~~~~~ @@ -1389,6 +1501,7 @@ Sorting :toctree: generated/ Index.argsort + Index.searchsorted Index.sort_values Time-specific operations @@ -1415,11 +1528,19 @@ Selecting .. autosummary:: :toctree: generated/ + Index.asof + Index.asof_locs + Index.contains + Index.get_duplicates Index.get_indexer + Index.get_indexer_for Index.get_indexer_non_unique Index.get_level_values Index.get_loc + Index.get_slice_bound Index.get_value + Index.get_values + Index.set_value Index.isin Index.slice_indexer Index.slice_locs @@ -1438,6 +1559,15 @@ Numeric Index UInt64Index Float64Index +.. We need this autosummary so that the methods are generated. +.. Separate block, since they aren't classes. + +.. autosummary:: + :toctree: generated/ + + RangeIndex.from_range + + .. 
_api.categoricalindex: CategoricalIndex @@ -1466,6 +1596,7 @@ Categorical Components CategoricalIndex.set_categories CategoricalIndex.as_ordered CategoricalIndex.as_unordered + CategoricalIndex.map .. _api.intervalindex: @@ -1488,6 +1619,14 @@ IntervalIndex Components IntervalIndex.from_tuples IntervalIndex.from_breaks IntervalIndex.from_intervals + IntervalIndex.contains + IntervalIndex.left + IntervalIndex.right + IntervalIndex.mid + IntervalIndex.closed + IntervalIndex.values + IntervalIndex.is_non_overlapping_monotonic + .. _api.multiindex: @@ -1496,12 +1635,17 @@ MultiIndex .. autosummary:: :toctree: generated/ + :template: autosummary/class_without_autosummary.rst MultiIndex + +.. autosummary:: + :toctree: generated/ + IndexSlice -MultiIndex Components -~~~~~~~~~~~~~~~~~~~~~~ +MultiIndex Constructors +~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: :toctree: generated/ @@ -1509,11 +1653,31 @@ MultiIndex Components MultiIndex.from_arrays MultiIndex.from_tuples MultiIndex.from_product + +MultiIndex Attributes +~~~~~~~~~~~~~~~~~~~~~ + +.. autosummary:: + :toctree: generated/ + + MultiIndex.names + MultiIndex.levels + MultiIndex.labels + MultiIndex.nlevels + MultiIndex.levshape + +MultiIndex Components +~~~~~~~~~~~~~~~~~~~~~ + +.. autosummary:: + :toctree: generated/ + MultiIndex.set_levels MultiIndex.set_labels MultiIndex.to_hierarchical MultiIndex.to_frame MultiIndex.is_lexsorted + MultiIndex.sortlevel MultiIndex.droplevel MultiIndex.swaplevel MultiIndex.reorder_levels @@ -1526,6 +1690,7 @@ DatetimeIndex .. autosummary:: :toctree: generated/ + :template: autosummary/class_without_autosummary.rst DatetimeIndex @@ -1604,6 +1769,7 @@ TimedeltaIndex .. 
autosummary:: :toctree: generated/ + :template: autosummary/class_without_autosummary.rst TimedeltaIndex @@ -1745,6 +1911,7 @@ Properties Timestamp.dayofyear Timestamp.days_in_month Timestamp.daysinmonth + Timestamp.fold Timestamp.hour Timestamp.is_leap_year Timestamp.is_month_end @@ -1756,6 +1923,7 @@ Properties Timestamp.max Timestamp.microsecond Timestamp.min + Timestamp.minute Timestamp.month Timestamp.nanosecond Timestamp.quarter @@ -1764,6 +1932,7 @@ Properties Timestamp.tz Timestamp.tzinfo Timestamp.value + Timestamp.week Timestamp.weekday_name Timestamp.weekofyear Timestamp.year @@ -1848,7 +2017,9 @@ Properties Timedelta.asm8 Timedelta.components Timedelta.days + Timedelta.delta Timedelta.freq + Timedelta.is_populated Timedelta.max Timedelta.microseconds Timedelta.min @@ -1856,6 +2027,7 @@ Properties Timedelta.resolution Timedelta.seconds Timedelta.value + Timedelta.view Methods ~~~~~~~ @@ -1877,41 +2049,13 @@ Frequencies .. currentmodule:: pandas.tseries.frequencies - -.. autosummary:: - :toctree: generated/ - - to_offset - .. _api.offsets: -Offsets -------- - -.. currentmodule:: pandas.tseries.offsets - .. autosummary:: :toctree: generated/ - DateOffset - Week - Day - Hour - Minute - Second - Milli - Micro - Nano - -.. autosummary:: - :toctree: generated/ + to_offset - MonthBegin - MonthEnd - QuarterBegin - QuarterEnd - YearBegin - YearEnd Window ------ @@ -2006,6 +2150,7 @@ Indexing, iteration .. autosummary:: :toctree: generated/ + :template: autosummary/class_without_autosummary.rst Grouper @@ -2172,13 +2317,23 @@ Style ``Styler`` objects are returned by :attr:`pandas.DataFrame.style`. - -Constructor -~~~~~~~~~~~ +Styler Constructor +~~~~~~~~~~~~~~~~~~ .. autosummary:: :toctree: generated/ Styler + Styler.from_custom_template + + +Styler Attributes +~~~~~~~~~~~~~~~~~ +.. 
autosummary:: + :toctree: generated/ + + Styler.env + Styler.template + Styler.loader Style Application ~~~~~~~~~~~~~~~~~ @@ -2191,6 +2346,7 @@ Style Application Styler.format Styler.set_precision Styler.set_table_styles + Styler.set_table_attributes Styler.set_caption Styler.set_properties Styler.set_uuid @@ -2217,6 +2373,7 @@ Style Export and Import Styler.render Styler.export Styler.use + Styler.to_excel .. currentmodule:: pandas @@ -2328,3 +2485,35 @@ Scalar introspection api.types.is_re api.types.is_re_compilable api.types.is_scalar + + +.. This is to prevent warnings in the doc build. We don't want to encourage +.. these methods. + +.. toctree:: + :hidden: + + generated/pandas.DataFrame.blocks + generated/pandas.DataFrame.columns + generated/pandas.DataFrame.index + generated/pandas.DataFrame.ix + generated/pandas.Index.asi8 + generated/pandas.Index.data + generated/pandas.Index.flags + generated/pandas.Index.nlevels + generated/pandas.Index.sort + generated/pandas.Panel.agg + generated/pandas.Panel.aggregate + generated/pandas.Panel.blocks + generated/pandas.Panel.empty + generated/pandas.Panel.is_copy + generated/pandas.Panel.items + generated/pandas.Panel.ix + generated/pandas.Panel.major_axis + generated/pandas.Panel.minor_axis + generated/pandas.Series.asobject + generated/pandas.Series.blocks + generated/pandas.Series.from_array + generated/pandas.Series.index + generated/pandas.Series.ix + generated/pandas.Timestamp.offset diff --git a/doc/source/conf.py b/doc/source/conf.py index 6d85f64317e7c..3ac41b0e7e0c9 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -55,7 +55,7 @@ 'sphinx.ext.doctest', 'sphinx.ext.extlinks', 'sphinx.ext.todo', - 'numpydoc', # used to parse numpy-style docstrings for autodoc + 'numpydoc', 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting', 'sphinx.ext.intersphinx', diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 40189f0e45518..0e5d701353d78 100644 --- 
a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -317,6 +317,27 @@ Some other important things to know about the docs: doc build. This approach means that code examples will always be up to date, but it does make the doc building a bit more complex. +- Our API documentation in ``doc/source/api.rst`` houses the auto-generated + documentation from the docstrings. For classes, there are a few subtleties + around controlling which methods and attributes have pages auto-generated. + + We have two autosummary templates for classes. + + 1. ``_templates/autosummary/class.rst``. Use this when you want to + automatically generate a page for every public method and attribute on the + class. The ``Attributes`` and ``Methods`` sections will be automatically + added to the class' rendered documentation by numpydoc. See ``DataFrame`` + for an example. + + 2. ``_templates/autosummary/class_without_autosummary``. Use this when you + want to pick a subset of methods / attributes to auto-generate pages for. + When using this template, you should include an ``Attributes`` and + ``Methods`` section in the class docstring. See ``CategoricalIndex`` for an + example. + + Every method should be included in a ``toctree`` in ``api.rst``, else Sphinx + will emit a warning. + .. 
note:: The ``.rst`` files are used to automatically generate Markdown and HTML versions diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py index 09e31f9efd217..2bc2d1e91ed3f 100755 --- a/doc/sphinxext/numpydoc/numpydoc.py +++ b/doc/sphinxext/numpydoc/numpydoc.py @@ -42,13 +42,6 @@ def mangle_docstrings(app, what, name, obj, options, lines, class_members_toctree=app.config.numpydoc_class_members_toctree, ) - # PANDAS HACK (to remove the list of methods/attributes for Categorical) - no_autosummary = [".Categorical", "CategoricalIndex", "IntervalIndex", - "RangeIndex", "Int64Index", "UInt64Index", - "Float64Index", "PeriodIndex", "CategoricalDtype"] - if what == "class" and any(name.endswith(n) for n in no_autosummary): - cfg['class_members_list'] = False - if what == 'module': # Strip top title title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 540a081bdda2e..cd1440a90fcec 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -282,6 +282,8 @@ class Timestamp(_Timestamp): @classmethod def fromordinal(cls, ordinal, freq=None, tz=None, offset=None): """ + Timestamp.fromordinal(ordinal, freq=None, tz=None, offset=None) + passed an ordinal, translate and convert to a ts note: by definition there cannot be any tz info on the ordinal itself @@ -302,8 +304,10 @@ class Timestamp(_Timestamp): @classmethod def now(cls, tz=None): """ - Return the current time in the local timezone. Equivalent - to datetime.now([tz]) + Timestamp.now(tz=None) + + Returns new Timestamp object representing current time local to + tz. Parameters ---------- @@ -317,6 +321,8 @@ class Timestamp(_Timestamp): @classmethod def today(cls, tz=None): """ + Timestamp.today(cls, tz=None) + Return the current time in the local timezone. This differs from datetime.today() in that it can be localized to a passed timezone. 
@@ -330,18 +336,38 @@ class Timestamp(_Timestamp): @classmethod def utcnow(cls): + """ + Timestamp.utcnow() + + Return a new Timestamp representing UTC day and time. + """ return cls.now('UTC') @classmethod def utcfromtimestamp(cls, ts): + """ + Timestamp.utcfromtimestamp(ts) + + Construct a naive UTC datetime from a POSIX timestamp. + """ return cls(datetime.utcfromtimestamp(ts)) @classmethod def fromtimestamp(cls, ts): + """ + Timestamp.fromtimestamp(ts) + + timestamp[, tz] -> tz's local time from POSIX timestamp. + """ return cls(datetime.fromtimestamp(ts)) @classmethod def combine(cls, date, time): + """ + Timsetamp.combine(date, time) + + date, time -> datetime with same date and time fields + """ return cls(datetime.combine(date, time)) def __new__(cls, object ts_input=_no_input, diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index a5861f5865a39..d9234bc3779a8 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -336,16 +336,39 @@ class NaTType(_NaT): tzname = _make_error_func('tzname', datetime) utcoffset = _make_error_func('utcoffset', datetime) - # Timestamp has empty docstring for some methods. - utcfromtimestamp = _make_error_func('utcfromtimestamp', None) - fromtimestamp = _make_error_func('fromtimestamp', None) - combine = _make_error_func('combine', None) - utcnow = _make_error_func('utcnow', None) - # ---------------------------------------------------------------------- # The remaining methods have docstrings copy/pasted from the analogous # Timestamp methods. + utcfromtimestamp = _make_error_func('utcfromtimestamp', # noqa:E128 + """ + Timestamp.utcfromtimestamp(ts) + + Construct a naive UTC datetime from a POSIX timestamp. + """ + ) + fromtimestamp = _make_error_func('fromtimestamp', # noqa:E128 + """ + Timestamp.fromtimestamp(ts) + + timestamp[, tz] -> tz's local time from POSIX timestamp. 
+ """ + ) + combine = _make_error_func('combine', # noqa:E128 + """ + Timsetamp.combine(date, time) + + date, time -> datetime with same date and time fields + """ + ) + utcnow = _make_error_func('utcnow', # noqa:E128 + """ + Timestamp.utcnow() + + Return a new Timestamp representing UTC day and time. + """ + ) + timestamp = _make_error_func('timestamp', # noqa:E128 """Return POSIX timestamp as float.""") @@ -372,6 +395,8 @@ class NaTType(_NaT): """) fromordinal = _make_error_func('fromordinal', # noqa:E128 """ + Timestamp.fromordinal(ordinal, freq=None, tz=None, offset=None) + passed an ordinal, translate and convert to a ts note: by definition there cannot be any tz info on the ordinal itself @@ -397,8 +422,10 @@ class NaTType(_NaT): now = _make_nat_func('now', # noqa:E128 """ - Return the current time in the local timezone. Equivalent - to datetime.now([tz]) + Timestamp.now(tz=None) + + Returns new Timestamp object representing current time local to + tz. Parameters ---------- @@ -407,6 +434,8 @@ class NaTType(_NaT): """) today = _make_nat_func('today', # noqa:E128 """ + Timestamp.today(cls, tz=None) + Return the current time in the local timezone. This differs from datetime.today() in that it can be localized to a passed timezone. diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 6d60cf72efd62..d0851e3ab4f96 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -194,6 +194,11 @@ class Categorical(PandasObject): .. 
versionadded:: 0.21.0 + Methods + ------- + from_codes + __array__ + Raises ------ ValueError @@ -401,7 +406,7 @@ def ordered(self): @property def dtype(self): - """The :ref:`~pandas.api.types.CategoricalDtype` for this instance""" + """The :class:`~pandas.api.types.CategoricalDtype` for this instance""" return self._dtype @property diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index b4467f0f9733b..040b735f8de2c 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -120,6 +120,15 @@ class CategoricalDtype(ExtensionDtype): Must be unique, and must not contain any nulls. ordered : bool, default False + Attributes + ---------- + categories + ordered + + Methods + ------- + None + Notes ----- This class is useful for specifying the type of a ``Categorical`` diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 70b531ffb0ec4..3812ed96b6c36 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -46,6 +46,24 @@ class CategoricalIndex(Index, accessor.PandasDelegate): name : object Name to be stored in the index + Attributes + ---------- + codes + categories + ordered + + Methods + ------- + rename_categories + reorder_categories + add_categories + remove_categories + remove_unused_categories + set_categories + as_ordered + as_unordered + map + See Also -------- Categorical, Index diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 2e022cb151008..b0e22460fdcb0 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -203,6 +203,54 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, name : object Name to be stored in the index + Attributes + ---------- + year + month + day + hour + minute + second + microsecond + nanosecond + date + time + dayofyear + weekofyear + week + dayofweek + weekday + weekday_name + quarter + tz + freq + freqstr + is_month_start + is_month_end + 
is_quarter_start + is_quarter_end + is_year_start + is_year_end + is_leap_year + inferred_freq + + Methods + ------- + normalize + strftime + snap + tz_convert + tz_localize + round + floor + ceil + to_datetime + to_period + to_perioddelta + to_pydatetime + to_series + to_frame + Notes ----- To learn more about the frequency strings, please see `this link diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 7bf7cfce515a1..6ae55b063b676 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -121,6 +121,17 @@ class IntervalIndex(IntervalMixin, Index): Name to be stored in the index. copy : boolean, default False Copy the meta-data + mid + values + is_non_overlapping_monotonic + + Methods + ------- + from_arrays + from_tuples + from_breaks + from_intervals + contains Examples --------- diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index e6294f7d47aff..87d899a5902a8 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -95,6 +95,30 @@ class MultiIndex(Index): of iterables MultiIndex.from_tuples : Convert list of tuples to a MultiIndex Index : The base pandas Index type + + Attributes + ---------- + names + levels + labels + nlevels + levshape + + Methods + ------- + from_arrays + from_tuples + from_product + set_levels + set_labels + to_hierarchical + to_frame + is_lexsorted + sortlevel + droplevel + swaplevel + reorder_levels + remove_unused_levels """ # initialize to zero-length tuples to make everything work @@ -1362,10 +1386,12 @@ def remove_unused_levels(self): @property def nlevels(self): + """Integer number of levels in this MultiIndex.""" return len(self.levels) @property def levshape(self): + """A tuple with the length of each level.""" return tuple(len(x) for x in self.levels) def __reduce__(self): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index b0703869948c2..ae6a810ece510 100644 --- 
a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -122,6 +122,14 @@ def is_all_dates(self): name : object Name to be stored in the index + Attributes + ---------- + inferred_type + + Methods + ------- + None + Notes ----- An Index instance can **only** contain hashable objects. @@ -154,6 +162,7 @@ class Int64Index(NumericIndex): @property def inferred_type(self): + """Always 'integer' for ``Int64Index``""" return 'integer' @property @@ -213,6 +222,7 @@ class UInt64Index(NumericIndex): @property def inferred_type(self): + """Always 'integer' for ``UInt64Index``""" return 'integer' @property @@ -290,6 +300,7 @@ class Float64Index(NumericIndex): @property def inferred_type(self): + """Always 'floating' for ``Float64Index``""" return 'floating' @Appender(_index_shared_docs['astype']) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index df242e657c9d7..85e3300913000 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -161,6 +161,37 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index): Timezone for converting datetime64 data to Periods dtype : str or PeriodDtype, default None + Attributes + ---------- + day + dayofweek + dayofyear + days_in_month + daysinmonth + end_time + freq + freqstr + hour + is_leap_year + minute + month + quarter + qyear + second + start_time + week + weekday + weekofyear + year + + Methods + ------- + asfreq + strftime + to_timestamp + tz_convert + tz_localize + Examples -------- >>> idx = PeriodIndex(year=year_arr, quarter=q_arr) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 9cb01896424f7..c8f1c26f532a7 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -48,6 +48,10 @@ class RangeIndex(Int64Index): -------- Index : The base pandas Index type Int64Index : Index of int64 data + + Methods + ------- + from_range """ _typ = 'rangeindex' diff --git a/pandas/core/indexes/timedeltas.py 
b/pandas/core/indexes/timedeltas.py index e4bc46fb7bdbe..c592aa9608d97 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -137,6 +137,24 @@ class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index): Timedelta : Represents a duration between two dates or times. DatetimeIndex : Index of datetime64 data PeriodIndex : Index of Period data + + Attributes + ---------- + days + seconds + microseconds + nanoseconds + components + inferred_freq + + Methods + ------- + to_pytimedelta + to_series + round + floor + ceil + to_frame """ _typ = 'timedeltaindex' diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 96738afbca9e3..8b656d8ba25e9 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -727,6 +727,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, first level. .. versionadded:: 0.18.0 + Returns ------- dummies : DataFrame or SparseDataFrame diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 19f7e459d0725..cbf393046907f 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -185,6 +185,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, strings, especially ones with timezone offsets. .. versionadded: 0.22.0 + Returns ------- ret : datetime if parsing succeeded. @@ -200,7 +201,6 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, Examples -------- - Assembling a datetime from multiple columns of a DataFrame. 
The keys can be common abbreviations like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or plurals of the same diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 776669d6d28db..6a99b798a123d 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -386,12 +386,11 @@ def format(self, formatter, subset=None): return self def render(self, **kwargs): - r""" - Render the built up styles to HTML + """Render the built up styles to HTML Parameters ---------- - **kwargs: + `**kwargs`: Any additional keyword arguments are passed through to ``self.template.render``. This is useful when you need to provide additional variables for a custom diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index b4dc9173f11ba..12e52123064e2 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -75,7 +75,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, see `BigQuery SQL Reference <https://cloud.google.com/bigquery/sql-reference/>`__ - **kwargs : Arbitrary keyword arguments + `**kwargs` : Arbitrary keyword arguments configuration (dict): query config parameters for job processing. For example: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 62b2899f49413..c934648a1d111 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1859,7 +1859,7 @@ def _plot(data, x=None, y=None, subplots=False, mark_right : boolean, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend - kwds : keywords + `**kwds` : keywords Options to pass to matplotlib plotting method Returns @@ -1955,8 +1955,9 @@ def plot_series(data, kind='line', ax=None, # Series unique array of axes is returned with the same shape as ``layout``. See the prose documentation for more. 
- kwds : other plotting keyword arguments to be passed to matplotlib boxplot - function + `**kwds` : Keyword Arguments + All other plotting keyword arguments to be passed to + matplotlib's boxplot function Returns ------- @@ -2152,7 +2153,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, Tuple of (rows, columns) for the layout of the histograms bins : integer, default 10 Number of histogram bins to be used - kwds : other plotting keyword arguments + `**kwds` : other plotting keyword arguments To be passed to hist function """ _setup() @@ -2215,7 +2216,7 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, figure size in inches by default bins: integer, default 10 Number of histogram bins to be used - kwds : keywords + `**kwds` : keywords To be passed to the actual plotting function Notes @@ -2327,8 +2328,9 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, figsize : A tuple (width, height) in inches layout : tuple (optional) (rows, columns) for the layout of the plot - kwds : other plotting keyword arguments to be passed to matplotlib boxplot - function + `**kwds` : Keyword Arguments + All other plotting keyword arguments to be passed to + matplotlib's boxplot function Returns ------- @@ -2505,7 +2507,7 @@ def line(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. Returns @@ -2520,7 +2522,7 @@ def bar(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. Returns @@ -2535,7 +2537,7 @@ def barh(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. Returns @@ -2550,7 +2552,7 @@ def box(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. 
Returns @@ -2567,7 +2569,7 @@ def hist(self, bins=10, **kwds): ---------- bins: integer, default 10 Number of histogram bins to be used - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. Returns @@ -2582,7 +2584,7 @@ def kde(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. Returns @@ -2599,7 +2601,7 @@ def area(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. Returns @@ -2614,7 +2616,7 @@ def pie(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. Returns @@ -2665,7 +2667,7 @@ def line(self, x=None, y=None, **kwds): ---------- x, y : label or position, optional Coordinates for each point. - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2682,7 +2684,7 @@ def bar(self, x=None, y=None, **kwds): ---------- x, y : label or position, optional Coordinates for each point. - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2699,7 +2701,7 @@ def barh(self, x=None, y=None, **kwds): ---------- x, y : label or position, optional Coordinates for each point. - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2716,7 +2718,7 @@ def box(self, by=None, **kwds): ---------- by : string or sequence Column in the DataFrame to group by. - \*\*kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2735,7 +2737,7 @@ def hist(self, by=None, bins=10, **kwds): Column in the DataFrame to group by. 
bins: integer, default 10 Number of histogram bins to be used - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2750,7 +2752,7 @@ def kde(self, **kwds): Parameters ---------- - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2769,7 +2771,7 @@ def area(self, x=None, y=None, **kwds): ---------- x, y : label or position, optional Coordinates for each point. - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2786,7 +2788,7 @@ def pie(self, y=None, **kwds): ---------- y : label or position, optional Column to plot. - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2807,7 +2809,7 @@ def scatter(self, x, y, s=None, c=None, **kwds): Size of each point. c : label or position, optional Color of each point. - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns @@ -2832,7 +2834,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, a single number (e.g. `mean`, `max`, `sum`, `std`). gridsize : int, optional Number of bins. - **kwds : optional + `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns
xref https://github.com/pandas-dev/pandas/issues/18147 OK, https://github.com/pandas-dev/pandas/commit/500ab71bda109327cf191a11fcf514eaffd70167 removes our hack in the vendored `numpydoc/numpydoc.py` to exclude autosummary generation for certain methods. Instead, we'll explicitly list the relevant methods in a new `Methods` section in those docs. This let's us include things like `IntervalIndex` in the API docs, without having all the regular index methods in the table, and without auto-gen making pages for all of those. The downside is that we have to list the methods in `Methods` in the docstring and under a toctree in `api.rst`. But that's not so bad. https://github.com/pandas-dev/pandas/commit/257fc8a0af7de854d0f5e2f0bbc9380320760c69 is removing all the warnings about references a non-existent document / this document isn't included in a toctree. But it's a bit tedious so I'll come back later (~600 warnings remaining :/ )
https://api.github.com/repos/pandas-dev/pandas/pulls/18202
2017-11-09T20:33:19Z
2017-11-15T16:36:42Z
2017-11-15T16:36:42Z
2017-12-20T16:11:03Z
DOC: Update parquet metadata format description around index levels
diff --git a/doc/source/developer.rst b/doc/source/developer.rst index 9c214020ab43d..5b9cbb7ae799a 100644 --- a/doc/source/developer.rst +++ b/doc/source/developer.rst @@ -50,15 +50,37 @@ So that a ``pandas.DataFrame`` can be faithfully reconstructed, we store a 'pandas_version': $VERSION} Here, ``<c0>``/``<ci0>`` and so forth are dictionaries containing the metadata -for each column. This has JSON form: +for each column, *including the index columns*. This has JSON form: .. code-block:: text {'name': column_name, + 'field_name': parquet_column_name, 'pandas_type': pandas_type, 'numpy_type': numpy_type, 'metadata': metadata} +.. note:: + + Every index column is stored with a name matching the pattern + ``__index_level_\d+__`` and its corresponding column information is can be + found with the following code snippet. + + Following this naming convention isn't strictly necessary, but strongly + suggested for compatibility with Arrow. + + Here's an example of how the index metadata is structured in pyarrow: + + .. 
code-block:: python + + # assuming there's at least 3 levels in the index + index_columns = metadata['index_columns'] + columns = metadata['columns'] + ith_index = 2 + assert index_columns[ith_index] == '__index_level_2__' + ith_index_info = columns[-len(index_columns):][ith_index] + ith_index_level_name = ith_index_info['name'] + ``pandas_type`` is the logical type of the column, and is one of: * Boolean: ``'bool'`` @@ -109,32 +131,39 @@ As an example of fully-formed metadata: {'index_columns': ['__index_level_0__'], 'column_indexes': [ {'name': None, - 'pandas_type': 'string', + 'field_name': 'None', + 'pandas_type': 'unicode', 'numpy_type': 'object', - 'metadata': None} + 'metadata': {'encoding': 'UTF-8'}} ], 'columns': [ {'name': 'c0', + 'field_name': 'c0', 'pandas_type': 'int8', 'numpy_type': 'int8', 'metadata': None}, {'name': 'c1', + 'field_name': 'c1', 'pandas_type': 'bytes', 'numpy_type': 'object', 'metadata': None}, {'name': 'c2', + 'field_name': 'c2', 'pandas_type': 'categorical', 'numpy_type': 'int16', 'metadata': {'num_categories': 1000, 'ordered': False}}, {'name': 'c3', + 'field_name': 'c3', 'pandas_type': 'datetimetz', 'numpy_type': 'datetime64[ns]', 'metadata': {'timezone': 'America/Los_Angeles'}}, {'name': 'c4', + 'field_name': 'c4', 'pandas_type': 'object', 'numpy_type': 'object', 'metadata': {'encoding': 'pickle'}}, - {'name': '__index_level_0__', + {'name': None, + 'field_name': '__index_level_0__', 'pandas_type': 'int64', 'numpy_type': 'int64', 'metadata': None}
cc @wesm @jcrist
https://api.github.com/repos/pandas-dev/pandas/pulls/18201
2017-11-09T19:44:25Z
2018-12-07T23:52:08Z
2018-12-07T23:52:08Z
2018-12-07T23:52:16Z
Allow accessing AxisProperties on classes
diff --git a/pandas/_libs/properties.pyx b/pandas/_libs/properties.pyx index 374da8067eedd..4beb24f07c21c 100644 --- a/pandas/_libs/properties.pyx +++ b/pandas/_libs/properties.pyx @@ -63,7 +63,14 @@ cdef class AxisProperty(object): self.axis = axis def __get__(self, obj, type): - cdef list axes = obj._data.axes + cdef: + list axes + + if obj is None: + # Only instances have _data, not classes + return None + else: + axes = obj._data.axes return axes[self.axis] def __set__(self, obj, value): diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index be6d81c63ae1e..c50aa858a15b5 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -306,6 +306,11 @@ def test_axis_aliases(self): result = f.sum(axis='columns') assert_series_equal(result, expected) + def test_class_axis(self): + # https://github.com/pandas-dev/pandas/issues/18147 + DataFrame.index # no exception! + DataFrame.columns # no exception! + def test_more_asMatrix(self): values = self.mixed_frame.as_matrix() assert values.shape[1] == len(self.mixed_frame.columns) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 6b950be15ca46..c1e4189283928 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -334,6 +334,10 @@ def test_axis_alias(self): assert s._get_axis_number('rows') == 0 assert s._get_axis_name('rows') == 'index' + def test_class_axis(self): + # https://github.com/pandas-dev/pandas/issues/18147 + Series.index # no exception! + def test_numpy_unique(self): # it works! np.unique(self.ts)
xref https://github.com/pandas-dev/pandas/issues/18147 This will remove the doc warnings about DataFrame.index, Series.index, ... Basically, before if you did `pd.DataFrame.index` you got an exception, about `None._data`, since there was no instance. cc @jreback if you have any concerns.
https://api.github.com/repos/pandas-dev/pandas/pulls/18196
2017-11-09T17:50:33Z
2017-11-10T10:09:00Z
2017-11-10T10:09:00Z
2017-12-20T16:10:44Z
cleanup import order for core.indexes
diff --git a/pandas/core/indexes/__init__.py b/pandas/core/indexes/__init__.py index e69de29bb2d1d..40a96afc6ff09 100644 --- a/pandas/core/indexes/__init__.py +++ b/pandas/core/indexes/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 2176338574304..fc935bab66545 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -1,9 +1,16 @@ +# -*- coding: utf-8 -*- """ datetimelike delegation """ import numpy as np +from pandas._libs.period import IncompatibleFrequency # noqa + +from pandas.core.accessor import PandasDelegate +from pandas.core.algorithms import take_1d +from pandas.core.base import NoNewAttributesMixin, PandasObject + from pandas.core.dtypes.common import ( is_period_arraylike, is_datetime_arraylike, is_integer_dtype, @@ -11,13 +18,9 @@ is_timedelta64_dtype, is_categorical_dtype, is_list_like) -from pandas.core.accessor import PandasDelegate -from pandas.core.base import NoNewAttributesMixin, PandasObject from pandas.core.indexes.datetimes import DatetimeIndex -from pandas._libs.period import IncompatibleFrequency # noqa from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.timedeltas import TimedeltaIndex -from pandas.core.algorithms import take_1d def is_datetimelike(data): diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 08cda8a06ba64..b1b82e63a38c3 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from pandas.core.indexes.base import (Index, _new_Index, _ensure_index, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 57d2d07294a53..e4bda7ee45632 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,18 +1,32 @@ +# -*- coding: utf-8 -*- import datetime -import warnings import operator +import warnings import numpy as np + from pandas._libs import (lib, index as libindex, 
tslib as libts, algos as libalgos, join as libjoin, Timestamp, Timedelta, ) from pandas._libs.lib import is_datetime_array from pandas._libs.tslibs import parsing +from pandas import compat from pandas.compat import range, u from pandas.compat.numpy import function as nv -from pandas import compat +from pandas.io.formats.printing import pprint_thing + +from pandas.util._decorators import (Appender, Substitution, + cache_readonly, deprecate_kwarg) + +import pandas.core.algorithms as algos +from pandas.core import accessor, base, missing, sorting, strings +from pandas.core.base import PandasObject, IndexOpsMixin +from pandas.core.common import (is_bool_indexer, _values_from_object, + _asarray_tuplesafe, _not_none, + _index_labels_to_array) +from pandas.core.config import get_option from pandas.core.dtypes.generic import ( ABCSeries, @@ -42,23 +56,11 @@ needs_i8_conversion, is_iterator, is_list_like, is_scalar) -from pandas.core.common import (is_bool_indexer, _values_from_object, - _asarray_tuplesafe, _not_none, - _index_labels_to_array) - -from pandas.core.base import PandasObject, IndexOpsMixin -import pandas.core.base as base -from pandas.util._decorators import ( - Appender, Substitution, cache_readonly, deprecate_kwarg) -from pandas.core.indexes.frozen import FrozenList import pandas.core.dtypes.concat as _concat -import pandas.core.missing as missing -import pandas.core.algorithms as algos -import pandas.core.sorting as sorting -from pandas.io.formats.printing import pprint_thing + from pandas.core.ops import _comp_method_OBJECT_ARRAY -from pandas.core import strings, accessor -from pandas.core.config import get_option + +from pandas.core.indexes.frozen import FrozenList # simplify diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 70b531ffb0ec4..a6cf974d547c1 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -1,8 +1,13 @@ +# -*- coding: utf-8 -*- import numpy as np + from pandas._libs import 
index as libindex +from pandas.util._decorators import Appender, cache_readonly + from pandas import compat from pandas.compat.numpy import function as nv + from pandas.core.dtypes.generic import ABCCategorical, ABCSeries from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -10,19 +15,15 @@ is_list_like, is_interval_dtype, is_scalar) -from pandas.core.common import (_asarray_tuplesafe, - _values_from_object) from pandas.core.dtypes.missing import array_equivalent -from pandas.core.algorithms import take_1d - -from pandas.util._decorators import Appender, cache_readonly +from pandas.core import base, accessor, missing +from pandas.core.algorithms import take_1d +from pandas.core.common import _asarray_tuplesafe, _values_from_object from pandas.core.config import get_option -from pandas.core.indexes.base import Index, _index_shared_docs -from pandas.core import accessor -import pandas.core.base as base -import pandas.core.missing as missing + import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import Index, _index_shared_docs _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update(dict(target_klass='CategoricalIndex')) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 4934ccb49b844..180da246d067d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -1,37 +1,42 @@ +# -*- coding: utf-8 -*- """ Base and utility classes for tseries type pandas objects. 
""" -import warnings from datetime import datetime, timedelta +import warnings + +import numpy as np + +from pandas._libs import lib, iNaT, NaT +from pandas._libs.period import Period +from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas import compat from pandas.compat.numpy import function as nv -from pandas.core.tools.timedeltas import to_timedelta -import numpy as np +import pandas.io.formats.printing as printing + +from pandas.tseries import frequencies + +from pandas.util._decorators import Appender, cache_readonly + from pandas.core.dtypes.common import ( is_integer, is_float, is_bool_dtype, _ensure_int64, is_scalar, is_dtype_equal, is_list_like) -from pandas.core.dtypes.generic import ( - ABCIndex, ABCSeries, - ABCPeriodIndex, ABCIndexClass) +from pandas.core.dtypes.generic import (ABCIndex, ABCSeries, + ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna +import pandas.core.dtypes.concat as _concat + from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr from pandas.core.common import AbstractMethodError - -import pandas.io.formats.printing as printing -from pandas._libs import lib, iNaT, NaT -from pandas._libs.period import Period -from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds +from pandas.core.tools.timedeltas import to_timedelta from pandas.core.indexes.base import Index, _index_shared_docs -from pandas.util._decorators import Appender, cache_readonly -import pandas.core.dtypes.concat as _concat -import pandas.tseries.frequencies as frequencies import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 78869de318dce..d3851142c4d0c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1,12 +1,31 @@ +# -*- coding: utf-8 -*- # pylint: disable=E1101 from __future__ import division + +from 
datetime import time, datetime, timedelta import operator import warnings -from datetime import time, datetime, timedelta import numpy as np from pytz import utc +from pandas._libs import (lib, index as libindex, tslib as libts, + algos as libalgos, join as libjoin, + Timestamp, period as libperiod) +from pandas._libs.tslibs import timezones, parsing + +from pandas import compat + +from pandas.errors import PerformanceWarning + +from pandas.tseries import offsets +from pandas.tseries.frequencies import to_offset, get_period_alias, Resolution +from pandas.tseries.offsets import (DateOffset, generate_range, Tick, + CDay, prefix_mapping) + +from pandas.util._decorators import (Appender, cache_readonly, + deprecate_kwarg, Substitution) + from pandas.core.base import _shared_docs from pandas.core.dtypes.common import ( @@ -24,38 +43,21 @@ is_scalar, pandas_dtype, _ensure_int64) -from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna - import pandas.core.dtypes.concat as _concat -from pandas.errors import PerformanceWarning -from pandas.core.common import _values_from_object, _maybe_box + from pandas.core.algorithms import checked_add_with_arr +import pandas.core.common as com -from pandas.core.indexes.base import Index, _index_shared_docs -from pandas.core.indexes.numeric import Int64Index, Float64Index -import pandas.compat as compat -from pandas.tseries.frequencies import ( - to_offset, get_period_alias, - Resolution) -from pandas.core.indexes.datetimelike import ( - DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) -from pandas.tseries.offsets import ( - DateOffset, generate_range, Tick, CDay, prefix_mapping) -from pandas.core.tools.datetimes import ( - parse_time_string, normalize_date, to_time) +from pandas.core.tools.datetimes import to_time, to_datetime from pandas.core.tools.timedeltas import to_timedelta -from 
pandas.util._decorators import (Appender, cache_readonly, - deprecate_kwarg, Substitution) -import pandas.core.common as com -import pandas.tseries.offsets as offsets -import pandas.core.tools.datetimes as tools -from pandas._libs import (lib, index as libindex, tslib as libts, - algos as libalgos, join as libjoin, - Timestamp, period as libperiod) -from pandas._libs.tslibs import timezones +from pandas.core.indexes.base import Index, _index_shared_docs +from pandas.core.indexes.numeric import Int64Index, Float64Index +from pandas.core.indexes.datetimelike import (DatelikeOps, TimelikeOps, + DatetimeIndexOpsMixin) # -------- some conversion wrapper functions @@ -118,7 +120,7 @@ def wrapper(self, other): elif not isinstance(other, (np.ndarray, Index, ABCSeries)): other = _ensure_datetime64(other) result = func(np.asarray(other)) - result = _values_from_object(result) + result = com._values_from_object(result) if isinstance(other, Index): o_mask = other.values.view('i8') == libts.iNaT @@ -338,8 +340,7 @@ def __new__(cls, data=None, # data must be Index or np.ndarray here if not (is_datetime64_dtype(data) or is_datetimetz(data) or is_integer_dtype(data)): - data = tools.to_datetime(data, dayfirst=dayfirst, - yearfirst=yearfirst) + data = to_datetime(data, dayfirst=dayfirst, yearfirst=yearfirst) if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data): @@ -473,14 +474,14 @@ def _generate(cls, start, end, periods, name, offset, if start is not None: if normalize: - start = normalize_date(start) + start = libts.normalize_date(start) _normalized = True else: _normalized = _normalized and start.time() == _midnight if end is not None: if normalize: - end = normalize_date(end) + end = libts.normalize_date(end) _normalized = True else: _normalized = _normalized and end.time() == _midnight @@ -647,7 +648,7 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None, xdr = generate_range(offset=offset, start=_CACHE_START, end=_CACHE_END) - arr = 
tools.to_datetime(list(xdr), box=False) + arr = to_datetime(list(xdr), box=False) cachedRange = DatetimeIndex._simple_new(arr) cachedRange.offset = offset @@ -1380,8 +1381,8 @@ def get_value(self, series, key): return series.take(locs) try: - return _maybe_box(self, Index.get_value(self, series, key), - series, key) + return com._maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -1400,9 +1401,9 @@ def get_value_maybe_box(self, series, key): key = Timestamp(key, tz=self.tz) elif not isinstance(key, Timestamp): key = Timestamp(key) - values = self._engine.get_value(_values_from_object(series), + values = self._engine.get_value(com._values_from_object(series), key, tz=self.tz) - return _maybe_box(self, values, series, key) + return com._maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ @@ -1475,7 +1476,7 @@ def _maybe_cast_slice_bound(self, label, side, kind): if isinstance(label, compat.string_types): freq = getattr(self, 'freqstr', getattr(self, 'inferred_freq', None)) - _, parsed, reso = parse_time_string(label, freq) + _, parsed, reso = parsing.parse_time_string(label, freq) lower, upper = self._parsed_string_to_bounds(reso, parsed) # lower, upper form the half-open interval: # [parsed, parsed + 1 freq) @@ -1492,7 +1493,7 @@ def _maybe_cast_slice_bound(self, label, side, kind): def _get_string_slice(self, key, use_lhs=True, use_rhs=True): freq = getattr(self, 'freqstr', getattr(self, 'inferred_freq', None)) - _, parsed, reso = parse_time_string(key, freq) + _, parsed, reso = parsing.parse_time_string(key, freq) loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs) return loc @@ -2006,7 +2007,7 @@ def _generate_regular_range(start, end, periods, offset): dates = list(xdr) # utc = len(dates) > 0 and dates[0].tzinfo is not None - data = tools.to_datetime(dates) + data = to_datetime(dates) return data diff --git 
a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index 3c6b922178abf..8fd5851a5fc6f 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ frozen (immutable) data structures to support MultiIndexing @@ -9,9 +10,11 @@ """ import numpy as np + +from pandas.io.formats.printing import pprint_thing + from pandas.core.base import PandasObject from pandas.core.dtypes.cast import coerce_indexer_dtype -from pandas.io.formats.printing import pprint_thing class FrozenList(PandasObject, list): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 7bf7cfce515a1..ec3acd7c5f621 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,7 +1,23 @@ +# -*- coding: utf-8 -*- """ define the IntervalIndex """ import numpy as np +from pandas._libs import Timestamp, Timedelta +from pandas._libs.interval import ( + Interval, IntervalMixin, IntervalTree, + intervals_to_interval_bounds) + +from pandas.compat.numpy import function as nv + +from pandas.tseries.frequencies import to_offset +from pandas.tseries.offsets import DateOffset + +from pandas.util._decorators import cache_readonly, Appender + +from pandas.core import common as com +from pandas.core.config import get_option + from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.generic import ABCPeriodIndex from pandas.core.dtypes.dtypes import IntervalDtype @@ -18,26 +34,14 @@ is_float, is_number, is_integer) -from pandas.core.indexes.base import ( - Index, _ensure_index, - default_pprint, _index_shared_docs) - -from pandas._libs import Timestamp, Timedelta -from pandas._libs.interval import ( - Interval, IntervalMixin, IntervalTree, - intervals_to_interval_bounds) +import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import (Index, _ensure_index, + default_pprint, _index_shared_docs) from pandas.core.indexes.datetimes import date_range from 
pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexes.multi import MultiIndex -from pandas.compat.numpy import function as nv -from pandas.core import common as com -from pandas.util._decorators import cache_readonly, Appender -from pandas.core.config import get_option -from pandas.tseries.frequencies import to_offset -from pandas.tseries.offsets import DateOffset -import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(klass='IntervalIndex', diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4cc59f5297058..c642a1167445c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1,4 +1,4 @@ - +# -*- coding: utf-8 -*- # pylint: disable=E1101,E1103,W0232 import datetime import warnings @@ -6,11 +6,28 @@ from sys import getsizeof import numpy as np + from pandas._libs import index as libindex, lib, Timestamp +from pandas import compat from pandas.compat import range, zip, lrange, lzip, map from pandas.compat.numpy import function as nv -from pandas import compat + +from pandas.errors import PerformanceWarning, UnsortedIndexError +from pandas.io.formats.printing import pprint_thing +from pandas.util._decorators import (Appender, cache_readonly, + deprecate, deprecate_kwarg) + +from pandas.core import base, missing +import pandas.core.algorithms as algos + +import pandas.core.common as com +from pandas.core.common import (_any_not_none, + _values_from_object, + is_bool_indexer, + is_null_slice, + is_true_slices) +from pandas.core.config import get_option from pandas.core.dtypes.common import ( _ensure_int64, @@ -20,30 +37,14 @@ is_list_like, is_scalar) from pandas.core.dtypes.missing import isna, array_equivalent -from pandas.errors import PerformanceWarning, UnsortedIndexError -from pandas.core.common import (_any_not_none, - _values_from_object, - is_bool_indexer, - is_null_slice, - is_true_slices) -import pandas.core.base as 
base -from pandas.util._decorators import (Appender, cache_readonly, - deprecate, deprecate_kwarg) -import pandas.core.common as com -import pandas.core.missing as missing -import pandas.core.algorithms as algos -from pandas.io.formats.printing import pprint_thing - -from pandas.core.config import get_option - -from pandas.core.indexes.base import ( - Index, _ensure_index, - _get_na_value, InvalidIndexError, - _index_shared_docs) -from pandas.core.indexes.frozen import ( - FrozenNDArray, FrozenList, _ensure_frozen) import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import (Index, _ensure_index, + _get_na_value, InvalidIndexError, + _index_shared_docs) +from pandas.core.indexes.frozen import (FrozenNDArray, FrozenList, + _ensure_frozen) + _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(klass='MultiIndex', diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index b0703869948c2..8ebadad68959c 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -1,6 +1,15 @@ +# -*- coding: utf-8 -*- import numpy as np + from pandas._libs import (index as libindex, algos as libalgos, join as libjoin) + +from pandas import compat +from pandas.util._decorators import Appender, cache_readonly + +from pandas.core import algorithms +from pandas.core.common import _asarray_tuplesafe, _values_from_object + from pandas.core.dtypes.common import ( is_dtype_equal, pandas_dtype, @@ -10,14 +19,10 @@ is_bool, is_bool_dtype, is_scalar) -from pandas.core.common import _asarray_tuplesafe, _values_from_object - -from pandas import compat -from pandas.core import algorithms -from pandas.core.indexes.base import ( - Index, InvalidIndexError, _index_shared_docs) -from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat + +from pandas.core.indexes.base import (Index, InvalidIndexError, + _index_shared_docs) import pandas.core.indexes.base as ibase 
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a6d5690767c10..fdc2ead39c3e0 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -1,10 +1,30 @@ +# -*- coding: utf-8 -*- # pylint: disable=E1101,E1103,W0232 from datetime import datetime, timedelta -import numpy as np import warnings +import numpy as np + +from pandas._libs.lib import infer_dtype +from pandas._libs import NaT, iNaT, period, index as libindex +from pandas._libs.period import (Period, IncompatibleFrequency, + get_period_field_arr, _validate_end_alias, + _quarter_to_myear) +from pandas._libs.tslibs.fields import isleapyear_arr +from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds +from pandas._libs.tslibs.parsing import parse_time_string + +from pandas import compat +from pandas.compat import zip, u + +from pandas.tseries import frequencies, offsets +from pandas.tseries.frequencies import get_freq_code as _gfc + +from pandas.util._decorators import (Appender, Substitution, cache_readonly, + deprecate_kwarg) from pandas.core import common as com + from pandas.core.dtypes.common import ( is_integer, is_float, @@ -22,29 +42,12 @@ from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ABCSeries -import pandas.tseries.frequencies as frequencies -from pandas.tseries.frequencies import get_freq_code as _gfc -from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index +from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexes.datetimelike import DatelikeOps, DatetimeIndexOpsMixin -from pandas.core.tools.datetimes import parse_time_string -import pandas.tseries.offsets as offsets - -from pandas._libs.lib import infer_dtype -from pandas._libs import tslib, period, index as libindex -from pandas._libs.period import (Period, IncompatibleFrequency, - get_period_field_arr, _validate_end_alias, - 
_quarter_to_myear) -from pandas._libs.tslibs.fields import isleapyear_arr -from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas.core.base import _shared_docs -from pandas.core.indexes.base import _index_shared_docs, _ensure_index - -from pandas import compat -from pandas.util._decorators import (Appender, Substitution, cache_readonly, - deprecate_kwarg) -from pandas.compat import zip, u +from pandas.core.indexes.base import _index_shared_docs, _ensure_index, Index import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -102,7 +105,7 @@ def wrapper(self, other): result[mask] = nat_result return result - elif other is tslib.NaT: + elif other is NaT: result = np.empty(len(self._values), dtype=bool) result.fill(nat_result) else: @@ -567,7 +570,7 @@ def asfreq(self, freq=None, how='E'): new_data = period.period_asfreq_arr(ordinal, base1, base2, end) if self.hasnans: - new_data[self._isnan] = tslib.iNaT + new_data[self._isnan] = iNaT return self._simple_new(new_data, self.name, freq=freq) @@ -691,9 +694,9 @@ def _add_delta(self, other): return self.shift(ordinal_delta) def _sub_datelike(self, other): - if other is tslib.NaT: + if other is NaT: new_data = np.empty(len(self), dtype=np.int64) - new_data.fill(tslib.iNaT) + new_data.fill(iNaT) return TimedeltaIndex(new_data, name=self.name) return NotImplemented @@ -726,7 +729,7 @@ def shift(self, n): """ values = self._values + n * self.freq.n if self.hasnans: - values[self._isnan] = tslib.iNaT + values[self._isnan] = iNaT return self._shallow_copy(values=values) @cache_readonly @@ -835,7 +838,7 @@ def get_loc(self, key, method=None, tolerance=None): raise KeyError(key) try: - ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal + ordinal = iNaT if key is NaT else key.ordinal if tolerance is not None: tolerance = self._convert_tolerance(tolerance, np.asarray(key)) @@ -1093,7 +1096,7 @@ def _get_ordinal_range(start, end, periods, freq, mult=1): if is_start_per 
and is_end_per and start.freq != end.freq: raise ValueError('start and end must have same freq') - if (start is tslib.NaT or end is tslib.NaT): + if (start is NaT or end is NaT): raise ValueError('start and end must not be NaT') if freq is None: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 9cb01896424f7..68a6aa410c918 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,28 +1,28 @@ +# -*- coding: utf-8 -*- from sys import getsizeof import operator import numpy as np -from pandas._libs import index as libindex -from pandas.core.dtypes.common import ( - is_integer, - is_scalar, - is_int64_dtype) +from pandas._libs import index as libindex from pandas import compat from pandas.compat import lrange, range, get_range_parameters from pandas.compat.numpy import function as nv -from pandas.core.common import _all_none -from pandas.core.indexes.base import Index, _index_shared_docs + from pandas.util._decorators import Appender, cache_readonly + +from pandas.core.common import _all_none + +from pandas.core.dtypes.common import is_integer, is_scalar, is_int64_dtype import pandas.core.dtypes.concat as _concat -import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import Index, _index_shared_docs +import pandas.core.indexes.base as ibase from pandas.core.indexes.numeric import Int64Index class RangeIndex(Int64Index): - """ Immutable Index implementing a monotonic integer range. diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a4a5f7df9aa0f..4297525f73859 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ implement the TimedeltaIndex """ from datetime import timedelta
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18195
2017-11-09T17:23:25Z
2017-11-09T18:13:19Z
null
2017-11-12T23:04:50Z
move remaining core.tools.timedeltas to tslibs.timedeltas
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 623babe5422a8..276026861af83 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -43,40 +43,227 @@ Components = collections.namedtuple('Components', [ 'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds']) -cdef dict timedelta_abbrevs = { 'D': 'd', - 'd': 'd', - 'days': 'd', - 'day': 'd', - 'hours': 'h', - 'hour': 'h', - 'hr': 'h', - 'h': 'h', - 'm': 'm', - 'minute': 'm', - 'min': 'm', - 'minutes': 'm', - 's': 's', - 'seconds': 's', - 'sec': 's', - 'second': 's', - 'ms': 'ms', - 'milliseconds': 'ms', - 'millisecond': 'ms', - 'milli': 'ms', - 'millis': 'ms', - 'us': 'us', - 'microseconds': 'us', - 'microsecond': 'us', - 'micro': 'us', - 'micros': 'us', - 'ns': 'ns', - 'nanoseconds': 'ns', - 'nano': 'ns', - 'nanos': 'ns', - 'nanosecond': 'ns'} +cdef dict timedelta_abbrevs = {'D': 'd', + 'd': 'd', + 'days': 'd', + 'day': 'd', + 'hours': 'h', + 'hour': 'h', + 'hr': 'h', + 'h': 'h', + 'm': 'm', + 'minute': 'm', + 'min': 'm', + 'minutes': 'm', + 's': 's', + 'seconds': 's', + 'sec': 's', + 'second': 's', + 'ms': 'ms', + 'milliseconds': 'ms', + 'millisecond': 'ms', + 'milli': 'ms', + 'millis': 'ms', + 'us': 'us', + 'microseconds': 'us', + 'microsecond': 'us', + 'micro': 'us', + 'micros': 'us', + 'ns': 'ns', + 'nanoseconds': 'ns', + 'nano': 'ns', + 'nanos': 'ns', + 'nanosecond': 'ns'} _no_input = object() +_unit_map = {'Y': 'Y', + 'y': 'Y', + 'W': 'W', + 'w': 'W', + 'D': 'D', + 'd': 'D', + 'days': 'D', + 'Days': 'D', + 'day': 'D', + 'Day': 'D', + 'M': 'M', + 'H': 'h', + 'h': 'h', + 'm': 'm', + 'T': 'm', + 'S': 's', + 's': 's', + 'L': 'ms', + 'MS': 'ms', + 'ms': 'ms', + 'US': 'us', + 'us': 'us', + 'NS': 'ns', + 'ns': 'ns'} + +# ---------------------------------------------------------------------- +# Top-Level API + + +def to_timedelta(arg, unit='ns', box=True, errors='raise'): + """ + Convert argument to timedelta 
+ + Parameters + ---------- + arg : string, timedelta, list, tuple, 1-d array, or Series + unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an + integer/float number + box : boolean, default True + - If True returns a Timedelta/TimedeltaIndex of the results + - if False returns a np.timedelta64 or ndarray of values of dtype + timedelta64[ns] + errors : {'ignore', 'raise', 'coerce'}, default 'raise' + - If 'raise', then invalid parsing will raise an exception + - If 'coerce', then invalid parsing will be set as NaT + - If 'ignore', then invalid parsing will return the input + + Returns + ------- + ret : timedelta64/arrays of timedelta64 if parsing succeeded + + Examples + -------- + + Parsing a single string to a Timedelta: + + >>> pd.to_timedelta('1 days 06:05:01.00003') + Timedelta('1 days 06:05:01.000030') + >>> pd.to_timedelta('15.5us') + Timedelta('0 days 00:00:00.000015') + + Parsing a list or array of strings: + + >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) + TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT], + dtype='timedelta64[ns]', freq=None) + + Converting numbers by specifying the `unit` keyword argument: + + >>> pd.to_timedelta(np.arange(5), unit='s') + TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02', + '00:00:03', '00:00:04'], + dtype='timedelta64[ns]', freq=None) + >>> pd.to_timedelta(np.arange(5), unit='d') + TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq=None) + + See also + -------- + pandas.DataFrame.astype : Cast argument to a specified dtype. + pandas.to_datetime : Convert argument to datetime. 
+ """ + unit = _validate_timedelta_unit(unit) + + if errors not in ('ignore', 'raise', 'coerce'): + raise ValueError("errors must be one of 'ignore', " + "'raise', or 'coerce'}") + + if arg is None: + return arg + + typ = getattr(arg, '_typ', None) + ndim = getattr(arg, 'ndim', 1) + + if typ == 'series': + values = _convert_listlike(arg._values, unit=unit, + box=False, errors=errors) + from pandas import Series + return Series(values, index=arg.index, name=arg.name) + elif typ and 'index' in typ: + return _convert_listlike(arg, unit=unit, box=box, + errors=errors, name=arg.name) + elif is_string_object(arg): + pass + elif hasattr(arg, '__iter__') and ndim == 0: + # extract array scalar and process below + arg = arg.item() + elif hasattr(arg, '__iter__') and ndim == 1: + return _convert_listlike(arg, unit=unit, box=box, errors=errors) + elif ndim > 1: + raise TypeError('arg must be a string, timedelta, list, tuple, ' + '1-d array, or Series') + + # ...so it must be a scalar value. Return scalar. 
+ return _coerce_scalar_to_timedelta_type(arg, unit=unit, + box=box, errors=errors) + + +def _validate_timedelta_unit(arg): + """ provide validation / translation for timedelta short units """ + try: + return _unit_map[arg] + except: + if arg is None: + return 'ns' + raise ValueError("invalid timedelta unit {arg} " + "provided".format(arg=arg)) + + +def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): + """Convert string 'r' to a timedelta object.""" + + try: + result = convert_to_timedelta64(r, unit) + except ValueError: + if errors == 'raise': + raise + elif errors == 'ignore': + return r + + # coerce + result = NaT + + if box: + result = Timedelta(result) + return result + + +def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): + """Convert a list of objects to a timedelta index object.""" + + if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'): + arg = np.array(list(arg), dtype='O') + + # these are shortcut-able + if arg.dtype.kind == 'm': + # Any variant of timedelta64 dtype + value = arg.astype('timedelta64[ns]') + elif arg.dtype.kind in ['i', 'u']: + # Any integer dtype, specifically excluding datetime64 + in_dtype = 'timedelta64[{unit}]'.format(unit=unit) + value = arg.astype(in_dtype).astype('timedelta64[ns]', copy=False) + else: + from pandas._libs.algos import ensure_object + try: + value = array_to_timedelta64(ensure_object(arg), + unit=unit, errors=errors) + value = value.astype('timedelta64[ns]', copy=False) + except ValueError: + if errors == 'ignore': + return arg + else: + # This else-block accounts for the cases when errors='raise' + # and errors='coerce'. If errors == 'raise', these errors + # should be raised. If errors == 'coerce', we shouldn't + # expect any errors to be raised, since all parsing errors + # cause coercion to pd.NaT. However, if an error / bug is + # introduced that causes an Exception to be raised, we would + # like to surface it. 
+ raise + + if box: + from pandas import TimedeltaIndex + value = TimedeltaIndex(value, unit='ns', name=name) + return value + + # ---------------------------------------------------------------------- cpdef int64_t delta_to_nanoseconds(delta) except? -1: @@ -916,7 +1103,7 @@ class Timedelta(_Timedelta): if isinstance(value, Timedelta): value = value.value - elif util.is_string_object(value): + elif is_string_object(value): value = np.timedelta64(parse_timedelta_string(value)) elif PyDelta_Check(value): value = convert_to_timedelta64(value, 'ns') @@ -926,7 +1113,7 @@ class Timedelta(_Timedelta): value = value.astype('timedelta64[ns]') elif hasattr(value, 'delta'): value = np.timedelta64(delta_to_nanoseconds(value.delta), 'ns') - elif is_integer_object(value) or util.is_float_object(value): + elif is_integer_object(value) or is_float_object(value): # unit=None is de-facto 'ns' value = convert_to_timedelta64(value, unit) elif _checknull_with_nat(value): diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 4b3c608a88be8..9c19772fb0e21 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -14,7 +14,8 @@ from pandas.core.computation.ops import is_term, UndefinedVariableError from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.common import _ensure_decoded -from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type + +from pandas._libs.tslibs.timedeltas import _coerce_scalar_to_timedelta_type class Scope(expr.Scope): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index f3b11e52cdd7a..3e53c5c9977a0 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -610,7 +610,7 @@ def coerce_to_dtypes(result, dtypes): if len(result) != len(dtypes): raise AssertionError("_coerce_to_dtypes requires equal len arrays") - from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type + from 
pandas._libs.tslibs.timedeltas import _coerce_scalar_to_timedelta_type def conv(r, dtype): try: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a4a5f7df9aa0f..5f2a42d94eb13 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -29,12 +29,13 @@ import pandas.core.dtypes.concat as _concat from pandas.util._decorators import Appender, Substitution, deprecate_kwarg from pandas.core.indexes.datetimelike import TimelikeOps, DatetimeIndexOpsMixin -from pandas.core.tools.timedeltas import ( - to_timedelta, _coerce_scalar_to_timedelta_type) + from pandas.tseries.offsets import Tick, DateOffset from pandas._libs import (lib, index as libindex, tslib as libts, - join as libjoin, Timedelta, NaT, iNaT) -from pandas._libs.tslibs.timedeltas import array_to_timedelta64 + join as libjoin, NaT, iNaT) +from pandas._libs.tslibs.timedeltas import (array_to_timedelta64, to_timedelta, + _coerce_scalar_to_timedelta_type, + Timedelta) def _field_accessor(name, alias, docstring=None): diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 94e2f2342bd51..b3fb3b41ba043 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -1,193 +1,4 @@ """ timedelta support tools """ - -import numpy as np -import pandas as pd -import pandas._libs.tslib as tslib -from pandas._libs.tslibs.timedeltas import (convert_to_timedelta64, - array_to_timedelta64) - -from pandas.core.dtypes.common import ( - _ensure_object, - is_integer_dtype, - is_timedelta64_dtype, - is_list_like) -from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass - - -def to_timedelta(arg, unit='ns', box=True, errors='raise'): - """ - Convert argument to timedelta - - Parameters - ---------- - arg : string, timedelta, list, tuple, 1-d array, or Series - unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an - integer/float number - box : boolean, default True - - If True returns a 
Timedelta/TimedeltaIndex of the results - - if False returns a np.timedelta64 or ndarray of values of dtype - timedelta64[ns] - errors : {'ignore', 'raise', 'coerce'}, default 'raise' - - If 'raise', then invalid parsing will raise an exception - - If 'coerce', then invalid parsing will be set as NaT - - If 'ignore', then invalid parsing will return the input - - Returns - ------- - ret : timedelta64/arrays of timedelta64 if parsing succeeded - - Examples - -------- - - Parsing a single string to a Timedelta: - - >>> pd.to_timedelta('1 days 06:05:01.00003') - Timedelta('1 days 06:05:01.000030') - >>> pd.to_timedelta('15.5us') - Timedelta('0 days 00:00:00.000015') - - Parsing a list or array of strings: - - >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) - TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT], - dtype='timedelta64[ns]', freq=None) - - Converting numbers by specifying the `unit` keyword argument: - - >>> pd.to_timedelta(np.arange(5), unit='s') - TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02', - '00:00:03', '00:00:04'], - dtype='timedelta64[ns]', freq=None) - >>> pd.to_timedelta(np.arange(5), unit='d') - TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], - dtype='timedelta64[ns]', freq=None) - - See also - -------- - pandas.DataFrame.astype : Cast argument to a specified dtype. - pandas.to_datetime : Convert argument to datetime. 
- """ - unit = _validate_timedelta_unit(unit) - - if errors not in ('ignore', 'raise', 'coerce'): - raise ValueError("errors must be one of 'ignore', " - "'raise', or 'coerce'}") - - if arg is None: - return arg - elif isinstance(arg, ABCSeries): - from pandas import Series - values = _convert_listlike(arg._values, unit=unit, - box=False, errors=errors) - return Series(values, index=arg.index, name=arg.name) - elif isinstance(arg, ABCIndexClass): - return _convert_listlike(arg, unit=unit, box=box, - errors=errors, name=arg.name) - elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 0: - # extract array scalar and process below - arg = arg.item() - elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1: - return _convert_listlike(arg, unit=unit, box=box, errors=errors) - elif getattr(arg, 'ndim', 1) > 1: - raise TypeError('arg must be a string, timedelta, list, tuple, ' - '1-d array, or Series') - - # ...so it must be a scalar value. Return scalar. - return _coerce_scalar_to_timedelta_type(arg, unit=unit, - box=box, errors=errors) - - -_unit_map = { - 'Y': 'Y', - 'y': 'Y', - 'W': 'W', - 'w': 'W', - 'D': 'D', - 'd': 'D', - 'days': 'D', - 'Days': 'D', - 'day': 'D', - 'Day': 'D', - 'M': 'M', - 'H': 'h', - 'h': 'h', - 'm': 'm', - 'T': 'm', - 'S': 's', - 's': 's', - 'L': 'ms', - 'MS': 'ms', - 'ms': 'ms', - 'US': 'us', - 'us': 'us', - 'NS': 'ns', - 'ns': 'ns', -} - - -def _validate_timedelta_unit(arg): - """ provide validation / translation for timedelta short units """ - try: - return _unit_map[arg] - except: - if arg is None: - return 'ns' - raise ValueError("invalid timedelta unit {arg} provided" - .format(arg=arg)) - - -def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): - """Convert string 'r' to a timedelta object.""" - - try: - result = convert_to_timedelta64(r, unit) - except ValueError: - if errors == 'raise': - raise - elif errors == 'ignore': - return r - - # coerce - result = pd.NaT - - if box: - result = tslib.Timedelta(result) - 
return result - - -def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): - """Convert a list of objects to a timedelta index object.""" - - if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'): - arg = np.array(list(arg), dtype='O') - - # these are shortcut-able - if is_timedelta64_dtype(arg): - value = arg.astype('timedelta64[ns]') - elif is_integer_dtype(arg): - value = arg.astype('timedelta64[{unit}]'.format(unit=unit)).astype( - 'timedelta64[ns]', copy=False) - else: - try: - value = array_to_timedelta64(_ensure_object(arg), - unit=unit, errors=errors) - value = value.astype('timedelta64[ns]', copy=False) - except ValueError: - if errors == 'ignore': - return arg - else: - # This else-block accounts for the cases when errors='raise' - # and errors='coerce'. If errors == 'raise', these errors - # should be raised. If errors == 'coerce', we shouldn't - # expect any errors to be raised, since all parsing errors - # cause coercion to pd.NaT. However, if an error / bug is - # introduced that causes an Exception to be raised, we would - # like to surface it. 
- raise - - if box: - from pandas import TimedeltaIndex - value = TimedeltaIndex(value, unit='ns', name=name) - return value +from pandas._libs.tslibs.timedeltas import to_timedelta # noqa diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index cfdb18cefee64..ee408ed4cd9d0 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -891,7 +891,7 @@ def test_operators_timedelta64(self): mixed['F'] = Timestamp('20130101') # results in an object array - from pandas.core.tools.timedeltas import ( + from pandas._libs.tslibs.timedeltas import ( _coerce_scalar_to_timedelta_type as _coerce) result = mixed.min() diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 17c818779c76d..08341b2e2ed1a 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -6,10 +6,13 @@ import pandas as pd import pandas.util.testing as tm -from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct -from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series, - to_timedelta, compat) + +from pandas import TimedeltaIndex, timedelta_range, Series, compat from pandas._libs.tslib import iNaT, NaT +from pandas._libs.tslibs.timedeltas import ( + Timedelta, + to_timedelta, + _coerce_scalar_to_timedelta_type as ct) class TestTimedeltaArithmetic(object):
Moves remaining contents of core.tools.timedeltas to tslibs.timedeltas. Several can be cut/paste directly. A few changes were made in moving `_convert_listlike` and `to_timedelta` up into tslibs.timedeltas. - instead of using `is_list_like` (not implemented in cython), directly exclude str-like case and then check `hasattr(arg, '__iter__')`. - instead of using `is_timedelta64_dtype(arg)`, check that arg is an ndarray and then use `arg.dtype.kind == 'm'`. Analogous for `is_integer_dtype`. Updates imports.
https://api.github.com/repos/pandas-dev/pandas/pulls/18194
2017-11-09T16:54:55Z
2017-11-09T17:57:18Z
null
2017-11-12T23:04:39Z
DOC: Remove vendored IPython.sphinext
diff --git a/.gitignore b/.gitignore index ff0a6aef47163..0c2058ffcdd71 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,4 @@ doc/build/html/index.html doc/tmp.sv doc/source/styled.xlsx doc/source/templates/ +doc/source/savefig/ diff --git a/doc/source/conf.py b/doc/source/conf.py index e006f1809da5a..6d85f64317e7c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -56,9 +56,8 @@ 'sphinx.ext.extlinks', 'sphinx.ext.todo', 'numpydoc', # used to parse numpy-style docstrings for autodoc - 'ipython_sphinxext.ipython_directive', - 'ipython_sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_console_highlighting', # lowercase didn't work + 'IPython.sphinxext.ipython_directive', + 'IPython.sphinxext.ipython_console_highlighting', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', diff --git a/doc/source/whatsnew/v0.7.3.txt b/doc/source/whatsnew/v0.7.3.txt index 6b5199c55cbf5..77cc72d8707cf 100644 --- a/doc/source/whatsnew/v0.7.3.txt +++ b/doc/source/whatsnew/v0.7.3.txt @@ -22,7 +22,7 @@ New features from pandas.tools.plotting import scatter_matrix scatter_matrix(df, alpha=0.2) -.. image:: _static/scatter_matrix_kde.png +.. image:: savefig/scatter_matrix_kde.png :width: 5in - Add ``stacked`` argument to Series and DataFrame's ``plot`` method for @@ -32,14 +32,14 @@ New features df.plot(kind='bar', stacked=True) -.. image:: _static/bar_plot_stacked_ex.png +.. image:: savefig/bar_plot_stacked_ex.png :width: 4in .. code-block:: python df.plot(kind='barh', stacked=True) -.. image:: _static/barh_plot_stacked_ex.png +.. 
image:: savefig/barh_plot_stacked_ex.png :width: 4in - Add log x and y :ref:`scaling options <visualization.basic>` to diff --git a/doc/sphinxext/ipython_sphinxext/__init__.py b/doc/sphinxext/ipython_sphinxext/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py b/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py deleted file mode 100644 index dfb489e49394d..0000000000000 --- a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py +++ /dev/null @@ -1,116 +0,0 @@ -"""reST directive for syntax-highlighting ipython interactive sessions. - -XXX - See what improvements can be made based on the new (as of Sept 2009) -'pycon' lexer for the python console. At the very least it will give better -highlighted tracebacks. -""" - -#----------------------------------------------------------------------------- -# Needed modules - -# Standard library -import re - -# Third party -from pygments.lexer import Lexer, do_insertions -from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer, - PythonTracebackLexer) -from pygments.token import Comment, Generic - -from sphinx import highlighting - -#----------------------------------------------------------------------------- -# Global constants -line_re = re.compile('.*?\n') - -#----------------------------------------------------------------------------- -# Code begins - classes and functions - - -class IPythonConsoleLexer(Lexer): - - """ - For IPython console output or doctests, such as: - - .. sourcecode:: ipython - - In [1]: a = 'foo' - - In [2]: a - Out[2]: 'foo' - - In [3]: print(a) - foo - - In [4]: 1 / 0 - - Notes: - - - Tracebacks are not currently supported. - - - It assumes the default IPython prompts, not customized ones. 
- """ - - name = 'IPython console session' - aliases = ['ipython'] - mimetypes = ['text/x-ipython-console'] - input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)") - output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)") - continue_prompt = re.compile(" \.\.\.+:") - tb_start = re.compile("\-+") - - def get_tokens_unprocessed(self, text): - pylexer = PythonLexer(**self.options) - tblexer = PythonTracebackLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - input_prompt = self.input_prompt.match(line) - continue_prompt = self.continue_prompt.match(line.rstrip()) - output_prompt = self.output_prompt.match(line) - if line.startswith("#"): - insertions.append((len(curcode), - [(0, Comment, line)])) - elif input_prompt is not None: - insertions.append((len(curcode), - [(0, Generic.Prompt, input_prompt.group())])) - curcode += line[input_prompt.end():] - elif continue_prompt is not None: - insertions.append((len(curcode), - [(0, Generic.Prompt, continue_prompt.group())])) - curcode += line[continue_prompt.end():] - elif output_prompt is not None: - # Use the 'error' token for output. We should probably make - # our own token, but error is typicaly in a bright color like - # red, so it works fine for our output prompts. - insertions.append((len(curcode), - [(0, Generic.Error, output_prompt.group())])) - curcode += line[output_prompt.end():] - else: - if curcode: - for item in do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)): - yield item - - -def setup(app): - """Setup as a sphinx extension.""" - - # This is only a lexer, so adding it below to pygments appears sufficient. - # But if somebody knows that the right API usage should be to do that via - # sphinx, by all means fix it here. 
At least having this setup.py - # suppresses the sphinx warning we'd get without it. - pass - -#----------------------------------------------------------------------------- -# Register the extension as a valid pygments lexer -highlighting.lexers['ipython'] = IPythonConsoleLexer() diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py deleted file mode 100644 index 922767a8e2d46..0000000000000 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ /dev/null @@ -1,1091 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Sphinx directive to support embedded IPython code. - -This directive allows pasting of entire interactive IPython sessions, prompts -and all, and their code will actually get re-executed at doc build time, with -all prompts renumbered sequentially. It also allows you to input code as a pure -python input by giving the argument python to the directive. The output looks -like an interactive ipython section. - -To enable this directive, simply list it in your Sphinx ``conf.py`` file -(making sure the directory where you placed it is visible to sphinx, as is -needed for all Sphinx directives). For example, to enable syntax highlighting -and the IPython directive:: - - extensions = ['IPython.sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_directive'] - -The IPython directive outputs code-blocks with the language 'ipython'. So -if you do not have the syntax highlighting extension enabled as well, then -all rendered code-blocks will be uncolored. By default this directive assumes -that your prompts are unchanged IPython ones, but this can be customized. -The configurable options that can be placed in conf.py are: - -ipython_savefig_dir: - The directory in which to save the figures. This is relative to the - Sphinx source directory. The default is `html_static_path`. -ipython_rgxin: - The compiled regular expression to denote the start of IPython input - lines. 
The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You - shouldn't need to change this. -ipython_rgxout: - The compiled regular expression to denote the start of IPython output - lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You - shouldn't need to change this. -ipython_promptin: - The string to represent the IPython input prompt in the generated ReST. - The default is 'In [%d]:'. This expects that the line numbers are used - in the prompt. -ipython_promptout: - The string to represent the IPython prompt in the generated ReST. The - default is 'Out [%d]:'. This expects that the line numbers are used - in the prompt. -ipython_mplbackend: - The string which specifies if the embedded Sphinx shell should import - Matplotlib and set the backend. The value specifies a backend that is - passed to `matplotlib.use()` before any lines in `ipython_execlines` are - executed. If not specified in conf.py, then the default value of 'agg' is - used. To use the IPython directive without matplotlib as a dependency, set - the value to `None`. It may end up that matplotlib is still imported - if the user specifies so in `ipython_execlines` or makes use of the - @savefig pseudo decorator. -ipython_execlines: - A list of strings to be exec'd in the embedded Sphinx shell. Typical - usage is to make certain packages always available. Set this to an empty - list if you wish to have no imports always available. If specified in - conf.py as `None`, then it has the effect of making no imports available. - If omitted from conf.py altogether, then the default value of - ['import numpy as np', 'import matplotlib.pyplot as plt'] is used. -ipython_holdcount - When the @suppress pseudo-decorator is used, the execution count can be - incremented or not. The default behavior is to hold the execution count, - corresponding to a value of `True`. Set this to `False` to increment - the execution count after each suppressed command. 
- -As an example, to use the IPython directive when `matplotlib` is not available, -one sets the backend to `None`:: - - ipython_mplbackend = None - -An example usage of the directive is: - -.. code-block:: rst - - .. ipython:: - - In [1]: x = 1 - - In [2]: y = x**2 - - In [3]: print(y) - -See http://matplotlib.org/sampledoc/ipython_directive.html for additional -documentation. - -ToDo ----- - -- Turn the ad-hoc test() function into a real test suite. -- Break up ipython-specific functionality from matplotlib stuff into better - separated code. - -Authors -------- - -- John D Hunter: orignal author. -- Fernando Perez: refactoring, documentation, cleanups, port to 0.11. -- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations. -- Skipper Seabold, refactoring, cleanups, pure python addition -""" -from __future__ import print_function -from __future__ import unicode_literals - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -# Stdlib -import os -import re -import sys -import tempfile -import ast -from pandas.compat import zip, range, map, lmap, u, text_type, cStringIO as StringIO -import warnings - -# To keep compatibility with various python versions -try: - from hashlib import md5 -except ImportError: - from md5 import md5 - -# Third-party -import sphinx -from docutils.parsers.rst import directives -from docutils import nodes -from sphinx.util.compat import Directive - -# Our own -try: - from traitlets.config import Config -except ImportError: - from IPython import Config -from IPython import InteractiveShell -from IPython.core.profiledir import ProfileDir -from IPython.utils import io -from IPython.utils.py3compat import PY3 - -if PY3: - from io import StringIO -else: - from StringIO import StringIO - -#----------------------------------------------------------------------------- -# Globals 
-#----------------------------------------------------------------------------- -# for tokenizing blocks -COMMENT, INPUT, OUTPUT = range(3) - -#----------------------------------------------------------------------------- -# Functions and class declarations -#----------------------------------------------------------------------------- - -def block_parser(part, rgxin, rgxout, fmtin, fmtout): - """ - part is a string of ipython text, comprised of at most one - input, one ouput, comments, and blank lines. The block parser - parses the text into a list of:: - - blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] - - where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and - data is, depending on the type of token:: - - COMMENT : the comment string - - INPUT: the (DECORATOR, INPUT_LINE, REST) where - DECORATOR: the input decorator (or None) - INPUT_LINE: the input as string (possibly multi-line) - REST : any stdout generated by the input line (not OUTPUT) - - OUTPUT: the output string, possibly multi-line - - """ - block = [] - lines = part.split('\n') - N = len(lines) - i = 0 - decorator = None - while 1: - - if i==N: - # nothing left to parse -- the last line - break - - line = lines[i] - i += 1 - line_stripped = line.strip() - if line_stripped.startswith('#'): - block.append((COMMENT, line)) - continue - - if line_stripped.startswith('@'): - # we're assuming at most one decorator -- may need to - # rethink - decorator = line_stripped - continue - - # does this look like an input line? - matchin = rgxin.match(line) - if matchin: - lineno, inputline = int(matchin.group(1)), matchin.group(2) - - # the ....: continuation string - continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) - Nc = len(continuation) - # input lines can continue on for more than one line, if - # we have a '\' line continuation char or a function call - # echo line 'print'. 
The input line can only be - # terminated by the end of the block or an output line, so - # we parse out the rest of the input line if it is - # multiline as well as any echo text - - rest = [] - while i<N: - - # look ahead; if the next line is blank, or a comment, or - # an output line, we're done - - nextline = lines[i] - matchout = rgxout.match(nextline) - #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation)) - if matchout or nextline.startswith('#'): - break - elif nextline.startswith(continuation): - nextline = nextline[Nc:] - if nextline and nextline[0] == ' ': - nextline = nextline[1:] - - inputline += '\n' + nextline - - else: - rest.append(nextline) - i+= 1 - - block.append((INPUT, (decorator, inputline, '\n'.join(rest)))) - continue - - # if it looks like an output line grab all the text to the end - # of the block - matchout = rgxout.match(line) - if matchout: - lineno, output = int(matchout.group(1)), matchout.group(2) - if i<N-1: - output = '\n'.join([output] + lines[i:]) - - block.append((OUTPUT, output)) - break - - return block - - -class DecodingStringIO(StringIO, object): - def __init__(self,buf='',encodings=('utf8',), *args, **kwds): - super(DecodingStringIO, self).__init__(buf, *args, **kwds) - self.set_encodings(encodings) - - def set_encodings(self, encodings): - self.encodings = encodings - - def write(self,data): - if isinstance(data, text_type): - return super(DecodingStringIO, self).write(data) - else: - for enc in self.encodings: - try: - data = data.decode(enc) - return super(DecodingStringIO, self).write(data) - except : - pass - # default to brute utf8 if no encoding succeded - return super(DecodingStringIO, self).write(data.decode('utf8', 'replace')) - - -class EmbeddedSphinxShell(object): - """An embedded IPython instance to run inside Sphinx""" - - def __init__(self, exec_lines=None,state=None): - - self.cout = DecodingStringIO(u'') - - if exec_lines is None: - exec_lines = [] - 
- self.state = state - - # Create config object for IPython - config = Config() - config.InteractiveShell.autocall = False - config.InteractiveShell.autoindent = False - config.InteractiveShell.colors = 'NoColor' - - # create a profile so instance history isn't saved - tmp_profile_dir = tempfile.mkdtemp(prefix='profile_') - profname = 'auto_profile_sphinx_build' - pdir = os.path.join(tmp_profile_dir,profname) - profile = ProfileDir.create_profile_dir(pdir) - - # Create and initialize global ipython, but don't start its mainloop. - # This will persist across different EmbededSphinxShell instances. - IP = InteractiveShell.instance(config=config, profile_dir=profile) - - # io.stdout redirect must be done after instantiating InteractiveShell - io.stdout = self.cout - io.stderr = self.cout - - # For debugging, so we can see normal output, use this: - #from IPython.utils.io import Tee - #io.stdout = Tee(self.cout, channel='stdout') # dbg - #io.stderr = Tee(self.cout, channel='stderr') # dbg - - # Store a few parts of IPython we'll need. - self.IP = IP - self.user_ns = self.IP.user_ns - self.user_global_ns = self.IP.user_global_ns - - self.input = '' - self.output = '' - - self.is_verbatim = False - self.is_doctest = False - self.is_suppress = False - - # Optionally, provide more detailed information to shell. - self.directive = None - - # on the first call to the savefig decorator, we'll import - # pyplot as plt so we can make a call to the plt.gcf().savefig - self._pyplot_imported = False - - # Prepopulate the namespace. 
- for line in exec_lines: - self.process_input_line(line, store_history=False) - - def clear_cout(self): - self.cout.seek(0) - self.cout.truncate(0) - - def process_input_line(self, line, store_history=True): - """process the input, capturing stdout""" - - stdout = sys.stdout - splitter = self.IP.input_splitter - try: - sys.stdout = self.cout - splitter.push(line) - more = splitter.push_accepts_more() - if not more: - try: - source_raw = splitter.source_raw_reset()[1] - except: - # recent ipython #4504 - source_raw = splitter.raw_reset() - self.IP.run_cell(source_raw, store_history=store_history) - finally: - sys.stdout = stdout - - def process_image(self, decorator): - """ - # build out an image directive like - # .. image:: somefile.png - # :width 4in - # - # from an input like - # savefig somefile.png width=4in - """ - savefig_dir = self.savefig_dir - source_dir = self.source_dir - saveargs = decorator.split(' ') - filename = saveargs[1] - # insert relative path to image file in source - outfile = os.path.relpath(os.path.join(savefig_dir,filename), - source_dir) - - imagerows = ['.. image:: %s'%outfile] - - for kwarg in saveargs[2:]: - arg, val = kwarg.split('=') - arg = arg.strip() - val = val.strip() - imagerows.append(' :%s: %s'%(arg, val)) - - image_file = os.path.basename(outfile) # only return file name - image_directive = '\n'.join(imagerows) - return image_file, image_directive - - # Callbacks for each type of token - def process_input(self, data, input_prompt, lineno): - """ - Process data block for INPUT token. 
- - """ - decorator, input, rest = data - image_file = None - image_directive = None - - is_verbatim = decorator=='@verbatim' or self.is_verbatim - is_doctest = (decorator is not None and \ - decorator.startswith('@doctest')) or self.is_doctest - is_suppress = decorator=='@suppress' or self.is_suppress - is_okexcept = decorator=='@okexcept' or self.is_okexcept - is_okwarning = decorator=='@okwarning' or self.is_okwarning - is_savefig = decorator is not None and \ - decorator.startswith('@savefig') - - # set the encodings to be used by DecodingStringIO - # to convert the execution output into unicode if - # needed. this attrib is set by IpythonDirective.run() - # based on the specified block options, defaulting to ['ut - self.cout.set_encodings(self.output_encoding) - - input_lines = input.split('\n') - - if len(input_lines) > 1: - if input_lines[-1] != "": - input_lines.append('') # make sure there's a blank line - # so splitter buffer gets reset - - continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) - - if is_savefig: - image_file, image_directive = self.process_image(decorator) - - ret = [] - is_semicolon = False - - # Hold the execution count, if requested to do so. 
- if is_suppress and self.hold_count: - store_history = False - else: - store_history = True - - # Note: catch_warnings is not thread safe - with warnings.catch_warnings(record=True) as ws: - for i, line in enumerate(input_lines): - if line.endswith(';'): - is_semicolon = True - - if i == 0: - # process the first input line - if is_verbatim: - self.process_input_line('') - self.IP.execution_count += 1 # increment it anyway - else: - # only submit the line in non-verbatim mode - self.process_input_line(line, store_history=store_history) - formatted_line = '%s %s'%(input_prompt, line) - else: - # process a continuation line - if not is_verbatim: - self.process_input_line(line, store_history=store_history) - - formatted_line = '%s %s'%(continuation, line) - - if not is_suppress: - ret.append(formatted_line) - - if not is_suppress and len(rest.strip()) and is_verbatim: - # the "rest" is the standard output of the - # input, which needs to be added in - # verbatim mode - ret.append(rest) - - self.cout.seek(0) - output = self.cout.read() - if not is_suppress and not is_semicolon: - ret.append(output) - elif is_semicolon: # get spacing right - ret.append('') - - # context information - filename = self.state.document.current_source - lineno = self.state.document.current_line - - # output any exceptions raised during execution to stdout - # unless :okexcept: has been specified. - if not is_okexcept and "Traceback" in output: - s = "\nException in %s at block ending on line %s\n" % (filename, lineno) - s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n" - sys.stdout.write('\n\n>>>' + ('-' * 73)) - sys.stdout.write(s) - sys.stdout.write(output) - sys.stdout.write('<<<' + ('-' * 73) + '\n\n') - - # output any warning raised during execution to stdout - # unless :okwarning: has been specified. 
- if not is_okwarning: - for w in ws: - s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno) - s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n" - sys.stdout.write('\n\n>>>' + ('-' * 73)) - sys.stdout.write(s) - sys.stdout.write('-' * 76 + '\n') - s=warnings.formatwarning(w.message, w.category, - w.filename, w.lineno, w.line) - sys.stdout.write(s) - sys.stdout.write('<<<' + ('-' * 73) + '\n') - - self.cout.truncate(0) - return (ret, input_lines, output, is_doctest, decorator, image_file, - image_directive) - - - def process_output(self, data, output_prompt, - input_lines, output, is_doctest, decorator, image_file): - """ - Process data block for OUTPUT token. - - """ - TAB = ' ' * 4 - - if is_doctest and output is not None: - - found = output - found = found.strip() - submitted = data.strip() - - if self.directive is None: - source = 'Unavailable' - content = 'Unavailable' - else: - source = self.directive.state.document.current_source - content = self.directive.content - # Add tabs and join into a single string. - content = '\n'.join([TAB + line for line in content]) - - # Make sure the output contains the output prompt. - ind = found.find(output_prompt) - if ind < 0: - e = ('output does not contain output prompt\n\n' - 'Document source: {0}\n\n' - 'Raw content: \n{1}\n\n' - 'Input line(s):\n{TAB}{2}\n\n' - 'Output line(s):\n{TAB}{3}\n\n') - e = e.format(source, content, '\n'.join(input_lines), - repr(found), TAB=TAB) - raise RuntimeError(e) - found = found[len(output_prompt):].strip() - - # Handle the actual doctest comparison. 
- if decorator.strip() == '@doctest': - # Standard doctest - if found != submitted: - e = ('doctest failure\n\n' - 'Document source: {0}\n\n' - 'Raw content: \n{1}\n\n' - 'On input line(s):\n{TAB}{2}\n\n' - 'we found output:\n{TAB}{3}\n\n' - 'instead of the expected:\n{TAB}{4}\n\n') - e = e.format(source, content, '\n'.join(input_lines), - repr(found), repr(submitted), TAB=TAB) - raise RuntimeError(e) - else: - self.custom_doctest(decorator, input_lines, found, submitted) - - def process_comment(self, data): - """Process data fPblock for COMMENT token.""" - if not self.is_suppress: - return [data] - - def save_image(self, image_file): - """ - Saves the image file to disk. - """ - self.ensure_pyplot() - command = ('plt.gcf().savefig("%s", bbox_inches="tight", ' - 'dpi=100)' % image_file) - - #print 'SAVEFIG', command # dbg - self.process_input_line('bookmark ipy_thisdir', store_history=False) - self.process_input_line('cd -b ipy_savedir', store_history=False) - self.process_input_line(command, store_history=False) - self.process_input_line('cd -b ipy_thisdir', store_history=False) - self.process_input_line('bookmark -d ipy_thisdir', store_history=False) - self.clear_cout() - - def process_block(self, block): - """ - process block from the block_parser and return a list of processed lines - """ - ret = [] - output = None - input_lines = None - lineno = self.IP.execution_count - - input_prompt = self.promptin % lineno - output_prompt = self.promptout % lineno - image_file = None - image_directive = None - - for token, data in block: - if token == COMMENT: - out_data = self.process_comment(data) - elif token == INPUT: - (out_data, input_lines, output, is_doctest, decorator, - image_file, image_directive) = \ - self.process_input(data, input_prompt, lineno) - elif token == OUTPUT: - out_data = \ - self.process_output(data, output_prompt, - input_lines, output, is_doctest, - decorator, image_file) - if out_data: - ret.extend(out_data) - - # save the image files - if 
image_file is not None: - self.save_image(image_file) - - return ret, image_directive - - def ensure_pyplot(self): - """ - Ensures that pyplot has been imported into the embedded IPython shell. - - Also, makes sure to set the backend appropriately if not set already. - - """ - # We are here if the @figure pseudo decorator was used. Thus, it's - # possible that we could be here even if python_mplbackend were set to - # `None`. That's also strange and perhaps worthy of raising an - # exception, but for now, we just set the backend to 'agg'. - - if not self._pyplot_imported: - if 'matplotlib.backends' not in sys.modules: - # Then ipython_matplotlib was set to None but there was a - # call to the @figure decorator (and ipython_execlines did - # not set a backend). - #raise Exception("No backend was set, but @figure was used!") - import matplotlib - matplotlib.use('agg') - - # Always import pyplot into embedded shell. - self.process_input_line('import matplotlib.pyplot as plt', - store_history=False) - self._pyplot_imported = True - - def process_pure_python(self, content): - """ - content is a list of strings. 
it is unedited directive content - - This runs it line by line in the InteractiveShell, prepends - prompts as needed capturing stderr and stdout, then returns - the content as a list as if it were ipython code - """ - output = [] - savefig = False # keep up with this to clear figure - multiline = False # to handle line continuation - multiline_start = None - fmtin = self.promptin - - ct = 0 - - for lineno, line in enumerate(content): - - line_stripped = line.strip() - if not len(line): - output.append(line) - continue - - # handle decorators - if line_stripped.startswith('@'): - output.extend([line]) - if 'savefig' in line: - savefig = True # and need to clear figure - continue - - # handle comments - if line_stripped.startswith('#'): - output.extend([line]) - continue - - # deal with lines checking for multiline - continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2)) - if not multiline: - modified = u"%s %s" % (fmtin % ct, line_stripped) - output.append(modified) - ct += 1 - try: - ast.parse(line_stripped) - output.append(u'') - except Exception: # on a multiline - multiline = True - multiline_start = lineno - else: # still on a multiline - modified = u'%s %s' % (continuation, line) - output.append(modified) - - # if the next line is indented, it should be part of multiline - if len(content) > lineno + 1: - nextline = content[lineno + 1] - if len(nextline) - len(nextline.lstrip()) > 3: - continue - try: - mod = ast.parse( - '\n'.join(content[multiline_start:lineno+1])) - if isinstance(mod.body[0], ast.FunctionDef): - # check to see if we have the whole function - for element in mod.body[0].body: - if isinstance(element, ast.Return): - multiline = False - else: - output.append(u'') - multiline = False - except Exception: - pass - - if savefig: # clear figure if plotted - self.ensure_pyplot() - self.process_input_line('plt.clf()', store_history=False) - self.clear_cout() - savefig = False - - return output - - def custom_doctest(self, decorator, input_lines, 
found, submitted): - """ - Perform a specialized doctest. - - """ - from .custom_doctests import doctests - - args = decorator.split() - doctest_type = args[1] - if doctest_type in doctests: - doctests[doctest_type](self, args, input_lines, found, submitted) - else: - e = "Invalid option to @doctest: {0}".format(doctest_type) - raise Exception(e) - - -class IPythonDirective(Directive): - - has_content = True - required_arguments = 0 - optional_arguments = 4 # python, suppress, verbatim, doctest - final_argumuent_whitespace = True - option_spec = { 'python': directives.unchanged, - 'suppress' : directives.flag, - 'verbatim' : directives.flag, - 'doctest' : directives.flag, - 'okexcept': directives.flag, - 'okwarning': directives.flag, - 'output_encoding': directives.unchanged_required - } - - shell = None - - seen_docs = set() - - def get_config_options(self): - # contains sphinx configuration variables - config = self.state.document.settings.env.config - - # get config variables to set figure output directory - confdir = self.state.document.settings.env.app.confdir - savefig_dir = config.ipython_savefig_dir - source_dir = os.path.dirname(self.state.document.current_source) - if savefig_dir is None: - savefig_dir = config.html_static_path - if isinstance(savefig_dir, list): - savefig_dir = savefig_dir[0] # safe to assume only one path? - savefig_dir = os.path.join(confdir, savefig_dir) - - # get regex and prompt stuff - rgxin = config.ipython_rgxin - rgxout = config.ipython_rgxout - promptin = config.ipython_promptin - promptout = config.ipython_promptout - mplbackend = config.ipython_mplbackend - exec_lines = config.ipython_execlines - hold_count = config.ipython_holdcount - - return (savefig_dir, source_dir, rgxin, rgxout, - promptin, promptout, mplbackend, exec_lines, hold_count) - - def setup(self): - # Get configuration values. 
- (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, - mplbackend, exec_lines, hold_count) = self.get_config_options() - - if self.shell is None: - # We will be here many times. However, when the - # EmbeddedSphinxShell is created, its interactive shell member - # is the same for each instance. - - if mplbackend and 'matplotlib.backends' not in sys.modules: - import matplotlib - # Repeated calls to use() will not hurt us since `mplbackend` - # is the same each time. - matplotlib.use(mplbackend) - - # Must be called after (potentially) importing matplotlib and - # setting its backend since exec_lines might import pylab. - self.shell = EmbeddedSphinxShell(exec_lines, self.state) - - # Store IPython directive to enable better error messages - self.shell.directive = self - - # reset the execution count if we haven't processed this doc - #NOTE: this may be borked if there are multiple seen_doc tmp files - #check time stamp? - if self.state.document.current_source not in self.seen_docs: - self.shell.IP.history_manager.reset() - self.shell.IP.execution_count = 1 - try: - self.shell.IP.prompt_manager.width = 0 - except AttributeError: - # GH14003: class promptManager has removed after IPython 5.x - pass - self.seen_docs.add(self.state.document.current_source) - - # and attach to shell so we don't have to pass them around - self.shell.rgxin = rgxin - self.shell.rgxout = rgxout - self.shell.promptin = promptin - self.shell.promptout = promptout - self.shell.savefig_dir = savefig_dir - self.shell.source_dir = source_dir - self.shell.hold_count = hold_count - - # setup bookmark for saving figures directory - self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir, - store_history=False) - self.shell.clear_cout() - - return rgxin, rgxout, promptin, promptout - - def teardown(self): - # delete last bookmark - self.shell.process_input_line('bookmark -d ipy_savedir', - store_history=False) - self.shell.clear_cout() - - def run(self): - debug = False - - 
#TODO, any reason block_parser can't be a method of embeddable shell - # then we wouldn't have to carry these around - rgxin, rgxout, promptin, promptout = self.setup() - - options = self.options - self.shell.is_suppress = 'suppress' in options - self.shell.is_doctest = 'doctest' in options - self.shell.is_verbatim = 'verbatim' in options - self.shell.is_okexcept = 'okexcept' in options - self.shell.is_okwarning = 'okwarning' in options - - self.shell.output_encoding = [options.get('output_encoding', 'utf8')] - - # handle pure python code - if 'python' in self.arguments: - content = self.content - self.content = self.shell.process_pure_python(content) - - parts = '\n'.join(self.content).split('\n\n') - - lines = ['.. code-block:: ipython', ''] - figures = [] - - for part in parts: - block = block_parser(part, rgxin, rgxout, promptin, promptout) - if len(block): - rows, figure = self.shell.process_block(block) - for row in rows: - lines.extend([' %s'%line for line in row.split('\n')]) - - if figure is not None: - figures.append(figure) - - for figure in figures: - lines.append('') - lines.extend(figure.split('\n')) - lines.append('') - - if len(lines)>2: - if debug: - print('\n'.join(lines)) - else: - # This has to do with input, not output. But if we comment - # these lines out, then no IPython code will appear in the - # final output. 
- self.state_machine.insert_input( - lines, self.state_machine.input_lines.source(0)) - - # cleanup - self.teardown() - - return [] - -# Enable as a proper Sphinx directive -def setup(app): - setup.app = app - - app.add_directive('ipython', IPythonDirective) - app.add_config_value('ipython_savefig_dir', None, 'env') - app.add_config_value('ipython_rgxin', - re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env') - app.add_config_value('ipython_rgxout', - re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env') - app.add_config_value('ipython_promptin', 'In [%d]:', 'env') - app.add_config_value('ipython_promptout', 'Out[%d]:', 'env') - - # We could just let matplotlib pick whatever is specified as the default - # backend in the matplotlibrc file, but this would cause issues if the - # backend didn't work in headless environments. For this reason, 'agg' - # is a good default backend choice. - app.add_config_value('ipython_mplbackend', 'agg', 'env') - - # If the user sets this config value to `None`, then EmbeddedSphinxShell's - # __init__ method will treat it as []. - execlines = ['import numpy as np', 'import matplotlib.pyplot as plt'] - app.add_config_value('ipython_execlines', execlines, 'env') - - app.add_config_value('ipython_holdcount', True, 'env') - -# Simple smoke test, needs to be converted to a proper automatic test. 
-def test(): - - examples = [ - r""" -In [9]: pwd -Out[9]: '/home/jdhunter/py4science/book' - -In [10]: cd bookdata/ -/home/jdhunter/py4science/book/bookdata - -In [2]: from pylab import * - -In [2]: ion() - -In [3]: im = imread('stinkbug.png') - -@savefig mystinkbug.png width=4in -In [4]: imshow(im) -Out[4]: <matplotlib.image.AxesImage object at 0x39ea850> - -""", - r""" - -In [1]: x = 'hello world' - -# string methods can be -# used to alter the string -@doctest -In [2]: x.upper() -Out[2]: 'HELLO WORLD' - -@verbatim -In [3]: x.st<TAB> -x.startswith x.strip -""", - r""" - -In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\ - .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv' - -In [131]: print url.split('&') -['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv'] - -In [60]: import urllib - -""", - r"""\ - -In [133]: import numpy.random - -@suppress -In [134]: numpy.random.seed(2358) - -@doctest -In [135]: numpy.random.rand(10,2) -Out[135]: -array([[ 0.64524308, 0.59943846], - [ 0.47102322, 0.8715456 ], - [ 0.29370834, 0.74776844], - [ 0.99539577, 0.1313423 ], - [ 0.16250302, 0.21103583], - [ 0.81626524, 0.1312433 ], - [ 0.67338089, 0.72302393], - [ 0.7566368 , 0.07033696], - [ 0.22591016, 0.77731835], - [ 0.0072729 , 0.34273127]]) - -""", - - r""" -In [106]: print x -jdh - -In [109]: for i in range(10): - .....: print i - .....: - .....: -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -""", - - r""" - -In [144]: from pylab import * - -In [145]: ion() - -# use a semicolon to suppress the output -@savefig test_hist.png width=4in -In [151]: hist(np.random.randn(10000), 100); - - -@savefig test_plot.png width=4in -In [151]: plot(np.random.randn(10000), 'o'); - """, - - r""" -# use a semicolon to suppress the output -In [151]: plt.clf() - -@savefig plot_simple.png width=4in -In [151]: plot([1,2,3]) - -@savefig hist_simple.png width=4in -In [151]: hist(np.random.randn(10000), 100); - -""", - r""" 
-# update the current fig -In [151]: ylabel('number') - -In [152]: title('normal distribution') - - -@savefig hist_with_text.png -In [153]: grid(True) - -@doctest float -In [154]: 0.1 + 0.2 -Out[154]: 0.3 - -@doctest float -In [155]: np.arange(16).reshape(4,4) -Out[155]: -array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - -In [1]: x = np.arange(16, dtype=float).reshape(4,4) - -In [2]: x[0,0] = np.inf - -In [3]: x[0,1] = np.nan - -@doctest float -In [4]: x -Out[4]: -array([[ inf, nan, 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - - - """, - ] - # skip local-file depending first example: - examples = examples[1:] - - #ipython_directive.DEBUG = True # dbg - #options = dict(suppress=True) # dbg - options = dict() - for example in examples: - content = example.split('\n') - IPythonDirective('debug', arguments=None, options=options, - content=content, lineno=0, - content_offset=None, block_text=None, - state=None, state_machine=None, - ) - -# Run test suite as a script -if __name__=='__main__': - if not os.path.isdir('_static'): - os.mkdir('_static') - test() - print('All OK? Check figures in _static/')
https://api.github.com/repos/pandas-dev/pandas/pulls/18193
2017-11-09T16:51:43Z
2017-11-09T22:48:06Z
2017-11-09T22:48:06Z
2017-12-20T16:10:46Z
Fix for #18178 and #18187 by changing the concat of empty RangeIndex
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index ffabc7dfe81ac..185f08514641f 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -61,6 +61,7 @@ Bug Fixes - Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) - Bug in ``pd.Series.rolling.skew()`` and ``rolling.kurt()`` with all equal values has floating issue (:issue:`18044`) - Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) +- Bug in ``pd.concat`` when empty and non-empty DataFrames or Series are concatenated (:issue:`18178` :issue:`18187`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4e15aa50e4319..dc4d819383dfb 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -574,9 +574,10 @@ def _concat_rangeindex_same_dtype(indexes): start = step = next = None - for obj in indexes: - if not len(obj): - continue + # Filter the empty indexes + non_empty_indexes = [obj for obj in indexes if len(obj)] + + for obj in non_empty_indexes: if start is None: # This is set by the first non-empty index @@ -599,8 +600,16 @@ def _concat_rangeindex_same_dtype(indexes): if step is not None: next = obj[-1] + step - if start is None: + if non_empty_indexes: + # Get the stop value from "next" or alternatively + # from the last non-empty index + stop = non_empty_indexes[-1]._stop if next is None else next + else: + # Here all "indexes" had 0 length, i.e. were empty. + # Simply take start, stop, and step from the last empty index. 
+ obj = indexes[-1] start = obj._start step = obj._step - stop = obj._stop if next is None else next + stop = obj._stop + return indexes[0].__class__(start, stop, step) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index c9c294e70e7b1..fd5b4611e58d6 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1983,3 +1983,21 @@ def test_concat_will_upcast(dt, pdt): pdt(np.array([5], dtype=dt, ndmin=dims))] x = pd.concat(dfs) assert x.values.dtype == 'float64' + + +def test_concat_empty_and_non_empty_frame_regression(): + # GH 18178 regression test + df1 = pd.DataFrame({'foo': [1]}) + df2 = pd.DataFrame({'foo': []}) + expected = pd.DataFrame({'foo': [1.0]}) + result = pd.concat([df1, df2]) + assert_frame_equal(result, expected) + + +def test_concat_empty_and_non_empty_series_regression(): + # GH 18187 regression test + s1 = pd.Series([1]) + s2 = pd.Series([]) + expected = s1 + result = pd.concat([s1, s2]) + tm.assert_series_equal(result, expected)
The `_concat_rangeindex_same_dtype` function now keeps track of the last non-empty `RangeIndex` to extract the new stop value. This fixes two issues with concatenating non-empty and empty `DataFrames` and `Series`. Two regression tests were added as well. - closes #18178 and closes #18187 - 2 regression tests added - Submission passes `git diff master -u -- "*.py" | flake8 --diff` - 1 whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18191
2017-11-09T15:06:25Z
2017-11-10T13:53:43Z
2017-11-10T13:53:42Z
2017-12-11T20:24:29Z
Fix #17965 datetime64 comparison
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 793e9bf17bac9..d4db49b6d05aa 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -73,6 +73,7 @@ Conversion Indexing ^^^^^^^^ +- Bug in a boolean comparison of a ``datetime.datetime`` and a ``datetime64[ns]`` dtype Series (:issue:`17965`) - Bug where a ``MultiIndex`` with more than a million records was not raising ``AttributeError`` when trying to access a missing attribute (:issue:`18165`) - - diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 78eb7b3ae483e..f5d8a0da0112b 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -19,7 +19,7 @@ from hashtable cimport HashTable from pandas._libs import algos, period as periodlib, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta -from datetime import datetime, timedelta +from datetime import datetime, timedelta, date from cpython cimport PyTuple_Check, PyList_Check @@ -549,7 +549,7 @@ cpdef convert_scalar(ndarray arr, object value): if arr.descr.type_num == NPY_DATETIME: if isinstance(value, np.ndarray): pass - elif isinstance(value, datetime): + elif isinstance(value, (datetime, np.datetime64, date)): return Timestamp(value).value elif value is None or value != value: return iNaT diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 50ee88bd82f40..a9c26ebb90359 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -2,9 +2,10 @@ import pytest -from datetime import datetime +from datetime import datetime, date import numpy as np import pandas as pd +import operator as op from pandas import (DatetimeIndex, Series, DataFrame, date_range, Index, Timedelta, Timestamp) @@ -330,3 +331,21 @@ def test_loc_datetime_length_one(self): result = df.loc['2016-10-01T00:00:00':] tm.assert_frame_equal(result, df) + + 
@pytest.mark.parametrize('datetimelike', [ + Timestamp('20130101'), datetime(2013, 1, 1), + date(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')]) + @pytest.mark.parametrize('op,expected', [ + (op.lt, [True, False, False, False]), + (op.le, [True, True, False, False]), + (op.eq, [False, True, False, False]), + (op.gt, [False, False, False, True])]) + def test_selection_by_datetimelike(self, datetimelike, op, expected): + # GH issue #17965, test for ability to compare datetime64[ns] columns + # to datetimelike + df = DataFrame({'A': [pd.Timestamp('20120101'), + pd.Timestamp('20130101'), + np.nan, pd.Timestamp('20130103')]}) + result = op(df.A, datetimelike) + expected = Series(expected, name='A') + tm.assert_series_equal(result, expected)
- [x] closes #17965 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18188
2017-11-09T11:15:07Z
2017-11-14T13:04:35Z
2017-11-14T13:04:35Z
2017-12-11T20:23:38Z
sas7bdat: Check if the SAS file has zero variables
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 943b6bb84fb47..41b9fbe5a4838 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -90,6 +90,7 @@ Bug Fixes - Bug in ``pd.read_msgpack()`` with a non existent file is passed in Python 2 (:issue:`15296`) - Bug in ``DataFrame.groupby`` where key as tuple in a ``MultiIndex`` were interpreted as a list of keys (:issue:`17979`) - Bug in :func:`pd.read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) +- Bug in :func:`pd.read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` Conversion ^^^^^^^^^^ diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 2b3a91e2062b1..1d57093585ef2 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -17,6 +17,7 @@ import pandas as pd from pandas import compat from pandas.io.common import get_filepath_or_buffer, BaseIterator +from pandas.errors import EmptyDataError import numpy as np import struct import pandas.io.sas.sas_constants as const @@ -594,6 +595,9 @@ def read(self, nrows=None): elif nrows is None: nrows = self.row_count + if len(self.column_types) == 0: + raise EmptyDataError("No columns to parse from file") + if self._current_row_in_file_index >= self.row_count: return None diff --git a/pandas/tests/io/sas/data/zero_variables.sas7bdat b/pandas/tests/io/sas/data/zero_variables.sas7bdat new file mode 100644 index 0000000000000..85fec09447ec5 Binary files /dev/null and b/pandas/tests/io/sas/data/zero_variables.sas7bdat differ diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index c3fb85811ca2a..a5546b1198fc6 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -1,9 +1,11 @@ import pandas as pd from pandas.compat import PY2 import pandas.util.testing as tm +from pandas.errors import 
EmptyDataError import os import io import numpy as np +import pytest class TestSAS7BDAT(object): @@ -174,3 +176,11 @@ def test_date_time(): df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime', 'DateTimeHi', 'Taiw']) tm.assert_frame_equal(df, df0) + + +def test_zero_variables(): + # Check if the SAS file has zero variables (PR #18184) + dirpath = tm.get_data_path() + fname = os.path.join(dirpath, "zero_variables.sas7bdat") + with pytest.raises(EmptyDataError): + pd.read_sas(fname)
If the given SAS file has 0 rows, throw an error for the EmptyData file. When reading, check that the column information is available. If not, throw an error. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18184
2017-11-09T07:34:06Z
2017-11-10T14:14:33Z
2017-11-10T14:14:33Z
2017-11-10T14:14:38Z
cleanup; use timedelta instead of relativedelta where possible
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 5843aaa23be57..8e507f354efcf 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -5,15 +5,19 @@ import numpy as np from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod -from pandas.core.tools.datetimes import to_datetime, normalize_date +from pandas.core.tools.datetimes import to_datetime from pandas.core.common import AbstractMethodError # import after tools, dateutil check from dateutil.relativedelta import relativedelta, weekday from dateutil.easter import easter -from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta + from pandas.util._decorators import cache_readonly +from pandas._libs import tslib +from pandas._libs.tslib import (Timestamp, Timedelta, NaT, + OutOfBoundsDatetime, + normalize_date) from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas._libs.tslibs.offsets import ( ApplyTypeError, @@ -49,7 +53,7 @@ def as_timestamp(obj): return obj try: return Timestamp(obj) - except (OutOfBoundsDatetime): + except OutOfBoundsDatetime: pass return obj @@ -57,8 +61,8 @@ def as_timestamp(obj): def apply_wraps(func): @functools.wraps(func) def wrapper(self, other): - if other is tslib.NaT: - return tslib.NaT + if other is NaT: + return NaT elif isinstance(other, (timedelta, Tick, DateOffset)): # timedelta path return func(self, other) @@ -86,8 +90,8 @@ def wrapper(self, other): if not isinstance(self, Nano) and result.nanosecond != nano: if result.tz is not None: # convert to UTC - value = tslib.tz_convert_single( - result.value, 'UTC', result.tz) + value = tslib.tz_convert_single(result.value, + 'UTC', result.tz) else: value = result.value result = Timestamp(value + nano) @@ -173,6 +177,7 @@ def __init__(self, n=1, normalize=False, **kwds): def apply(self, other): if self._use_relativedelta: other = as_datetime(other) + # TODO: Do we risk losing nanoseconds here? 
if len(self.kwds) > 0: tzinfo = getattr(other, 'tzinfo', None) @@ -231,8 +236,8 @@ def apply_index(self, i): weeks = (self.kwds.get('weeks', 0)) * self.n if weeks: - i = (i.to_period('W') + weeks).to_timestamp() + \ - i.to_perioddelta('W') + i = ((i.to_period('W') + weeks).to_timestamp() + + i.to_perioddelta('W')) timedelta_kwds = dict((k, v) for k, v in self.kwds.items() if k in ['days', 'hours', 'minutes', @@ -1002,12 +1007,12 @@ def apply(self, other): if not self.onOffset(other): _, days_in_month = tslib.monthrange(other.year, other.month) if 1 < other.day < self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n > 0: # rollforward so subtract 1 n -= 1 elif self.day_of_month < other.day < days_in_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n < 0: # rollforward in the negative direction so add 1 n += 1 @@ -1084,11 +1089,11 @@ def onOffset(self, dt): def _apply(self, n, other): # if other.day is not day_of_month move to day_of_month and update n if other.day < self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n > 0: n -= 1 elif other.day > self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n == 0: n = 1 else: @@ -1141,13 +1146,13 @@ def onOffset(self, dt): def _apply(self, n, other): # if other.day is not day_of_month move to day_of_month and update n if other.day < self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n == 0: n = -1 else: n -= 1 elif other.day > self.day_of_month: - other += relativedelta(day=self.day_of_month) + other = other.replace(day=self.day_of_month) if n == 0: n = 1 elif n < 0: @@ -1227,12 +1232,7 @@ def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False first_weekday, _ = 
tslib.monthrange(dt.year, dt.month) - if first_weekday == 5: - return dt.day == 3 - elif first_weekday == 6: - return dt.day == 2 - else: - return dt.day == 1 + return dt.day == _get_firstbday(first_weekday) class CustomBusinessMonthEnd(BusinessMixin, MonthOffset): @@ -1965,8 +1965,8 @@ def _decrement(date): date.microsecond) def _rollf(date): - if date.month != self.month or\ - date.day < tslib.monthrange(date.year, date.month)[1]: + if (date.month != self.month or + date.day < tslib.monthrange(date.year, date.month)[1]): date = _increment(date) return date @@ -2133,9 +2133,9 @@ def _offset_lwom(self): return LastWeekOfMonth(n=1, weekday=self.weekday) def isAnchored(self): - return self.n == 1 \ - and self.startingMonth is not None \ - and self.weekday is not None + return (self.n == 1 and + self.startingMonth is not None and + self.weekday is not None) def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -2145,8 +2145,8 @@ def onOffset(self, dt): if self.variation == "nearest": # We have to check the year end of "this" cal year AND the previous - return year_end == dt or \ - self.get_year_end(dt - relativedelta(months=1)) == dt + return (year_end == dt or + self.get_year_end(dt - relativedelta(months=1)) == dt) else: return year_end == dt @@ -2224,10 +2224,10 @@ def get_year_end(self, dt): return self._get_year_end_last(dt) def get_target_month_end(self, dt): - target_month = datetime( - dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo) + target_month = datetime(dt.year, self.startingMonth, 1, + tzinfo=dt.tzinfo) next_month_first_of = target_month + relativedelta(months=+1) - return next_month_first_of + relativedelta(days=-1) + return next_month_first_of + timedelta(days=-1) def _get_year_end_nearest(self, dt): target_date = self.get_target_month_end(dt) @@ -2243,8 +2243,8 @@ def _get_year_end_nearest(self, dt): return backward def _get_year_end_last(self, dt): - current_year = datetime( - dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo) + 
current_year = datetime(dt.year, self.startingMonth, 1, + tzinfo=dt.tzinfo) return current_year + self._offset_lwom @property @@ -2276,17 +2276,15 @@ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code): elif varion_code == "L": variation = "last" else: - raise ValueError( - "Unable to parse varion_code: {code}".format(code=varion_code)) + raise ValueError("Unable to parse varion_code: " + "{code}".format(code=varion_code)) startingMonth = _month_to_int[startingMonth_code] weekday = _weekday_to_int[weekday_code] - return { - "weekday": weekday, - "startingMonth": startingMonth, - "variation": variation, - } + return {"weekday": weekday, + "startingMonth": startingMonth, + "variation": variation} @classmethod def _from_name(cls, *args): @@ -2359,10 +2357,9 @@ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, @cache_readonly def _offset(self): - return FY5253( - startingMonth=self.startingMonth, - weekday=self.weekday, - variation=self.variation) + return FY5253(startingMonth=self.startingMonth, + weekday=self.weekday, + variation=self.variation) def isAnchored(self): return self.n == 1 and self._offset.isAnchored() @@ -2382,7 +2379,7 @@ def apply(self, other): qtr_lens = self.get_weeks(other + self._offset) for weeks in qtr_lens: - start += relativedelta(weeks=weeks) + start += timedelta(weeks=weeks) if start > other: other = start n -= 1 @@ -2399,7 +2396,7 @@ def apply(self, other): qtr_lens = self.get_weeks(other) for weeks in reversed(qtr_lens): - end -= relativedelta(weeks=weeks) + end -= timedelta(weeks=weeks) if end < other: other = end n -= 1 @@ -2442,7 +2439,7 @@ def onOffset(self, dt): current = next_year_end for qtr_len in qtr_lens[0:4]: - current += relativedelta(weeks=qtr_len) + current += timedelta(weeks=qtr_len) if dt == current: return True return False @@ -2472,8 +2469,8 @@ class Easter(DateOffset): @apply_wraps def apply(self, other): currentEaster = easter(other.year) - currentEaster = datetime( - 
currentEaster.year, currentEaster.month, currentEaster.day) + currentEaster = datetime(currentEaster.year, + currentEaster.month, currentEaster.day) currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo) # NOTE: easter returns a datetime.date so we have to convert to type of
- Cleanup, flake8, modernize imports - Replace usage of `other + relativedelta(...)` with `other + timedelta(...)` or `other.replace(...)` where possible. Clearer and more performant. - Use _get_firstbday in one place that had previously duplicated its logic. ``` asv continuous -f 1.1 -E virtualenv master HEAD -b timeseries [...] before after ratio [8dac6331] [c6145385] - 31.4±0.2μs 26.7±0.3μs 0.85 timeseries.Offsets.time_custom_bday_decr - 14.4±0.1ms 12.0±0.2ms 0.84 timeseries.DatetimeIndex.time_infer_freq_none - 1.76s 1.45s 0.82 timeseries.Iteration.time_iter_periodindex - 53.8±0.2μs 42.2±0.2μs 0.78 timeseries.SemiMonthOffset.time_end_decr_n - 53.1±4μs 41.1±0.3μs 0.78 timeseries.SemiMonthOffset.time_begin_incr_n - 54.6±0.2μs 41.5±0.2μs 0.76 timeseries.SemiMonthOffset.time_begin_decr_n - 3.55±0.5ms 2.70±0.01ms 0.76 timeseries.ToDatetime.time_iso8601_nosep - 51.8±0.2μs 38.6±0.2μs 0.75 timeseries.SemiMonthOffset.time_begin_decr - 53.0±3μs 38.1±0.3μs 0.72 timeseries.SemiMonthOffset.time_end_incr_n - 47.5±0.3μs 34.0±0.3μs 0.72 timeseries.SemiMonthOffset.time_end_incr - 49.2±0.2μs 34.9±0.8μs 0.71 timeseries.SemiMonthOffset.time_begin_incr - 55.8±0.1μs 39.3±1μs 0.70 timeseries.SemiMonthOffset.time_end_decr - 43.6±0.5μs 30.7±0.2μs 0.70 timeseries.SemiMonthOffset.time_begin_apply - 43.6±0.4μs 30.6±0.1μs 0.70 timeseries.SemiMonthOffset.time_end_apply ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18183
2017-11-09T04:48:46Z
2017-11-10T20:43:01Z
null
2017-11-12T23:04:07Z
DEPS: require updated python-dateutil, openpyxl
diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml index c3d3d59f895c6..57748fef1a2e5 100644 --- a/ci/environment-dev.yaml +++ b/ci/environment-dev.yaml @@ -6,8 +6,8 @@ dependencies: - Cython - NumPy - moto - - pytest - - python-dateutil + - pytest>=3.1 + - python-dateutil>=2.5.0 - python=3 - pytz - setuptools diff --git a/ci/requirements-2.7.build b/ci/requirements-2.7.build index 415df13179fcf..d1cc61df0a77c 100644 --- a/ci/requirements-2.7.build +++ b/ci/requirements-2.7.build @@ -1,5 +1,5 @@ python=2.7* -python-dateutil=2.4.1 +python-dateutil=2.5.0 pytz=2013b nomkl numpy diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run index a68e1d256058d..7c10b98fb6e14 100644 --- a/ci/requirements-2.7.run +++ b/ci/requirements-2.7.run @@ -1,11 +1,11 @@ -python-dateutil=2.4.1 +python-dateutil=2.5.0 pytz=2013b numpy xlwt=0.7.5 numexpr pytables matplotlib -openpyxl=1.6.2 +openpyxl=2.4.0 xlrd=0.9.2 sqlalchemy=0.9.6 lxml diff --git a/ci/requirements-2.7_COMPAT.build b/ci/requirements-2.7_COMPAT.build index d9c932daa110b..aa767c1001196 100644 --- a/ci/requirements-2.7_COMPAT.build +++ b/ci/requirements-2.7_COMPAT.build @@ -1,5 +1,5 @@ python=2.7* numpy=1.9.2 cython=0.23 -dateutil=1.5 +python-dateutil=2.5.0 pytz=2013b diff --git a/ci/requirements-2.7_COMPAT.run b/ci/requirements-2.7_COMPAT.run index 39bf720140733..c3daed6e6e1da 100644 --- a/ci/requirements-2.7_COMPAT.run +++ b/ci/requirements-2.7_COMPAT.run @@ -1,5 +1,5 @@ numpy=1.9.2 -dateutil=1.5 +python-dateutil=2.5.0 pytz=2013b scipy=0.14.0 xlwt=0.7.5 diff --git a/ci/requirements-2.7_LOCALE.run b/ci/requirements-2.7_LOCALE.run index 978bbf6a051c5..0a809a7dd6e5d 100644 --- a/ci/requirements-2.7_LOCALE.run +++ b/ci/requirements-2.7_LOCALE.run @@ -1,8 +1,8 @@ python-dateutil -pytz=2013b +pytz numpy=1.9.2 xlwt=0.7.5 -openpyxl=1.6.2 +openpyxl=2.4.0 xlsxwriter=0.5.2 xlrd=0.9.2 bottleneck=1.0.0 diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt index 06b22bd8f2c63..8d4421ba2b681 
100644 --- a/ci/requirements-optional-pip.txt +++ b/ci/requirements-optional-pip.txt @@ -1,11 +1,13 @@ # This file was autogenerated by scripts/convert_deps.py -# Do not modify directlybeautifulsoup4 +# Do not modify directly +beautifulsoup4 blosc bottleneck fastparquet feather-format html5lib ipython +ipykernel jinja2 lxml matplotlib diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index 2fb36b7cd70d8..e9840388203b1 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -3,8 +3,8 @@ Cython NumPy moto -pytest -python-dateutil +pytest>=3.1 +python-dateutil>=2.5.0 pytz setuptools sphinx \ No newline at end of file diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index 2aee11772896f..8152af84228b8 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -16,13 +16,11 @@ requirements: - cython - numpy x.x - setuptools - - pytz - - python-dateutil run: - python - numpy x.x - - python-dateutil + - python-dateutil >=2.5.0 - pytz test: diff --git a/doc/source/install.rst b/doc/source/install.rst index 7c1fde119ceaa..ae89c64b6e91e 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -200,8 +200,8 @@ Dependencies * `setuptools <https://setuptools.readthedocs.io/en/latest/>`__ * `NumPy <http://www.numpy.org>`__: 1.9.0 or higher -* `python-dateutil <http://labix.org/python-dateutil>`__: 1.5 or higher -* `pytz <http://pytz.sourceforge.net/>`__: Needed for time zone support +* `python-dateutil <//https://dateutil.readthedocs.io/en/stable/>`__: 2.5.0 or higher +* `pytz <http://pytz.sourceforge.net/>`__ .. 
_install.recommended_dependencies: @@ -244,8 +244,8 @@ Optional Dependencies * For Excel I/O: * `xlrd/xlwt <http://www.python-excel.org/>`__: Excel reading (xlrd) and writing (xlwt) - * `openpyxl <http://packages.python.org/openpyxl/>`__: openpyxl version 1.6.1 - or higher (but lower than 2.0.0), or version 2.2 or higher, for writing .xlsx files (xlrd >= 0.9.0) + * `openpyxl <http://https://openpyxl.readthedocs.io/en/default/>`__: openpyxl version 2.4.0 + for writing .xlsx files (xlrd >= 0.9.0) * `XlsxWriter <https://pypi.python.org/pypi/XlsxWriter>`__: Alternative Excel writer * `Jinja2 <http://jinja.pocoo.org/>`__: Template engine for conditional HTML formatting. diff --git a/doc/source/io.rst b/doc/source/io.rst index 2aeafd99f6e72..f96e33dbf9882 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2935,7 +2935,7 @@ Writing Excel Files to Memory +++++++++++++++++++++++++++++ Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or -``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`. Pandas also supports Openpyxl >= 2.2. +``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`. .. code-block:: python @@ -2991,9 +2991,7 @@ files if `Xlsxwriter`_ is not available. To specify which writer you want to use, you can pass an engine keyword argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are: -- ``openpyxl``: This includes stable support for Openpyxl from 1.6.1. However, - it is advised to use version 2.2 and higher, especially when working with - styles. 
+- ``openpyxl``: version 2.4 or higher is required - ``xlsxwriter`` - ``xlwt`` diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index e8f2823f32edd..5e605ecb7d8d5 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -86,9 +86,22 @@ Backwards incompatible API changes - :func:`Series.fillna` now raises a ``TypeError`` instead of a ``ValueError`` when passed a list, tuple or DataFrame as a ``value`` (:issue:`18293`) - :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`) - The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`) -- +.. _whatsnew_0220.api_breaking.deps: + +Dependencies have increased minimum versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We have updated our minimum supported versions of dependencies (:issue:`15184`). 
+If installed, we now require: + +-----------------+-----------------+----------+ + | Package | Minimum Version | Required | + +=================+=================+==========+ + | python-dateutil | 2.5.0 | X | + +-----------------+-----------------+----------+ + | openpyxl | 2.4.0 | | + +-----------------+-----------------+----------+ diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index a615e098135a9..2deb29dabe764 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -396,25 +396,13 @@ def raise_with_traceback(exc, traceback=Ellipsis): If traceback is not passed, uses sys.exc_info() to get traceback.""" -# http://stackoverflow.com/questions/4126348 -# Thanks to @martineau at SO - +# dateutil minimum version import dateutil -if PY2 and LooseVersion(dateutil.__version__) == '2.0': - # dateutil brokenness - raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' - 'install version 1.5 or 2.1+!') - +if LooseVersion(dateutil.__version__) < '2.5': + raise ImportError('dateutil 2.5.0 is the minimum required version') from dateutil import parser as _date_parser -if LooseVersion(dateutil.__version__) < '2.0': - - @functools.wraps(_date_parser.parse) - def parse_date(timestr, *args, **kwargs): - timestr = bytes(timestr) - return _date_parser.parse(timestr, *args, **kwargs) -else: - parse_date = _date_parser.parse +parse_date = _date_parser.parse # https://github.com/pandas-dev/pandas/pull/9123 diff --git a/pandas/compat/openpyxl_compat.py b/pandas/compat/openpyxl_compat.py deleted file mode 100644 index 87cf52cf00fef..0000000000000 --- a/pandas/compat/openpyxl_compat.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Detect incompatible version of OpenPyXL - -GH7169 -""" - -from distutils.version import LooseVersion - -start_ver = '1.6.1' -stop_ver = '2.0.0' - - -def is_compat(major_ver=1): - """Detect whether the installed version of openpyxl is supported - - Parameters - ---------- - ver : int - 1 requests compatibility status 
among the 1.x.y series - 2 requests compatibility status of 2.0.0 and later - Returns - ------- - compat : bool - ``True`` if openpyxl is installed and is a compatible version. - ``False`` otherwise. - """ - import openpyxl - ver = LooseVersion(openpyxl.__version__) - if major_ver == 1: - return LooseVersion(start_ver) <= ver < LooseVersion(stop_ver) - elif major_ver == 2: - return LooseVersion(stop_ver) <= ver - else: - raise ValueError('cannot test for openpyxl compatibility with ver {0}' - .format(major_ver)) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index fec916dc52d20..882130bedcbf0 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -28,7 +28,6 @@ from pandas.core import config from pandas.io.formats.printing import pprint_thing import pandas.compat as compat -import pandas.compat.openpyxl_compat as openpyxl_compat from warnings import warn from distutils.version import LooseVersion from pandas.util._decorators import Appender, deprecate_kwarg @@ -185,22 +184,6 @@ def _get_default_writer(ext): def get_writer(engine_name): - if engine_name == 'openpyxl': - try: - import openpyxl - - # with version-less openpyxl engine - # make sure we make the intelligent choice for the user - if LooseVersion(openpyxl.__version__) < '2.0.0': - return _writers['openpyxl1'] - elif LooseVersion(openpyxl.__version__) < '2.2.0': - return _writers['openpyxl20'] - else: - return _writers['openpyxl22'] - except ImportError: - # fall through to normal exception handling below - pass - try: return _writers[engine_name] except KeyError: @@ -828,20 +811,15 @@ def close(self): return self.save() -class _Openpyxl1Writer(ExcelWriter): - engine = 'openpyxl1' +class _OpenpyxlWriter(ExcelWriter): + engine = 'openpyxl' supported_extensions = ('.xlsx', '.xlsm') - openpyxl_majorver = 1 def __init__(self, path, engine=None, **engine_kwargs): - if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver): - raise ValueError('Installed openpyxl is not supported at this ' - 
'time. Use {majorver}.x.y.' - .format(majorver=self.openpyxl_majorver)) # Use the openpyxl module as the Excel writer. from openpyxl.workbook import Workbook - super(_Openpyxl1Writer, self).__init__(path, **engine_kwargs) + super(_OpenpyxlWriter, self).__init__(path, **engine_kwargs) # Create workbook object with default optimized_write=True. self.book = Workbook() @@ -861,72 +839,6 @@ def save(self): """ return self.book.save(self.path) - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, - freeze_panes=None): - # Write the frame cells using openpyxl. - from openpyxl.cell import get_column_letter - - sheet_name = self._get_sheet_name(sheet_name) - - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] - else: - wks = self.book.create_sheet() - wks.title = sheet_name - self.sheets[sheet_name] = wks - - for cell in cells: - colletter = get_column_letter(startcol + cell.col + 1) - xcell = wks.cell("{col}{row}".format(col=colletter, - row=startrow + cell.row + 1)) - if (isinstance(cell.val, compat.string_types) and - xcell.data_type_for_value(cell.val) != xcell.TYPE_STRING): - xcell.set_value_explicit(cell.val) - else: - xcell.value = _conv_value(cell.val) - style = None - if cell.style: - style = self._convert_to_style(cell.style) - for field in style.__fields__: - xcell.style.__setattr__(field, - style.__getattribute__(field)) - - if isinstance(cell.val, datetime): - xcell.style.number_format.format_code = self.datetime_format - elif isinstance(cell.val, date): - xcell.style.number_format.format_code = self.date_format - - if cell.mergestart is not None and cell.mergeend is not None: - cletterstart = get_column_letter(startcol + cell.col + 1) - cletterend = get_column_letter(startcol + cell.mergeend + 1) - - wks.merge_cells('{start}{row}:{end}{mergestart}' - .format(start=cletterstart, - row=startrow + cell.row + 1, - end=cletterend, - mergestart=startrow + - cell.mergestart + 1)) - - # Excel requires that the format of the first cell in 
a merged - # range is repeated in the rest of the merged range. - if style: - first_row = startrow + cell.row + 1 - last_row = startrow + cell.mergestart + 1 - first_col = startcol + cell.col + 1 - last_col = startcol + cell.mergeend + 1 - - for row in range(first_row, last_row + 1): - for col in range(first_col, last_col + 1): - if row == first_row and col == first_col: - # Ignore first cell. It is already handled. - continue - colletter = get_column_letter(col) - xcell = wks.cell("{col}{row}" - .format(col=colletter, row=row)) - for field in style.__fields__: - xcell.style.__setattr__( - field, style.__getattribute__(field)) - @classmethod def _convert_to_style(cls, style_dict): """ @@ -948,88 +860,6 @@ def _convert_to_style(cls, style_dict): return xls_style - -register_writer(_Openpyxl1Writer) - - -class _OpenpyxlWriter(_Openpyxl1Writer): - engine = 'openpyxl' - - -register_writer(_OpenpyxlWriter) - - -class _Openpyxl20Writer(_Openpyxl1Writer): - """ - Note: Support for OpenPyxl v2 is currently EXPERIMENTAL (GH7565). - """ - engine = 'openpyxl20' - openpyxl_majorver = 2 - - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, - freeze_panes=None): - # Write the frame cells using openpyxl. 
- from openpyxl.cell import get_column_letter - - sheet_name = self._get_sheet_name(sheet_name) - - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] - else: - wks = self.book.create_sheet() - wks.title = sheet_name - self.sheets[sheet_name] = wks - - for cell in cells: - colletter = get_column_letter(startcol + cell.col + 1) - xcell = wks["{col}{row}" - .format(col=colletter, row=startrow + cell.row + 1)] - xcell.value = _conv_value(cell.val) - style_kwargs = {} - - # Apply format codes before cell.style to allow override - if isinstance(cell.val, datetime): - style_kwargs.update(self._convert_to_style_kwargs({ - 'number_format': {'format_code': self.datetime_format}})) - elif isinstance(cell.val, date): - style_kwargs.update(self._convert_to_style_kwargs({ - 'number_format': {'format_code': self.date_format}})) - - if cell.style: - style_kwargs.update(self._convert_to_style_kwargs(cell.style)) - - if style_kwargs: - xcell.style = xcell.style.copy(**style_kwargs) - - if cell.mergestart is not None and cell.mergeend is not None: - cletterstart = get_column_letter(startcol + cell.col + 1) - cletterend = get_column_letter(startcol + cell.mergeend + 1) - - wks.merge_cells('{start}{row}:{end}{mergestart}' - .format(start=cletterstart, - row=startrow + cell.row + 1, - end=cletterend, - mergestart=startrow + - cell.mergestart + 1)) - - # Excel requires that the format of the first cell in a merged - # range is repeated in the rest of the merged range. - if style_kwargs: - first_row = startrow + cell.row + 1 - last_row = startrow + cell.mergestart + 1 - first_col = startcol + cell.col + 1 - last_col = startcol + cell.mergeend + 1 - - for row in range(first_row, last_row + 1): - for col in range(first_col, last_col + 1): - if row == first_row and col == first_col: - # Ignore first cell. It is already handled. 
- continue - colletter = get_column_letter(col) - xcell = wks["{col}{row}" - .format(col=colletter, row=row)] - xcell.style = xcell.style.copy(**style_kwargs) - @classmethod def _convert_to_style_kwargs(cls, style_dict): """ @@ -1341,13 +1171,7 @@ def _convert_to_number_format(cls, number_format_dict): ------- number_format : str """ - try: - # >= 2.0.0 < 2.1.0 - from openpyxl.styles import NumberFormat - return NumberFormat(**number_format_dict) - except: - # >= 2.1.0 - return number_format_dict['format_code'] + return number_format_dict['format_code'] @classmethod def _convert_to_protection(cls, protection_dict): @@ -1367,17 +1191,6 @@ def _convert_to_protection(cls, protection_dict): return Protection(**protection_dict) - -register_writer(_Openpyxl20Writer) - - -class _Openpyxl22Writer(_Openpyxl20Writer): - """ - Note: Support for OpenPyxl v2.2 is currently EXPERIMENTAL (GH7565). - """ - engine = 'openpyxl22' - openpyxl_majorver = 2 - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None): # Write the frame cells using openpyxl. 
@@ -1443,7 +1256,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, setattr(xcell, k, v) -register_writer(_Openpyxl22Writer) +register_writer(_OpenpyxlWriter) class _XlwtWriter(ExcelWriter): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index a1287c3102b77..6c72e65b1021c 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1160,9 +1160,9 @@ class TestDatetimeParsingWrappers(object): @pytest.mark.parametrize('cache', [True, False]) def test_parsers(self, cache): + # dateutil >= 2.5.0 defaults to yearfirst=True # https://github.com/dateutil/dateutil/issues/217 - import dateutil - yearfirst = dateutil.__version__ >= LooseVersion('2.5.0') + yearfirst = True cases = {'2011-01-01': datetime(2011, 1, 1), '2Q2005': datetime(2005, 4, 1), diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index d33136a86faad..96117b3c21a9b 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -1,6 +1,4 @@ # pylint: disable=E1101 -import functools -import operator import os import sys import warnings @@ -17,12 +15,12 @@ import pandas as pd import pandas.util.testing as tm from pandas import DataFrame, Index, MultiIndex -from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems +from pandas.compat import u, range, map, BytesIO, iteritems from pandas.core.config import set_option, get_option from pandas.io.common import URLError from pandas.io.excel import ( - ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer, - _Openpyxl20Writer, _Openpyxl22Writer, register_writer, _XlsxWriter + ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _OpenpyxlWriter, + register_writer, _XlsxWriter ) from pandas.io.formats.excel import ExcelFormatter from pandas.io.parsers import read_csv @@ -1926,207 +1924,10 @@ def test_path_localpath(self): tm.assert_frame_equal(df, result) -def 
raise_wrapper(major_ver): - def versioned_raise_wrapper(orig_method): - @functools.wraps(orig_method) - def wrapped(self, *args, **kwargs): - _skip_if_no_openpyxl() - if openpyxl_compat.is_compat(major_ver=major_ver): - orig_method(self, *args, **kwargs) - else: - msg = (r'Installed openpyxl is not supported at this ' - r'time\. Use.+') - with tm.assert_raises_regex(ValueError, msg): - orig_method(self, *args, **kwargs) - return wrapped - return versioned_raise_wrapper - - -def raise_on_incompat_version(major_ver): - def versioned_raise_on_incompat_version(cls): - methods = filter(operator.methodcaller( - 'startswith', 'test_'), dir(cls)) - for method in methods: - setattr(cls, method, raise_wrapper( - major_ver)(getattr(cls, method))) - return cls - return versioned_raise_on_incompat_version - - -@raise_on_incompat_version(1) class TestOpenpyxlTests(ExcelWriterBase): + engine_name = 'openpyxl' ext = '.xlsx' - engine_name = 'openpyxl1' - check_skip = staticmethod(lambda *args, **kwargs: None) - - def test_to_excel_styleconverter(self): - _skip_if_no_openpyxl() - if not openpyxl_compat.is_compat(major_ver=1): - pytest.skip('incompatible openpyxl version') - - import openpyxl - - hstyle = {"font": {"bold": True}, - "borders": {"top": "thin", - "right": "thin", - "bottom": "thin", - "left": "thin"}, - "alignment": {"horizontal": "center", "vertical": "top"}} - - xlsx_style = _Openpyxl1Writer._convert_to_style(hstyle) - assert xlsx_style.font.bold - assert (openpyxl.style.Border.BORDER_THIN == - xlsx_style.borders.top.border_style) - assert (openpyxl.style.Border.BORDER_THIN == - xlsx_style.borders.right.border_style) - assert (openpyxl.style.Border.BORDER_THIN == - xlsx_style.borders.bottom.border_style) - assert (openpyxl.style.Border.BORDER_THIN == - xlsx_style.borders.left.border_style) - assert (openpyxl.style.Alignment.HORIZONTAL_CENTER == - xlsx_style.alignment.horizontal) - assert (openpyxl.style.Alignment.VERTICAL_TOP == - xlsx_style.alignment.vertical) - - 
-def skip_openpyxl_gt21(cls): - """Skip test case if openpyxl >= 2.2""" - - @classmethod - def setup_class(cls): - _skip_if_no_openpyxl() - import openpyxl - ver = openpyxl.__version__ - if (not (LooseVersion(ver) >= LooseVersion('2.0.0') and - LooseVersion(ver) < LooseVersion('2.2.0'))): - pytest.skip("openpyxl %s >= 2.2" % str(ver)) - - cls.setup_class = setup_class - return cls - - -@raise_on_incompat_version(2) -@skip_openpyxl_gt21 -class TestOpenpyxl20Tests(ExcelWriterBase): - ext = '.xlsx' - engine_name = 'openpyxl20' - check_skip = staticmethod(lambda *args, **kwargs: None) - - def test_to_excel_styleconverter(self): - import openpyxl - from openpyxl import styles - - hstyle = { - "font": { - "color": '00FF0000', - "bold": True, - }, - "borders": { - "top": "thin", - "right": "thin", - "bottom": "thin", - "left": "thin", - }, - "alignment": { - "horizontal": "center", - "vertical": "top", - }, - "fill": { - "patternType": 'solid', - 'fgColor': { - 'rgb': '006666FF', - 'tint': 0.3, - }, - }, - "number_format": { - "format_code": "0.00" - }, - "protection": { - "locked": True, - "hidden": False, - }, - } - - font_color = styles.Color('00FF0000') - font = styles.Font(bold=True, color=font_color) - side = styles.Side(style=styles.borders.BORDER_THIN) - border = styles.Border(top=side, right=side, bottom=side, left=side) - alignment = styles.Alignment(horizontal='center', vertical='top') - fill_color = styles.Color(rgb='006666FF', tint=0.3) - fill = styles.PatternFill(patternType='solid', fgColor=fill_color) - - # ahh openpyxl API changes - ver = openpyxl.__version__ - if ver >= LooseVersion('2.0.0') and ver < LooseVersion('2.1.0'): - number_format = styles.NumberFormat(format_code='0.00') - else: - number_format = '0.00' # XXX: Only works with openpyxl-2.1.0 - - protection = styles.Protection(locked=True, hidden=False) - - kw = _Openpyxl20Writer._convert_to_style_kwargs(hstyle) - assert kw['font'] == font - assert kw['border'] == border - assert kw['alignment'] 
== alignment - assert kw['fill'] == fill - assert kw['number_format'] == number_format - assert kw['protection'] == protection - - def test_write_cells_merge_styled(self): - from pandas.io.formats.excel import ExcelCell - from openpyxl import styles - - sheet_name = 'merge_styled' - - sty_b1 = {'font': {'color': '00FF0000'}} - sty_a2 = {'font': {'color': '0000FF00'}} - - initial_cells = [ - ExcelCell(col=1, row=0, val=42, style=sty_b1), - ExcelCell(col=0, row=1, val=99, style=sty_a2), - ] - - sty_merged = {'font': {'color': '000000FF', 'bold': True}} - sty_kwargs = _Openpyxl20Writer._convert_to_style_kwargs(sty_merged) - openpyxl_sty_merged = styles.Style(**sty_kwargs) - merge_cells = [ - ExcelCell(col=0, row=0, val='pandas', - mergestart=1, mergeend=1, style=sty_merged), - ] - - with ensure_clean('.xlsx') as path: - writer = _Openpyxl20Writer(path) - writer.write_cells(initial_cells, sheet_name=sheet_name) - writer.write_cells(merge_cells, sheet_name=sheet_name) - - wks = writer.sheets[sheet_name] - xcell_b1 = wks['B1'] - xcell_a2 = wks['A2'] - assert xcell_b1.style == openpyxl_sty_merged - assert xcell_a2.style == openpyxl_sty_merged - - -def skip_openpyxl_lt22(cls): - """Skip test case if openpyxl < 2.2""" - - @classmethod - def setup_class(cls): - _skip_if_no_openpyxl() - import openpyxl - ver = openpyxl.__version__ - if LooseVersion(ver) < LooseVersion('2.2.0'): - pytest.skip("openpyxl %s < 2.2" % str(ver)) - - cls.setup_class = setup_class - return cls - - -@raise_on_incompat_version(2) -@skip_openpyxl_lt22 -class TestOpenpyxl22Tests(ExcelWriterBase): - ext = '.xlsx' - engine_name = 'openpyxl22' - check_skip = staticmethod(lambda *args, **kwargs: None) + check_skip = staticmethod(_skip_if_no_openpyxl) def test_to_excel_styleconverter(self): from openpyxl import styles @@ -2174,7 +1975,7 @@ def test_to_excel_styleconverter(self): protection = styles.Protection(locked=True, hidden=False) - kw = _Openpyxl22Writer._convert_to_style_kwargs(hstyle) + kw = 
_OpenpyxlWriter._convert_to_style_kwargs(hstyle) assert kw['font'] == font assert kw['border'] == border assert kw['alignment'] == alignment @@ -2183,9 +1984,6 @@ def test_to_excel_styleconverter(self): assert kw['protection'] == protection def test_write_cells_merge_styled(self): - if not openpyxl_compat.is_compat(major_ver=2): - pytest.skip('incompatible openpyxl version') - from pandas.io.formats.excel import ExcelCell sheet_name = 'merge_styled' @@ -2199,7 +1997,7 @@ def test_write_cells_merge_styled(self): ] sty_merged = {'font': {'color': '000000FF', 'bold': True}} - sty_kwargs = _Openpyxl22Writer._convert_to_style_kwargs(sty_merged) + sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged) openpyxl_sty_merged = sty_kwargs['font'] merge_cells = [ ExcelCell(col=0, row=0, val='pandas', @@ -2207,7 +2005,7 @@ def test_write_cells_merge_styled(self): ] with ensure_clean('.xlsx') as path: - writer = _Openpyxl22Writer(path) + writer = _OpenpyxlWriter(path) writer.write_cells(initial_cells, sheet_name=sheet_name) writer.write_cells(merge_cells, sheet_name=sheet_name) @@ -2322,7 +2120,7 @@ def test_column_format(self): try: read_num_format = cell.number_format - except: + except Exception: read_num_format = cell.style.number_format._format_code assert read_num_format == num_format @@ -2366,9 +2164,7 @@ def test_ExcelWriter_dispatch(self): writer_klass = _XlsxWriter except ImportError: _skip_if_no_openpyxl() - if not openpyxl_compat.is_compat(major_ver=1): - pytest.skip('incompatible openpyxl version') - writer_klass = _Openpyxl1Writer + writer_klass = _OpenpyxlWriter with ensure_clean('.xlsx') as path: writer = ExcelWriter(path) @@ -2461,10 +2257,6 @@ def custom_converter(css): pytest.importorskip('jinja2') pytest.importorskip(engine) - if engine == 'openpyxl' and openpyxl_compat.is_compat(major_ver=1): - pytest.xfail('openpyxl1 does not support some openpyxl2-compatible ' - 'style dicts') - # Prepare spreadsheets df = DataFrame(np.random.randn(10, 3)) @@ 
-2482,9 +2274,6 @@ def custom_converter(css): # For other engines, we only smoke test return openpyxl = pytest.importorskip('openpyxl') - if not openpyxl_compat.is_compat(major_ver=2): - pytest.skip('incompatible openpyxl version') - wb = openpyxl.load_workbook(path) # (1) compare DataFrame.to_excel and Styler.to_excel when unstyled diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index dab508de335c4..e23911e8d2003 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -16,7 +16,7 @@ import pandas.util.testing as tm from pandas.tseries import offsets, frequencies -from pandas._libs.tslibs.timezones import get_timezone +from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz from pandas._libs.tslibs import conversion, period from pandas.compat import long, PY3 @@ -359,9 +359,7 @@ def test_conversion(self): '2014-01-01 00:00:00.000000001']) def test_repr(self, date, freq): # dateutil zone change (only matters for repr) - if (dateutil.__version__ >= LooseVersion('2.3') and - (dateutil.__version__ <= LooseVersion('2.4.0') or - dateutil.__version__ >= LooseVersion('2.6.0'))): + if dateutil.__version__ >= LooseVersion('2.6.0'): timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific'] else: @@ -1381,7 +1379,6 @@ def test_timestamp_to_datetime_explicit_pytz(self): def test_timestamp_to_datetime_explicit_dateutil(self): tm._skip_if_windows_python_3() - from pandas._libs.tslibs.timezones import dateutil_gettz as gettz stamp = Timestamp('20090415', tz=gettz('US/Eastern'), freq='D') dtval = stamp.to_pydatetime() assert stamp == dtval diff --git a/setup.py b/setup.py index ba948abf4302b..57131255884de 100755 --- a/setup.py +++ b/setup.py @@ -19,8 +19,6 @@ import versioneer cmdclass = versioneer.get_cmdclass() -PY3 = sys.version_info[0] >= 3 - def is_platform_windows(): return sys.platform == 'win32' or sys.platform == 'cygwin' @@ -46,7 +44,7 @@ def 
is_platform_mac(): min_numpy_ver = '1.9.0' setuptools_kwargs = { 'install_requires': [ - 'python-dateutil >= 2' if PY3 else 'python-dateutil', + 'python-dateutil >= 2.5.0', 'pytz >= 2011k', 'numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver), ],
closes #15184 reminder to change: https://github.com/conda-forge/pandas-feedstock on release
https://api.github.com/repos/pandas-dev/pandas/pulls/18182
2017-11-08T22:23:10Z
2017-12-04T11:35:45Z
2017-12-04T11:35:45Z
2017-12-04T16:13:17Z
CLN: remove obsolete scripts
diff --git a/scripts/bench_join.R b/scripts/bench_join.R deleted file mode 100644 index edba277f0295c..0000000000000 --- a/scripts/bench_join.R +++ /dev/null @@ -1,50 +0,0 @@ -library(xts) - -iterations <- 50 - -ns = c(100, 1000, 10000, 100000, 1000000) -kinds = c("outer", "left", "inner") - -result = matrix(0, nrow=3, ncol=length(ns)) -n <- 100000 -pct.overlap <- 0.2 - -k <- 1 - -for (ni in 1:length(ns)){ - n <- ns[ni] - rng1 <- 1:n - offset <- as.integer(n * pct.overlap) - rng2 <- rng1 + offset - x <- xts(matrix(rnorm(n * k), nrow=n, ncol=k), - as.POSIXct(Sys.Date()) + rng1) - y <- xts(matrix(rnorm(n * k), nrow=n, ncol=k), - as.POSIXct(Sys.Date()) + rng2) - timing <- numeric() - for (i in 1:3) { - kind = kinds[i] - for(j in 1:iterations) { - gc() # just to be sure - timing[j] <- system.time(merge(x,y,join=kind))[3] - } - #timing <- system.time(for (j in 1:iterations) merge.xts(x, y, join=kind), - # gcFirst=F) - #timing <- as.list(timing) - result[i, ni] <- mean(timing) * 1000 - #result[i, ni] = (timing$elapsed / iterations) * 1000 - } -} - -rownames(result) <- kinds -colnames(result) <- log10(ns) - -mat <- matrix(rnorm(500000), nrow=100000, ncol=5) -set.seed(12345) -indexer <- sample(1:100000) - -timing <- rep(0, 10) -for (i in 1:10) { - gc() - timing[i] = system.time(mat[indexer,])[3] -} - diff --git a/scripts/bench_join.py b/scripts/bench_join.py deleted file mode 100644 index f9d43772766d8..0000000000000 --- a/scripts/bench_join.py +++ /dev/null @@ -1,211 +0,0 @@ -from pandas.compat import range, lrange -import numpy as np -import pandas._libs.lib as lib -from pandas import * -from copy import deepcopy -import time - -n = 1000000 -K = 1 -pct_overlap = 0.2 - -a = np.arange(n, dtype=np.int64) -b = np.arange(n * pct_overlap, n * (1 + pct_overlap), dtype=np.int64) - -dr1 = DatetimeIndex('1/1/2000', periods=n, offset=offsets.Minute()) -dr2 = DatetimeIndex( - dr1[int(pct_overlap * n)], periods=n, offset=offsets.Minute(2)) - -aobj = a.astype(object) -bobj = 
b.astype(object) - -av = np.random.randn(n) -bv = np.random.randn(n) - -avf = np.random.randn(n, K) -bvf = np.random.randn(n, K) - -a_series = Series(av, index=a) -b_series = Series(bv, index=b) - -a_frame = DataFrame(avf, index=a, columns=lrange(K)) -b_frame = DataFrame(bvf, index=b, columns=lrange(K, 2 * K)) - - -def do_left_join(a, b, av, bv): - out = np.empty((len(a), 2)) - lib.left_join_1d(a, b, av, bv, out) - return out - - -def do_outer_join(a, b, av, bv): - result_index, aindexer, bindexer = lib.outer_join_indexer(a, b) - result = np.empty((2, len(result_index))) - lib.take_1d(av, aindexer, result[0]) - lib.take_1d(bv, bindexer, result[1]) - return result_index, result - - -def do_inner_join(a, b, av, bv): - result_index, aindexer, bindexer = lib.inner_join_indexer(a, b) - result = np.empty((2, len(result_index))) - lib.take_1d(av, aindexer, result[0]) - lib.take_1d(bv, bindexer, result[1]) - return result_index, result - -from line_profiler import LineProfiler -prof = LineProfiler() - -from pandas.util.testing import set_trace - - -def do_left_join_python(a, b, av, bv): - indexer, mask = lib.ordered_left_join_int64(a, b) - - n, ak = av.shape - _, bk = bv.shape - result_width = ak + bk - - result = np.empty((result_width, n), dtype=np.float64) - result[:ak] = av.T - - bchunk = result[ak:] - _take_multi(bv.T, indexer, bchunk) - np.putmask(bchunk, np.tile(mask, bk), np.nan) - return result - - -def _take_multi(data, indexer, out): - if not data.flags.c_contiguous: - data = data.copy() - for i in range(data.shape[0]): - data[i].take(indexer, out=out[i]) - - -def do_left_join_multi(a, b, av, bv): - n, ak = av.shape - _, bk = bv.shape - result = np.empty((n, ak + bk), dtype=np.float64) - lib.left_join_2d(a, b, av, bv, result) - return result - - -def do_outer_join_multi(a, b, av, bv): - n, ak = av.shape - _, bk = bv.shape - result_index, rindexer, lindexer = lib.outer_join_indexer(a, b) - result = np.empty((len(result_index), ak + bk), dtype=np.float64) - 
lib.take_join_contiguous(av, bv, lindexer, rindexer, result) - # result = np.empty((ak + bk, len(result_index)), dtype=np.float64) - # lib.take_axis0(av, rindexer, out=result[:ak].T) - # lib.take_axis0(bv, lindexer, out=result[ak:].T) - return result_index, result - - -def do_inner_join_multi(a, b, av, bv): - n, ak = av.shape - _, bk = bv.shape - result_index, rindexer, lindexer = lib.inner_join_indexer(a, b) - result = np.empty((len(result_index), ak + bk), dtype=np.float64) - lib.take_join_contiguous(av, bv, lindexer, rindexer, result) - # result = np.empty((ak + bk, len(result_index)), dtype=np.float64) - # lib.take_axis0(av, rindexer, out=result[:ak].T) - # lib.take_axis0(bv, lindexer, out=result[ak:].T) - return result_index, result - - -def do_left_join_multi_v2(a, b, av, bv): - indexer, mask = lib.ordered_left_join_int64(a, b) - bv_taken = bv.take(indexer, axis=0) - np.putmask(bv_taken, mask.repeat(bv.shape[1]), np.nan) - return np.concatenate((av, bv_taken), axis=1) - - -def do_left_join_series(a, b): - return b.reindex(a.index) - - -def do_left_join_frame(a, b): - a.index._indexMap = None - b.index._indexMap = None - return a.join(b, how='left') - - -# a = np.array([1, 2, 3, 4, 5], dtype=np.int64) -# b = np.array([0, 3, 5, 7, 9], dtype=np.int64) -# print(lib.inner_join_indexer(a, b)) - -out = np.empty((10, 120000)) - - -def join(a, b, av, bv, how="left"): - func_dict = {'left': do_left_join_multi, - 'outer': do_outer_join_multi, - 'inner': do_inner_join_multi} - - f = func_dict[how] - return f(a, b, av, bv) - - -def bench_python(n=100000, pct_overlap=0.20, K=1): - import gc - ns = [2, 3, 4, 5, 6] - iterations = 200 - pct_overlap = 0.2 - kinds = ['outer', 'left', 'inner'] - - all_results = {} - for logn in ns: - n = 10 ** logn - a = np.arange(n, dtype=np.int64) - b = np.arange(n * pct_overlap, n * pct_overlap + n, dtype=np.int64) - - avf = np.random.randn(n, K) - bvf = np.random.randn(n, K) - - a_frame = DataFrame(avf, index=a, columns=lrange(K)) - b_frame 
= DataFrame(bvf, index=b, columns=lrange(K, 2 * K)) - - all_results[logn] = result = {} - - for kind in kinds: - gc.disable() - elapsed = 0 - _s = time.clock() - for i in range(iterations): - if i % 10 == 0: - elapsed += time.clock() - _s - gc.collect() - _s = time.clock() - a_frame.join(b_frame, how=kind) - # join(a, b, avf, bvf, how=kind) - elapsed += time.clock() - _s - gc.enable() - result[kind] = (elapsed / iterations) * 1000 - - return DataFrame(all_results, index=kinds) - - -def bench_xts(n=100000, pct_overlap=0.20): - from pandas.rpy.common import r - r('a <- 5') - - xrng = '1:%d' % n - - start = n * pct_overlap + 1 - end = n + start - 1 - yrng = '%d:%d' % (start, end) - - r('library(xts)') - - iterations = 500 - - kinds = ['left', 'outer', 'inner'] - result = {} - for kind in kinds: - r('x <- xts(rnorm(%d), as.POSIXct(Sys.Date()) + %s)' % (n, xrng)) - r('y <- xts(rnorm(%d), as.POSIXct(Sys.Date()) + %s)' % (n, yrng)) - stmt = 'for (i in 1:%d) merge(x, y, join="%s")' % (iterations, kind) - elapsed = r('as.list(system.time(%s, gcFirst=F))$elapsed' % stmt)[0] - result[kind] = (elapsed / iterations) * 1000 - return Series(result) diff --git a/scripts/bench_join_multi.py b/scripts/bench_join_multi.py deleted file mode 100644 index b19da6a2c47d8..0000000000000 --- a/scripts/bench_join_multi.py +++ /dev/null @@ -1,32 +0,0 @@ -from pandas import * - -import numpy as np -from pandas.compat import zip, range, lzip -from pandas.util.testing import rands -import pandas._libs.lib as lib - -N = 100000 - -key1 = [rands(10) for _ in range(N)] -key2 = [rands(10) for _ in range(N)] - -zipped = lzip(key1, key2) - - -def _zip(*args): - arr = np.empty(N, dtype=object) - arr[:] = lzip(*args) - return arr - - -def _zip2(*args): - return lib.list_to_object_array(lzip(*args)) - -index = MultiIndex.from_arrays([key1, key2]) -to_join = DataFrame({'j1': np.random.randn(100000)}, index=index) - -data = DataFrame({'A': np.random.randn(500000), - 'key1': np.repeat(key1, 5), - 'key2': 
np.repeat(key2, 5)}) - -# data.join(to_join, on=['key1', 'key2']) diff --git a/scripts/bench_refactor.py b/scripts/bench_refactor.py deleted file mode 100644 index dafba371e995a..0000000000000 --- a/scripts/bench_refactor.py +++ /dev/null @@ -1,51 +0,0 @@ -from pandas import * -from pandas.compat import range -try: - import pandas.core.internals as internals - reload(internals) - import pandas.core.frame as frame - reload(frame) - from pandas.core.frame import DataFrame as DataMatrix -except ImportError: - pass - -N = 1000 -K = 500 - - -def horribly_unconsolidated(): - index = np.arange(N) - - df = DataMatrix(index=index) - - for i in range(K): - df[i] = float(K) - - return df - - -def bench_reindex_index(df, it=100): - new_idx = np.arange(0, N, 2) - for i in range(it): - df.reindex(new_idx) - - -def bench_reindex_columns(df, it=100): - new_cols = np.arange(0, K, 2) - for i in range(it): - df.reindex(columns=new_cols) - - -def bench_join_index(df, it=10): - left = df.reindex(index=np.arange(0, N, 2), - columns=np.arange(K // 2)) - right = df.reindex(columns=np.arange(K // 2 + 1, K)) - for i in range(it): - joined = left.join(right) - -if __name__ == '__main__': - df = horribly_unconsolidated() - left = df.reindex(index=np.arange(0, N, 2), - columns=np.arange(K // 2)) - right = df.reindex(columns=np.arange(K // 2 + 1, K)) - bench_join_index(df) diff --git a/scripts/boxplot_test.py b/scripts/boxplot_test.py deleted file mode 100644 index 3704f7b60dc60..0000000000000 --- a/scripts/boxplot_test.py +++ /dev/null @@ -1,14 +0,0 @@ -import matplotlib.pyplot as plt - -import random -import pandas.util.testing as tm -tm.N = 1000 -df = tm.makeTimeDataFrame() -import string -foo = list(string.letters[:5]) * 200 -df['indic'] = list(string.letters[:5]) * 200 -random.shuffle(foo) -df['indic2'] = foo -df.boxplot(by=['indic', 'indic2'], fontsize=8, rot=90) - -plt.show() diff --git a/scripts/count_code.sh b/scripts/count_code.sh deleted file mode 100755 index 
991faf2e8711b..0000000000000 --- a/scripts/count_code.sh +++ /dev/null @@ -1 +0,0 @@ -cloc pandas --force-lang=Python,pyx --not-match-f="parser.c|lib.c|tslib.c|sandbox.c|hashtable.c|sparse.c|algos.c|index.c" \ No newline at end of file diff --git a/scripts/faster_xs.py b/scripts/faster_xs.py deleted file mode 100644 index 2bb6271124c4f..0000000000000 --- a/scripts/faster_xs.py +++ /dev/null @@ -1,15 +0,0 @@ -import numpy as np - -import pandas.util.testing as tm - -from pandas.core.internals import _interleaved_dtype - -df = tm.makeDataFrame() - -df['E'] = 'foo' -df['F'] = 'foo' -df['G'] = 2 -df['H'] = df['A'] > 0 - -blocks = df._data.blocks -items = df.columns diff --git a/scripts/file_sizes.py b/scripts/file_sizes.py deleted file mode 100644 index de03c72ffbd09..0000000000000 --- a/scripts/file_sizes.py +++ /dev/null @@ -1,208 +0,0 @@ -from __future__ import print_function -import os -import sys - -import numpy as np -import matplotlib.pyplot as plt - -from pandas import DataFrame -from pandas.util.testing import set_trace -from pandas import compat - -dirs = [] -names = [] -lengths = [] - -if len(sys.argv) > 1: - loc = sys.argv[1] -else: - loc = '.' -walked = os.walk(loc) - - -def _should_count_file(path): - return path.endswith('.py') or path.endswith('.pyx') - - -def _is_def_line(line): - """def/cdef/cpdef, but not `cdef class`""" - return (line.endswith(':') and not 'class' in line.split() and - (line.startswith('def ') or - line.startswith('cdef ') or - line.startswith('cpdef ') or - ' def ' in line or ' cdef ' in line or ' cpdef ' in line)) - - -class LengthCounter(object): - """ - should add option for subtracting nested function lengths?? 
- """ - def __init__(self, lines): - self.lines = lines - self.pos = 0 - self.counts = [] - self.n = len(lines) - - def get_counts(self): - self.pos = 0 - self.counts = [] - while self.pos < self.n: - line = self.lines[self.pos] - self.pos += 1 - if _is_def_line(line): - level = _get_indent_level(line) - self._count_function(indent_level=level) - return self.counts - - def _count_function(self, indent_level=1): - indent = ' ' * indent_level - - def _end_of_function(line): - return (line != '' and - not line.startswith(indent) and - not line.startswith('#')) - - start_pos = self.pos - while self.pos < self.n: - line = self.lines[self.pos] - if _end_of_function(line): - self._push_count(start_pos) - return - - self.pos += 1 - - if _is_def_line(line): - self._count_function(indent_level=indent_level + 1) - - # end of file - self._push_count(start_pos) - - def _push_count(self, start_pos): - func_lines = self.lines[start_pos:self.pos] - - if len(func_lines) > 300: - set_trace() - - # remove blank lines at end - while len(func_lines) > 0 and func_lines[-1] == '': - func_lines = func_lines[:-1] - - # remove docstrings and comments - clean_lines = [] - in_docstring = False - for line in func_lines: - line = line.strip() - if in_docstring and _is_triplequote(line): - in_docstring = False - continue - - if line.startswith('#'): - continue - - if _is_triplequote(line): - in_docstring = True - continue - - self.counts.append(len(func_lines)) - - -def _get_indent_level(line): - level = 0 - while line.startswith(' ' * level): - level += 1 - return level - - -def _is_triplequote(line): - return line.startswith('"""') or line.startswith("'''") - - -def _get_file_function_lengths(path): - lines = [x.rstrip() for x in open(path).readlines()] - counter = LengthCounter(lines) - return counter.get_counts() - -# def test_get_function_lengths(): -text = """ -class Foo: - -def foo(): - def bar(): - a = 1 - - b = 2 - - c = 3 - - foo = 'bar' - -def x(): - a = 1 - - b = 3 - - c = 7 - - pass 
-""" - -expected = [5, 8, 7] - -lines = [x.rstrip() for x in text.splitlines()] -counter = LengthCounter(lines) -result = counter.get_counts() -assert(result == expected) - - -def doit(): - for directory, _, files in walked: - print(directory) - for path in files: - if not _should_count_file(path): - continue - - full_path = os.path.join(directory, path) - print(full_path) - lines = len(open(full_path).readlines()) - - dirs.append(directory) - names.append(path) - lengths.append(lines) - - result = DataFrame({'dirs': dirs, 'names': names, - 'lengths': lengths}) - - -def doit2(): - counts = {} - for directory, _, files in walked: - print(directory) - for path in files: - if not _should_count_file(path) or path.startswith('test_'): - continue - - full_path = os.path.join(directory, path) - counts[full_path] = _get_file_function_lengths(full_path) - - return counts - -counts = doit2() - -# counts = _get_file_function_lengths('pandas/tests/test_series.py') - -all_counts = [] -for k, v in compat.iteritems(counts): - all_counts.extend(v) -all_counts = np.array(all_counts) - -fig = plt.figure(figsize=(10, 5)) -ax = fig.add_subplot(111) -ax.hist(all_counts, bins=100) -n = len(all_counts) -nmore = (all_counts > 50).sum() -ax.set_title('%s function lengths, n=%d' % ('pandas', n)) -ax.set_ylabel('N functions') -ax.set_xlabel('Function length') -ax.text(100, 300, '%.3f%% with > 50 lines' % ((n - nmore) / float(n)), - fontsize=18) -plt.show() diff --git a/scripts/gen_release_notes.py b/scripts/gen_release_notes.py deleted file mode 100644 index 7e4ffca59a0ab..0000000000000 --- a/scripts/gen_release_notes.py +++ /dev/null @@ -1,95 +0,0 @@ -from __future__ import print_function -import sys -import json -from pandas.io.common import urlopen -from datetime import datetime - - -class Milestone(object): - - def __init__(self, title, number): - self.title = title - self.number = number - - def __eq__(self, other): - if isinstance(other, Milestone): - return self.number == other.number 
- return False - - -class Issue(object): - - def __init__(self, title, labels, number, milestone, body, state): - self.title = title - self.labels = set([x['name'] for x in labels]) - self.number = number - self.milestone = milestone - self.body = body - self.closed = state == 'closed' - - def __eq__(self, other): - if isinstance(other, Issue): - return self.number == other.number - return False - - -def get_issues(): - all_issues = [] - page_number = 1 - while True: - iss = _get_page(page_number) - if len(iss) == 0: - break - page_number += 1 - all_issues.extend(iss) - return all_issues - - -def _get_page(page_number): - gh_url = ('https://api.github.com/repos/pandas-dev/pandas/issues?' - 'milestone=*&state=closed&assignee=*&page=%d') % page_number - with urlopen(gh_url) as resp: - rs = resp.readlines()[0] - jsondata = json.loads(rs) - issues = [Issue(x['title'], x['labels'], x['number'], - get_milestone(x['milestone']), x['body'], x['state']) - for x in jsondata] - return issues - - -def get_milestone(data): - if data is None: - return None - return Milestone(data['title'], data['number']) - - -def collate_label(issues, label): - lines = [] - for x in issues: - if label in x.labels: - lines.append('\t- %s(#%d)' % (x.title, x.number)) - - return '\n'.join(lines) - - -def release_notes(milestone): - issues = get_issues() - - headers = ['New Features', 'Improvements to existing features', - 'API Changes', 'Bug fixes'] - labels = ['New', 'Enhancement', 'API-Change', 'Bug'] - - rs = 'pandas %s' % milestone - rs += '\n' + ('=' * len(rs)) - rs += '\n\n **Release date:** %s' % datetime.today().strftime('%B %d, %Y') - for i, h in enumerate(headers): - rs += '\n\n**%s**\n\n' % h - l = labels[i] - rs += collate_label(issues, l) - - return rs - -if __name__ == '__main__': - - rs = release_notes(sys.argv[1]) - print(rs) diff --git a/scripts/git-mrb b/scripts/git-mrb deleted file mode 100644 index c15e6dbf9f51a..0000000000000 --- a/scripts/git-mrb +++ /dev/null @@ -1,82 +0,0 
@@ -#!/usr/bin/env python -"""git-mrb: merge remote branch. - -git mrb [remote:branch OR remote-branch] [onto] [upstream] - -remote must be locally available, and branch must exist in that remote. - -If 'onto' branch isn't given, default is 'master'. - -If 'upstream' repository isn't given, default is 'origin'. - -You can separate the remote and branch spec with either a : or a -. - -Taken from IPython project -""" -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -from subprocess import check_call -import sys - -#----------------------------------------------------------------------------- -# Functions -#----------------------------------------------------------------------------- - -def sh(cmd): - cmd = cmd.format(**shvars) - print('$', cmd) - check_call(cmd, shell=True) - -#----------------------------------------------------------------------------- -# Main Script -#----------------------------------------------------------------------------- - -argv = sys.argv[1:] -narg = len(argv) - -try: - branch_spec = argv[0] - sep = ':' if ':' in branch_spec else '-' - remote, branch = branch_spec.split(':', 1) - if not branch: - raise ValueError('Branch spec %s invalid, branch not found' % - branch_spec) -except: - import traceback as tb - tb.print_exc() - print(__doc__) - sys.exit(1) - -onto = argv[1] if narg >= 2 else 'master' -upstream = argv[1] if narg == 3 else 'origin' - -# Git doesn't like ':' in branch names. -if sep == ':': - branch_spec = branch_spec.replace(':', '-') - -# Global used by sh -shvars = dict(remote=remote, branch_spec=branch_spec, branch=branch, - onto=onto, upstream=upstream) - -# Start git calls. -sh('git fetch {remote}') -sh('git checkout -b {branch_spec} {onto}') -sh('git merge {remote}/{branch}') - -print(""" -************************************************************* - Run test suite. 
If tests pass, run the following to merge: - -git checkout {onto} -git merge {branch_spec} -git push {upstream} {onto} - -************************************************************* -""".format(**shvars)) - -ans = raw_input("Revert to master and delete temporary branch? [Y/n]: ") -if ans.strip().lower() in ('', 'y', 'yes'): - sh('git checkout {onto}') - sh('git branch -D {branch_spec}') \ No newline at end of file diff --git a/scripts/git_code_churn.py b/scripts/git_code_churn.py deleted file mode 100644 index 18c9b244a6ba0..0000000000000 --- a/scripts/git_code_churn.py +++ /dev/null @@ -1,34 +0,0 @@ -import subprocess -import os -import re -import sys - -import numpy as np - -from pandas import * - - -if __name__ == '__main__': - from vbench.git import GitRepo - repo = GitRepo('/Users/wesm/code/pandas') - churn = repo.get_churn_by_file() - - file_include = [] - for path in churn.major_axis: - if path.endswith('.pyx') or path.endswith('.py'): - file_include.append(path) - commits_include = [sha for sha in churn.minor_axis - if 'LF' not in repo.messages[sha]] - commits_include.remove('dcf3490') - - clean_churn = churn.reindex(major=file_include, minor=commits_include) - - by_commit = clean_churn.sum('major').sum(1) - - by_date = by_commit.groupby(repo.commit_date).sum() - - by_date = by_date.drop([datetime(2011, 6, 10)]) - - # clean out days where I touched Cython - - by_date = by_date[by_date < 5000] diff --git a/scripts/groupby_sample.py b/scripts/groupby_sample.py deleted file mode 100644 index 42008858d3cad..0000000000000 --- a/scripts/groupby_sample.py +++ /dev/null @@ -1,54 +0,0 @@ -from pandas import * -import numpy as np -import string -import pandas.compat as compat - -g1 = np.array(list(string.letters))[:-1] -g2 = np.arange(510) -df_small = DataFrame({'group1': ["a", "b", "a", "a", "b", "c", "c", "c", "c", - "c", "a", "a", "a", "b", "b", "b", "b"], - 'group2': [1, 2, 3, 4, 1, 3, 5, 6, 5, 4, 1, 2, 3, 4, 3, 2, 1], - 'value': ["apple", "pear", "orange", 
"apple", - "banana", "durian", "lemon", "lime", - "raspberry", "durian", "peach", "nectarine", - "banana", "lemon", "guava", "blackberry", - "grape"]}) -value = df_small['value'].values.repeat(3) -df = DataFrame({'group1': g1.repeat(4000 * 5), - 'group2': np.tile(g2, 400 * 5), - 'value': value.repeat(4000 * 5)}) - - -def random_sample(): - grouped = df.groupby(['group1', 'group2'])['value'] - from random import choice - choose = lambda group: choice(group.index) - indices = grouped.apply(choose) - return df.reindex(indices) - - -def random_sample_v2(): - grouped = df.groupby(['group1', 'group2'])['value'] - from random import choice - choose = lambda group: choice(group.index) - indices = [choice(v) for k, v in compat.iteritems(grouped.groups)] - return df.reindex(indices) - - -def do_shuffle(arr): - from random import shuffle - result = arr.copy().values - shuffle(result) - return result - - -def shuffle_uri(df, grouped): - perm = np.r_[tuple([np.random.permutation( - idxs) for idxs in compat.itervalues(grouped.groups)])] - df['state_permuted'] = np.asarray(df.ix[perm]['value']) - -df2 = df.copy() -grouped = df2.groupby('group1') -shuffle_uri(df2, grouped) - -df2['state_perm'] = grouped['value'].transform(do_shuffle) diff --git a/scripts/groupby_speed.py b/scripts/groupby_speed.py deleted file mode 100644 index 3be9fac12418e..0000000000000 --- a/scripts/groupby_speed.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import print_function -from pandas import * - -rng = DatetimeIndex('1/3/2011', '11/30/2011', offset=offsets.Minute()) - -df = DataFrame(np.random.randn(len(rng), 5), index=rng, - columns=list('OHLCV')) - -rng5 = DatetimeIndex('1/3/2011', '11/30/2011', offset=offsets.Minute(5)) -gp = rng5.asof -grouped = df.groupby(gp) - - -def get1(dt): - k = gp(dt) - return grouped.get_group(k) - - -def get2(dt): - k = gp(dt) - return df.ix[grouped.groups[k]] - - -def f(): - for i, date in enumerate(df.index): - if i % 10000 == 0: - print(i) - get1(date) - - -def 
g(): - for i, date in enumerate(df.index): - if i % 10000 == 0: - print(i) - get2(date) diff --git a/scripts/groupby_test.py b/scripts/groupby_test.py deleted file mode 100644 index f640a6ed79503..0000000000000 --- a/scripts/groupby_test.py +++ /dev/null @@ -1,145 +0,0 @@ -from collections import defaultdict - -from numpy import nan -import numpy as np - -from pandas import * - -import pandas._libs.lib as tseries -import pandas.core.groupby as gp -import pandas.util.testing as tm -from pandas.compat import range -reload(gp) - -""" - -k = 1000 -values = np.random.randn(8 * k) -key1 = np.array(['foo', 'bar', 'baz', 'bar', 'foo', 'baz', 'bar', 'baz'] * k, - dtype=object) -key2 = np.array(['b', 'b', 'b', 'b', 'a', 'a', 'a', 'a' ] * k, - dtype=object) -shape, labels, idicts = gp.labelize(key1, key2) - -print(tseries.group_labels(key1)) - -# print(shape) -# print(labels) -# print(idicts) - -result = tseries.group_aggregate(values, labels, shape) - -print(tseries.groupby_indices(key2)) - -df = DataFrame({'key1' : key1, - 'key2' : key2, - 'v1' : values, - 'v2' : values}) -k1 = df['key1'] -k2 = df['key2'] - -# del df['key1'] -# del df['key2'] - -# r2 = gp.multi_groupby(df, np.sum, k1, k2) - -# print(result) - -gen = gp.generate_groups(df['v1'], labels, shape, axis=1, - factory=DataFrame) - -res = defaultdict(dict) -for a, gen1 in gen: - for b, group in gen1: - print(a, b) - print(group) - # res[b][a] = group['values'].sum() - res[b][a] = group.sum() - -res = DataFrame(res) - -grouped = df.groupby(['key1', 'key2']) -""" - -# data = {'A' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan], -# 'B' : ['A', 'B'] * 6, -# 'C' : np.random.randn(12)} -# df = DataFrame(data) -# df['C'][2:10:2] = nan - -# single column -# grouped = df.drop(['B'], axis=1).groupby('A') -# exp = {} -# for cat, group in grouped: -# exp[cat] = group['C'].sum() -# exp = DataFrame({'C' : exp}) -# result = grouped.sum() - -# grouped = df.groupby(['A', 'B']) -# expd = {} -# for cat1, cat2, group in grouped: -# 
expd.setdefault(cat1, {})[cat2] = group['C'].sum() -# exp = DataFrame(expd).T.stack() -# result = grouped.sum()['C'] - -# print('wanted') -# print(exp) -# print('got') -# print(result) - -# tm.N = 10000 - -# mapping = {'A': 0, 'C': 1, 'B': 0, 'D': 1} -# tf = lambda x: x - x.mean() - -# df = tm.makeTimeDataFrame() -# ts = df['A'] - -# # grouped = df.groupby(lambda x: x.strftime('%m/%y')) -# grouped = df.groupby(mapping, axis=1) -# groupedT = df.T.groupby(mapping, axis=0) - -# r1 = groupedT.transform(tf).T -# r2 = grouped.transform(tf) - -# fillit = lambda x: x.fillna(method='pad') - -# f = lambda x: x - -# transformed = df.groupby(lambda x: x.strftime('%m/%y')).transform(lambda -# x: x) - -# def ohlc(group): -# return Series([group[0], group.max(), group.min(), group[-1]], -# index=['open', 'high', 'low', 'close']) -# grouper = [lambda x: x.year, lambda x: x.month] -# dr = DateRange('1/1/2000', '1/1/2002') -# ts = Series(np.random.randn(len(dr)), index=dr) - -# import string - -# k = 20 -# n = 1000 - -# keys = list(string.letters[:k]) - -# df = DataFrame({'A' : np.tile(keys, n), -# 'B' : np.repeat(keys[:k/2], n * 2), -# 'C' : np.random.randn(k * n)}) - -# def f(): -# for x in df.groupby(['A', 'B']): -# pass - -a = np.arange(100).repeat(100) -b = np.tile(np.arange(100), 100) -index = MultiIndex.from_arrays([a, b]) -s = Series(np.random.randn(len(index)), index) -df = DataFrame({'A': s}) -df['B'] = df.index.get_level_values(0) -df['C'] = df.index.get_level_values(1) - - -def f(): - for x in df.groupby(['B', 'B']): - pass diff --git a/scripts/hdfstore_panel_perf.py b/scripts/hdfstore_panel_perf.py deleted file mode 100644 index c66e9506fc4c5..0000000000000 --- a/scripts/hdfstore_panel_perf.py +++ /dev/null @@ -1,17 +0,0 @@ -from pandas import * -from pandas.util.testing import rands -from pandas.compat import range - -i, j, k = 7, 771, 5532 - -panel = Panel(np.random.randn(i, j, k), - items=[rands(10) for _ in range(i)], - major_axis=DatetimeIndex('1/1/2000', 
periods=j, - offset=offsets.Minute()), - minor_axis=[rands(10) for _ in range(k)]) - - -store = HDFStore('test.h5') -store.put('test_panel', panel, table=True) - -retrieved = store['test_panel'] diff --git a/scripts/json_manip.py b/scripts/json_manip.py deleted file mode 100644 index 7ff4547825568..0000000000000 --- a/scripts/json_manip.py +++ /dev/null @@ -1,423 +0,0 @@ -""" - -Tasks -------- - -Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers. - -Example -~~~~~~~~~~~~~ - - *give me a list of all the fields called 'id' in this stupid, gnarly - thing* - - >>> Q('id',gnarly_data) - ['id1','id2','id3'] - - -Observations: ---------------------- - -1) 'simple data structures' exist and are common. They are tedious - to search. - -2) The DOM is another nested / treeish structure, and jQuery selector is - a good tool for that. - -3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These - analyses are valuable and worth doing. - -3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty - things, and those analyses are also worth doing! - -3c) Some analyses are best done using 'one-off' and custom code in C, Python, - or another 'real' programming language. - -4) Arbitrary transforms are tedious and error prone. SQL is one solution, - XSLT is another, - -5) the XPATH/XML/XSLT family is.... not universally loved :) They are - very complete, and the completeness can make simple cases... gross. - -6) For really complicated data structures, we can write one-off code. Getting - 80% of the way is mostly okay. There will always have to be programmers - in the loop. - -7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT - and the like. Be wary of mission creep! Re-use when possible (e.g., can - we put the thing into a DOM using - -8) If the interface is good, people can improve performance later. 
- - -Simplifying ---------------- - - -1) Assuming 'jsonable' structures - -2) keys are strings or stringlike. Python allows any hashable to be a key. - for now, we pretend that doesn't happen. - -3) assumes most dicts are 'well behaved'. DAG, no cycles! - -4) assume that if people want really specialized transforms, they can do it - themselves. - -""" -from __future__ import print_function - -from collections import namedtuple -import csv -import itertools -from itertools import product -from operator import attrgetter as aget, itemgetter as iget -import operator -import sys -from pandas.compat import map, u, callable, Counter -import pandas.compat as compat - - -## note 'url' appears multiple places and not all extensions have same struct -ex1 = { - 'name': 'Gregg', - 'extensions': [ - {'id':'hello', - 'url':'url1'}, - {'id':'gbye', - 'url':'url2', - 'more': dict(url='url3')}, - ] -} - -## much longer example -ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'), - u('value'): 7}, - {u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False}, - {u('name'): u('accessibility.browsewithcaret'), u('value'): False}, - {u('name'): u('accessibility.win32.force_disabled'), u('value'): False}, - {u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False}, - {u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')}, - {u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000}, - {u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True}, - {u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False}, - {u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1}, - {u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True}, - {u('name'): u('accessibility.blockautorefresh'), u('value'): False}, - {u('name'): u('accessibility.browsewithcaret_shortcut.enabled'), - u('value'): True}, - {u('name'): 
u('accessibility.typeaheadfind.enablesound'), u('value'): True}, - {u('name'): u('accessibility.typeaheadfind.prefillwithselection'), - u('value'): True}, - {u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')}, - {u('name'): u('accessibility.typeaheadfind'), u('value'): False}, - {u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0}, - {u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True}, - {u('name'): u('accessibility.usetexttospeech'), u('value'): u('')}, - {u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True}, - {u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False}, - {u('name'): u('isInstantiated'), u('value'): True}], - u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'), - u('isEnabled'): True}, - {u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False}, - {u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True}, - {u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True}, - {u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), u('isEnabled'): True}], - u('fxVersion'): u('9.0'), - u('location'): u('zh-CN'), - u('operatingSystem'): u('WINNT Windows NT 5.1'), - u('surveyAnswers'): u(''), - u('task_guid'): u('d69fbd15-2517-45b5-8a17-bb7354122a75'), - u('tpVersion'): u('1.2'), - u('updateChannel'): u('beta')}, - u('survey_data'): { - u('extensions'): [{u('appDisabled'): False, - u('id'): u('testpilot?labs.mozilla.com'), - u('isCompatible'): True, - u('isEnabled'): True, - u('isPlatformCompatible'): True, - u('name'): u('Test Pilot')}, - {u('appDisabled'): True, - u('id'): u('dict?www.youdao.com'), - u('isCompatible'): False, - u('isEnabled'): False, - u('isPlatformCompatible'): True, - u('name'): u('Youdao Word Capturer')}, - {u('appDisabled'): False, - u('id'): u('jqs?sun.com'), - u('isCompatible'): True, - u('isEnabled'): True, - u('isPlatformCompatible'): True, - 
u('name'): u('Java Quick Starter')}, - {u('appDisabled'): False, - u('id'): u('?20a82645-c095-46ed-80e3-08825760534b?'), - u('isCompatible'): True, - u('isEnabled'): True, - u('isPlatformCompatible'): True, - u('name'): u('Microsoft .NET Framework Assistant')}, - {u('appDisabled'): False, - u('id'): u('?a0d7ccb3-214d-498b-b4aa-0e8fda9a7bf7?'), - u('isCompatible'): True, - u('isEnabled'): True, - u('isPlatformCompatible'): True, - u('name'): u('WOT')}], - u('version_number'): 1}} - -# class SurveyResult(object): - -# def __init__(self, record): -# self.record = record -# self.metadata, self.survey_data = self._flatten_results() - -# def _flatten_results(self): -# survey_data = self.record['survey_data'] -# extensions = DataFrame(survey_data['extensions']) - -def denorm(queries,iterable_of_things,default=None): - """ - 'repeat', or 'stutter' to 'tableize' for downstream. - (I have no idea what a good word for this is!) - - Think ``kronecker`` products, or: - - ``SELECT single,multiple FROM table;`` - - single multiple - ------- --------- - id1 val1 - id1 val2 - - - Args: - - queries: iterable of ``Q`` queries. - iterable_of_things: to be queried. - - Returns: - - list of 'stuttered' output, where if a query returns - a 'single', it gets repeated appropriately. 
- - - """ - - def _denorm(queries,thing): - fields = [] - results = [] - for q in queries: - #print(q) - r = Ql(q,thing) - #print("-- result: ", r) - if not r: - r = [default] - if isinstance(r[0], type({})): - fields.append(sorted(r[0].keys())) # dicty answers - else: - fields.append([q]) # stringy answer - - results.append(r) - - #print(results) - #print(fields) - flist = list(flatten(*map(iter,fields))) - - prod = itertools.product(*results) - for p in prod: - U = dict() - for (ii,thing) in enumerate(p): - #print(ii,thing) - if isinstance(thing, type({})): - U.update(thing) - else: - U[fields[ii][0]] = thing - - yield U - - return list(flatten(*[_denorm(queries,thing) for thing in iterable_of_things])) - - -def default_iget(fields,default=None,): - """ itemgetter with 'default' handling, that *always* returns lists - - API CHANGES from ``operator.itemgetter`` - - Note: Sorry to break the iget api... (fields vs *fields) - Note: *always* returns a list... unlike itemgetter, - which can return tuples or 'singles' - """ - myiget = operator.itemgetter(*fields) - L = len(fields) - def f(thing): - try: - ans = list(myiget(thing)) - if L < 2: - ans = [ans,] - return ans - except KeyError: - # slower! - return [thing.get(x,default) for x in fields] - - f.__doc__ = "itemgetter with default %r for fields %r" %(default,fields) - f.__name__ = "default_itemgetter" - return f - - -def flatten(*stack): - """ - helper function for flattening iterables of generators in a - sensible way. 
- """ - stack = list(stack) - while stack: - try: x = next(stack[0]) - except StopIteration: - stack.pop(0) - continue - if hasattr(x,'next') and callable(getattr(x,'next')): - stack.insert(0, x) - - #if isinstance(x, (GeneratorType,listerator)): - else: yield x - - -def _Q(filter_, thing): - """ underlying machinery for Q function recursion """ - T = type(thing) - if isinstance({}, T): - for k,v in compat.iteritems(thing): - #print(k,v) - if filter_ == k: - if isinstance(v, type([])): - yield iter(v) - else: - yield v - - if type(v) in (type({}),type([])): - yield Q(filter_,v) - - elif isinstance([], T): - for k in thing: - #print(k) - yield Q(filter_,k) - - else: - # no recursion. - pass - -def Q(filter_,thing): - """ - type(filter): - - list: a flattened list of all searches (one list) - - dict: dict with vals each of which is that search - - Notes: - - [1] 'parent thing', with space, will do a descendent - [2] this will come back 'flattened' jQuery style - [3] returns a generator. Use ``Ql`` if you want a list. - - """ - if isinstance(filter_, type([])): - return flatten(*[_Q(x,thing) for x in filter_]) - elif isinstance(filter_, type({})): - d = dict.fromkeys(list(filter_.keys())) - #print(d) - for k in d: - #print(flatten(Q(k,thing))) - d[k] = Q(k,thing) - - return d - - else: - if " " in filter_: # i.e. "antecendent post" - parts = filter_.strip().split() - r = None - for p in parts: - r = Ql(p,thing) - thing = r - - return r - - else: # simple. 
- return flatten(_Q(filter_,thing)) - -def Ql(filter_,thing): - """ same as Q, but returns a list, not a generator """ - res = Q(filter_,thing) - - if isinstance(filter_, type({})): - for k in res: - res[k] = list(res[k]) - return res - - else: - return list(res) - - - -def countit(fields,iter_of_iter,default=None): - """ - note: robust to fields not being in i_of_i, using ``default`` - """ - C = Counter() # needs hashables - T = namedtuple("Thing",fields) - get = default_iget(*fields,default=default) - return Counter( - (T(*get(thing)) for thing in iter_of_iter) - ) - - -## right now this works for one row... -def printout(queries,things,default=None, f=sys.stdout, **kwargs): - """ will print header and objects - - **kwargs go to csv.DictWriter - - help(csv.DictWriter) for more. - """ - - results = denorm(queries,things,default=None) - fields = set(itertools.chain(*(x.keys() for x in results))) - - W = csv.DictWriter(f=f,fieldnames=fields,**kwargs) - #print("---prod---") - #print(list(prod)) - W.writeheader() - for r in results: - W.writerow(r) - - -def test_run(): - print("\n>>> print(list(Q('url',ex1)))") - print(list(Q('url',ex1))) - assert list(Q('url',ex1)) == ['url1','url2','url3'] - assert Ql('url',ex1) == ['url1','url2','url3'] - - print("\n>>> print(list(Q(['name','id'],ex1)))") - print(list(Q(['name','id'],ex1))) - assert Ql(['name','id'],ex1) == ['Gregg','hello','gbye'] - - - print("\n>>> print(Ql('more url',ex1))") - print(Ql('more url',ex1)) - - - print("\n>>> list(Q('extensions',ex1))") - print(list(Q('extensions',ex1))) - - print("\n>>> print(Ql('extensions',ex1))") - print(Ql('extensions',ex1)) - - print("\n>>> printout(['name','extensions'],[ex1,], extrasaction='ignore')") - printout(['name','extensions'],[ex1,], extrasaction='ignore') - - print("\n\n") - - from pprint import pprint as pp - - print("-- note that the extension fields are also flattened! 
(and N/A) -- ") - pp(denorm(['location','fxVersion','notthere','survey_data extensions'],[ex2,], default="N/A")[:2]) - - -if __name__ == "__main__": - pass diff --git a/scripts/leak.py b/scripts/leak.py deleted file mode 100644 index 47f74bf020597..0000000000000 --- a/scripts/leak.py +++ /dev/null @@ -1,13 +0,0 @@ -from pandas import * -from pandas.compat import range -import numpy as np -import pandas.util.testing as tm -import os -import psutil - -pid = os.getpid() -proc = psutil.Process(pid) - -df = DataFrame(index=np.arange(100)) -for i in range(5000): - df[i] = 5 diff --git a/scripts/parser_magic.py b/scripts/parser_magic.py deleted file mode 100644 index 72fef39d8db65..0000000000000 --- a/scripts/parser_magic.py +++ /dev/null @@ -1,74 +0,0 @@ -from pandas.util.testing import set_trace -import pandas.util.testing as tm -import pandas.compat as compat - -from pandas import * -import ast -import inspect -import sys - - -def merge(a, b): - f, args, _ = parse_stmt(inspect.currentframe().f_back) - return DataFrame({args[0]: a, - args[1]: b}) - - -def parse_stmt(frame): - info = inspect.getframeinfo(frame) - call = info[-2][0] - mod = ast.parse(call) - body = mod.body[0] - if isinstance(body, (ast.Assign, ast.Expr)): - call = body.value - elif isinstance(body, ast.Call): - call = body - return _parse_call(call) - - -def _parse_call(call): - func = _maybe_format_attribute(call.func) - - str_args = [] - for arg in call.args: - if isinstance(arg, ast.Name): - str_args.append(arg.id) - elif isinstance(arg, ast.Call): - formatted = _format_call(arg) - str_args.append(formatted) - - return func, str_args, {} - - -def _format_call(call): - func, args, kwds = _parse_call(call) - content = '' - if args: - content += ', '.join(args) - if kwds: - fmt_kwds = ['%s=%s' % item for item in compat.iteritems(kwds)] - joined_kwds = ', '.join(fmt_kwds) - if args: - content = content + ', ' + joined_kwds - else: - content += joined_kwds - return '%s(%s)' % (func, content) - - -def 
_maybe_format_attribute(name): - if isinstance(name, ast.Attribute): - return _format_attribute(name) - return name.id - - -def _format_attribute(attr): - obj = attr.value - if isinstance(attr.value, ast.Attribute): - obj = _format_attribute(attr.value) - else: - obj = obj.id - return '.'.join((obj, attr.attr)) - -a = tm.makeTimeSeries() -b = tm.makeTimeSeries() -df = merge(a, b) diff --git a/scripts/preepoch_test.py b/scripts/preepoch_test.py deleted file mode 100644 index 36a3d768e671f..0000000000000 --- a/scripts/preepoch_test.py +++ /dev/null @@ -1,23 +0,0 @@ -import numpy as np -from pandas import * - - -def panda_test(): - - # generate some data - data = np.random.rand(50, 5) - # generate some dates - dates = DatetimeIndex('1/1/1969', periods=50) - # generate column headings - cols = ['A', 'B', 'C', 'D', 'E'] - - df = DataFrame(data, index=dates, columns=cols) - - # save to HDF5Store - store = HDFStore('bugzilla.h5', mode='w') - store['df'] = df # This gives: OverflowError: mktime argument out of range - store.close() - - -if __name__ == '__main__': - panda_test() diff --git a/scripts/pypistats.py b/scripts/pypistats.py deleted file mode 100644 index 41343f6d30c76..0000000000000 --- a/scripts/pypistats.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -Calculates the total number of downloads that a particular PyPI package has -received across all versions tracked by PyPI -""" - -from datetime import datetime -import locale -import sys -import xmlrpclib -import pandas as pd - -locale.setlocale(locale.LC_ALL, '') - - -class PyPIDownloadAggregator(object): - - def __init__(self, package_name, include_hidden=True): - self.package_name = package_name - self.include_hidden = include_hidden - self.proxy = xmlrpclib.Server('http://pypi.python.org/pypi') - self._downloads = {} - - @property - def releases(self): - """Retrieves the release number for each uploaded release""" - - result = 
self.proxy.package_releases(self.package_name, - self.include_hidden) - - if len(result) == 0: - # no matching package--search for possibles, and limit to 15 - # results - results = self.proxy.search({ - 'name': self.package_name, - 'description': self.package_name - }, 'or')[:15] - - # make sure we only get unique package names - matches = [] - for match in results: - name = match['name'] - if name not in matches: - matches.append(name) - - # if only one package was found, return it - if len(matches) == 1: - self.package_name = matches[0] - return self.releases - - error = """No such package found: %s - -Possible matches include: -%s -""" % (self.package_name, '\n'.join('\t- %s' % n for n in matches)) - - sys.exit(error) - - return result - - def get_downloads(self): - """Calculate the total number of downloads for the package""" - downloads = {} - for release in self.releases: - urls = self.proxy.release_urls(self.package_name, release) - urls = pd.DataFrame(urls) - urls['version'] = release - downloads[release] = urls - - return pd.concat(downloads, ignore_index=True) - -if __name__ == '__main__': - agg = PyPIDownloadAggregator('pandas') - - data = agg.get_downloads() - - to_omit = ['0.2b1', '0.2beta'] - - isostrings = data['upload_time'].map(lambda x: x.value) - data['upload_time'] = pd.to_datetime(isostrings) - - totals = data.groupby('version').downloads.sum() - rollup = {'0.8.0rc1': '0.8.0', - '0.8.0rc2': '0.8.0', - '0.3.0.beta': '0.3.0', - '0.3.0.beta2': '0.3.0'} - downloads = totals.groupby(lambda x: rollup.get(x, x)).sum() - - first_upload = data.groupby('version').upload_time.min() - - result = pd.DataFrame({'downloads': totals, - 'release_date': first_upload}) - result = result.sort('release_date') - result = result.drop(to_omit + list(rollup.keys())) - result.index.name = 'release' - - by_date = result.reset_index().set_index('release_date').downloads - dummy = pd.Series(index=pd.DatetimeIndex([datetime(2012, 12, 27)])) - by_date = 
by_date.append(dummy).shift(1).fillna(0) diff --git a/scripts/roll_median_leak.py b/scripts/roll_median_leak.py deleted file mode 100644 index 03f39e2b18372..0000000000000 --- a/scripts/roll_median_leak.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import print_function -from pandas import * - -import numpy as np -import os - -from vbench.api import Benchmark -from pandas.util.testing import rands -from pandas.compat import range -import pandas._libs.lib as lib -import pandas._sandbox as sbx -import time - -import psutil - -pid = os.getpid() -proc = psutil.Process(pid) - -lst = SparseList() -lst.append([5] * 10000) -lst.append(np.repeat(np.nan, 1000000)) - -for _ in range(10000): - print(proc.get_memory_info()) - sdf = SparseDataFrame({'A': lst.to_array()}) - chunk = sdf[sdf['A'] == 5] diff --git a/scripts/runtests.py b/scripts/runtests.py deleted file mode 100644 index e14752b43116b..0000000000000 --- a/scripts/runtests.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import print_function -import os -print(os.getpid()) -import nose -nose.main('pandas.core') diff --git a/scripts/test_py27.bat b/scripts/test_py27.bat deleted file mode 100644 index 11e3056287e31..0000000000000 --- a/scripts/test_py27.bat +++ /dev/null @@ -1,6 +0,0 @@ -SET PATH=C:\MinGW\bin;C:\Python27;C:\Python27\Scripts;%PATH% - -python setup.py clean -python setup.py build_ext -c mingw32 --inplace - -nosetests pandas \ No newline at end of file diff --git a/scripts/testmed.py b/scripts/testmed.py deleted file mode 100644 index dd3b952d58c60..0000000000000 --- a/scripts/testmed.py +++ /dev/null @@ -1,171 +0,0 @@ -## {{{ Recipe 576930 (r10): Efficient Running Median using an Indexable Skiplist - -from random import random -from math import log, ceil -from pandas.compat import range -from numpy.random import randn -from pandas.lib.skiplist import rolling_median - - -class Node(object): - __slots__ = 'value', 'next', 'width' - - def __init__(self, value, next, width): - self.value, self.next, 
self.width = value, next, width - - -class End(object): - 'Sentinel object that always compares greater than another object' - def __cmp__(self, other): - return 1 - -NIL = Node(End(), [], []) # Singleton terminator node - - -class IndexableSkiplist: - 'Sorted collection supporting O(lg n) insertion, removal, and lookup by rank.' - - def __init__(self, expected_size=100): - self.size = 0 - self.maxlevels = int(1 + log(expected_size, 2)) - self.head = Node('HEAD', [NIL] * self.maxlevels, [1] * self.maxlevels) - - def __len__(self): - return self.size - - def __getitem__(self, i): - node = self.head - i += 1 - for level in reversed(range(self.maxlevels)): - while node.width[level] <= i: - i -= node.width[level] - node = node.next[level] - return node.value - - def insert(self, value): - # find first node on each level where node.next[levels].value > value - chain = [None] * self.maxlevels - steps_at_level = [0] * self.maxlevels - node = self.head - for level in reversed(range(self.maxlevels)): - while node.next[level].value <= value: - steps_at_level[level] += node.width[level] - node = node.next[level] - chain[level] = node - - # insert a link to the newnode at each level - d = min(self.maxlevels, 1 - int(log(random(), 2.0))) - newnode = Node(value, [None] * d, [None] * d) - steps = 0 - for level in range(d): - prevnode = chain[level] - newnode.next[level] = prevnode.next[level] - prevnode.next[level] = newnode - newnode.width[level] = prevnode.width[level] - steps - prevnode.width[level] = steps + 1 - steps += steps_at_level[level] - for level in range(d, self.maxlevels): - chain[level].width[level] += 1 - self.size += 1 - - def remove(self, value): - # find first node on each level where node.next[levels].value >= value - chain = [None] * self.maxlevels - node = self.head - for level in reversed(range(self.maxlevels)): - while node.next[level].value < value: - node = node.next[level] - chain[level] = node - if value != chain[0].next[0].value: - raise KeyError('Not 
Found') - - # remove one link at each level - d = len(chain[0].next[0].next) - for level in range(d): - prevnode = chain[level] - prevnode.width[level] += prevnode.next[level].width[level] - 1 - prevnode.next[level] = prevnode.next[level].next[level] - for level in range(d, self.maxlevels): - chain[level].width[level] -= 1 - self.size -= 1 - - def __iter__(self): - 'Iterate over values in sorted order' - node = self.head.next[0] - while node is not NIL: - yield node.value - node = node.next[0] - -from collections import deque -from itertools import islice - - -class RunningMedian: - 'Fast running median with O(lg n) updates where n is the window size' - - def __init__(self, n, iterable): - from pandas.lib.skiplist import IndexableSkiplist as skiplist - - self.it = iter(iterable) - self.queue = deque(islice(self.it, n)) - self.skiplist = IndexableSkiplist(n) - for elem in self.queue: - self.skiplist.insert(elem) - - def __iter__(self): - queue = self.queue - skiplist = self.skiplist - midpoint = len(queue) // 2 - yield skiplist[midpoint] - for newelem in self.it: - oldelem = queue.popleft() - skiplist.remove(oldelem) - queue.append(newelem) - skiplist.insert(newelem) - yield skiplist[midpoint] - -N = 100000 -K = 10000 - -import time - - -def test(): - from numpy.random import randn - - arr = randn(N) - - def _test(arr, k): - meds = RunningMedian(k, arr) - return list(meds) - - _test(arr, K) - - - -def test2(): - - arr = randn(N) - - return rolling_median(arr, K) - - -def runmany(f, arr, arglist): - timings = [] - - for arg in arglist: - tot = 0 - for i in range(5): - tot += _time(f, arr, arg) - timings.append(tot / 5) - - return timings - - -def _time(f, *args): - _start = time.clock() - result = f(*args) - return time.clock() - _start - -if __name__ == '__main__': - test2() diff --git a/scripts/touchup_gh_issues.py b/scripts/touchup_gh_issues.py deleted file mode 100755 index 8aa6d426156f0..0000000000000 --- a/scripts/touchup_gh_issues.py +++ /dev/null @@ -1,44 
+0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from __future__ import print_function -from collections import OrderedDict -import sys -import re - -""" -Reads in stdin, replace all occurences of '#num' or 'GH #num' with -links to github issue. dumps the issue anchors before the next -section header -""" - -pat = "((?:\s*GH\s*)?)#(\d{3,4})([^_]|$)?" -rep_pat = r"\1GH\2_\3" -anchor_pat = ".. _GH{id}: https://github.com/pandas-dev/pandas/issues/{id}" -section_pat = "^pandas\s[\d\.]+\s*$" - - -def main(): - issues = OrderedDict() - while True: - - line = sys.stdin.readline() - if not line: - break - - if re.search(section_pat, line): - for id in issues: - print(anchor_pat.format(id=id).rstrip()) - if issues: - print("\n") - issues = OrderedDict() - - for m in re.finditer(pat, line): - id = m.group(2) - if id not in issues: - issues[id] = True - print(re.sub(pat, rep_pat, line).rstrip()) - pass - -if __name__ == "__main__": - main() diff --git a/scripts/use_build_cache.py b/scripts/use_build_cache.py deleted file mode 100755 index f8c2df2a8a45d..0000000000000 --- a/scripts/use_build_cache.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import os - -""" -This script should be run from the repo root dir, it rewrites setup.py -to use the build cache directory specified in the envar BUILD_CACHE_DIR -or in a file named .build_cache_dir in the repo root directory. - -Artifacts included in the cache: -- gcc artifacts -- The .c files resulting from cythonizing pyx/d files -- 2to3 refactoring results (when run under python3) - -Tested on releases back to 0.7.0. - -""" - -try: - import argparse - argparser = argparse.ArgumentParser(description=""" - 'Program description. 
- """.strip()) - - argparser.add_argument('-f', '--force-overwrite', - default=False, - help='Setting this will overwrite any existing cache results for the current commit', - action='store_true') - argparser.add_argument('-d', '--debug', - default=False, - help='Report cache hits/misses', - action='store_true') - - args = argparser.parse_args() -except: - class Foo(object): - debug=False - force_overwrite=False - - args = Foo() # for 2.6, no argparse - -#print(args.accumulate(args.integers)) - -shim=""" -import os -import sys -import shutil -import warnings -import re -""" - -shim += ("BC_FORCE_OVERWRITE = %s\n" % args.force_overwrite) -shim += ("BC_DEBUG = %s\n" % args.debug) - -shim += """ -try: - if not ("develop" in sys.argv) and not ("install" in sys.argv): - 1/0 - basedir = os.path.dirname(__file__) - dotfile = os.path.join(basedir,".build_cache_dir") - BUILD_CACHE_DIR = "" - if os.path.exists(dotfile): - BUILD_CACHE_DIR = open(dotfile).readline().strip() - BUILD_CACHE_DIR = os.environ.get('BUILD_CACHE_DIR',BUILD_CACHE_DIR) - - if os.path.isdir(BUILD_CACHE_DIR): - print("--------------------------------------------------------") - print("BUILD CACHE ACTIVATED (V2). 
be careful, this is experimental.") - print("BUILD_CACHE_DIR: " + BUILD_CACHE_DIR ) - print("--------------------------------------------------------") - else: - BUILD_CACHE_DIR = None - - # retrieve 2to3 artifacts - if sys.version_info[0] >= 3: - from lib2to3 import refactor - from hashlib import sha1 - import shutil - import multiprocessing - pyver = "%d.%d" % (sys.version_info[:2]) - fileq = ["pandas"] - to_process = dict() - - # retrieve the hashes existing in the cache - orig_hashes=dict() - post_hashes=dict() - for path,dirs,files in os.walk(os.path.join(BUILD_CACHE_DIR,'pandas')): - for f in files: - s=f.split(".py-")[-1] - try: - prev_h,post_h,ver = s.split('-') - if ver == pyver: - orig_hashes[prev_h] = os.path.join(path,f) - post_hashes[post_h] = os.path.join(path,f) - except: - pass - - while fileq: - f = fileq.pop() - - if os.path.isdir(f): - fileq.extend([os.path.join(f,x) for x in os.listdir(f)]) - else: - if not f.endswith(".py"): - continue - else: - try: - h = sha1(open(f,"rb").read()).hexdigest() - except IOError: - to_process[h] = f - else: - if h in orig_hashes and not BC_FORCE_OVERWRITE: - src = orig_hashes[h] - if BC_DEBUG: - print("2to3 cache hit %s,%s" % (f,h)) - shutil.copyfile(src,f) - elif h not in post_hashes: - # we're not in a dev dir with already processed files - if BC_DEBUG: - print("2to3 cache miss (will process) %s,%s" % (f,h)) - to_process[h] = f - - avail_fixes = set(refactor.get_fixers_from_package("lib2to3.fixes")) - avail_fixes.discard('lib2to3.fixes.fix_next') - t=refactor.RefactoringTool(avail_fixes) - if to_process: - print("Starting 2to3 refactoring...") - for orig_h,f in to_process.items(): - if BC_DEBUG: - print("2to3 on %s" % f) - try: - t.refactor([f],True) - post_h = sha1(open(f, "rb").read()).hexdigest() - cached_fname = f + '-' + orig_h + '-' + post_h + '-' + pyver - path = os.path.join(BUILD_CACHE_DIR, cached_fname) - pathdir =os.path.dirname(path) - if BC_DEBUG: - print("cache put %s in %s" % (f, path)) - try: - 
os.makedirs(pathdir) - except OSError as exc: - import errno - if exc.errno == errno.EEXIST and os.path.isdir(pathdir): - pass - else: - raise - - shutil.copyfile(f, path) - - except Exception as e: - print("While processing %s 2to3 raised: %s" % (f,str(e))) - - pass - print("2to3 done refactoring.") - -except Exception as e: - if not isinstance(e,ZeroDivisionError): - print( "Exception: " + str(e)) - BUILD_CACHE_DIR = None - -class CompilationCacheMixin(object): - def __init__(self, *args, **kwds): - cache_dir = kwds.pop("cache_dir", BUILD_CACHE_DIR) - self.cache_dir = cache_dir - if not os.path.isdir(cache_dir): - raise Exception("Error: path to Cache directory (%s) is not a dir" % cache_dir) - - def _copy_from_cache(self, hash, target): - src = os.path.join(self.cache_dir, hash) - if os.path.exists(src) and not BC_FORCE_OVERWRITE: - if BC_DEBUG: - print("Cache HIT: asked to copy file %s in %s" % - (src,os.path.abspath(target))) - s = "." - for d in target.split(os.path.sep)[:-1]: - s = os.path.join(s, d) - if not os.path.exists(s): - os.mkdir(s) - shutil.copyfile(src, target) - - return True - - return False - - def _put_to_cache(self, hash, src): - target = os.path.join(self.cache_dir, hash) - if BC_DEBUG: - print( "Cache miss: asked to copy file from %s to %s" % (src,target)) - s = "." 
- for d in target.split(os.path.sep)[:-1]: - s = os.path.join(s, d) - if not os.path.exists(s): - os.mkdir(s) - shutil.copyfile(src, target) - - def _hash_obj(self, obj): - try: - return hash(obj) - except: - raise NotImplementedError("You must override this method") - -class CompilationCacheExtMixin(CompilationCacheMixin): - def _hash_file(self, fname): - from hashlib import sha1 - f= None - try: - hash = sha1() - hash.update(self.build_lib.encode('utf-8')) - try: - if sys.version_info[0] >= 3: - import io - f = io.open(fname, "rb") - else: - f = open(fname) - - first_line = f.readline() - # ignore cython generation timestamp header - if "Generated by Cython" not in first_line.decode('utf-8'): - hash.update(first_line) - hash.update(f.read()) - return hash.hexdigest() - - except: - raise - return None - finally: - if f: - f.close() - - except IOError: - return None - - def _hash_obj(self, ext): - from hashlib import sha1 - - sources = ext.sources - if (sources is None or - (not hasattr(sources, '__iter__')) or - isinstance(sources, str) or - sys.version[0] == 2 and isinstance(sources, unicode)): # argh - return False - - sources = list(sources) + ext.depends - hash = sha1() - try: - for fname in sources: - fhash = self._hash_file(fname) - if fhash: - hash.update(fhash.encode('utf-8')) - except: - return None - - return hash.hexdigest() - - -class CachingBuildExt(build_ext, CompilationCacheExtMixin): - def __init__(self, *args, **kwds): - CompilationCacheExtMixin.__init__(self, *args, **kwds) - kwds.pop("cache_dir", None) - build_ext.__init__(self, *args, **kwds) - - def build_extension(self, ext, *args, **kwds): - ext_path = self.get_ext_fullpath(ext.name) - build_path = os.path.join(self.build_lib, os.path.basename(ext_path)) - - hash = self._hash_obj(ext) - if hash and self._copy_from_cache(hash, ext_path): - return - - build_ext.build_extension(self, ext, *args, **kwds) - - hash = self._hash_obj(ext) - if os.path.exists(build_path): - self._put_to_cache(hash, 
build_path) # build_ext - if os.path.exists(ext_path): - self._put_to_cache(hash, ext_path) # develop - - def cython_sources(self, sources, extension): - import re - cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \ - (extension.language and extension.language.lower() == 'c++') - target_ext = '.c' - if cplus: - target_ext = '.cpp' - - for i, s in enumerate(sources): - if not re.search("\.(pyx|pxi|pxd)$", s): - continue - ext_dir = os.path.dirname(s) - ext_basename = re.sub("\.[^\.]+$", "", os.path.basename(s)) - ext_basename += target_ext - target = os.path.join(ext_dir, ext_basename) - hash = self._hash_file(s) - sources[i] = target - if hash and self._copy_from_cache(hash, target): - continue - build_ext.cython_sources(self, [s], extension) - self._put_to_cache(hash, target) - - sources = [x for x in sources if x.startswith("pandas") or "lib." in x] - - return sources - -if BUILD_CACHE_DIR: # use the cache - cmdclass['build_ext'] = CachingBuildExt - -try: - # recent - setuptools_kwargs['use_2to3'] = True if BUILD_CACHE_DIR is None else False -except: - pass - -try: - # pre eb2234231 , ~ 0.7.0, - setuptools_args['use_2to3'] = True if BUILD_CACHE_DIR is None else False -except: - pass - -""" -def main(): - opd = os.path.dirname - opj = os.path.join - s= None - with open(opj(opd(__file__),"..","setup.py")) as f: - s = f.read() - if s: - if "BUILD CACHE ACTIVATED (V2)" in s: - print( "setup.py already wired with V2 build_cache, skipping..") - else: - SEP="\nsetup(" - before,after = s.split(SEP) - with open(opj(opd(__file__),"..","setup.py"),"wb") as f: - f.write((before + shim + SEP + after).encode('ascii')) - print(""" - setup.py was rewritten to use a build cache. 
- Make sure you've put the following in your .bashrc: - - export BUILD_CACHE_DIR=<an existing directory for saving cached files> - echo $BUILD_CACHE_DIR > pandas_repo_rootdir/.build_cache_dir - - Once active, build results (compilation, cythonizations and 2to3 artifacts) - will be cached in "$BUILD_CACHE_DIR" and subsequent builds should be - sped up if no changes requiring recompilation were made. - - Go ahead and run: - - python setup.py clean - python setup.py develop - - """) - -if __name__ == '__main__': - import sys - sys.exit(main()) diff --git a/scripts/winbuild_py27.bat b/scripts/winbuild_py27.bat deleted file mode 100644 index bec67c7e527ed..0000000000000 --- a/scripts/winbuild_py27.bat +++ /dev/null @@ -1,2 +0,0 @@ -SET PATH=C:\MinGW\bin;C:\Python27;C:\Python27\Scripts;%PATH% -python setup.py build -c mingw32 bdist_wininst
closes #13704
https://api.github.com/repos/pandas-dev/pandas/pulls/18181
2017-11-08T21:12:40Z
2017-11-09T12:11:12Z
2017-11-09T12:11:12Z
2017-11-09T12:12:10Z
DOC: clarify default window in rolling method
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 0325e54d18e36..66ce4f93808ed 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -344,7 +344,9 @@ The following methods are available: :meth:`~Window.sum`, Sum of values :meth:`~Window.mean`, Mean of values -The weights used in the window are specified by the ``win_type`` keyword. The list of recognized types are: +The weights used in the window are specified by the ``win_type`` keyword. +The list of recognized types are the `scipy.signal window functions + <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: - ``boxcar`` - ``triang`` diff --git a/pandas/core/window.py b/pandas/core/window.py index 5143dddc5e866..345f9b035a36b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -503,6 +503,9 @@ class Window(_Window): * ``general_gaussian`` (needs power, width) * ``slepian`` (needs width). + If ``win_type=None`` all points are evenly weighted. To learn more about + different window types see `scipy.signal window functions + <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__. """ def validate(self):
- [x] closes #17893 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18177
2017-11-08T18:35:47Z
2017-11-30T13:56:00Z
2017-11-30T13:56:00Z
2017-11-30T13:56:15Z
offsets cleanup
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 87be9fa910101..b77598ab57fc0 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -19,6 +19,7 @@ from pandas._libs.tslib import pydt_to_i8 from frequencies cimport get_freq_code from conversion cimport tz_convert_single +from timedeltas import Timedelta, delta_to_nanoseconds # --------------------------------------------------------------------- # Constants @@ -375,3 +376,21 @@ class BaseOffset(_BaseOffset): # i.e. isinstance(other, (ABCDatetimeIndex, ABCSeries)) return other - self return -self + other + +# --------------------------------------------------------------------- +# Ticks + +class _Tick(object): + _inc = Timedelta(microseconds=1000) + _prefix = None + + @property + def delta(self): + return self.n * self._inc + + @property + def nanos(self): + return delta_to_nanoseconds(self.delta) + + def isAnchored(self): + return False diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 5843aaa23be57..f5aee999d9a27 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -23,7 +23,7 @@ _determine_offset, apply_index_wraps, BeginMixin, EndMixin, - BaseOffset) + BaseOffset, _Tick) import functools import operator @@ -231,8 +231,8 @@ def apply_index(self, i): weeks = (self.kwds.get('weeks', 0)) * self.n if weeks: - i = (i.to_period('W') + weeks).to_timestamp() + \ - i.to_perioddelta('W') + i = ((i.to_period('W') + weeks).to_timestamp() + + i.to_perioddelta('W')) timedelta_kwds = dict((k, v) for k, v in self.kwds.items() if k in ['days', 'hours', 'minutes', @@ -418,8 +418,8 @@ def offset(self): return self._offset def _repr_attrs(self): - if self.offset: - attrs = ['offset={offset!r}'.format(offset=self.offset)] + if self._offset: + attrs = ['offset={offset!r}'.format(offset=self._offset)] else: attrs = None out = '' @@ -494,15 +494,15 @@ def get_str(td): off_str += str(td.microseconds) + 'us' return off_str - if 
isinstance(self.offset, timedelta): + if isinstance(self._offset, timedelta): zero = timedelta(0, 0, 0) - if self.offset >= zero: - off_str = '+' + get_str(self.offset) + if self._offset >= zero: + off_str = '+' + get_str(self._offset) else: - off_str = '-' + get_str(-self.offset) + off_str = '-' + get_str(-self._offset) return off_str else: - return '+' + repr(self.offset) + return '+' + repr(self._offset) @apply_wraps def apply(self, other): @@ -530,12 +530,12 @@ def apply(self, other): if result.weekday() < 5: n -= k - if self.offset: - result = result + self.offset + if self._offset: + result = result + self._offset return result elif isinstance(other, (timedelta, Tick)): - return BDay(self.n, offset=self.offset + other, + return BDay(self.n, offset=self._offset + other, normalize=self.normalize) else: raise ApplyTypeError('Only know how to combine business day with ' @@ -847,12 +847,12 @@ def apply(self, other): dt_date = np_incr_dt.astype(datetime) result = datetime.combine(dt_date, date_in.time()) - if self.offset: - result = result + self.offset + if self._offset: + result = result + self._offset return result elif isinstance(other, (timedelta, Tick)): - return BDay(self.n, offset=self.offset + other, + return BDay(self.n, offset=self._offset + other, normalize=self.normalize) else: raise ApplyTypeError('Only know how to combine trading day with ' @@ -1227,12 +1227,7 @@ def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False first_weekday, _ = tslib.monthrange(dt.year, dt.month) - if first_weekday == 5: - return dt.day == 3 - elif first_weekday == 6: - return dt.day == 2 - else: - return dt.day == 1 + return dt.day == _get_firstbday(first_weekday) class CustomBusinessMonthEnd(BusinessMixin, MonthOffset): @@ -1965,8 +1960,8 @@ def _decrement(date): date.microsecond) def _rollf(date): - if date.month != self.month or\ - date.day < tslib.monthrange(date.year, date.month)[1]: + if (date.month != self.month or + date.day < 
tslib.monthrange(date.year, date.month)[1]): date = _increment(date) return date @@ -2133,9 +2128,9 @@ def _offset_lwom(self): return LastWeekOfMonth(n=1, weekday=self.weekday) def isAnchored(self): - return self.n == 1 \ - and self.startingMonth is not None \ - and self.weekday is not None + return (self.n == 1 and + self.startingMonth is not None and + self.weekday is not None) def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -2145,8 +2140,8 @@ def onOffset(self, dt): if self.variation == "nearest": # We have to check the year end of "this" cal year AND the previous - return year_end == dt or \ - self.get_year_end(dt - relativedelta(months=1)) == dt + return (year_end == dt or + self.get_year_end(dt - relativedelta(months=1)) == dt) else: return year_end == dt @@ -2224,8 +2219,8 @@ def get_year_end(self, dt): return self._get_year_end_last(dt) def get_target_month_end(self, dt): - target_month = datetime( - dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo) + target_month = datetime(dt.year, self.startingMonth, 1, + tzinfo=dt.tzinfo) next_month_first_of = target_month + relativedelta(months=+1) return next_month_first_of + relativedelta(days=-1) @@ -2243,8 +2238,8 @@ def _get_year_end_nearest(self, dt): return backward def _get_year_end_last(self, dt): - current_year = datetime( - dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo) + current_year = datetime(dt.year, self.startingMonth, 1, + tzinfo=dt.tzinfo) return current_year + self._offset_lwom @property @@ -2472,8 +2467,8 @@ class Easter(DateOffset): @apply_wraps def apply(self, other): currentEaster = easter(other.year) - currentEaster = datetime( - currentEaster.year, currentEaster.month, currentEaster.day) + currentEaster = datetime(currentEaster.year, + currentEaster.month, currentEaster.day) currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo) # NOTE: easter returns a datetime.date so we have to convert to type of @@ -2509,10 +2504,7 @@ def f(self, other): return f 
-class Tick(SingleConstructorOffset): - _inc = Timedelta(microseconds=1000) - _prefix = 'undefined' - +class Tick(_Tick, SingleConstructorOffset): __gt__ = _tick_comp(operator.gt) __ge__ = _tick_comp(operator.ge) __lt__ = _tick_comp(operator.lt) @@ -2563,14 +2555,6 @@ def __ne__(self, other): else: return DateOffset.__ne__(self, other) - @property - def delta(self): - return self.n * self._inc - - @property - def nanos(self): - return delta_to_nanoseconds(self.delta) - def apply(self, other): # Timestamp can handle tz and nano sec, thus no need to use apply_wraps if isinstance(other, Timestamp): @@ -2595,9 +2579,6 @@ def apply(self, other): raise ApplyTypeError('Unhandled type: {type_str}' .format(type_str=type(other).__name__)) - def isAnchored(self): - return False - def _delta_to_tick(delta): if delta.microseconds == 0:
This is mostly a small cleanup of offsets, want to keep this fairly trivial diff from obscuring more important upcoming changes. The two non-trivial things here are 1) implement `_Tick` class in offsets.pyx with the most basic of its methods and 2) optimization of `Tick.__hash__`. You'd be surprised how big a difference this particular optimization makes: Before: ``` In [2]: day = pd.offsets.Day(2) In [3]: %timeit hash(day) 100000 loops, best of 3: 11.7 µs per loop ``` After ``` In [3]: day = pd.offsets.Day(2) In [5]: %timeit hash(day) The slowest run took 8.61 times longer than the fastest. This could mean that an intermediate result is being cached. 100000 loops, best of 3: 1.86 µs per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18174
2017-11-08T17:01:38Z
2017-11-09T00:28:24Z
null
2017-11-12T23:04:26Z
COMPAT: install dateutil from master
diff --git a/ci/requirements-3.6_NUMPY_DEV.build.sh b/ci/requirements-3.6_NUMPY_DEV.build.sh index bc92d8fca6b17..fd79142c5cebb 100644 --- a/ci/requirements-3.6_NUMPY_DEV.build.sh +++ b/ci/requirements-3.6_NUMPY_DEV.build.sh @@ -12,10 +12,7 @@ PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf pip install --pre --upgrade --timeout=60 -f $PRE_WHEELS numpy scipy # install dateutil from master - -# TODO(jreback), temp disable dateutil master has changed -# pip install -U git+git://github.com/dateutil/dateutil.git -pip install python-dateutil +pip install -U git+git://github.com/dateutil/dateutil.git # cython via pip pip install cython
xref #18157
https://api.github.com/repos/pandas-dev/pandas/pulls/18172
2017-11-08T14:58:48Z
2017-11-10T21:41:17Z
2017-11-10T21:41:17Z
2017-11-10T21:42:16Z
ENH: allow using '+' sign in the argument to `to_offset()`
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 943b6bb84fb47..6bc4530f6d786 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -23,7 +23,7 @@ Other Enhancements ^^^^^^^^^^^^^^^^^^ - Better support for ``Dataframe.style.to_excel()`` output with the ``xlsxwriter`` engine. (:issue:`16149`) -- +- :func:`pd.tseries.frequencies.to_offset()` now accepts leading '+' signs e.g. '+1h'. (:issue:`18171`) - .. _whatsnew_0220.api_breaking: diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 9d810bfb411af..2a700d52eaaf3 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -15,7 +15,7 @@ from util cimport is_integer_object # hack to handle WOM-1MON opattern = re.compile( - r'([\-]?\d*|[\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)' + r'([+\-]?\d*|[+\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)' ) _INVALID_FREQ_ERROR = "Invalid frequency: {0}" diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py index 39a9a87141753..745f3ef2ec5cb 100644 --- a/pandas/tests/tseries/test_frequencies.py +++ b/pandas/tests/tseries/test_frequencies.py @@ -169,6 +169,19 @@ def test_to_offset_leading_zero(self): result = frequencies.to_offset(freqstr) assert (result.n == -194) + def test_to_offset_leading_plus(self): + freqstr = '+1d' + result = frequencies.to_offset(freqstr) + assert (result.n == 1) + + freqstr = '+2h30min' + result = frequencies.to_offset(freqstr) + assert (result.n == 150) + + for bad_freq in ['+-1d', '-+1h', '+1', '-7', '+d', '-m']: + with tm.assert_raises_regex(ValueError, 'Invalid frequency:'): + frequencies.to_offset(bad_freq) + def test_to_offset_pd_timedelta(self): # Tests for #9064 td = Timedelta(days=1, seconds=1)
- [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18171
2017-11-08T13:54:28Z
2017-11-10T14:24:59Z
2017-11-10T14:24:59Z
2017-11-10T14:25:48Z
Add test for #15966
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 5cd5a3793ab46..d3f58434a91ae 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -677,3 +677,12 @@ def test_tz_convert_and_localize(self, fn): with assert_raises_regex(ValueError, 'not valid'): df = DataFrame(index=l0) df = getattr(df, fn)('US/Pacific', level=1) + + @pytest.mark.parametrize('timestamps', [ + [Timestamp('2012-01-01 13:00:00+00:00')] * 2, + [Timestamp('2012-01-01 13:00:00')] * 2]) + def test_tz_aware_scalar_comparison(self, timestamps): + # Test for issue #15966 + df = DataFrame({'test': timestamps}) + expected = DataFrame({'test': [False, False]}) + assert_frame_equal(df == -1, expected)
- [x] adds test for #15966, but it has been fixed already - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry -- its only an added test?
https://api.github.com/repos/pandas-dev/pandas/pulls/18170
2017-11-08T13:45:27Z
2017-11-08T16:45:59Z
2017-11-08T16:45:59Z
2017-11-09T10:28:00Z
MultiIndex Doc
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 8b53c56b01ab1..e591825cec748 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -174,14 +174,14 @@ on a deeper level. Defined Levels ~~~~~~~~~~~~~~ -The repr of a ``MultiIndex`` shows ALL the defined levels of an index, even +The repr of a ``MultiIndex`` shows all the defined levels of an index, even if the they are not actually used. When slicing an index, you may notice this. For example: .. ipython:: python - # original multi-index - df.columns +   # original MultiIndex +   df.columns # sliced df[['foo','qux']].columns @@ -264,7 +264,7 @@ Passing a list of labels or tuples works similar to reindexing: Using slicers ~~~~~~~~~~~~~ -You can slice a multi-index by providing multiple indexers. +You can slice a ``MultiIndex`` by providing multiple indexers. You can provide any of the selectors as if you are indexing by label, see :ref:`Selection by Label <indexing.label>`, including slices, lists of labels, labels, and boolean indexers. @@ -278,7 +278,7 @@ As usual, **both sides** of the slicers are included as this is label indexing. You should specify all axes in the ``.loc`` specifier, meaning the indexer for the **index** and for the **columns**. There are some ambiguous cases where the passed indexer could be mis-interpreted - as indexing *both* axes, rather than into say the MuliIndex for the rows. +   as indexing *both* axes, rather than into say the ``MultiIndex`` for the rows. You should do this: @@ -286,8 +286,8 @@ As usual, **both sides** of the slicers are included as this is label indexing. df.loc[(slice('A1','A3'),.....), :] - rather than this: - +   rather than this: +  .. code-block:: python df.loc[(slice('A1','A3'),.....)] @@ -494,7 +494,7 @@ are named. s.sort_index(level='L2') On higher dimensional objects, you can sort any of the other axes by level if -they have a MultiIndex: +they have a ``MultiIndex``: .. ipython:: python
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18169
2017-11-08T13:17:48Z
2017-11-08T20:03:16Z
2017-11-08T20:03:16Z
2017-12-17T17:52:59Z
Doc
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index db1780e88baef..8b53c56b01ab1 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -196,7 +196,7 @@ highly performant. If you want to see the actual used levels. # for a specific level df[['foo','qux']].columns.get_level_values(0) -To reconstruct the multiindex with only the used levels +To reconstruct the ``MultiIndex`` with only the used levels .. versionadded:: 0.20.0
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18168
2017-11-08T12:57:41Z
2017-11-08T13:00:42Z
2017-11-08T13:00:42Z
2017-11-09T12:26:35Z
Fix groupby().count() for datetime columns
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index d4cfb6d5b1a46..2d60bfea5d56c 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -60,6 +60,7 @@ Bug Fixes - Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) - Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) - Bug in ``pd.Series.rolling.skew()`` and ``rolling.kurt()`` with all equal values has floating issue (:issue:`18044`) +- Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 967685c4e11bf..1acc8c3ed0bbb 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -4365,7 +4365,8 @@ def count(self): ids, _, ngroups = self.grouper.group_info mask = ids != -1 - val = ((mask & ~isna(blk.get_values())) for blk in data.blocks) + val = ((mask & ~isna(np.atleast_2d(blk.get_values()))) + for blk in data.blocks) loc = (blk.mgr_locs for blk in data.blocks) counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1) diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 485241d593d4f..787d99086873e 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -2,9 +2,11 @@ from __future__ import print_function import numpy as np +import pytest -from pandas import (DataFrame, Series, MultiIndex) -from pandas.util.testing import assert_series_equal +from pandas import (DataFrame, Series, MultiIndex, Timestamp, Timedelta, + Period) +from pandas.util.testing import (assert_series_equal, assert_frame_equal) from pandas.compat import (range, product as cart_product) @@ -195,3 +197,18 @@ def test_ngroup_respects_groupby_order(self): g.ngroup()) assert_series_equal(Series(df['group_index'].values), g.cumcount()) + 
+ @pytest.mark.parametrize('datetimelike', [ + [Timestamp('2016-05-%02d 20:09:25+00:00' % i) for i in range(1, 4)], + [Timestamp('2016-05-%02d 20:09:25' % i) for i in range(1, 4)], + [Timedelta(x, unit="h") for x in range(1, 4)], + [Period(freq="2W", year=2017, month=x) for x in range(1, 4)]]) + def test_count_with_datetimelike(self, datetimelike): + # test for #13393, where DataframeGroupBy.count() fails + # when counting a datetimelike column. + + df = DataFrame({'x': ['a', 'a', 'b'], 'y': datetimelike}) + res = df.groupby('x').count() + expected = DataFrame({'y': [2, 1]}, index=['a', 'b']) + expected.index.name = "x" + assert_frame_equal(expected, res)
- [x] closes #13393 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18167
2017-11-08T10:49:52Z
2017-11-08T20:25:47Z
2017-11-08T20:25:47Z
2017-11-09T10:27:49Z
ENH: Return locale based month_name and weekday_name values (#12805, #12806)
diff --git a/doc/source/api.rst b/doc/source/api.rst index a5e26bc948a70..dba7f6526f22a 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -551,7 +551,6 @@ These can be accessed like ``Series.dt.<property>``. Series.dt.weekofyear Series.dt.dayofweek Series.dt.weekday - Series.dt.weekday_name Series.dt.dayofyear Series.dt.quarter Series.dt.is_month_start @@ -581,6 +580,8 @@ These can be accessed like ``Series.dt.<property>``. Series.dt.round Series.dt.floor Series.dt.ceil + Series.dt.month_name + Series.dt.day_name **Timedelta Properties** @@ -1723,7 +1724,6 @@ Time/Date Components DatetimeIndex.week DatetimeIndex.dayofweek DatetimeIndex.weekday - DatetimeIndex.weekday_name DatetimeIndex.quarter DatetimeIndex.tz DatetimeIndex.freq @@ -1759,6 +1759,8 @@ Time-specific operations DatetimeIndex.round DatetimeIndex.floor DatetimeIndex.ceil + DatetimeIndex.month_name + DatetimeIndex.day_name Conversion ~~~~~~~~~~ @@ -1940,7 +1942,6 @@ Properties Timestamp.tzinfo Timestamp.value Timestamp.week - Timestamp.weekday_name Timestamp.weekofyear Timestamp.year @@ -1954,6 +1955,7 @@ Methods Timestamp.combine Timestamp.ctime Timestamp.date + Timestamp.day_name Timestamp.dst Timestamp.floor Timestamp.freq @@ -1963,6 +1965,7 @@ Methods Timestamp.isocalendar Timestamp.isoformat Timestamp.isoweekday + Timestamp.month_name Timestamp.normalize Timestamp.now Timestamp.replace diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 7a19f87051746..9fbec736052ab 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -338,7 +338,8 @@ Other Enhancements - For subclassed ``DataFrames``, :func:`DataFrame.apply` will now preserve the ``Series`` subclass (if defined) when passing the data to the applied function (:issue:`19822`) - :func:`DataFrame.from_dict` now accepts a ``columns`` argument that can be used to specify the column names when ``orient='index'`` is used (:issue:`18529`) - Added option ``display.html.use_mathjax`` 
so `MathJax <https://www.mathjax.org/>`_ can be disabled when rendering tables in ``Jupyter`` notebooks (:issue:`19856`, :issue:`19824`) - +- :meth:`Timestamp.month_name`, :meth:`DatetimeIndex.month_name`, and :meth:`Series.dt.month_name` are now available (:issue:`12805`) +- :meth:`Timestamp.day_name` and :meth:`DatetimeIndex.day_name` are now available to return day names with a specified locale (:issue:`12806`) .. _whatsnew_0230.api_breaking: @@ -677,6 +678,7 @@ Deprecations - The ``broadcast`` parameter of ``.apply()`` is deprecated in favor of ``result_type='broadcast'`` (:issue:`18577`) - The ``reduce`` parameter of ``.apply()`` is deprecated in favor of ``result_type='reduce'`` (:issue:`18577`) - The ``order`` parameter of :func:`factorize` is deprecated and will be removed in a future release (:issue:`19727`) +- :attr:`Timestamp.weekday_name`, :attr:`DatetimeIndex.weekday_name`, and :attr:`Series.dt.weekday_name` are deprecated in favor of :meth:`Timestamp.day_name`, :meth:`DatetimeIndex.day_name`, and :meth:`Series.dt.day_name` (:issue:`12806`) .. 
_whatsnew_0230.prior_deprecations: diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 9bd315b43ea9e..0901d474d044c 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -12,6 +12,8 @@ cimport numpy as cnp from numpy cimport int64_t, int32_t cnp.import_array() +from locale import LC_TIME +from strptime import LocaleTime # ---------------------------------------------------------------------- # Constants @@ -35,11 +37,18 @@ cdef int32_t* _month_offset = [ # Canonical location for other modules to find name constants MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] +# The first blank line is consistent with calendar.month_name in the calendar +# standard library +MONTHS_FULL = ['', 'January', 'February', 'March', 'April', 'May', 'June', + 'July', 'August', 'September', 'October', 'November', + 'December'] MONTH_NUMBERS = {name: num for num, name in enumerate(MONTHS)} MONTH_ALIASES = {(num + 1): name for num, name in enumerate(MONTHS)} MONTH_TO_CAL_NUM = {name: num + 1 for num, name in enumerate(MONTHS)} DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] +DAYS_FULL = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', + 'Saturday', 'Sunday'] int_to_weekday = {num: name for num, name in enumerate(DAYS)} weekday_to_int = {int_to_weekday[key]: key for key in int_to_weekday} @@ -199,3 +208,23 @@ cpdef int32_t get_day_of_year(int year, int month, int day) nogil: day_of_year = mo_off + day return day_of_year + + +cpdef get_locale_names(object name_type, object locale=None): + """Returns an array of localized day or month names + + Parameters + ---------- + name_type : string, attribute of LocaleTime() in which to return localized + names + locale : string + + Returns + ------- + list of locale names + + """ + from pandas.util.testing import set_locale + + with set_locale(locale, LC_TIME): + return getattr(LocaleTime(), name_type) diff --git 
a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 7a4b9775bd56e..ccf67e765e079 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -13,7 +13,7 @@ cimport numpy as cnp from numpy cimport ndarray, int64_t, int32_t, int8_t cnp.import_array() - +from ccalendar import get_locale_names, MONTHS_FULL, DAYS_FULL from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek, get_week_of_year, get_day_of_year) from np_datetime cimport (pandas_datetimestruct, pandas_timedeltastruct, @@ -85,26 +85,27 @@ def build_field_sarray(ndarray[int64_t] dtindex): @cython.wraparound(False) @cython.boundscheck(False) -def get_date_name_field(ndarray[int64_t] dtindex, object field): +def get_date_name_field(ndarray[int64_t] dtindex, object field, + object locale=None): """ Given a int64-based datetime index, return array of strings of date name based on requested field (e.g. weekday_name) """ cdef: Py_ssize_t i, count = 0 - ndarray[object] out + ndarray[object] out, names pandas_datetimestruct dts int dow - _dayname = np.array( - ['Monday', 'Tuesday', 'Wednesday', 'Thursday', - 'Friday', 'Saturday', 'Sunday'], - dtype=np.object_) - count = len(dtindex) out = np.empty(count, dtype=object) - if field == 'weekday_name': + if field == 'day_name' or field == 'weekday_name': + if locale is None: + names = np.array(DAYS_FULL, dtype=np.object_) + else: + names = np.array(get_locale_names('f_weekday', locale), + dtype=np.object_) for i in range(count): if dtindex[i] == NPY_NAT: out[i] = np.nan @@ -112,7 +113,21 @@ def get_date_name_field(ndarray[int64_t] dtindex, object field): dt64_to_dtstruct(dtindex[i], &dts) dow = dayofweek(dts.year, dts.month, dts.day) - out[i] = _dayname[dow] + out[i] = names[dow].capitalize() + return out + elif field == 'month_name': + if locale is None: + names = np.array(MONTHS_FULL, dtype=np.object_) + else: + names = np.array(get_locale_names('f_month', locale), + dtype=np.object_) + for i in range(count): + if 
dtindex[i] == NPY_NAT: + out[i] = np.nan + continue + + dt64_to_dtstruct(dtindex[i], &dts) + out[i] = names[dts.month].capitalize() return out raise ValueError("Field %s not supported" % field) diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 9f4ef4e515058..be76b55fa169b 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- # cython: profile=False -import warnings from cpython cimport ( PyFloat_Check, PyComplex_Check, @@ -39,24 +38,19 @@ _nat_scalar_rules[Py_GE] = False # ---------------------------------------------------------------------- -def _make_nan_func(func_name, cls): +def _make_nan_func(func_name, doc): def f(*args, **kwargs): return np.nan f.__name__ = func_name - f.__doc__ = getattr(cls, func_name).__doc__ + f.__doc__ = doc return f -def _make_nat_func(func_name, cls): +def _make_nat_func(func_name, doc): def f(*args, **kwargs): return NaT - f.__name__ = func_name - if isinstance(cls, str): - # passed the literal docstring directly - f.__doc__ = cls - else: - f.__doc__ = getattr(cls, func_name).__doc__ + f.__doc__ = doc return f @@ -318,11 +312,40 @@ class NaTType(_NaT): # These are the ones that can get their docstrings from datetime. # nan methods - weekday = _make_nan_func('weekday', datetime) - isoweekday = _make_nan_func('isoweekday', datetime) + weekday = _make_nan_func('weekday', datetime.weekday.__doc__) + isoweekday = _make_nan_func('isoweekday', datetime.isoweekday.__doc__) + month_name = _make_nan_func('month_name', # noqa:E128 + """ + Return the month name of the Timestamp with specified locale. + + Parameters + ---------- + locale : string, default None (English locale) + locale determining the language in which to return the month name + + Returns + ------- + month_name : string + + .. versionadded:: 0.23.0 + """) + day_name = _make_nan_func('day_name', # noqa:E128 + """ + Return the day name of the Timestamp with specified locale. 
+ Parameters + ---------- + locale : string, default None (English locale) + locale determining the language in which to return the day name + + Returns + ------- + day_name : string + + .. versionadded:: 0.23.0 + """) # _nat_methods - date = _make_nat_func('date', datetime) + date = _make_nat_func('date', datetime.date.__doc__) utctimetuple = _make_error_func('utctimetuple', datetime) timetz = _make_error_func('timetz', datetime) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 421f781483290..5bb53cf20b478 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -24,7 +24,7 @@ cimport ccalendar from conversion import tz_localize_to_utc, date_normalize from conversion cimport (tz_convert_single, _TSObject, convert_to_tsobject, convert_datetime_to_tsobject) -from fields import get_date_field, get_start_end_field +from fields import get_start_end_field, get_date_name_field from nattype import NaT from nattype cimport NPY_NAT from np_datetime import OutOfBoundsDatetime @@ -352,6 +352,16 @@ cdef class _Timestamp(datetime): field, freqstr, month_kw) return out[0] + cpdef _get_date_name_field(self, object field, object locale): + cdef: + int64_t val + ndarray out + + val = self._maybe_convert_value_to_local() + out = get_date_name_field(np.array([val], dtype=np.int64), + field, locale=locale) + return out[0] + @property def _repr_base(self): return '{date} {time}'.format(date=self._date_repr, @@ -714,12 +724,50 @@ class Timestamp(_Timestamp): def dayofweek(self): return self.weekday() + def day_name(self, locale=None): + """ + Return the day name of the Timestamp with specified locale. + + Parameters + ---------- + locale : string, default None (English locale) + locale determining the language in which to return the day name + + Returns + ------- + day_name : string + + .. 
versionadded:: 0.23.0 + """ + return self._get_date_name_field('day_name', locale) + + def month_name(self, locale=None): + """ + Return the month name of the Timestamp with specified locale. + + Parameters + ---------- + locale : string, default None (English locale) + locale determining the language in which to return the month name + + Returns + ------- + month_name : string + + .. versionadded:: 0.23.0 + """ + return self._get_date_name_field('month_name', locale) + @property def weekday_name(self): - cdef dict wdays = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', - 3: 'Thursday', 4: 'Friday', 5: 'Saturday', - 6: 'Sunday'} - return wdays[self.weekday()] + """ + .. deprecated:: 0.23.0 + Use ``Timestamp.day_name()`` instead + """ + warnings.warn("`weekday_name` is deprecated and will be removed in a " + "future version. Use `day_name` instead", + DeprecationWarning) + return self.day_name() @property def dayofyear(self): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index c9b446b97e956..6b97ee90cd93c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -231,7 +231,6 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, week dayofweek weekday - weekday_name quarter tz freq @@ -260,6 +259,8 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, to_pydatetime to_series to_frame + month_name + day_name Notes ----- @@ -318,7 +319,7 @@ def _add_comparison_methods(cls): _datetimelike_methods = ['to_period', 'tz_localize', 'tz_convert', 'normalize', 'strftime', 'round', 'floor', - 'ceil'] + 'ceil', 'month_name', 'day_name'] _is_numeric_dtype = False _infer_as_myclass = True @@ -1713,7 +1714,7 @@ def freq(self, value): weekday_name = _field_accessor( 'weekday_name', 'weekday_name', - "The name of day in a week (ex: Friday)\n\n.. versionadded:: 0.18.1") + "The name of day in a week (ex: Friday)\n\n.. 
deprecated:: 0.23.0") dayofyear = _field_accessor('dayofyear', 'doy', "The ordinal day of the year") @@ -2097,6 +2098,58 @@ def to_julian_date(self): self.nanosecond / 3600.0 / 1e+9 ) / 24.0) + def month_name(self, locale=None): + """ + Return the month names of the DateTimeIndex with specified locale. + + Parameters + ---------- + locale : string, default None (English locale) + locale determining the language in which to return the month name + + Returns + ------- + month_names : Index + Index of month names + + .. versionadded:: 0.23.0 + """ + values = self.asi8 + if self.tz is not None: + if self.tz is not utc: + values = self._local_timestamps() + + result = fields.get_date_name_field(values, 'month_name', + locale=locale) + result = self._maybe_mask_results(result) + return Index(result, name=self.name) + + def day_name(self, locale=None): + """ + Return the day names of the DateTimeIndex with specified locale. + + Parameters + ---------- + locale : string, default None (English locale) + locale determining the language in which to return the day name + + Returns + ------- + month_names : Index + Index of day names + + .. 
versionadded:: 0.23.0 + """ + values = self.asi8 + if self.tz is not None: + if self.tz is not utc: + values = self._local_timestamps() + + result = fields.get_date_name_field(values, 'day_name', + locale=locale) + result = self._maybe_mask_results(result) + return Index(result, name=self.name) + DatetimeIndex._add_comparison_methods() DatetimeIndex._add_numeric_methods_disabled() diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 2013b5e6cd6dd..a65b80efc7911 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -1,3 +1,6 @@ +import locale +import calendar + import pytest import numpy as np @@ -87,7 +90,6 @@ def test_range_edges(self): class TestDatetime64(object): def test_datetimeindex_accessors(self): - dti_naive = DatetimeIndex(freq='D', start=datetime(1998, 1, 1), periods=365) # GH 13303 @@ -134,23 +136,6 @@ def test_datetimeindex_accessors(self): assert not dti.is_year_end[0] assert dti.is_year_end[364] - # GH 11128 - assert dti.weekday_name[4] == u'Monday' - assert dti.weekday_name[5] == u'Tuesday' - assert dti.weekday_name[6] == u'Wednesday' - assert dti.weekday_name[7] == u'Thursday' - assert dti.weekday_name[8] == u'Friday' - assert dti.weekday_name[9] == u'Saturday' - assert dti.weekday_name[10] == u'Sunday' - - assert Timestamp('2016-04-04').weekday_name == u'Monday' - assert Timestamp('2016-04-05').weekday_name == u'Tuesday' - assert Timestamp('2016-04-06').weekday_name == u'Wednesday' - assert Timestamp('2016-04-07').weekday_name == u'Thursday' - assert Timestamp('2016-04-08').weekday_name == u'Friday' - assert Timestamp('2016-04-09').weekday_name == u'Saturday' - assert Timestamp('2016-04-10').weekday_name == u'Sunday' - assert len(dti.year) == 365 assert len(dti.month) == 365 assert len(dti.day) == 365 @@ -256,6 +241,54 @@ def test_datetimeindex_accessors(self): assert dates.weekofyear.tolist() == expected assert [d.weekofyear for d in 
dates] == expected + # GH 12806 + @pytest.mark.parametrize('time_locale', [ + None] if tm.get_locales() is None else [None] + tm.get_locales()) + def test_datetime_name_accessors(self, time_locale): + # Test Monday -> Sunday and January -> December, in that sequence + if time_locale is None: + # If the time_locale is None, day-name and month_name should + # return the english attributes + expected_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', + 'Friday', 'Saturday', 'Sunday'] + expected_months = ['January', 'February', 'March', 'April', 'May', + 'June', 'July', 'August', 'September', + 'October', 'November', 'December'] + else: + with tm.set_locale(time_locale, locale.LC_TIME): + expected_days = calendar.day_name[:] + expected_months = calendar.month_name[1:] + + # GH 11128 + dti = DatetimeIndex(freq='D', start=datetime(1998, 1, 1), + periods=365) + english_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', + 'Friday', 'Saturday', 'Sunday'] + for day, name, eng_name in zip(range(4, 11), + expected_days, + english_days): + name = name.capitalize() + assert dti.weekday_name[day] == eng_name + assert dti.day_name(locale=time_locale)[day] == name + ts = Timestamp(datetime(2016, 4, day)) + assert ts.weekday_name == eng_name + assert ts.day_name(locale=time_locale) == name + dti = dti.append(DatetimeIndex([pd.NaT])) + assert np.isnan(dti.day_name(locale=time_locale)[-1]) + ts = Timestamp(pd.NaT) + assert np.isnan(ts.day_name(locale=time_locale)) + + # GH 12805 + dti = DatetimeIndex(freq='M', start='2012', end='2013') + result = dti.month_name(locale=time_locale) + expected = Index([month.capitalize() for month in expected_months]) + tm.assert_index_equal(result, expected) + for date, expected in zip(dti, expected_months): + result = date.month_name(locale=time_locale) + assert result == expected.capitalize() + dti = dti.append(DatetimeIndex([pd.NaT])) + assert np.isnan(dti.month_name(locale=time_locale)[-1]) + def test_nanosecond_field(self): dti = 
DatetimeIndex(np.arange(10)) diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 504a76f259e55..0acf7acb19c0d 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -4,6 +4,7 @@ import pytest import dateutil import calendar +import locale import numpy as np from dateutil.tz import tzutc @@ -21,7 +22,7 @@ from pandas.errors import OutOfBoundsDatetime from pandas.compat import long, PY3 from pandas.compat.numpy import np_datetime64_compat -from pandas import Timestamp, Period, Timedelta +from pandas import Timestamp, Period, Timedelta, NaT class TestTimestampProperties(object): @@ -95,13 +96,33 @@ def check(value, equal): for end in ends: assert getattr(ts, end) - @pytest.mark.parametrize('data, expected', - [(Timestamp('2017-08-28 23:00:00'), 'Monday'), - (Timestamp('2017-08-28 23:00:00', tz='EST'), - 'Monday')]) - def test_weekday_name(self, data, expected): + # GH 12806 + @pytest.mark.parametrize('data', + [Timestamp('2017-08-28 23:00:00'), + Timestamp('2017-08-28 23:00:00', tz='EST')]) + @pytest.mark.parametrize('time_locale', [ + None] if tm.get_locales() is None else [None] + tm.get_locales()) + def test_names(self, data, time_locale): # GH 17354 - assert data.weekday_name == expected + # Test .weekday_name, .day_name(), .month_name + with tm.assert_produces_warning(DeprecationWarning, + check_stacklevel=False): + assert data.weekday_name == 'Monday' + if time_locale is None: + expected_day = 'Monday' + expected_month = 'August' + else: + with tm.set_locale(time_locale, locale.LC_TIME): + expected_day = calendar.day_name[0].capitalize() + expected_month = calendar.month_name[8].capitalize() + + assert data.day_name(time_locale) == expected_day + assert data.month_name(time_locale) == expected_month + + # Test NaT + nan_ts = Timestamp(NaT) + assert np.isnan(nan_ts.day_name(time_locale)) + assert np.isnan(nan_ts.month_name(time_locale)) 
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']) def test_is_leap_year(self, tz): diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 93c8ebc5f05df..3abc0f724db25 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -1,6 +1,8 @@ # coding=utf-8 # pylint: disable-msg=E1101,W0612 +import locale +import calendar import pytest from datetime import datetime, date @@ -32,7 +34,7 @@ def test_dt_namespace_accessor(self): ok_for_dt = DatetimeIndex._datetimelike_ops ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize', 'tz_convert', 'normalize', 'strftime', 'round', - 'floor', 'ceil', 'weekday_name'] + 'floor', 'ceil', 'day_name', 'month_name'] ok_for_td = TimedeltaIndex._datetimelike_ops ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds', 'round', 'floor', 'ceil'] @@ -274,6 +276,46 @@ def test_dt_accessor_no_new_attributes(self): "You cannot add any new attribute"): s.dt.xlabel = "a" + @pytest.mark.parametrize('time_locale', [ + None] if tm.get_locales() is None else [None] + tm.get_locales()) + def test_dt_accessor_datetime_name_accessors(self, time_locale): + # Test Monday -> Sunday and January -> December, in that sequence + if time_locale is None: + # If the time_locale is None, day-name and month_name should + # return the english attributes + expected_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', + 'Friday', 'Saturday', 'Sunday'] + expected_months = ['January', 'February', 'March', 'April', 'May', + 'June', 'July', 'August', 'September', + 'October', 'November', 'December'] + else: + with tm.set_locale(time_locale, locale.LC_TIME): + expected_days = calendar.day_name[:] + expected_months = calendar.month_name[1:] + + s = Series(DatetimeIndex(freq='D', start=datetime(1998, 1, 1), + periods=365)) + english_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', + 'Friday', 'Saturday', 'Sunday'] + 
for day, name, eng_name in zip(range(4, 11), + expected_days, + english_days): + name = name.capitalize() + assert s.dt.weekday_name[day] == eng_name + assert s.dt.day_name(locale=time_locale)[day] == name + s = s.append(Series([pd.NaT])) + assert np.isnan(s.dt.day_name(locale=time_locale).iloc[-1]) + + s = Series(DatetimeIndex(freq='M', start='2012', end='2013')) + result = s.dt.month_name(locale=time_locale) + expected = Series([month.capitalize() for month in expected_months]) + tm.assert_series_equal(result, expected) + for s_date, expected in zip(s, expected_months): + result = s_date.month_name(locale=time_locale) + assert result == expected.capitalize() + s = s.append(Series([pd.NaT])) + assert np.isnan(s.dt.month_name(locale=time_locale).iloc[-1]) + def test_strftime(self): # GH 10086 s = Series(date_range('20130101', periods=5))
closes #12805 closes #12806 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I am not sure what locales are available on the CI machines, but the test will run all available locales.
https://api.github.com/repos/pandas-dev/pandas/pulls/18164
2017-11-08T04:29:11Z
2018-03-04T20:15:38Z
2018-03-04T20:15:38Z
2018-03-05T11:27:29Z
#18058: improve DatetimeIndex.date performance
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 9614a63332609..2ca2416f58b57 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -89,6 +89,9 @@ def time_dti_factorize(self): def time_dti_tz_factorize(self): self.dti_tz.factorize() + def time_dti_time(self): + self.rng.time + def time_timestamp_tzinfo_cons(self): self.rng5[0] @@ -107,6 +110,11 @@ def time_infer_freq_daily(self): def time_infer_freq_business(self): infer_freq(self.b_freq) + def time_to_date(self): + self.rng.date + + def time_to_pydatetime(self): + self.rng.to_pydatetime() class TimeDatetimeConverter(object): goal_time = 0.2 diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 4f403ff8053a7..b845e84d433f7 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -91,6 +91,8 @@ Performance Improvements - Converting a ``Series`` of ``Timedelta`` objects to days, seconds, etc... sped up through vectorization of underlying methods (:issue:`18092`) - The overriden ``Timedelta`` properties of days, seconds and microseconds have been removed, leveraging their built-in Python versions instead (:issue:`18242`) - ``Series`` construction will reduce the number of copies made of the input data in certain cases (:issue:`17449`) +- Improved performance of :func:`Series.dt.date` and :func:`DatetimeIndex.date` (:issue:`18058`) +- .. 
_whatsnew_0220.docs: diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 5e3eb1f00b18c..a119e22b8e3ee 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -23,7 +23,7 @@ cimport util from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, PyDateTime_IMPORT, - timedelta, datetime) + timedelta, datetime, date) # import datetime C API PyDateTime_IMPORT # this is our datetime.pxd @@ -80,10 +80,37 @@ cdef inline object create_datetime_from_ts( return datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz) +cdef inline object create_date_from_ts( + int64_t value, pandas_datetimestruct dts, + object tz, object freq): + """ convenience routine to construct a datetime.date from its parts """ + return date(dts.year, dts.month, dts.day) -def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False): - # convert an i8 repr to an ndarray of datetimes or Timestamp (if box == - # True) + +def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, + box="datetime"): + """ + Convert an i8 repr to an ndarray of datetimes, date or Timestamp + + Parameters + ---------- + arr : array of i8 + tz : str, default None + convert to this timezone + freq : str/Offset, default None + freq to convert + box : {'datetime', 'timestamp', 'date'}, default 'datetime' + If datetime, convert to datetime.datetime + If date, convert to datetime.date + If Timestamp, convert to pandas.Timestamp + + Returns + ------- + result : array of dtype specified by box + """ + + assert ((box == "datetime") or (box == "date") or (box == "timestamp")), \ + "box must be one of 'datetime', 'date' or 'timestamp'" cdef: Py_ssize_t i, n = len(arr) @@ -94,13 +121,17 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False): ndarray[object] result = np.empty(n, dtype=object) object (*func_create)(int64_t, pandas_datetimestruct, object, object) - if box and is_string_object(freq): - from pandas.tseries.frequencies import 
to_offset - freq = to_offset(freq) + if box == "date": + assert (tz is None), "tz should be None when converting to date" - if box: + func_create = create_date_from_ts + elif box == "timestamp": func_create = create_timestamp_from_ts - else: + + if is_string_object(freq): + from pandas.tseries.frequencies import to_offset + freq = to_offset(freq) + elif box == "datetime": func_create = create_datetime_from_ts if tz is not None: diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index fe306b51de8d0..7f9245bb31530 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -405,7 +405,7 @@ def convert_to_pydatetime(x, axis): else: shape = x.shape x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(), - box=True) + box="timestamp") x = x.reshape(shape) elif x.dtype == _TD_DTYPE: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index a2ed2ff9bce5e..e08bf4a625bce 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1237,7 +1237,7 @@ def __iter__(self): end_i = min((i + 1) * chunksize, length) converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, - box=True) + box="timestamp") for v in converted: yield v @@ -1687,8 +1687,7 @@ def date(self): Returns numpy array of python datetime.date objects (namely, the date part of Timestamps without timezone information). """ - return self._maybe_mask_results(libalgos.arrmap_object( - self.asobject.values, lambda x: x.date())) + return libts.ints_to_pydatetime(self.normalize().asi8, box="date") def normalize(self): """
- [x] closes #18058 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18163
2017-11-08T04:06:11Z
2017-11-22T11:25:38Z
2017-11-22T11:25:37Z
2017-11-24T00:48:15Z
Modernize indexes.timedeltas, indexes.datetimeindex
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b3244b4a7c316..78869de318dce 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2,9 +2,11 @@ from __future__ import division import operator import warnings -from datetime import time, datetime -from datetime import timedelta +from datetime import time, datetime, timedelta + import numpy as np +from pytz import utc + from pandas.core.base import _shared_docs from pandas.core.dtypes.common import ( @@ -56,10 +58,6 @@ from pandas._libs.tslibs import timezones -def _utc(): - import pytz - return pytz.utc - # -------- some conversion wrapper functions @@ -67,7 +65,6 @@ def _field_accessor(name, field, docstring=None): def f(self): values = self.asi8 if self.tz is not None: - utc = _utc() if self.tz is not utc: values = self._local_timestamps() @@ -563,8 +560,6 @@ def _convert_for_op(self, value): raise ValueError('Passed item and index have different timezone') def _local_timestamps(self): - utc = _utc() - if self.is_monotonic: return libts.tz_convert(self.asi8, utc, self.tz) else: @@ -825,7 +820,6 @@ def _add_delta(self, delta): tz = 'UTC' if self.tz is not None else None result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer') - utc = _utc() if self.tz is not None and self.tz is not utc: result = result.tz_convert(self.tz) return result @@ -879,7 +873,6 @@ def astype(self, dtype, copy=True): raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype) def _get_time_micros(self): - utc = _utc() values = self.asi8 if self.tz is not None and self.tz is not utc: values = self._local_timestamps() diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 729edc81bb642..0b4e0000826f1 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -36,6 +36,26 @@ join as libjoin, Timedelta, NaT, iNaT) +def _field_accessor(name, alias, docstring=None): + def f(self): + if 
self.hasnans: + result = np.empty(len(self), dtype='float64') + mask = self._isnan + imask = ~mask + result.flat[imask] = np.array([getattr(Timedelta(val), alias) + for val in self.asi8[imask]]) + result[mask] = np.nan + else: + result = np.array([getattr(Timedelta(val), alias) + for val in self.asi8], dtype='int64') + + return Index(result, name=self.name) + + f.__name__ = name + f.__doc__ = docstring + return property(f) + + def _td_index_cmp(opname, nat_result=False): """ Wrap comparison operations to convert timedelta-like to timedelta64 @@ -381,46 +401,17 @@ def _format_native_types(self, na_rep=u('NaT'), nat_rep=na_rep, justify='all').get_result() - def _get_field(self, m): - - values = self.asi8 - hasnans = self.hasnans - if hasnans: - result = np.empty(len(self), dtype='float64') - mask = self._isnan - imask = ~mask - result.flat[imask] = np.array( - [getattr(Timedelta(val), m) for val in values[imask]]) - result[mask] = np.nan - else: - result = np.array([getattr(Timedelta(val), m) - for val in values], dtype='int64') - return Index(result, name=self.name) - - @property - def days(self): - """ Number of days for each element. """ - return self._get_field('days') - - @property - def seconds(self): - """ Number of seconds (>= 0 and less than 1 day) for each element. """ - return self._get_field('seconds') - - @property - def microseconds(self): - """ - Number of microseconds (>= 0 and less than 1 second) for each - element. """ - return self._get_field('microseconds') - - @property - def nanoseconds(self): - """ - Number of nanoseconds (>= 0 and less than 1 microsecond) for each - element. - """ - return self._get_field('nanoseconds') + days = _field_accessor("days", "days", + " Number of days for each element. ") + seconds = _field_accessor("seconds", "seconds", + " Number of seconds (>= 0 and less than 1 day) " + "for each element. 
") + microseconds = _field_accessor("microseconds", "microseconds", + "\nNumber of microseconds (>= 0 and less " + "than 1 second) for each\nelement. ") + nanoseconds = _field_accessor("nanoseconds", "nanoseconds", + "\nNumber of nanoseconds (>= 0 and less " + "than 1 microsecond) for each\nelement.\n") @property def components(self):
Two things here. They should be separate PRs, but so it goes. indexes.datetimes and indexes.periods use a _field_accessor pattern to define several properties in their index classes. This updates TimedeltaIndex to use the same pattern, getting rid of the `_get_field` method that was used before. indexes.datetimes has a _utc() function that I think is designed to lazily import pytz. That ship has sailed elsewhere, so this removes that extra call.
https://api.github.com/repos/pandas-dev/pandas/pulls/18161
2017-11-08T01:32:55Z
2017-11-08T11:11:33Z
2017-11-08T11:11:33Z
2017-12-11T20:20:20Z
CI: don't show miniconda output on install / numpy 1.14 compat
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index b85263daa1eac..4d8a371ba2994 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -34,9 +34,9 @@ fi # install miniconda if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -q -O miniconda.sh || exit 1 else - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 fi time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 diff --git a/ci/requirements-3.6_NUMPY_DEV.build.sh b/ci/requirements-3.6_NUMPY_DEV.build.sh index fd79142c5cebb..bc92d8fca6b17 100644 --- a/ci/requirements-3.6_NUMPY_DEV.build.sh +++ b/ci/requirements-3.6_NUMPY_DEV.build.sh @@ -12,7 +12,10 @@ PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf pip install --pre --upgrade --timeout=60 -f $PRE_WHEELS numpy scipy # install dateutil from master -pip install -U git+git://github.com/dateutil/dateutil.git + +# TODO(jreback), temp disable dateutil master has changed +# pip install -U git+git://github.com/dateutil/dateutil.git +pip install python-dateutil # cython via pip pip install cython diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 3853ac017044c..4a201d065c0b6 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -381,17 +381,20 @@ def raise_with_traceback(exc, traceback=Ellipsis): # http://stackoverflow.com/questions/4126348 # Thanks to @martineau at SO -from dateutil import parser as _date_parser import dateutil + +if PY2 and LooseVersion(dateutil.__version__) == '2.0': + # dateutil brokenness + raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' + 'install version 1.5 or 2.1+!') + +from dateutil 
import parser as _date_parser if LooseVersion(dateutil.__version__) < '2.0': + @functools.wraps(_date_parser.parse) def parse_date(timestr, *args, **kwargs): timestr = bytes(timestr) return _date_parser.parse(timestr, *args, **kwargs) -elif PY2 and LooseVersion(dateutil.__version__) == '2.0': - # dateutil brokenness - raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' - 'install version 1.5 or 2.1+!') else: parse_date = _date_parser.parse diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index abb528f0d2179..5adcd3b6855ce 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -10,6 +10,8 @@ from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, compat, concat, option_context) from pandas.compat import u +from pandas import _np_version_under1p14 + from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.tests.frame.common import TestData from pandas.util.testing import (assert_series_equal, @@ -531,7 +533,12 @@ def test_astype_str(self): assert_frame_equal(result, expected) result = DataFrame([1.12345678901234567890]).astype(tt) - expected = DataFrame(['1.12345678901']) + if _np_version_under1p14: + # < 1.14 truncates + expected = DataFrame(['1.12345678901']) + else: + # >= 1.14 preserves the full repr + expected = DataFrame(['1.1234567890123457']) assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype_class", [dict, Series])
closes #18123
https://api.github.com/repos/pandas-dev/pandas/pulls/18157
2017-11-07T21:13:26Z
2017-11-08T20:46:55Z
2017-11-08T20:46:55Z
2017-12-11T20:13:55Z
restrict columns to read for pandas.read_parquet
diff --git a/doc/source/io.rst b/doc/source/io.rst index 8656e617b8173..5d6b00a4db72e 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4538,6 +4538,16 @@ Read from a parquet file. result.dtypes +Read only certain columns of a parquet file. + +.. ipython:: python + + result = pd.read_parquet('example_pa.parquet', engine='pyarrow', columns=['a', 'b']) + result = pd.read_parquet('example_fp.parquet', engine='fastparquet', columns=['a', 'b']) + + result.dtypes + + .. ipython:: python :suppress: diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 6044f25ca5147..be40894aea656 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -81,6 +81,7 @@ I/O - Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`) - Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) +- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) Plotting ^^^^^^^^ diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 4b507b7f5df6f..ef95e32cc241e 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -76,9 +76,9 @@ def write(self, df, path, compression='snappy', table, path, compression=compression, coerce_timestamps=coerce_timestamps, **kwargs) - def read(self, path): + def read(self, path, columns=None): path, _, _ = get_filepath_or_buffer(path) - return self.api.parquet.read_table(path).to_pandas() + return self.api.parquet.read_table(path, columns=columns).to_pandas() class FastParquetImpl(object): @@ -115,9 +115,9 @@ def write(self, df, path, compression='snappy', **kwargs): self.api.write(path, df, compression=compression, **kwargs) - def read(self, path): + def read(self, path, columns=None): path, _, _ = 
get_filepath_or_buffer(path) - return self.api.ParquetFile(path).to_pandas() + return self.api.ParquetFile(path).to_pandas(columns=columns) def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): @@ -178,7 +178,7 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): return impl.write(df, path, compression=compression) -def read_parquet(path, engine='auto', **kwargs): +def read_parquet(path, engine='auto', columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. @@ -188,6 +188,10 @@ def read_parquet(path, engine='auto', **kwargs): ---------- path : string File path + columns: list, default=None + If not None, only these columns will be read from the file. + + .. versionadded 0.21.1 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first @@ -201,4 +205,4 @@ def read_parquet(path, engine='auto', **kwargs): """ impl = get_engine(engine) - return impl.read(path) + return impl.read(path, columns=columns) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index ecd4e8f719014..9a4edf38e2ef4 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -192,7 +192,7 @@ def check_round_trip(self, df, engine, expected=None, **kwargs): with tm.ensure_clean() as path: df.to_parquet(path, engine, **kwargs) - result = read_parquet(path, engine) + result = read_parquet(path, engine, **kwargs) if expected is None: expected = df @@ -200,7 +200,7 @@ def check_round_trip(self, df, engine, expected=None, **kwargs): # repeat to_parquet(df, path, engine, **kwargs) - result = pd.read_parquet(path, engine) + result = pd.read_parquet(path, engine, **kwargs) if expected is None: expected = df @@ -282,6 +282,15 @@ def test_compression(self, engine, compression): df = pd.DataFrame({'A': [1, 2, 3]}) self.check_round_trip(df, engine, 
compression=compression) + def test_read_columns(self, engine): + # GH18154 + df = pd.DataFrame({'string': list('abc'), + 'int': list(range(1, 4))}) + + expected = pd.DataFrame({'string': list('abc')}) + self.check_round_trip(df, engine, expected=expected, + compression=None, columns=["string"]) + class TestParquetPyArrow(Base):
- [ x ] closes #18154 - [ x ] tests added / passed - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18155
2017-11-07T20:09:57Z
2017-11-08T20:11:34Z
2017-11-08T20:11:34Z
2017-11-08T20:11:35Z
lint import order
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index b85263daa1eac..8398cd17adb4f 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -108,7 +108,7 @@ time pip install pytest-xdist moto if [ "$LINT" ]; then conda install flake8 - pip install cpplint + pip install cpplint flake8-import-order fi if [ "$COVERAGE" ]; then
Expecting lots and lots of flake8 complaints...
https://api.github.com/repos/pandas-dev/pandas/pulls/18151
2017-11-07T17:38:53Z
2017-11-09T18:13:35Z
null
2017-12-08T19:40:50Z
Fix 18121: Add .pxd linting to lint.sh
diff --git a/ci/lint.sh b/ci/lint.sh index 43d6ea0c118b0..c26b5f00d5a48 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -9,7 +9,7 @@ RET=0 if [ "$LINT" ]; then # pandas/_libs/src is C code, so no need to search there. - echo "Linting *.py" + echo "Linting *.py" flake8 pandas --filename=*.py --exclude pandas/_libs/src if [ $? -ne "0" ]; then RET=1 @@ -38,10 +38,20 @@ if [ "$LINT" ]; then if [ $? -ne "0" ]; then RET=1 fi - done echo "Linting *.pxi.in DONE" + echo "Linting *.pxd" + for path in '_libs' + do + echo "linting -> pandas/$path" + flake8 pandas/$path --filename=*.pxd --select=E501,E302,E203,E111,E114,E221,E303,E231,E126,F403 + if [ $? -ne "0" ]; then + RET=1 + fi + done + echo "Linting *.pxd DONE" + # readability/casting: Warnings about C casting instead of C++ casting # runtime/int: Warnings about using C number types instead of C++ ones # build/include_subdir: Warnings about prefacing included header files with directory diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd index a5ba610dc89dc..3fc3625a06634 100644 --- a/pandas/_libs/src/datetime.pxd +++ b/pandas/_libs/src/datetime.pxd @@ -44,8 +44,9 @@ cdef extern from "datetime/np_datetime.h": npy_int64 year npy_int32 month, day, hour, min, sec, us, ps, as - npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, - pandas_datetimestruct *d) nogil + npy_datetime pandas_datetimestruct_to_datetime( + PANDAS_DATETIMEUNIT fr, pandas_datetimestruct *d) nogil + void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil @@ -58,12 +59,12 @@ cdef extern from "datetime/np_datetime.h": cdef extern from "datetime/np_datetime_strings.h": int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit, - NPY_CASTING casting, pandas_datetimestruct *out, + NPY_CASTING casting, + pandas_datetimestruct *out, int *out_local, int *out_tzoffset, PANDAS_DATETIMEUNIT *out_bestunit, npy_bool *out_special) - cdef inline int 
_string_to_dts(object val, pandas_datetimestruct* dts, int* out_local, int* out_tzoffset) except? -1: cdef int result @@ -89,5 +90,6 @@ cdef inline int _cstring_to_dts(char *val, int length, result = parse_iso_8601_datetime(val, length, PANDAS_FR_ns, NPY_UNSAFE_CASTING, - dts, out_local, out_tzoffset, &out_bestunit, &special) + dts, out_local, out_tzoffset, + &out_bestunit, &special) return result diff --git a/pandas/_libs/src/numpy.pxd b/pandas/_libs/src/numpy.pxd index 9ab3b9b1b81ae..6fa2bc6af9d1f 100644 --- a/pandas/_libs/src/numpy.pxd +++ b/pandas/_libs/src/numpy.pxd @@ -152,7 +152,8 @@ cdef extern from "numpy/arrayobject.h": npy_intp NPY_MAX_ELSIZE - ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + ctypedef void (*PyArray_VectorUnaryFunc)( + void *, void *, npy_intp, void *, void *) ctypedef class numpy.dtype [object PyArray_Descr]: # Use PyDataType_* macros when possible, however there are no macros @@ -225,7 +226,9 @@ cdef extern from "numpy/arrayobject.h": if copy_shape: # Allocate new buffer for strides and shape info. # This is allocated as one block, strides first. 
- info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) + info.strides = <Py_ssize_t*>stdlib.malloc( + sizeof(Py_ssize_t) * <size_t>ndim * 2) + info.shape = info.strides + ndim for i in range(ndim): info.strides[i] = PyArray_STRIDES(self)[i] @@ -275,7 +278,8 @@ cdef extern from "numpy/arrayobject.h": elif t == NPY_CLONGDOUBLE: f = "Zg" elif t == NPY_OBJECT: f = "O" else: - raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + raise ValueError( + u"unknown dtype code in numpy.pxd (%d)" % t) info.format = f return else: @@ -294,7 +298,6 @@ cdef extern from "numpy/arrayobject.h": stdlib.free(info.strides) # info.shape was stored after info.strides in the same block - ctypedef signed char npy_bool ctypedef signed char npy_byte @@ -462,7 +465,6 @@ cdef extern from "numpy/arrayobject.h": bint PyArray_ISBEHAVED(ndarray) bint PyArray_ISBEHAVED_RO(ndarray) - bint PyDataType_ISNOTSWAPPED(dtype) bint PyDataType_ISBYTESWAPPED(dtype) @@ -475,7 +477,6 @@ cdef extern from "numpy/arrayobject.h": # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) # bint PyArray_HasArrayInterface(op, out) - bint PyArray_IsZeroDim(object) # Cannot be supported due to ## ## in macro: # bint PyArray_IsScalar(object, verbatim work) @@ -502,24 +503,28 @@ cdef extern from "numpy/arrayobject.h": unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) bint PyArray_EquivByteorders(int b1, int b2) object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, + int typenum, void* data) #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) object PyArray_ToScalar(void* data, ndarray arr) void* PyArray_GETPTR1(ndarray m, npy_intp i) void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) - void* PyArray_GETPTR4(ndarray m, 
npy_intp i, npy_intp j, npy_intp k, npy_intp l) + void* PyArray_GETPTR4(ndarray m, npy_intp i, + npy_intp j, npy_intp k, npy_intp l) void PyArray_XDECREF_ERR(ndarray) # Cannot be supported due to out arg # void PyArray_DESCR_REPLACE(descr) - object PyArray_Copy(ndarray) - object PyArray_FromObject(object op, int type, int min_depth, int max_depth) - object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) - object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_FromObject(object op, int type, + int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, + int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, + int min_depth, int max_depth) object PyArray_Cast(ndarray mp, int type_num) object PyArray_Take(ndarray ap, object items, int axis) @@ -598,8 +603,8 @@ cdef extern from "numpy/arrayobject.h": object PyArray_Dumps (object, int) int PyArray_ValidType (int) void PyArray_UpdateFlags (ndarray, int) - object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) - #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, + void *, int, int, object) #dtype PyArray_DescrNew (dtype) dtype PyArray_DescrNewFromType (int) double PyArray_GetPriority (object, double) @@ -611,7 +616,8 @@ cdef extern from "numpy/arrayobject.h": int PyArray_Broadcast (broadcast) void PyArray_FillObjectArray (ndarray, object) int PyArray_FillWithScalar (ndarray, object) - npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + npy_bool PyArray_CheckStrides ( + int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) dtype PyArray_DescrNewByteorder (dtype, char) object PyArray_IterAllButAxis (object, int *) #object PyArray_CheckFromAny (object, dtype, int, int, int, object) @@ -782,9 +788,11 @@ cdef inline object 
PyArray_MultiIterNew4(a, b, c, d): return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) + return PyArray_MultiIterNew(5, <void*>a, <void*>b, + <void*>c, <void*> d, <void*> e) -cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: +cdef inline char* _util_dtypestring(dtype descr, char* f, + char* end, int* offset) except NULL: # Recursive utility function used in __getbuffer__ to get format # string. The new location in the format string is returned. @@ -800,7 +808,8 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset child, new_offset = fields if (end - f) - (new_offset - offset[0]) < 15: - raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + raise RuntimeError( + u"Format string allocated too short, see comment in numpy.pxd") if ((child.byteorder == '>' and little_endian) or (child.byteorder == '<' and not little_endian)): @@ -860,7 +869,8 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset cdef extern from "numpy/ufuncobject.h": - ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, + npy_intp *, void *) ctypedef extern class numpy.ufunc [object PyUFuncObject]: cdef: @@ -968,14 +978,14 @@ cdef extern from "numpy/ufuncobject.h": cdef inline void set_array_base(ndarray arr, object base): - cdef PyObject* baseptr - if base is None: - baseptr = NULL - else: - Py_INCREF(base) # important to do this before decref below! - baseptr = <PyObject*>base - Py_XDECREF(arr.base) - arr.base = baseptr + cdef PyObject* baseptr + if base is None: + baseptr = NULL + else: + Py_INCREF(base) # important to do this before decref below! 
+ baseptr = <PyObject*>base + Py_XDECREF(arr.base) + arr.base = baseptr cdef inline object get_array_base(ndarray arr): if arr.base is NULL: diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index f5fc684df24ce..84d6dddf338a5 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/src/util.pxd @@ -115,7 +115,8 @@ cdef inline bint _checknull_old(object val): cdef double INF = <double> np.inf cdef double NEGINF = -INF try: - return val is None or (cpython.PyFloat_Check(val) and (val != val or val == INF or val == NEGINF)) + return val is None or (cpython.PyFloat_Check(val) and + (val != val or val == INF or val == NEGINF)) except ValueError: return False
- [ ] closes #18121 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18150
2017-11-07T16:20:45Z
2017-11-09T12:47:31Z
2017-11-09T12:47:30Z
2017-11-09T12:47:35Z
COMPAT: 32-bit indexer
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 1d44a4bd95c4b..d20ed66c06ce9 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -501,7 +501,7 @@ def test_get_indexer_non_unique(self): result = idx1.get_indexer_non_unique(idx2) expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp) - expected_missing = np.array([2, 3], dtype=np.intp) + expected_missing = np.array([2, 3], dtype=np.int64) tm.assert_numpy_array_equal(result[0], expected_indexer) tm.assert_numpy_array_equal(result[1], expected_missing)
xref #17755
https://api.github.com/repos/pandas-dev/pandas/pulls/18149
2017-11-07T13:10:38Z
2017-11-07T15:08:31Z
2017-11-07T15:08:31Z
2017-12-12T02:38:30Z
melt moved into its own module
diff --git a/pandas/core/api.py b/pandas/core/api.py index 2f818a400162b..1f46aaa40e9eb 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -24,8 +24,8 @@ from pandas.core.panel import Panel, WidePanel from pandas.core.panel4d import Panel4D from pandas.core.reshape.reshape import ( - pivot_simple as pivot, get_dummies, - lreshape, wide_to_long) + pivot_simple as pivot, get_dummies) +from pandas.core.reshape.melt import lreshape, wide_to_long from pandas.core.indexing import IndexSlice from pandas.core.tools.numeric import to_numeric diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 70f1ff0a5380d..ded3c51edbece 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4637,7 +4637,7 @@ def unstack(self, level=-1, fill_value=None): other='melt')) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): - from pandas.core.reshape.reshape import melt + from pandas.core.reshape.melt import melt return melt(self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level) diff --git a/pandas/core/reshape/api.py b/pandas/core/reshape/api.py index c75e0341918bb..99286d807a205 100644 --- a/pandas/core/reshape/api.py +++ b/pandas/core/reshape/api.py @@ -1,7 +1,7 @@ # flake8: noqa from pandas.core.reshape.concat import concat -from pandas.core.reshape.reshape import melt +from pandas.core.reshape.melt import melt from pandas.core.reshape.merge import ( merge, ordered_merge, merge_ordered, merge_asof) from pandas.core.reshape.pivot import pivot_table, crosstab diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py new file mode 100644 index 0000000000000..846d04221fe7f --- /dev/null +++ b/pandas/core/reshape/melt.py @@ -0,0 +1,386 @@ +# pylint: disable=E1101,E1103 +# pylint: disable=W0703,W0622,W0613,W0201 +import numpy as np + +from pandas.core.dtypes.common import is_list_like +from pandas import compat +from pandas.core.categorical import 
Categorical + +from pandas.core.frame import DataFrame +from pandas.core.index import MultiIndex + +from pandas.core.frame import _shared_docs +from pandas.util._decorators import Appender + +import re +import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.missing import notna + + +@Appender(_shared_docs['melt'] % + dict(caller='pd.melt(df, ', + versionadded="", + other='DataFrame.melt')) +def melt(frame, id_vars=None, value_vars=None, var_name=None, + value_name='value', col_level=None): + # TODO: what about the existing index? + if id_vars is not None: + if not is_list_like(id_vars): + id_vars = [id_vars] + elif (isinstance(frame.columns, MultiIndex) and + not isinstance(id_vars, list)): + raise ValueError('id_vars must be a list of tuples when columns' + ' are a MultiIndex') + else: + id_vars = list(id_vars) + else: + id_vars = [] + + if value_vars is not None: + if not is_list_like(value_vars): + value_vars = [value_vars] + elif (isinstance(frame.columns, MultiIndex) and + not isinstance(value_vars, list)): + raise ValueError('value_vars must be a list of tuples when' + ' columns are a MultiIndex') + else: + value_vars = list(value_vars) + frame = frame.loc[:, id_vars + value_vars] + else: + frame = frame.copy() + + if col_level is not None: # allow list or other? 
+ # frame is a copy + frame.columns = frame.columns.get_level_values(col_level) + + if var_name is None: + if isinstance(frame.columns, MultiIndex): + if len(frame.columns.names) == len(set(frame.columns.names)): + var_name = frame.columns.names + else: + var_name = ['variable_{i}'.format(i=i) + for i in range(len(frame.columns.names))] + else: + var_name = [frame.columns.name if frame.columns.name is not None + else 'variable'] + if isinstance(var_name, compat.string_types): + var_name = [var_name] + + N, K = frame.shape + K -= len(id_vars) + + mdata = {} + for col in id_vars: + mdata[col] = np.tile(frame.pop(col).values, K) + + mcolumns = id_vars + var_name + [value_name] + + mdata[value_name] = frame.values.ravel('F') + for i, col in enumerate(var_name): + # asanyarray will keep the columns as an Index + mdata[col] = np.asanyarray(frame.columns + ._get_level_values(i)).repeat(N) + + return DataFrame(mdata, columns=mcolumns) + + +def lreshape(data, groups, dropna=True, label=None): + """ + Reshape long-format data to wide. Generalized inverse of DataFrame.pivot + + Parameters + ---------- + data : DataFrame + groups : dict + {new_name : list_of_columns} + dropna : boolean, default True + + Examples + -------- + >>> import pandas as pd + >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], + ... 'team': ['Red Sox', 'Yankees'], + ... 
'year1': [2007, 2007], 'year2': [2008, 2008]}) + >>> data + hr1 hr2 team year1 year2 + 0 514 545 Red Sox 2007 2008 + 1 573 526 Yankees 2007 2008 + + >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) + team year hr + 0 Red Sox 2007 514 + 1 Yankees 2007 573 + 2 Red Sox 2008 545 + 3 Yankees 2008 526 + + Returns + ------- + reshaped : DataFrame + """ + if isinstance(groups, dict): + keys = list(groups.keys()) + values = list(groups.values()) + else: + keys, values = zip(*groups) + + all_cols = list(set.union(*[set(x) for x in values])) + id_cols = list(data.columns.difference(all_cols)) + + K = len(values[0]) + + for seq in values: + if len(seq) != K: + raise ValueError('All column lists must be same length') + + mdata = {} + pivot_cols = [] + + for target, names in zip(keys, values): + to_concat = [data[col].values for col in names] + mdata[target] = _concat._concat_compat(to_concat) + pivot_cols.append(target) + + for col in id_cols: + mdata[col] = np.tile(data[col].values, K) + + if dropna: + mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) + for c in pivot_cols: + mask &= notna(mdata[c]) + if not mask.all(): + mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata)) + + return DataFrame(mdata, columns=id_cols + pivot_cols) + + +def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): + r""" + Wide panel to long format. Less flexible but more user-friendly than melt. + + With stubnames ['A', 'B'], this function expects to find one or more + group of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,... + You specify what you want to call this suffix in the resulting long format + with `j` (for example `j='year'`) + + Each row of these wide variables are assumed to be uniquely identified by + `i` (can be a single column name or a list of column names) + + All remaining variables in the data frame are left intact. 
+ + Parameters + ---------- + df : DataFrame + The wide-format DataFrame + stubnames : str or list-like + The stub name(s). The wide format variables are assumed to + start with the stub names. + i : str or list-like + Column(s) to use as id variable(s) + j : str + The name of the subobservation variable. What you wish to name your + suffix in the long format. + sep : str, default "" + A character indicating the separation of the variable names + in the wide format, to be stripped from the names in the long format. + For example, if your column names are A-suffix1, A-suffix2, you + can strip the hypen by specifying `sep='-'` + + .. versionadded:: 0.20.0 + + suffix : str, default '\\d+' + A regular expression capturing the wanted suffixes. '\\d+' captures + numeric suffixes. Suffixes with no numbers could be specified with the + negated character class '\\D+'. You can also further disambiguate + suffixes, for example, if your wide variables are of the form + Aone, Btwo,.., and you have an unrelated column Arating, you can + ignore the last one by specifying `suffix='(!?one|two)'` + + .. versionadded:: 0.20.0 + + Returns + ------- + DataFrame + A DataFrame that contains each stub name as a variable, with new index + (i, j) + + Examples + -------- + >>> import pandas as pd + >>> import numpy as np + >>> np.random.seed(123) + >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, + ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, + ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, + ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, + ... "X" : dict(zip(range(3), np.random.randn(3))) + ... }) + >>> df["id"] = df.index + >>> df + A1970 A1980 B1970 B1980 X id + 0 a d 2.5 3.2 -1.085631 0 + 1 b e 1.2 1.3 0.997345 1 + 2 c f 0.7 0.1 0.282978 2 + >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") + ... 
# doctest: +NORMALIZE_WHITESPACE + X A B + id year + 0 1970 -1.085631 a 2.5 + 1 1970 0.997345 b 1.2 + 2 1970 0.282978 c 0.7 + 0 1980 -1.085631 d 3.2 + 1 1980 0.997345 e 1.3 + 2 1980 0.282978 f 0.1 + + With multuple id columns + + >>> df = pd.DataFrame({ + ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], + ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], + ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], + ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] + ... }) + >>> df + birth famid ht1 ht2 + 0 1 1 2.8 3.4 + 1 2 1 2.9 3.8 + 2 3 1 2.2 2.9 + 3 1 2 2.0 3.2 + 4 2 2 1.8 2.8 + 5 3 2 1.9 2.4 + 6 1 3 2.2 3.3 + 7 2 3 2.3 3.4 + 8 3 3 2.1 2.9 + >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') + >>> l + ... # doctest: +NORMALIZE_WHITESPACE + ht + famid birth age + 1 1 1 2.8 + 2 3.4 + 2 1 2.9 + 2 3.8 + 3 1 2.2 + 2 2.9 + 2 1 1 2.0 + 2 3.2 + 2 1 1.8 + 2 2.8 + 3 1 1.9 + 2 2.4 + 3 1 1 2.2 + 2 3.3 + 2 1 2.3 + 2 3.4 + 3 1 2.1 + 2 2.9 + + Going from long back to wide just takes some creative use of `unstack` + + >>> w = l.reset_index().set_index(['famid', 'birth', 'age']).unstack() + >>> w.columns = pd.Index(w.columns).str.join('') + >>> w.reset_index() + famid birth ht1 ht2 + 0 1 1 2.8 3.4 + 1 1 2 2.9 3.8 + 2 1 3 2.2 2.9 + 3 2 1 2.0 3.2 + 4 2 2 1.8 2.8 + 5 2 3 1.9 2.4 + 6 3 1 2.2 3.3 + 7 3 2 2.3 3.4 + 8 3 3 2.1 2.9 + + Less wieldy column names are also handled + + >>> np.random.seed(0) + >>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3), + ... 'A(quarterly)-2011': np.random.rand(3), + ... 'B(quarterly)-2010': np.random.rand(3), + ... 'B(quarterly)-2011': np.random.rand(3), + ... 'X' : np.random.randint(3, size=3)}) + >>> df['id'] = df.index + >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS + A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ... + 0 0.548814 0.544883 0.437587 ... + 1 0.715189 0.423655 0.891773 ... + 2 0.602763 0.645894 0.963663 ... 
+ X id + 0 0 0 + 1 1 1 + 2 1 2 + + >>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id', + ... j='year', sep='-') + ... # doctest: +NORMALIZE_WHITESPACE + X A(quarterly) B(quarterly) + id year + 0 2010 0 0.548814 0.437587 + 1 2010 1 0.715189 0.891773 + 2 2010 1 0.602763 0.963663 + 0 2011 0 0.544883 0.383442 + 1 2011 1 0.423655 0.791725 + 2 2011 1 0.645894 0.528895 + + If we have many columns, we could also use a regex to find our + stubnames and pass that list on to wide_to_long + + >>> stubnames = sorted( + ... set([match[0] for match in df.columns.str.findall( + ... r'[A-B]\(.*\)').values if match != [] ]) + ... ) + >>> list(stubnames) + ['A(quarterly)', 'B(quarterly)'] + + Notes + ----- + All extra variables are left untouched. This simply uses + `pandas.melt` under the hood, but is hard-coded to "do the right thing" + in a typicaly case. + """ + def get_var_names(df, stub, sep, suffix): + regex = "^{stub}{sep}{suffix}".format( + stub=re.escape(stub), sep=re.escape(sep), suffix=suffix) + return df.filter(regex=regex).columns.tolist() + + def melt_stub(df, stub, i, j, value_vars, sep): + newdf = melt(df, id_vars=i, value_vars=value_vars, + value_name=stub.rstrip(sep), var_name=j) + newdf[j] = Categorical(newdf[j]) + newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "") + + return newdf.set_index(i + [j]) + + if any(map(lambda s: s in df.columns.tolist(), stubnames)): + raise ValueError("stubname can't be identical to a column name") + + if not is_list_like(stubnames): + stubnames = [stubnames] + else: + stubnames = list(stubnames) + + if not is_list_like(i): + i = [i] + else: + i = list(i) + + if df[i].duplicated().any(): + raise ValueError("the id variables need to uniquely identify each row") + + value_vars = list(map(lambda stub: + get_var_names(df, stub, sep, suffix), stubnames)) + + value_vars_flattened = [e for sublist in value_vars for e in sublist] + id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) + + melted = [] 
+ for s, v in zip(stubnames, value_vars): + melted.append(melt_stub(df, s, i, j, v, sep)) + melted = melted[0].join(melted[1:], how='outer') + + if len(i) == 1: + new = df[id_vars].set_index(i).join(melted) + return new + + new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) + + return new diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index b8885820f4a49..96738afbca9e3 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -4,7 +4,6 @@ from pandas import compat from functools import partial import itertools -import re import numpy as np @@ -14,7 +13,6 @@ needs_i8_conversion, is_sparse) from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.missing import notna -import pandas.core.dtypes.concat as _concat from pandas.core.series import Series from pandas.core.frame import DataFrame @@ -30,8 +28,6 @@ import pandas.core.algorithms as algos from pandas._libs import algos as _algos, reshape as _reshape -from pandas.core.frame import _shared_docs -from pandas.util._decorators import Appender from pandas.core.index import Index, MultiIndex, _get_na_value @@ -700,375 +696,6 @@ def _convert_level_number(level_num, columns): return result -@Appender(_shared_docs['melt'] % - dict(caller='pd.melt(df, ', - versionadded="", - other='DataFrame.melt')) -def melt(frame, id_vars=None, value_vars=None, var_name=None, - value_name='value', col_level=None): - # TODO: what about the existing index? 
- if id_vars is not None: - if not is_list_like(id_vars): - id_vars = [id_vars] - elif (isinstance(frame.columns, MultiIndex) and - not isinstance(id_vars, list)): - raise ValueError('id_vars must be a list of tuples when columns' - ' are a MultiIndex') - else: - id_vars = list(id_vars) - else: - id_vars = [] - - if value_vars is not None: - if not is_list_like(value_vars): - value_vars = [value_vars] - elif (isinstance(frame.columns, MultiIndex) and - not isinstance(value_vars, list)): - raise ValueError('value_vars must be a list of tuples when' - ' columns are a MultiIndex') - else: - value_vars = list(value_vars) - frame = frame.loc[:, id_vars + value_vars] - else: - frame = frame.copy() - - if col_level is not None: # allow list or other? - # frame is a copy - frame.columns = frame.columns.get_level_values(col_level) - - if var_name is None: - if isinstance(frame.columns, MultiIndex): - if len(frame.columns.names) == len(set(frame.columns.names)): - var_name = frame.columns.names - else: - var_name = ['variable_{i}'.format(i=i) - for i in range(len(frame.columns.names))] - else: - var_name = [frame.columns.name if frame.columns.name is not None - else 'variable'] - if isinstance(var_name, compat.string_types): - var_name = [var_name] - - N, K = frame.shape - K -= len(id_vars) - - mdata = {} - for col in id_vars: - mdata[col] = np.tile(frame.pop(col).values, K) - - mcolumns = id_vars + var_name + [value_name] - - mdata[value_name] = frame.values.ravel('F') - for i, col in enumerate(var_name): - # asanyarray will keep the columns as an Index - mdata[col] = np.asanyarray(frame.columns - ._get_level_values(i)).repeat(N) - - return DataFrame(mdata, columns=mcolumns) - - -def lreshape(data, groups, dropna=True, label=None): - """ - Reshape long-format data to wide. 
Generalized inverse of DataFrame.pivot - - Parameters - ---------- - data : DataFrame - groups : dict - {new_name : list_of_columns} - dropna : boolean, default True - - Examples - -------- - >>> import pandas as pd - >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], - ... 'team': ['Red Sox', 'Yankees'], - ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) - >>> data - hr1 hr2 team year1 year2 - 0 514 545 Red Sox 2007 2008 - 1 573 526 Yankees 2007 2008 - - >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) - team year hr - 0 Red Sox 2007 514 - 1 Yankees 2007 573 - 2 Red Sox 2008 545 - 3 Yankees 2008 526 - - Returns - ------- - reshaped : DataFrame - """ - if isinstance(groups, dict): - keys = list(groups.keys()) - values = list(groups.values()) - else: - keys, values = zip(*groups) - - all_cols = list(set.union(*[set(x) for x in values])) - id_cols = list(data.columns.difference(all_cols)) - - K = len(values[0]) - - for seq in values: - if len(seq) != K: - raise ValueError('All column lists must be same length') - - mdata = {} - pivot_cols = [] - - for target, names in zip(keys, values): - to_concat = [data[col].values for col in names] - mdata[target] = _concat._concat_compat(to_concat) - pivot_cols.append(target) - - for col in id_cols: - mdata[col] = np.tile(data[col].values, K) - - if dropna: - mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) - for c in pivot_cols: - mask &= notna(mdata[c]) - if not mask.all(): - mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata)) - - return DataFrame(mdata, columns=id_cols + pivot_cols) - - -def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): - r""" - Wide panel to long format. Less flexible but more user-friendly than melt. - - With stubnames ['A', 'B'], this function expects to find one or more - group of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,... 
- You specify what you want to call this suffix in the resulting long format - with `j` (for example `j='year'`) - - Each row of these wide variables are assumed to be uniquely identified by - `i` (can be a single column name or a list of column names) - - All remaining variables in the data frame are left intact. - - Parameters - ---------- - df : DataFrame - The wide-format DataFrame - stubnames : str or list-like - The stub name(s). The wide format variables are assumed to - start with the stub names. - i : str or list-like - Column(s) to use as id variable(s) - j : str - The name of the subobservation variable. What you wish to name your - suffix in the long format. - sep : str, default "" - A character indicating the separation of the variable names - in the wide format, to be stripped from the names in the long format. - For example, if your column names are A-suffix1, A-suffix2, you - can strip the hypen by specifying `sep='-'` - - .. versionadded:: 0.20.0 - - suffix : str, default '\\d+' - A regular expression capturing the wanted suffixes. '\\d+' captures - numeric suffixes. Suffixes with no numbers could be specified with the - negated character class '\\D+'. You can also further disambiguate - suffixes, for example, if your wide variables are of the form - Aone, Btwo,.., and you have an unrelated column Arating, you can - ignore the last one by specifying `suffix='(!?one|two)'` - - .. versionadded:: 0.20.0 - - Returns - ------- - DataFrame - A DataFrame that contains each stub name as a variable, with new index - (i, j) - - Examples - -------- - >>> import pandas as pd - >>> import numpy as np - >>> np.random.seed(123) - >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, - ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, - ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, - ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, - ... "X" : dict(zip(range(3), np.random.randn(3))) - ... 
}) - >>> df["id"] = df.index - >>> df - A1970 A1980 B1970 B1980 X id - 0 a d 2.5 3.2 -1.085631 0 - 1 b e 1.2 1.3 0.997345 1 - 2 c f 0.7 0.1 0.282978 2 - >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") - ... # doctest: +NORMALIZE_WHITESPACE - X A B - id year - 0 1970 -1.085631 a 2.5 - 1 1970 0.997345 b 1.2 - 2 1970 0.282978 c 0.7 - 0 1980 -1.085631 d 3.2 - 1 1980 0.997345 e 1.3 - 2 1980 0.282978 f 0.1 - - With multuple id columns - - >>> df = pd.DataFrame({ - ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], - ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], - ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], - ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] - ... }) - >>> df - birth famid ht1 ht2 - 0 1 1 2.8 3.4 - 1 2 1 2.9 3.8 - 2 3 1 2.2 2.9 - 3 1 2 2.0 3.2 - 4 2 2 1.8 2.8 - 5 3 2 1.9 2.4 - 6 1 3 2.2 3.3 - 7 2 3 2.3 3.4 - 8 3 3 2.1 2.9 - >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') - >>> l - ... # doctest: +NORMALIZE_WHITESPACE - ht - famid birth age - 1 1 1 2.8 - 2 3.4 - 2 1 2.9 - 2 3.8 - 3 1 2.2 - 2 2.9 - 2 1 1 2.0 - 2 3.2 - 2 1 1.8 - 2 2.8 - 3 1 1.9 - 2 2.4 - 3 1 1 2.2 - 2 3.3 - 2 1 2.3 - 2 3.4 - 3 1 2.1 - 2 2.9 - - Going from long back to wide just takes some creative use of `unstack` - - >>> w = l.reset_index().set_index(['famid', 'birth', 'age']).unstack() - >>> w.columns = pd.Index(w.columns).str.join('') - >>> w.reset_index() - famid birth ht1 ht2 - 0 1 1 2.8 3.4 - 1 1 2 2.9 3.8 - 2 1 3 2.2 2.9 - 3 2 1 2.0 3.2 - 4 2 2 1.8 2.8 - 5 2 3 1.9 2.4 - 6 3 1 2.2 3.3 - 7 3 2 2.3 3.4 - 8 3 3 2.1 2.9 - - Less wieldy column names are also handled - - >>> np.random.seed(0) - >>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3), - ... 'A(quarterly)-2011': np.random.rand(3), - ... 'B(quarterly)-2010': np.random.rand(3), - ... 'B(quarterly)-2011': np.random.rand(3), - ... 
'X' : np.random.randint(3, size=3)}) - >>> df['id'] = df.index - >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS - A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ... - 0 0.548814 0.544883 0.437587 ... - 1 0.715189 0.423655 0.891773 ... - 2 0.602763 0.645894 0.963663 ... - X id - 0 0 0 - 1 1 1 - 2 1 2 - - >>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id', - ... j='year', sep='-') - ... # doctest: +NORMALIZE_WHITESPACE - X A(quarterly) B(quarterly) - id year - 0 2010 0 0.548814 0.437587 - 1 2010 1 0.715189 0.891773 - 2 2010 1 0.602763 0.963663 - 0 2011 0 0.544883 0.383442 - 1 2011 1 0.423655 0.791725 - 2 2011 1 0.645894 0.528895 - - If we have many columns, we could also use a regex to find our - stubnames and pass that list on to wide_to_long - - >>> stubnames = sorted( - ... set([match[0] for match in df.columns.str.findall( - ... r'[A-B]\(.*\)').values if match != [] ]) - ... ) - >>> list(stubnames) - ['A(quarterly)', 'B(quarterly)'] - - Notes - ----- - All extra variables are left untouched. This simply uses - `pandas.melt` under the hood, but is hard-coded to "do the right thing" - in a typicaly case. 
- """ - def get_var_names(df, stub, sep, suffix): - regex = "^{stub}{sep}{suffix}".format( - stub=re.escape(stub), sep=re.escape(sep), suffix=suffix) - return df.filter(regex=regex).columns.tolist() - - def melt_stub(df, stub, i, j, value_vars, sep): - newdf = melt(df, id_vars=i, value_vars=value_vars, - value_name=stub.rstrip(sep), var_name=j) - newdf[j] = Categorical(newdf[j]) - newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "") - - return newdf.set_index(i + [j]) - - if any(map(lambda s: s in df.columns.tolist(), stubnames)): - raise ValueError("stubname can't be identical to a column name") - - if not is_list_like(stubnames): - stubnames = [stubnames] - else: - stubnames = list(stubnames) - - if not is_list_like(i): - i = [i] - else: - i = list(i) - - if df[i].duplicated().any(): - raise ValueError("the id variables need to uniquely identify each row") - - value_vars = list(map(lambda stub: - get_var_names(df, stub, sep, suffix), stubnames)) - - value_vars_flattened = [e for sublist in value_vars for e in sublist] - id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) - - melted = [] - for s, v in zip(stubnames, value_vars): - melted.append(melt_stub(df, s, i, j, v, sep)) - melted = melted[0].join(melted[1:], how='outer') - - if len(i) == 1: - new = df[id_vars].set_index(i).join(melted) - return new - - new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) - - return new - - def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False): """ diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index fc9f89934b4ea..2722c3e92d85a 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -11,8 +11,8 @@ from pandas.util.testing import assert_frame_equal -from pandas.core.reshape.reshape import ( - melt, lreshape, get_dummies, wide_to_long) +from pandas.core.reshape.reshape import get_dummies +from 
pandas.core.reshape.melt import melt, lreshape, wide_to_long import pandas.util.testing as tm from pandas.compat import range, u
This pull request is to make PR #17677 easier and was requested by @jreback [here](https://github.com/pandas-dev/pandas/pull/17677#pullrequestreview-73974610)
https://api.github.com/repos/pandas-dev/pandas/pulls/18148
2017-11-07T13:08:06Z
2017-11-12T15:59:19Z
2017-11-12T15:59:18Z
2017-11-23T16:56:59Z
Index tests indexing
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index cc6eeb44c99c9..36f691903d233 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -410,89 +410,6 @@ def test_sort_values(self): assert ordered[::-1].is_monotonic tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp)) - def test_take(self): - dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), - datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] - - for tz in [None, 'US/Eastern', 'Asia/Tokyo']: - idx = DatetimeIndex(start='2010-01-01 09:00', - end='2010-02-01 09:00', freq='H', tz=tz, - name='idx') - expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) - - taken1 = idx.take([5, 6, 8, 12]) - taken2 = idx[[5, 6, 8, 12]] - - for taken in [taken1, taken2]: - tm.assert_index_equal(taken, expected) - assert isinstance(taken, DatetimeIndex) - assert taken.freq is None - assert taken.tz == expected.tz - assert taken.name == expected.name - - def test_take_fill_value(self): - # GH 12631 - idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], - name='xxx') - result = idx.take(np.array([1, 0, -1])) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx') - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], - name='xxx') - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx') - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with 
tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - - def test_take_fill_value_with_timezone(self): - idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], - name='xxx', tz='US/Eastern') - result = idx.take(np.array([1, 0, -1])) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx', tz='US/Eastern') - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], - name='xxx', tz='US/Eastern') - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx', tz='US/Eastern') - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - def test_map_bug_1677(self): index = DatetimeIndex(['2012-04-25 09:30:00.393000']) f = index.asof diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 9416b08f9654a..4ce9441d87970 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -1,3 +1,4 @@ +from datetime import datetime import pytest import pytz @@ -5,7 +6,10 @@ import pandas as pd import pandas.util.testing as tm import pandas.compat as compat -from pandas import notna, Index, DatetimeIndex, datetime, date_range +from pandas import notna, Index, DatetimeIndex, date_range, Timestamp +from 
pandas.tseries.offsets import CDay, BDay + +START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) class TestDatetimeIndex(object): @@ -239,3 +243,234 @@ def test_delete_slice(self): assert result.name == expected.name assert result.freq == expected.freq assert result.tz == expected.tz + + def test_getitem(self): + idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') + idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', + tz='Asia/Tokyo', name='idx') + + for idx in [idx1, idx2]: + result = idx[0] + assert result == Timestamp('2011-01-01', tz=idx.tz) + + result = idx[0:5] + expected = pd.date_range('2011-01-01', '2011-01-05', freq='D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[0:10:2] + expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[-20:-5:3] + expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[4::-1] + expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03', + '2011-01-02', '2011-01-01'], + freq='-1D', tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + def test_take(self): + # GH 10295 + idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') + idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', + tz='Asia/Tokyo', name='idx') + + for idx in [idx1, idx2]: + result = idx.take([0]) + assert result == Timestamp('2011-01-01', tz=idx.tz) + + result = idx.take([0, 1, 2]) + expected = pd.date_range('2011-01-01', '2011-01-03', freq='D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([0, 2, 4]) + expected = pd.date_range('2011-01-01', 
'2011-01-05', freq='2D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([7, 4, 1]) + expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([3, 2, 5]) + expected = DatetimeIndex(['2011-01-04', '2011-01-03', + '2011-01-06'], + freq=None, tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq is None + + result = idx.take([-3, 2, 5]) + expected = DatetimeIndex(['2011-01-29', '2011-01-03', + '2011-01-06'], + freq=None, tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq is None + + def test_take_invalid_kwargs(self): + idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') + indices = [1, 6, 5, 9, 10, 13, 15, 3] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + tm.assert_raises_regex(TypeError, msg, idx.take, + indices, foo=2) + + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, out=indices) + + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, mode='clip') + + # TODO: This method came from test_datetime; de-dup with version above + def test_take2(self): + dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), + datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] + + for tz in [None, 'US/Eastern', 'Asia/Tokyo']: + idx = DatetimeIndex(start='2010-01-01 09:00', + end='2010-02-01 09:00', freq='H', tz=tz, + name='idx') + expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) + + taken1 = idx.take([5, 6, 8, 12]) + taken2 = idx[[5, 6, 8, 12]] + + for taken in [taken1, taken2]: + tm.assert_index_equal(taken, expected) + assert isinstance(taken, DatetimeIndex) + assert taken.freq is None + assert taken.tz == expected.tz + assert taken.name 
== expected.name + + def test_take_fill_value(self): + # GH 12631 + idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], + name='xxx') + result = idx.take(np.array([1, 0, -1])) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx') + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], + name='xxx') + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx') + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be >= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + def test_take_fill_value_with_timezone(self): + idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], + name='xxx', tz='US/Eastern') + result = idx.take(np.array([1, 0, -1])) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx', tz='US/Eastern') + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], + name='xxx', tz='US/Eastern') + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx', tz='US/Eastern') + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be 
>= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + +class TestBusinessDatetimeIndexIndexing(object): + def setup_method(self, method): + self.rng = pd.bdate_range(START, END) + + def test_getitem(self): + smaller = self.rng[:5] + exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) + tm.assert_index_equal(smaller, exp) + + assert smaller.offset == self.rng.offset + + sliced = self.rng[::5] + assert sliced.offset == BDay() * 5 + + fancy_indexed = self.rng[[4, 3, 2, 1, 0]] + assert len(fancy_indexed) == 5 + assert isinstance(fancy_indexed, DatetimeIndex) + assert fancy_indexed.freq is None + + # 32-bit vs. 64-bit platforms + assert self.rng[4] == self.rng[np.int_(4)] + + def test_getitem_matplotlib_hackaround(self): + values = self.rng[:, None] + expected = self.rng.values[:, None] + tm.assert_numpy_array_equal(values, expected) + + +class TestCustomDatetimeIndexIndexing(object): + def setup_method(self, method): + self.rng = pd.bdate_range(START, END, freq='C') + + def test_getitem(self): + smaller = self.rng[:5] + exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) + tm.assert_index_equal(smaller, exp) + assert smaller.offset == self.rng.offset + + sliced = self.rng[::5] + assert sliced.offset == CDay() * 5 + + fancy_indexed = self.rng[[4, 3, 2, 1, 0]] + assert len(fancy_indexed) == 5 + assert isinstance(fancy_indexed, DatetimeIndex) + assert fancy_indexed.freq is None + + # 32-bit vs. 
64-bit platforms + assert self.rng[4] == self.rng[np.int_(4)] + + def test_getitem_matplotlib_hackaround(self): + values = self.rng[:, None] + expected = self.rng.values[:, None] + tm.assert_numpy_array_equal(values, expected) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 6e66e4a36f905..9e4f8d979ca99 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -537,40 +537,6 @@ def test_order(self): tm.assert_numpy_array_equal(indexer, exp, check_dtype=False) assert ordered.freq is None - def test_getitem(self): - idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') - idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', - tz='Asia/Tokyo', name='idx') - - for idx in [idx1, idx2]: - result = idx[0] - assert result == Timestamp('2011-01-01', tz=idx.tz) - - result = idx[0:5] - expected = pd.date_range('2011-01-01', '2011-01-05', freq='D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx[0:10:2] - expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx[-20:-5:3] - expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx[4::-1] - expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03', - '2011-01-02', '2011-01-01'], - freq='-1D', tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - def test_drop_duplicates_metadata(self): # GH 10115 idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') @@ -605,64 +571,6 @@ def test_drop_duplicates(self): res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], 
index=np.arange(5, 31))) - def test_take(self): - # GH 10295 - idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') - idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', - tz='Asia/Tokyo', name='idx') - - for idx in [idx1, idx2]: - result = idx.take([0]) - assert result == Timestamp('2011-01-01', tz=idx.tz) - - result = idx.take([0, 1, 2]) - expected = pd.date_range('2011-01-01', '2011-01-03', freq='D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([0, 2, 4]) - expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([7, 4, 1]) - expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([3, 2, 5]) - expected = DatetimeIndex(['2011-01-04', '2011-01-03', - '2011-01-06'], - freq=None, tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq is None - - result = idx.take([-3, 2, 5]) - expected = DatetimeIndex(['2011-01-29', '2011-01-03', - '2011-01-06'], - freq=None, tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq is None - - def test_take_invalid_kwargs(self): - idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') - indices = [1, 6, 5, 9, 10, 13, 15, 3] - - msg = r"take\(\) got an unexpected keyword argument 'foo'" - tm.assert_raises_regex(TypeError, msg, idx.take, - indices, foo=2) - - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, out=indices) - - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, mode='clip') - def test_infer_freq(self): # GH 11018 for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', 
'-1M', 'D', '3D', @@ -792,29 +700,6 @@ def test_repr(self): # only really care that it works repr(self.rng) - def test_getitem(self): - smaller = self.rng[:5] - exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) - tm.assert_index_equal(smaller, exp) - - assert smaller.offset == self.rng.offset - - sliced = self.rng[::5] - assert sliced.offset == BDay() * 5 - - fancy_indexed = self.rng[[4, 3, 2, 1, 0]] - assert len(fancy_indexed) == 5 - assert isinstance(fancy_indexed, DatetimeIndex) - assert fancy_indexed.freq is None - - # 32-bit vs. 64-bit platforms - assert self.rng[4] == self.rng[np.int_(4)] - - def test_getitem_matplotlib_hackaround(self): - values = self.rng[:, None] - expected = self.rng.values[:, None] - tm.assert_numpy_array_equal(values, expected) - def test_shift(self): shifted = self.rng.shift(5) assert shifted[0] == self.rng[5] @@ -864,7 +749,6 @@ def test_identical(self): class TestCustomDatetimeIndex(object): - def setup_method(self, method): self.rng = bdate_range(START, END, freq='C') @@ -884,28 +768,6 @@ def test_repr(self): # only really care that it works repr(self.rng) - def test_getitem(self): - smaller = self.rng[:5] - exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) - tm.assert_index_equal(smaller, exp) - assert smaller.offset == self.rng.offset - - sliced = self.rng[::5] - assert sliced.offset == CDay() * 5 - - fancy_indexed = self.rng[[4, 3, 2, 1, 0]] - assert len(fancy_indexed) == 5 - assert isinstance(fancy_indexed, DatetimeIndex) - assert fancy_indexed.freq is None - - # 32-bit vs. 
64-bit platforms - assert self.rng[4] == self.rng[np.int_(4)] - - def test_getitem_matplotlib_hackaround(self): - values = self.rng[:, None] - expected = self.rng.values[:, None] - tm.assert_numpy_array_equal(values, expected) - def test_shift(self): shifted = self.rng.shift(5) diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 3d6fba982f560..1d44a4bd95c4b 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta import pytest @@ -9,6 +9,7 @@ from pandas._libs import tslib, tslibs from pandas import (PeriodIndex, Series, DatetimeIndex, period_range, Period) +from pandas._libs import period as libperiod class TestGetItem(object): @@ -504,3 +505,98 @@ def test_get_indexer_non_unique(self): tm.assert_numpy_array_equal(result[0], expected_indexer) tm.assert_numpy_array_equal(result[1], expected_missing) + + # TODO: This method came from test_period; de-dup with version above + def test_get_loc2(self): + idx = pd.period_range('2000-01-01', periods=3) + + for method in [None, 'pad', 'backfill', 'nearest']: + assert idx.get_loc(idx[1], method) == 1 + assert idx.get_loc(idx[1].asfreq('H', how='start'), method) == 1 + assert idx.get_loc(idx[1].to_timestamp(), method) == 1 + assert idx.get_loc(idx[1].to_timestamp() + .to_pydatetime(), method) == 1 + assert idx.get_loc(str(idx[1]), method) == 1 + + idx = pd.period_range('2000-01-01', periods=5)[::2] + assert idx.get_loc('2000-01-02T12', method='nearest', + tolerance='1 day') == 1 + assert idx.get_loc('2000-01-02T12', method='nearest', + tolerance=pd.Timedelta('1D')) == 1 + assert idx.get_loc('2000-01-02T12', method='nearest', + tolerance=np.timedelta64(1, 'D')) == 1 + assert idx.get_loc('2000-01-02T12', method='nearest', + tolerance=timedelta(1)) == 1 + with tm.assert_raises_regex(ValueError, + 'unit abbreviation w/o a number'): 
+ idx.get_loc('2000-01-10', method='nearest', tolerance='foo') + + msg = 'Input has different freq from PeriodIndex\\(freq=D\\)' + with tm.assert_raises_regex(ValueError, msg): + idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour') + with pytest.raises(KeyError): + idx.get_loc('2000-01-10', method='nearest', tolerance='1 day') + with pytest.raises( + ValueError, + match='list-like tolerance size must match target index size'): + idx.get_loc('2000-01-10', method='nearest', + tolerance=[pd.Timedelta('1 day').to_timedelta64(), + pd.Timedelta('1 day').to_timedelta64()]) + + # TODO: This method came from test_period; de-dup with version above + def test_get_indexer2(self): + idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start') + tm.assert_numpy_array_equal(idx.get_indexer(idx), + np.array([0, 1, 2], dtype=np.intp)) + + target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12', + '2000-01-02T01'], freq='H') + tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), + np.array([-1, 0, 1], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), + np.array([0, 1, 2], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), + np.array([0, 1, 1], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest', + tolerance='1 hour'), + np.array([0, -1, 1], dtype=np.intp)) + + msg = 'Input has different freq from PeriodIndex\\(freq=H\\)' + with tm.assert_raises_regex(ValueError, msg): + idx.get_indexer(target, 'nearest', tolerance='1 minute') + + tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest', + tolerance='1 day'), + np.array([0, 1, 1], dtype=np.intp)) + tol_raw = [pd.Timedelta('1 hour'), + pd.Timedelta('1 hour'), + np.timedelta64(1, 'D'), ] + tm.assert_numpy_array_equal( + idx.get_indexer(target, 'nearest', + tolerance=[np.timedelta64(x) for x in tol_raw]), + np.array([0, -1, 1], dtype=np.intp)) + tol_bad = [pd.Timedelta('2 hour').to_timedelta64(), + 
pd.Timedelta('1 hour').to_timedelta64(), + np.timedelta64(1, 'M'), ] + with pytest.raises( + libperiod.IncompatibleFrequency, + match='Input has different freq from'): + idx.get_indexer(target, 'nearest', tolerance=tol_bad) + + def test_indexing(self): + # GH 4390, iat incorrectly indexing + index = period_range('1/1/2001', periods=10) + s = Series(np.random.randn(10), index=index) + expected = s[index[0]] + result = s.iat[0] + assert expected == result + + def test_period_index_indexer(self): + # GH4125 + idx = pd.period_range('2002-01', '2003-12', freq='M') + df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx) + tm.assert_frame_equal(df, df.loc[idx]) + tm.assert_frame_equal(df, df.loc[list(idx)]) + tm.assert_frame_equal(df, df.loc[list(idx)]) + tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]]) + tm.assert_frame_equal(df, df.loc[list(idx)]) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index ae500e66359b4..e5ee078d3558d 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -1,15 +1,12 @@ import pytest import numpy as np -from numpy.random import randn -from datetime import timedelta import pandas as pd from pandas.util import testing as tm from pandas import (PeriodIndex, period_range, notna, DatetimeIndex, NaT, Index, Period, Int64Index, Series, DataFrame, date_range, offsets, compat) -from pandas.core.indexes.period import IncompatibleFrequency from ..datetimelike import DatetimeLike @@ -64,42 +61,6 @@ def test_pickle_round_trip(self): result = tm.round_trip_pickle(idx) tm.assert_index_equal(result, idx) - def test_get_loc(self): - idx = pd.period_range('2000-01-01', periods=3) - - for method in [None, 'pad', 'backfill', 'nearest']: - assert idx.get_loc(idx[1], method) == 1 - assert idx.get_loc(idx[1].asfreq('H', how='start'), method) == 1 - assert idx.get_loc(idx[1].to_timestamp(), method) == 1 - assert idx.get_loc(idx[1].to_timestamp() - 
.to_pydatetime(), method) == 1 - assert idx.get_loc(str(idx[1]), method) == 1 - - idx = pd.period_range('2000-01-01', periods=5)[::2] - assert idx.get_loc('2000-01-02T12', method='nearest', - tolerance='1 day') == 1 - assert idx.get_loc('2000-01-02T12', method='nearest', - tolerance=pd.Timedelta('1D')) == 1 - assert idx.get_loc('2000-01-02T12', method='nearest', - tolerance=np.timedelta64(1, 'D')) == 1 - assert idx.get_loc('2000-01-02T12', method='nearest', - tolerance=timedelta(1)) == 1 - with tm.assert_raises_regex(ValueError, - 'unit abbreviation w/o a number'): - idx.get_loc('2000-01-10', method='nearest', tolerance='foo') - - msg = 'Input has different freq from PeriodIndex\\(freq=D\\)' - with tm.assert_raises_regex(ValueError, msg): - idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour') - with pytest.raises(KeyError): - idx.get_loc('2000-01-10', method='nearest', tolerance='1 day') - with pytest.raises( - ValueError, - match='list-like tolerance size must match target index size'): - idx.get_loc('2000-01-10', method='nearest', - tolerance=[pd.Timedelta('1 day').to_timedelta64(), - pd.Timedelta('1 day').to_timedelta64()]) - def test_where(self): i = self.create_index() result = i.where(notna(i)) @@ -142,45 +103,6 @@ def test_where_other(self): result = i.where(notna(i2), i2.values) tm.assert_index_equal(result, i2) - def test_get_indexer(self): - idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start') - tm.assert_numpy_array_equal(idx.get_indexer(idx), - np.array([0, 1, 2], dtype=np.intp)) - - target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12', - '2000-01-02T01'], freq='H') - tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), - np.array([-1, 0, 1], dtype=np.intp)) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), - np.array([0, 1, 2], dtype=np.intp)) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), - np.array([0, 1, 1], dtype=np.intp)) - 
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest', - tolerance='1 hour'), - np.array([0, -1, 1], dtype=np.intp)) - - msg = 'Input has different freq from PeriodIndex\\(freq=H\\)' - with tm.assert_raises_regex(ValueError, msg): - idx.get_indexer(target, 'nearest', tolerance='1 minute') - - tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest', - tolerance='1 day'), - np.array([0, 1, 1], dtype=np.intp)) - tol_raw = [pd.Timedelta('1 hour'), - pd.Timedelta('1 hour'), - np.timedelta64(1, 'D'), ] - tm.assert_numpy_array_equal( - idx.get_indexer(target, 'nearest', - tolerance=[np.timedelta64(x) for x in tol_raw]), - np.array([0, -1, 1], dtype=np.intp)) - tol_bad = [pd.Timedelta('2 hour').to_timedelta64(), - pd.Timedelta('1 hour').to_timedelta64(), - np.timedelta64(1, 'M'), ] - with pytest.raises( - IncompatibleFrequency, - match='Input has different freq from'): - idx.get_indexer(target, 'nearest', tolerance=tol_bad) - def test_repeat(self): # GH10183 idx = pd.period_range('2000-01-01', periods=3, freq='D') @@ -189,16 +111,6 @@ def test_repeat(self): tm.assert_index_equal(res, exp) assert res.freqstr == 'D' - def test_period_index_indexer(self): - # GH4125 - idx = pd.period_range('2002-01', '2003-12', freq='M') - df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx) - tm.assert_frame_equal(df, df.loc[idx]) - tm.assert_frame_equal(df, df.loc[list(idx)]) - tm.assert_frame_equal(df, df.loc[list(idx)]) - tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]]) - tm.assert_frame_equal(df, df.loc[list(idx)]) - def test_fillna_period(self): # GH 11343 idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT, @@ -437,15 +349,6 @@ def _check_all_fields(self, periodindex): for x, val in zip(periods, field_s): assert getattr(x, field) == val - def test_indexing(self): - - # GH 4390, iat incorrectly indexing - index = period_range('1/1/2001', periods=10) - s = Series(randn(10), index=index) - expected = s[index[0]] - result = s.iat[0] - assert expected == result - def 
test_period_set_index_reindex(self): # GH 6631 df = DataFrame(np.random.random(6)) diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index 844033cc19eed..cb88bac6386f7 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -1,7 +1,9 @@ -import pytest - from datetime import timedelta +import pytest +import numpy as np + +import pandas as pd import pandas.util.testing as tm from pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta @@ -110,3 +112,197 @@ def test_delete_slice(self): tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq + + def test_getitem(self): + idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx') + + for idx in [idx1]: + result = idx[0] + assert result == Timedelta('1 day') + + result = idx[0:5] + expected = timedelta_range('1 day', '5 day', freq='D', + name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[0:10:2] + expected = timedelta_range('1 day', '9 day', freq='2D', + name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[-20:-5:3] + expected = timedelta_range('12 day', '24 day', freq='3D', + name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[4::-1] + expected = TimedeltaIndex(['5 day', '4 day', '3 day', + '2 day', '1 day'], + freq='-1D', name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + def test_take(self): + # GH 10295 + idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx') + + for idx in [idx1]: + result = idx.take([0]) + assert result == Timedelta('1 day') + + result = idx.take([-1]) + assert result == Timedelta('31 day') + + result = idx.take([0, 1, 2]) + expected = timedelta_range('1 day', '3 day', 
freq='D', + name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([0, 2, 4]) + expected = timedelta_range('1 day', '5 day', freq='2D', + name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([7, 4, 1]) + expected = timedelta_range('8 day', '2 day', freq='-3D', + name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([3, 2, 5]) + expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx') + tm.assert_index_equal(result, expected) + assert result.freq is None + + result = idx.take([-3, 2, 5]) + expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx') + tm.assert_index_equal(result, expected) + assert result.freq is None + + def test_take_invalid_kwargs(self): + idx = timedelta_range('1 day', '31 day', freq='D', name='idx') + indices = [1, 6, 5, 9, 10, 13, 15, 3] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + tm.assert_raises_regex(TypeError, msg, idx.take, + indices, foo=2) + + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, out=indices) + + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, mode='clip') + + # TODO: This method came from test_timedelta; de-dup with version above + def test_take2(self): + tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00'] + idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx') + expected = TimedeltaIndex(tds, freq=None, name='idx') + + taken1 = idx.take([2, 4, 10]) + taken2 = idx[[2, 4, 10]] + + for taken in [taken1, taken2]: + tm.assert_index_equal(taken, expected) + assert isinstance(taken, TimedeltaIndex) + assert taken.freq is None + assert taken.name == expected.name + + def test_take_fill_value(self): + # GH 12631 + idx = TimedeltaIndex(['1 days', '2 days', '3 days'], + name='xxx') 
+ result = idx.take(np.array([1, 0, -1])) + expected = TimedeltaIndex(['2 days', '1 days', '3 days'], + name='xxx') + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = TimedeltaIndex(['2 days', '1 days', 'NaT'], + name='xxx') + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + expected = TimedeltaIndex(['2 days', '1 days', '3 days'], + name='xxx') + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be >= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + def test_get_loc(self): + idx = pd.to_timedelta(['0 days', '1 days', '2 days']) + + for method in [None, 'pad', 'backfill', 'nearest']: + assert idx.get_loc(idx[1], method) == 1 + assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1 + assert idx.get_loc(str(idx[1]), method) == 1 + + assert idx.get_loc(idx[1], 'pad', + tolerance=Timedelta(0)) == 1 + assert idx.get_loc(idx[1], 'pad', + tolerance=np.timedelta64(0, 's')) == 1 + assert idx.get_loc(idx[1], 'pad', + tolerance=timedelta(0)) == 1 + + with tm.assert_raises_regex(ValueError, + 'unit abbreviation w/o a number'): + idx.get_loc(idx[1], method='nearest', tolerance='foo') + + with pytest.raises( + ValueError, + match='tolerance size must match'): + idx.get_loc(idx[1], method='nearest', + tolerance=[Timedelta(0).to_timedelta64(), + Timedelta(0).to_timedelta64()]) + + for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: + assert idx.get_loc('1 day 1 hour', method) == loc + + # GH 16909 + assert idx.get_loc(idx[1].to_timedelta64()) == 1 + + # GH 16896 + assert idx.get_loc('0 days') == 0 + + 
def test_get_loc_nat(self): + tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00']) + + assert tidx.get_loc(pd.NaT) == 1 + assert tidx.get_loc(None) == 1 + assert tidx.get_loc(float('nan')) == 1 + assert tidx.get_loc(np.nan) == 1 + + def test_get_indexer(self): + idx = pd.to_timedelta(['0 days', '1 days', '2 days']) + tm.assert_numpy_array_equal(idx.get_indexer(idx), + np.array([0, 1, 2], dtype=np.intp)) + + target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), + np.array([-1, 0, 1], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), + np.array([0, 1, 2], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), + np.array([0, 1, 1], dtype=np.intp)) + + res = idx.get_indexer(target, 'nearest', + tolerance=Timedelta('1 hour')) + tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp)) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index ff52afea2a918..67238665a2e8a 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -358,38 +358,6 @@ def test_order(self): tm.assert_numpy_array_equal(indexer, exp, check_dtype=False) assert ordered.freq is None - def test_getitem(self): - idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') - - for idx in [idx1]: - result = idx[0] - assert result == pd.Timedelta('1 day') - - result = idx[0:5] - expected = pd.timedelta_range('1 day', '5 day', freq='D', - name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx[0:10:2] - expected = pd.timedelta_range('1 day', '9 day', freq='2D', - name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx[-20:-5:3] - expected = pd.timedelta_range('12 day', '24 day', freq='3D', - name='idx') - tm.assert_index_equal(result, 
expected) - assert result.freq == expected.freq - - result = idx[4::-1] - expected = TimedeltaIndex(['5 day', '4 day', '3 day', - '2 day', '1 day'], - freq='-1D', name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - def test_drop_duplicates_metadata(self): # GH 10115 idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') @@ -424,61 +392,6 @@ def test_drop_duplicates(self): res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) - def test_take(self): - # GH 10295 - idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') - - for idx in [idx1]: - result = idx.take([0]) - assert result == pd.Timedelta('1 day') - - result = idx.take([-1]) - assert result == pd.Timedelta('31 day') - - result = idx.take([0, 1, 2]) - expected = pd.timedelta_range('1 day', '3 day', freq='D', - name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([0, 2, 4]) - expected = pd.timedelta_range('1 day', '5 day', freq='2D', - name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([7, 4, 1]) - expected = pd.timedelta_range('8 day', '2 day', freq='-3D', - name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([3, 2, 5]) - expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx') - tm.assert_index_equal(result, expected) - assert result.freq is None - - result = idx.take([-3, 2, 5]) - expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx') - tm.assert_index_equal(result, expected) - assert result.freq is None - - def test_take_invalid_kwargs(self): - idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') - indices = [1, 6, 5, 9, 10, 13, 15, 3] - - msg = r"take\(\) got an unexpected keyword argument 'foo'" - tm.assert_raises_regex(TypeError, msg, idx.take, - indices, foo=2) 
- - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, out=indices) - - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, mode='clip') - def test_infer_freq(self): # GH 11018 for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S' diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 615c0d0ffa210..533b06088f1bf 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -50,66 +50,6 @@ def test_shift(self): '10 days 01:00:03'], freq='D') tm.assert_index_equal(result, expected) - def test_get_loc(self): - idx = pd.to_timedelta(['0 days', '1 days', '2 days']) - - for method in [None, 'pad', 'backfill', 'nearest']: - assert idx.get_loc(idx[1], method) == 1 - assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1 - assert idx.get_loc(str(idx[1]), method) == 1 - - assert idx.get_loc(idx[1], 'pad', - tolerance=pd.Timedelta(0)) == 1 - assert idx.get_loc(idx[1], 'pad', - tolerance=np.timedelta64(0, 's')) == 1 - assert idx.get_loc(idx[1], 'pad', - tolerance=timedelta(0)) == 1 - - with tm.assert_raises_regex(ValueError, - 'unit abbreviation w/o a number'): - idx.get_loc(idx[1], method='nearest', tolerance='foo') - - with pytest.raises( - ValueError, - match='tolerance size must match'): - idx.get_loc(idx[1], method='nearest', - tolerance=[Timedelta(0).to_timedelta64(), - Timedelta(0).to_timedelta64()]) - - for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: - assert idx.get_loc('1 day 1 hour', method) == loc - - # GH 16909 - assert idx.get_loc(idx[1].to_timedelta64()) == 1 - - # GH 16896 - assert idx.get_loc('0 days') == 0 - - def test_get_loc_nat(self): - tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00']) - - assert tidx.get_loc(pd.NaT) == 1 - assert tidx.get_loc(None) == 1 - assert 
tidx.get_loc(float('nan')) == 1 - assert tidx.get_loc(np.nan) == 1 - - def test_get_indexer(self): - idx = pd.to_timedelta(['0 days', '1 days', '2 days']) - tm.assert_numpy_array_equal(idx.get_indexer(idx), - np.array([0, 1, 2], dtype=np.intp)) - - target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), - np.array([-1, 0, 1], dtype=np.intp)) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), - np.array([0, 1, 2], dtype=np.intp)) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), - np.array([0, 1, 1], dtype=np.intp)) - - res = idx.get_indexer(target, 'nearest', - tolerance=pd.Timedelta('1 hour')) - tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp)) - def test_pickle_compat_construction(self): pass @@ -144,53 +84,6 @@ def test_difference_freq(self): tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal('freq', idx_diff, expected) - def test_take(self): - - tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00'] - idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx') - expected = TimedeltaIndex(tds, freq=None, name='idx') - - taken1 = idx.take([2, 4, 10]) - taken2 = idx[[2, 4, 10]] - - for taken in [taken1, taken2]: - tm.assert_index_equal(taken, expected) - assert isinstance(taken, TimedeltaIndex) - assert taken.freq is None - assert taken.name == expected.name - - def test_take_fill_value(self): - # GH 12631 - idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'], - name='xxx') - result = idx.take(np.array([1, 0, -1])) - expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'], - name='xxx') - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'], - name='xxx') - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - 
fill_value=True) - expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'], - name='xxx') - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - def test_isin(self): index = tm.makeTimedeltaIndex(4)
There are `test_take_foo` and similar scattered about. The canonical location appears to be in the test_indexing modules.
https://api.github.com/repos/pandas-dev/pandas/pulls/18145
2017-11-06T23:01:32Z
2017-11-07T13:09:48Z
2017-11-07T13:09:47Z
2017-12-08T19:40:52Z
update imports and depends
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index 0456033dbb731..dd68a828bfd0a 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -18,9 +18,8 @@ from pandas.compat import PY2 cimport cython from tslibs.np_datetime cimport (pandas_datetimestruct, - dtstruct_to_dt64, dt64_to_dtstruct) -from datetime cimport is_leapyear - + dtstruct_to_dt64, dt64_to_dtstruct, + is_leapyear) cimport util from util cimport is_period_object, is_string_object, INT32_MIN diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd index ac975a3bf3537..9dde5fa997c27 100644 --- a/pandas/_libs/src/datetime.pxd +++ b/pandas/_libs/src/datetime.pxd @@ -17,10 +17,6 @@ cdef extern from "numpy/ndarrayobject.h": NPY_UNSAFE_CASTING -cdef extern from "numpy_helper.h": - npy_datetime get_datetime64_value(object o) - npy_timedelta get_timedelta64_value(object o) - cdef extern from "numpy/npy_common.h": ctypedef unsigned char npy_bool @@ -52,12 +48,7 @@ cdef extern from "datetime/np_datetime.h": void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil - int days_per_month_table[2][12] - pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS - int dayofweek(int y, int m, int d) nogil - int is_leapyear(int64_t year) nogil - PANDAS_DATETIMEUNIT get_datetime64_unit(object o) cdef extern from "datetime/np_datetime_strings.h": diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index ad683459ad878..8a9a05723d9fe 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -18,14 +18,6 @@ The full license is in the LICENSE file, distributed with this software. 
PANDAS_INLINE npy_int64 get_nat(void) { return NPY_MIN_INT64; } -PANDAS_INLINE npy_datetime get_datetime64_value(PyObject* obj) { - return ((PyDatetimeScalarObject*)obj)->obval; -} - -PANDAS_INLINE npy_timedelta get_timedelta64_value(PyObject* obj) { - return ((PyTimedeltaScalarObject*)obj)->obval; -} - PANDAS_INLINE int is_integer_object(PyObject* obj) { return (!PyBool_Check(obj)) && PyArray_IsIntegerScalar(obj); } diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 08a0ed713d936..7cb55e6dfe992 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -38,14 +38,7 @@ from cpython.datetime cimport (PyDelta_Check, PyTZInfo_Check, # import datetime C API PyDateTime_IMPORT # this is our datetime.pxd -from datetime cimport ( - pandas_datetime_to_datetimestruct, - days_per_month_table, - PANDAS_DATETIMEUNIT, - _string_to_dts, - is_leapyear, - dayofweek, - PANDAS_FR_ns) +from datetime cimport pandas_datetime_to_datetimestruct, _string_to_dts # stdlib datetime imports from datetime import time as datetime_time @@ -54,11 +47,13 @@ from tslibs.np_datetime cimport (check_dts_bounds, reverse_ops, cmp_scalar, pandas_datetimestruct, + PANDAS_DATETIMEUNIT, PANDAS_FR_ns, dt64_to_dtstruct, dtstruct_to_dt64, pydatetime_to_dt64, pydate_to_dt64, npy_datetime, get_datetime64_unit, get_datetime64_value, - get_timedelta64_value) + get_timedelta64_value, + days_per_month_table, is_leapyear, dayofweek) from tslibs.np_datetime import OutOfBoundsDatetime from khash cimport ( diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 61efc865112a9..6c4330b7864d8 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -20,13 +20,13 @@ PyDateTime_IMPORT from np_datetime cimport (check_dts_bounds, pandas_datetimestruct, + PANDAS_DATETIMEUNIT, PANDAS_FR_ns, dt64_to_dtstruct, dtstruct_to_dt64, - pydatetime_to_dt64) + pydatetime_to_dt64, + npy_datetime, + get_datetime64_unit, get_datetime64_value) -from 
datetime cimport (pandas_datetime_to_datetimestruct, - PANDAS_DATETIMEUNIT, PANDAS_FR_ns, npy_datetime, - _string_to_dts, - get_datetime64_unit, get_datetime64_value) +from datetime cimport pandas_datetime_to_datetimestruct, _string_to_dts cimport util from util cimport (is_string_object, diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index b40646295cce5..d74a6b53f4228 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -17,12 +17,8 @@ from numpy cimport ndarray, int64_t, int32_t, int8_t np.import_array() -from np_datetime cimport pandas_datetimestruct, dt64_to_dtstruct - -from datetime cimport ( - days_per_month_table, - is_leapyear, - dayofweek) +from np_datetime cimport (pandas_datetimestruct, dt64_to_dtstruct, + days_per_month_table, is_leapyear, dayofweek) cimport util diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 1ae0499f90c0d..f13cffa23a198 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -46,6 +46,11 @@ cdef extern from "../src/datetime/np_datetime.h": PANDAS_FR_fs PANDAS_FR_as + int days_per_month_table[2][12] + int dayofweek(int y, int m, int d) nogil + int is_leapyear(int64_t year) nogil + + cdef int reverse_ops[6] cdef bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1 diff --git a/setup.py b/setup.py index bd7c8f175607c..0dba1a0588e8a 100755 --- a/setup.py +++ b/setup.py @@ -526,9 +526,10 @@ def pxd(name): 'pxdfiles': ['_libs/src/util', '_libs/lib', '_libs/tslibs/timezones', - '_libs/tslibs/nattype'], - 'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'], - 'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']}, + '_libs/tslibs/nattype', + '_libs/tslibs/np_datetime'], + 'depends': ['pandas/_libs/src/period_helper.h'], + 'sources': ['pandas/_libs/src/period_helper.c']}, '_libs.properties': { 'pyxfile': '_libs/properties', 'include': []}, @@ -557,9 +558,8 @@ def 
pxd(name): 'sources': np_datetime_sources}, '_libs.tslibs.fields': { 'pyxfile': '_libs/tslibs/fields', - 'pxdfiles': ['_libs/src/util'], - 'depends': tseries_depends, - 'sources': np_datetime_sources}, + 'pxdfiles': ['_libs/src/util', + '_libs/tslibs/np_datetime']}, '_libs.tslibs.frequencies': { 'pyxfile': '_libs/tslibs/frequencies', 'pxdfiles': ['_libs/src/util']},
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18144
2017-11-06T16:58:26Z
2017-11-06T23:02:20Z
null
2017-12-08T19:40:53Z
Separate out non-scalar tests from scalar tests; move to ?? in follow-up
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ab2e810d77634..d693c5ffe229f 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -20,6 +20,15 @@ class TestDataFrameApply(TestData): + def test_map_box_timestamps(self): + # GH#2689, GH#2627 + s = Series(date_range('1/1/2000', periods=10)) + + def f(x): + return (x.hour, x.day, x.month) + + # it works! + DataFrame(s).applymap(f) def test_apply(self): with np.errstate(all='ignore'): diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 78554d98ab5df..852681ce5e6c3 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -594,6 +594,15 @@ def test_setitem_boolean_column(self): assert_frame_equal(self.frame, expected) + def test_frame_setitem_timestamp(self): + # GH#2155 + columns = DatetimeIndex(start='1/1/2012', end='2/1/2012', freq=BDay()) + index = lrange(10) + data = DataFrame(columns=columns, index=index) + t = datetime(2012, 11, 1) + ts = Timestamp(t) + data[ts] = np.nan # works + def test_setitem_corner(self): # corner case df = DataFrame({'B': [1., 2., 3.], diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 1fca0445de5c4..14df2e5ea3647 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -5,6 +5,7 @@ import pytest import numpy as np +import pytz from pytz import timezone from datetime import datetime, timedelta, time @@ -14,6 +15,7 @@ from pandas import date_range, bdate_range, offsets, DatetimeIndex, Timestamp from pandas.tseries.offsets import (generate_range, CDay, BDay, DateOffset, MonthEnd, prefix_mapping) +from pandas._libs.tslibs.timezones import maybe_get_tz, dateutil_gettz from pandas.tests.series.common import TestData @@ -390,7 +392,7 @@ def test_range_tz_dateutil(self): # see gh-2906 # Use maybe_get_tz to fix 
filename in tz under dateutil. - from pandas._libs.tslibs.timezones import maybe_get_tz + tz = lambda x: maybe_get_tz('dateutil/' + x) start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern')) @@ -631,3 +633,58 @@ def test_all_custom_freq(self, freq): msg = 'invalid custom frequency string: {freq}' with tm.assert_raises_regex(ValueError, msg.format(freq=bad_freq)): bdate_range(START, END, freq=bad_freq) + + +class TestTimestampEquivDateRange(object): + # Older tests in scalar.test_timestamp.TestTimeSeries constructed + # their `stamp` objects using `date_range` instead of the `Timestamp` + # constructor. TestTimestampEquivDateRange checks that these are + # equivalent in the pertinent cases. + + def test_date_range_timestamp_equiv(self): + rng = date_range('20090415', '20090519', tz='US/Eastern') + stamp = rng[0] + + ts = Timestamp('20090415', tz='US/Eastern', freq='D') + assert ts == stamp + + def test_date_range_timestamp_equiv_dateutil(self): + rng = date_range('20090415', '20090519', tz='dateutil/US/Eastern') + stamp = rng[0] + + ts = Timestamp('20090415', tz='dateutil/US/Eastern', freq='D') + assert ts == stamp + + def test_date_range_timestamp_equiv_explicit_pytz(self): + rng = date_range('20090415', '20090519', + tz=pytz.timezone('US/Eastern')) + stamp = rng[0] + + ts = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D') + assert ts == stamp + + def test_date_range_timestamp_equiv_explicit_dateutil(self): + tm._skip_if_windows_python_3() + + rng = date_range('20090415', '20090519', + tz=dateutil_gettz('US/Eastern')) + stamp = rng[0] + + ts = Timestamp('20090415', tz=dateutil_gettz('US/Eastern'), freq='D') + assert ts == stamp + + def test_date_range_timestamp_equiv_from_datetime_instance(self): + datetime_instance = datetime(2014, 3, 4) + # build a timestamp with a frequency, since then it supports + # addition/subtraction of integers + timestamp_instance = date_range(datetime_instance, periods=1, + freq='D')[0] + + ts = Timestamp(datetime_instance, 
freq='D') + assert ts == timestamp_instance + + def test_date_range_timestamp_equiv_preserve_frequency(self): + timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0] + ts = Timestamp('2014-03-05', freq='D') + + assert timestamp_instance == ts diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 9e4f8d979ca99..3f682ec5840a9 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -8,17 +8,69 @@ from itertools import product import pandas as pd import pandas._libs.tslib as tslib +from pandas._libs import period as libperiod import pandas.util.testing as tm from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, date_range, _np_version_under1p10, Index, bdate_range) from pandas.tseries.offsets import BMonthEnd, CDay, BDay +from pandas.tseries.frequencies import (RESO_DAY, RESO_HR, RESO_MIN, RESO_US, + RESO_MS, RESO_SEC) from pandas.tests.test_base import Ops START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) +class TestDatetimeIndexVectorizedTimestamp(object): + def test_timestamp_date_out_of_range(self): + # see gh-1475 + pytest.raises(ValueError, DatetimeIndex, ['1400-01-01']) + pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) + + def test_timestamp_fields(self): + # extra fields from DatetimeIndex like quarter and week + idx = tm.makeDateIndex(100) + + fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', + 'days_in_month', 'is_month_start', 'is_month_end', + 'is_quarter_start', 'is_quarter_end', 'is_year_start', + 'is_year_end', 'weekday_name'] + for f in fields: + expected = getattr(idx, f)[-1] + result = getattr(Timestamp(idx[-1]), f) + assert result == expected + + assert idx.freq == Timestamp(idx[-1], idx.freq).freq + assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr + + def test_tz_localize_ambiguous(self): + ts = Timestamp('2014-11-02 01:00') + ts_dst = ts.tz_localize('US/Eastern', 
ambiguous=True) + ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False) + + rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern') + assert rng[1] == ts_dst + assert rng[2] == ts_no_dst + pytest.raises(ValueError, ts.tz_localize, 'US/Eastern', + ambiguous='infer') + + def test_resolution(self): + for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', + 'S', 'L', 'U'], + [RESO_DAY, RESO_DAY, + RESO_DAY, RESO_DAY, + RESO_HR, RESO_MIN, + RESO_SEC, RESO_MS, + RESO_US]): + for tz in [None, 'Asia/Tokyo', 'US/Eastern', + 'dateutil/US/Eastern']: + idx = date_range(start='2013-04-01', periods=30, freq=freq, + tz=tz) + result = libperiod.resolution(idx.asi8, idx.tz) + assert result == expected + + class TestDatetimeIndexOps(Ops): tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore', 'dateutil/US/Pacific'] @@ -142,6 +194,28 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, dr, out=0) + # TODO: De-dup with version below + def test_round2(self): + # tz-naive + dti = date_range('20130101 09:10:11', periods=5) + result = dti.round('D') + expected = date_range('20130101', periods=5) + tm.assert_index_equal(result, expected) + + # tz-aware + dti = date_range('20130101 09:10:11', + periods=5).tz_localize('UTC').tz_convert('US/Eastern') + result = dti.round('D') + expected = date_range('20130101', periods=5).tz_localize('US/Eastern') + tm.assert_index_equal(result, expected) + + result = dti.round('s') + tm.assert_index_equal(result, dti) + + # invalid + for freq in ['Y', 'M', 'foobar']: + pytest.raises(ValueError, lambda: dti.round(freq)) + def test_round(self): for tz in self.tz: rng = pd.date_range(start='2016-01-01', periods=5, diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index a9c26ebb90359..6bb4229883525 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ 
b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -13,6 +13,21 @@ class TestSlicing(object): + def test_dti_slicing(self): + dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') + dti2 = dti[[1, 3, 5]] + + v1 = dti2[0] + v2 = dti2[1] + v3 = dti2[2] + + assert v1 == Timestamp('2/28/2005') + assert v2 == Timestamp('4/30/2005') + assert v3 == Timestamp('6/30/2005') + + # don't carry freq through irregular slicing + assert dti2.freq is None + def test_slice_keeps_name(self): # GH4226 st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index cb88bac6386f7..2caedc2e3294c 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -11,6 +11,17 @@ class TestTimedeltaIndex(object): _multiprocess_can_split_ = True + def test_contains(self): + # Checking for any NaT-like objects + # GH 13603 + td = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1) + for v in [pd.NaT, None, float('nan'), np.nan]: + assert not (v in td) + + td = pd.to_timedelta([pd.NaT]) + for v in [pd.NaT, None, float('nan'), np.nan]: + assert (v in td) + def test_insert(self): idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx') diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 67238665a2e8a..d18924ef4f23d 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -98,6 +98,57 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, td, out=0) + # TODO: Dedup with version below + def test_round2(self): + t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us') + t2 = -1 * t1 + t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s') + t1c = pd.TimedeltaIndex([1, 1, 1], unit='D') + + # note that negative times round DOWN! 
so don't give whole numbers + for (freq, s1, s2) in [('N', t1, t2), + ('U', t1, t2), + ('L', t1a, + TimedeltaIndex(['-1 days +00:00:00', + '-2 days +23:58:58', + '-2 days +23:57:56'], + dtype='timedelta64[ns]', + freq=None) + ), + ('S', t1a, + TimedeltaIndex(['-1 days +00:00:00', + '-2 days +23:58:58', + '-2 days +23:57:56'], + dtype='timedelta64[ns]', + freq=None) + ), + ('12T', t1c, + TimedeltaIndex(['-1 days', + '-1 days', + '-1 days'], + dtype='timedelta64[ns]', + freq=None) + ), + ('H', t1c, + TimedeltaIndex(['-1 days', + '-1 days', + '-1 days'], + dtype='timedelta64[ns]', + freq=None) + ), + ('d', t1c, + pd.TimedeltaIndex([-1, -1, -1], unit='D') + )]: + + r1 = t1.round(freq) + tm.assert_index_equal(r1, s1) + r2 = t2.round(freq) + tm.assert_index_equal(r2, s2) + + # invalid + for freq in ['Y', 'M', 'foobar']: + pytest.raises(ValueError, lambda: t1.round(freq)) + def test_round(self): td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min') elt = td[1] diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 533b06088f1bf..ad69cb788f224 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -401,3 +401,44 @@ def test_series_box_timedelta(self): s = Series(rng) assert isinstance(s[1], Timedelta) assert isinstance(s.iat[2], Timedelta) + + +class TestTimedeltaIndexVectorizedTimedelta(object): + + def test_nat_converters(self): + + def testit(unit, transform): + # array + result = pd.to_timedelta(np.arange(5), unit=unit) + expected = TimedeltaIndex([np.timedelta64(i, transform(unit)) + for i in np.arange(5).tolist()]) + tm.assert_index_equal(result, expected) + + # scalar + result = pd.to_timedelta(2, unit=unit) + expected = Timedelta(np.timedelta64(2, transform(unit)).astype( + 'timedelta64[ns]')) + assert result == expected + + # validate all units + # GH 6855 + for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']: + 
testit(unit, lambda x: x.upper()) + for unit in ['days', 'day', 'Day', 'Days']: + testit(unit, lambda x: 'D') + for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US', + 'NS']: + testit(unit, lambda x: x.lower()) + + # offsets + + # m + testit('T', lambda x: 'm') + + # ms + testit('L', lambda x: 'ms') + + def test_timedelta_hash_equality(self): + # GH 11129 + tds = timedelta_range('1 second', periods=20) + assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds) diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 0c8ea98a44d50..0e01d01badf4a 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -24,6 +24,13 @@ class TestToHTML(object): + def test_date_range_to_html_timestamp(self): + rng = pd.date_range('2000-01-01', periods=10) + df = DataFrame(np.random.randn(10, 4), index=rng) + + result = df.to_html() + assert '2000-01-01' in result + def test_to_html_with_col_space(self): def check_with_width(df, col_space): # check that col_space affects HTML generation diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 17c818779c76d..1110e9341e3a2 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -7,8 +7,7 @@ import pandas as pd import pandas.util.testing as tm from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct -from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series, - to_timedelta, compat) +from pandas import Timedelta, to_timedelta, compat from pandas._libs.tslib import iNaT, NaT @@ -419,38 +418,6 @@ def test_nat_converters(self): assert to_timedelta('nat', box=False).astype('int64') == iNaT assert to_timedelta('nan', box=False).astype('int64') == iNaT - def testit(unit, transform): - - # array - result = to_timedelta(np.arange(5), unit=unit) - expected = TimedeltaIndex([np.timedelta64(i, transform(unit)) - for i in 
np.arange(5).tolist()]) - tm.assert_index_equal(result, expected) - - # scalar - result = to_timedelta(2, unit=unit) - expected = Timedelta(np.timedelta64(2, transform(unit)).astype( - 'timedelta64[ns]')) - assert result == expected - - # validate all units - # GH 6855 - for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']: - testit(unit, lambda x: x.upper()) - for unit in ['days', 'day', 'Day', 'Days']: - testit(unit, lambda x: 'D') - for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US', - 'NS']: - testit(unit, lambda x: x.lower()) - - # offsets - - # m - testit('T', lambda x: 'm') - - # ms - testit('L', lambda x: 'ms') - def test_numeric_conversions(self): assert ct(0) == np.timedelta64(0, 'ns') assert ct(10) == np.timedelta64(10, 'ns') @@ -502,66 +469,6 @@ def test_round(self): for freq in ['Y', 'M', 'foobar']: pytest.raises(ValueError, lambda: t1.round(freq)) - t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us') - t2 = -1 * t1 - t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s') - t1c = pd.TimedeltaIndex([1, 1, 1], unit='D') - - # note that negative times round DOWN! 
so don't give whole numbers - for (freq, s1, s2) in [('N', t1, t2), - ('U', t1, t2), - ('L', t1a, - TimedeltaIndex(['-1 days +00:00:00', - '-2 days +23:58:58', - '-2 days +23:57:56'], - dtype='timedelta64[ns]', - freq=None) - ), - ('S', t1a, - TimedeltaIndex(['-1 days +00:00:00', - '-2 days +23:58:58', - '-2 days +23:57:56'], - dtype='timedelta64[ns]', - freq=None) - ), - ('12T', t1c, - TimedeltaIndex(['-1 days', - '-1 days', - '-1 days'], - dtype='timedelta64[ns]', - freq=None) - ), - ('H', t1c, - TimedeltaIndex(['-1 days', - '-1 days', - '-1 days'], - dtype='timedelta64[ns]', - freq=None) - ), - ('d', t1c, - pd.TimedeltaIndex([-1, -1, -1], unit='D') - )]: - - r1 = t1.round(freq) - tm.assert_index_equal(r1, s1) - r2 = t2.round(freq) - tm.assert_index_equal(r2, s2) - - # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: t1.round(freq)) - - def test_contains(self): - # Checking for any NaT-like objects - # GH 13603 - td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1) - for v in [pd.NaT, None, float('nan'), np.nan]: - assert not (v in td) - - td = to_timedelta([pd.NaT]) - for v in [pd.NaT, None, float('nan'), np.nan]: - assert (v in td) - def test_identity(self): td = Timedelta(10, unit='d') @@ -640,27 +547,6 @@ def conv(v): # invalid pytest.raises(ValueError, ct, '- 1days, 00') - def test_overflow(self): - # GH 9442 - s = Series(pd.date_range('20130101', periods=100000, freq='H')) - s[0] += pd.Timedelta('1s 1ms') - - # mean - result = (s - s.min()).mean() - expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s) - ).sum()) - - # the computation is converted to float so - # might be some loss of precision - assert np.allclose(result.value / 1000, expected.value / 1000) - - # sum - pytest.raises(ValueError, lambda: (s - s.min()).sum()) - s1 = s[0:10000] - pytest.raises(ValueError, lambda: (s1 - s1.min()).sum()) - s2 = s[0:1000] - result = (s2 - s2.min()).sum() - def test_pickle(self): v = Timedelta('1 days 
10:11:12.0123456') @@ -676,9 +562,6 @@ def test_timedelta_hash_equality(self): d = {td: 2} assert d[v] == 2 - tds = timedelta_range('1 second', periods=20) - assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds) - # python timedeltas drop ns resolution ns_td = Timedelta(1, 'ns') assert hash(ns_td) != hash(ns_td.to_pytimedelta()) @@ -711,28 +594,6 @@ def test_implementation_limits(self): with pytest.raises(OverflowError): Timedelta(max_td.value + 1, 'ns') - def test_timedelta_arithmetic(self): - data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]') - deltas = [timedelta(days=1), Timedelta(1, unit='D')] - for delta in deltas: - result_method = data.add(delta) - result_operator = data + delta - expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]') - tm.assert_series_equal(result_operator, expected) - tm.assert_series_equal(result_method, expected) - - result_method = data.sub(delta) - result_operator = data - delta - expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]') - tm.assert_series_equal(result_operator, expected) - tm.assert_series_equal(result_method, expected) - # GH 9396 - result_method = data.div(delta) - result_operator = data / delta - expected = pd.Series([np.nan, 32.], dtype='float64') - tm.assert_series_equal(result_operator, expected) - tm.assert_series_equal(result_method, expected) - def test_arithmetic_overflow(self): with pytest.raises(OverflowError): @@ -741,35 +602,6 @@ def test_arithmetic_overflow(self): with pytest.raises(OverflowError): pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999) - def test_apply_to_timedelta(self): - timedelta_NaT = pd.to_timedelta('NaT') - - list_of_valid_strings = ['00:00:01', '00:00:02'] - a = pd.to_timedelta(list_of_valid_strings) - b = Series(list_of_valid_strings).apply(pd.to_timedelta) - # Can't compare until apply on a Series gives the correct dtype - # assert_series_equal(a, b) - - list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT] - - # TODO: unused? 
- a = pd.to_timedelta(list_of_strings) # noqa - b = Series(list_of_strings).apply(pd.to_timedelta) # noqa - # Can't compare until apply on a Series gives the correct dtype - # assert_series_equal(a, b) - - def test_components(self): - rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s') - rng.components - - # with nat - s = Series(rng) - s[1] = np.nan - - result = s.dt.components - assert not result.iloc[0].isna().all() - assert result.iloc[1].isna().all() - def test_isoformat(self): td = Timedelta(days=6, minutes=50, seconds=3, milliseconds=10, microseconds=10, nanoseconds=12) diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index a79fb554f9454..ce18998eb05c9 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -16,17 +16,13 @@ import pandas.util.testing as tm from pandas.tseries import offsets, frequencies -from pandas._libs import period + from pandas._libs.tslibs.timezones import get_timezone from pandas._libs.tslibs import conversion -from pandas.compat import lrange, long, PY3 -from pandas.util.testing import assert_series_equal +from pandas.compat import long, PY3 from pandas.compat.numpy import np_datetime64_compat -from pandas import (Timestamp, date_range, Period, Timedelta, compat, - Series, NaT, DataFrame, DatetimeIndex) -from pandas.tseries.frequencies import (RESO_DAY, RESO_HR, RESO_MIN, RESO_US, - RESO_MS, RESO_SEC) +from pandas import Timestamp, Period, Timedelta, NaT class TestTimestampArithmetic(object): @@ -411,17 +407,6 @@ def test_tz(self): assert conv.hour == 19 def test_tz_localize_ambiguous(self): - - ts = Timestamp('2014-11-02 01:00') - ts_dst = ts.tz_localize('US/Eastern', ambiguous=True) - ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False) - - rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern') - assert rng[1] == ts_dst - assert rng[2] == ts_no_dst - pytest.raises(ValueError, ts.tz_localize, 'US/Eastern', - 
ambiguous='infer') - # GH 8025 with tm.assert_raises_regex(TypeError, 'Cannot localize tz-aware Timestamp, ' @@ -551,7 +536,7 @@ def test_asm8(self): def test_fields(self): def check(value, equal): # that we are int/long like - assert isinstance(value, (int, compat.long)) + assert isinstance(value, (int, long)) assert value == equal # GH 10050 @@ -673,11 +658,6 @@ def test_round(self): expected = Timestamp('20130104 12:30:00') assert result == expected - dti = date_range('20130101 09:10:11', periods=5) - result = dti.round('D') - expected = date_range('20130101', periods=5) - tm.assert_index_equal(result, expected) - # floor dt = Timestamp('20130101 09:10:11') result = dt.floor('D') @@ -700,19 +680,6 @@ def test_round(self): result = dt.round('s') assert result == dt - dti = date_range('20130101 09:10:11', - periods=5).tz_localize('UTC').tz_convert('US/Eastern') - result = dti.round('D') - expected = date_range('20130101', periods=5).tz_localize('US/Eastern') - tm.assert_index_equal(result, expected) - - result = dti.round('s') - tm.assert_index_equal(result, dti) - - # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: dti.round(freq)) - # GH 14440 & 15578 result = Timestamp('2016-10-17 12:00:00.0015').round('ms') expected = Timestamp('2016-10-17 12:00:00.002000') @@ -835,7 +802,7 @@ def check(val, unit=None, h=1, s=1, us=0): check(days, unit='D', h=0) # using truediv, so these are like floats - if compat.PY3: + if PY3: check((val + 500000) / long(1000000000), unit='s', us=500) check((val + 500000000) / long(1000000000), unit='s', us=500000) check((val + 500000) / long(1000000), unit='ms', us=500) @@ -937,13 +904,6 @@ def test_compare_invalid(self): assert val != np.float64(1) assert val != np.int64(1) - # ops testing - df = DataFrame(np.random.randn(5, 2)) - a = df[0] - b = Series(np.random.randn(5)) - b.name = Timestamp('2000-01-01') - tm.assert_series_equal(a / b, 1 / (b / a)) - def test_cant_compare_tz_naive_w_aware(self): # see 
gh-1404 a = Timestamp('3/12/2012') @@ -1042,41 +1002,6 @@ def test_timestamp_compare_scalars(self): result = right_f(nat, rhs) assert result == expected - def test_timestamp_compare_series(self): - # make sure we can compare Timestamps on the right AND left hand side - # GH4982 - s = Series(date_range('20010101', periods=10), name='dates') - s_nat = s.copy(deep=True) - - s[0] = Timestamp('nat') - s[3] = Timestamp('nat') - - ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} - - for left, right in ops.items(): - left_f = getattr(operator, left) - right_f = getattr(operator, right) - - # no nats - expected = left_f(s, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s) - tm.assert_series_equal(result, expected) - - # nats - expected = left_f(s, Timestamp('nat')) - result = right_f(Timestamp('nat'), s) - tm.assert_series_equal(result, expected) - - # compare to timestamp with series containing nats - expected = left_f(s_nat, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s_nat) - tm.assert_series_equal(result, expected) - - # compare to nat with series containing nats - expected = left_f(s_nat, Timestamp('nat')) - result = right_f(Timestamp('nat'), s_nat) - tm.assert_series_equal(result, expected) - def test_is_leap_year(self): # GH 13727 for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: @@ -1211,15 +1136,6 @@ def test_timestamp_and_datetime(self): assert ((datetime(2013, 10, 12) - Timestamp(datetime(2013, 10, 13))).days == -1) - def test_timestamp_and_series(self): - timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', - tz='US/Eastern')) - first_timestamp = timestamp_series[0] - - delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')]) - assert_series_equal(timestamp_series - first_timestamp, delta_series) - assert_series_equal(first_timestamp - timestamp_series, -delta_series) - def test_addition_subtraction_types(self): # Assert on the types resulting from Timestamp +/- various date/time # 
objects @@ -1227,8 +1143,7 @@ def test_addition_subtraction_types(self): timedelta_instance = timedelta(seconds=1) # build a timestamp with a frequency, since then it supports # addition/subtraction of integers - timestamp_instance = date_range(datetime_instance, periods=1, - freq='D')[0] + timestamp_instance = Timestamp(datetime_instance, freq='D') assert type(timestamp_instance + 1) == Timestamp assert type(timestamp_instance - 1) == Timestamp @@ -1246,7 +1161,7 @@ def test_addition_subtraction_types(self): assert type(timestamp_instance - timedelta64_instance) == Timestamp def test_addition_subtraction_preserve_frequency(self): - timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0] + timestamp_instance = Timestamp('2014-03-05', freq='D') timedelta_instance = timedelta(days=1) original_freq = timestamp_instance.freq @@ -1261,22 +1176,6 @@ def test_addition_subtraction_preserve_frequency(self): assert (timestamp_instance - timedelta64_instance).freq == original_freq - def test_resolution(self): - - for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', - 'S', 'L', 'U'], - [RESO_DAY, RESO_DAY, - RESO_DAY, RESO_DAY, - RESO_HR, RESO_MIN, - RESO_SEC, RESO_MS, - RESO_US]): - for tz in [None, 'Asia/Tokyo', 'US/Eastern', - 'dateutil/US/Eastern']: - idx = date_range(start='2013-04-01', periods=30, freq=freq, - tz=tz) - result = period.resolution(idx.asi8, idx.tz) - assert result == expected - class TestTimestampToJulianDate(object): @@ -1330,30 +1229,10 @@ def test_timestamp_to_datetime_explicit_dateutil(self): assert stamp == dtval assert stamp.tzinfo == dtval.tzinfo - def test_timestamp_fields(self): - # extra fields from DatetimeIndex like quarter and week - idx = tm.makeDateIndex(100) - - fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', - 'days_in_month', 'is_month_start', 'is_month_end', - 'is_quarter_start', 'is_quarter_end', 'is_year_start', - 'is_year_end', 'weekday_name'] - for f in fields: - expected = getattr(idx, f)[-1] - 
result = getattr(Timestamp(idx[-1]), f) - assert result == expected - - assert idx.freq == Timestamp(idx[-1], idx.freq).freq - assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr - def test_timestamp_date_out_of_range(self): pytest.raises(ValueError, Timestamp, '1676-01-01') pytest.raises(ValueError, Timestamp, '2263-01-01') - # see gh-1475 - pytest.raises(ValueError, DatetimeIndex, ['1400-01-01']) - pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) - def test_timestamp_repr(self): # pre-1900 stamp = Timestamp('1850-01-01', tz='US/Eastern') @@ -1392,80 +1271,6 @@ def test_timestamp_compare_with_early_datetime(self): assert stamp < datetime(2700, 1, 1) assert stamp <= datetime(2700, 1, 1) - def test_timestamp_equality(self): - - # GH 11034 - s = Series([Timestamp('2000-01-29 01:59:00'), 'NaT']) - result = s != s - assert_series_equal(result, Series([False, True])) - result = s != s[0] - assert_series_equal(result, Series([False, True])) - result = s != s[1] - assert_series_equal(result, Series([True, True])) - - result = s == s - assert_series_equal(result, Series([True, False])) - result = s == s[0] - assert_series_equal(result, Series([True, False])) - result = s == s[1] - assert_series_equal(result, Series([False, False])) - - def test_series_box_timestamp(self): - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng) - - assert isinstance(s[5], Timestamp) - - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng, index=rng) - assert isinstance(s[5], Timestamp) - - assert isinstance(s.iat[5], Timestamp) - - def test_frame_setitem_timestamp(self): - # 2155 - columns = DatetimeIndex(start='1/1/2012', end='2/1/2012', - freq=offsets.BDay()) - index = lrange(10) - data = DataFrame(columns=columns, index=index) - t = datetime(2012, 11, 1) - ts = Timestamp(t) - data[ts] = np.nan # works - - def test_to_html_timestamp(self): - rng = date_range('2000-01-01', periods=10) - df = DataFrame(np.random.randn(10, 4), index=rng) 
- - result = df.to_html() - assert '2000-01-01' in result - - def test_series_map_box_timestamps(self): - # #2689, #2627 - s = Series(date_range('1/1/2000', periods=10)) - - def f(x): - return (x.hour, x.day, x.month) - - # it works! - s.map(f) - s.apply(f) - DataFrame(s).applymap(f) - - def test_dti_slicing(self): - dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') - dti2 = dti[[1, 3, 5]] - - v1 = dti2[0] - v2 = dti2[1] - v3 = dti2[2] - - assert v1 == Timestamp('2/28/2005') - assert v2 == Timestamp('4/30/2005') - assert v3 == Timestamp('6/30/2005') - - # don't carry freq through irregular slicing - assert dti2.freq is None - def test_woy_boundary(self): # make sure weeks at year boundaries are correct d = datetime(2013, 12, 31) @@ -1521,58 +1326,3 @@ def test_to_datetime_bijective(self): with tm.assert_produces_warning(exp_warning, check_stacklevel=False): assert (Timestamp(Timestamp.min.to_pydatetime()).value / 1000 == Timestamp.min.value / 1000) - - -class TestTimestampEquivDateRange(object): - # Older tests in TestTimeSeries constructed their `stamp` objects - # using `date_range` instead of the `Timestamp` constructor. - # TestTimestampEquivDateRange checks that these are equivalent in the - # pertinent cases. 
- - def test_date_range_timestamp_equiv(self): - rng = date_range('20090415', '20090519', tz='US/Eastern') - stamp = rng[0] - - ts = Timestamp('20090415', tz='US/Eastern', freq='D') - assert ts == stamp - - def test_date_range_timestamp_equiv_dateutil(self): - rng = date_range('20090415', '20090519', tz='dateutil/US/Eastern') - stamp = rng[0] - - ts = Timestamp('20090415', tz='dateutil/US/Eastern', freq='D') - assert ts == stamp - - def test_date_range_timestamp_equiv_explicit_pytz(self): - rng = date_range('20090415', '20090519', - tz=pytz.timezone('US/Eastern')) - stamp = rng[0] - - ts = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D') - assert ts == stamp - - def test_date_range_timestamp_equiv_explicit_dateutil(self): - tm._skip_if_windows_python_3() - from pandas._libs.tslibs.timezones import dateutil_gettz as gettz - - rng = date_range('20090415', '20090519', tz=gettz('US/Eastern')) - stamp = rng[0] - - ts = Timestamp('20090415', tz=gettz('US/Eastern'), freq='D') - assert ts == stamp - - def test_date_range_timestamp_equiv_from_datetime_instance(self): - datetime_instance = datetime(2014, 3, 4) - # build a timestamp with a frequency, since then it supports - # addition/subtraction of integers - timestamp_instance = date_range(datetime_instance, periods=1, - freq='D')[0] - - ts = Timestamp(datetime_instance, freq='D') - assert ts == timestamp_instance - - def test_date_range_timestamp_equiv_preserve_frequency(self): - timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0] - ts = Timestamp('2014-03-05', freq='D') - - assert timestamp_instance == ts diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index d0693984689a6..27378c7cae0bf 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -18,6 +18,16 @@ class TestSeriesApply(TestData): + def test_map_box_timestamps(self): + # GH#2689, GH#2627 + s = Series(pd.date_range('1/1/2000', periods=10)) + + def f(x): + return 
(x.hour, x.day, x.month) + + # it works! + s.map(f) + s.apply(f) def test_apply(self): with np.errstate(all='ignore'): diff --git a/pandas/tests/series/test_timedeltas.py b/pandas/tests/series/test_timedeltas.py new file mode 100644 index 0000000000000..52196439ef634 --- /dev/null +++ b/pandas/tests/series/test_timedeltas.py @@ -0,0 +1,84 @@ +""" test Series-boxed versions of the scalar Timedelta """ +from datetime import timedelta + +import pytest + +import numpy as np + +import pandas as pd +import pandas.util.testing as tm +from pandas import Timedelta, timedelta_range, Series, to_timedelta + + +class TestSeriesTimedeltas(object): + def test_apply_to_timedelta(self): + timedelta_NaT = to_timedelta('NaT') + + list_of_valid_strings = ['00:00:01', '00:00:02'] + a = to_timedelta(list_of_valid_strings) + b = Series(list_of_valid_strings).apply(to_timedelta) + # Can't compare until apply on a Series gives the correct dtype + # assert_series_equal(a, b) + + list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT] + + # TODO: unused? 
+ a = to_timedelta(list_of_strings) # noqa + b = Series(list_of_strings).apply(to_timedelta) # noqa + # Can't compare until apply on a Series gives the correct dtype + # assert_series_equal(a, b) + + def test_components(self): + rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s') + rng.components + + # with nat + s = Series(rng) + s[1] = np.nan + + result = s.dt.components + assert not result.iloc[0].isna().all() + assert result.iloc[1].isna().all() + + def test_timedelta_arithmetic(self): + data = Series(['nat', '32 days'], dtype='timedelta64[ns]') + deltas = [timedelta(days=1), Timedelta(1, unit='D')] + for delta in deltas: + result_method = data.add(delta) + result_operator = data + delta + expected = Series(['nat', '33 days'], dtype='timedelta64[ns]') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + + result_method = data.sub(delta) + result_operator = data - delta + expected = Series(['nat', '31 days'], dtype='timedelta64[ns]') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + # GH 9396 + result_method = data.div(delta) + result_operator = data / delta + expected = Series([np.nan, 32.], dtype='float64') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + + def test_overflow(self): + # GH 9442 + s = Series(pd.date_range('20130101', periods=100000, freq='H')) + s[0] += Timedelta('1s 1ms') + + # mean + result = (s - s.min()).mean() + expected = Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s) + ).sum()) + + # the computation is converted to float so + # might be some loss of precision + assert np.allclose(result.value / 1000, expected.value / 1000) + + # sum + pytest.raises(ValueError, lambda: (s - s.min()).sum()) + s1 = s[0:10000] + pytest.raises(ValueError, lambda: (s1 - s1.min()).sum()) + s2 = s[0:1000] + result = (s2 - s2.min()).sum() diff --git 
a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index c1ef70bba8634..30c0849694d2f 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -34,6 +34,12 @@ def assert_range_equal(left, right): class TestTimeSeries(TestData): + def test_compare_invalid(self): + # GH 8058 + a = Series(np.random.randn(5)) + b = Series(np.random.randn(5)) + b.name = Timestamp('2000-01-01') + tm.assert_series_equal(a / b, 1 / (b / a)) def test_shift(self): shifted = self.ts.shift(1) diff --git a/pandas/tests/series/test_timestamps.py b/pandas/tests/series/test_timestamps.py new file mode 100644 index 0000000000000..b456397991e5b --- /dev/null +++ b/pandas/tests/series/test_timestamps.py @@ -0,0 +1,84 @@ +""" test Series-boxed versions of the scalar Timestamp """ + +import operator +import numpy as np + +import pandas.util.testing as tm + +from pandas.util.testing import assert_series_equal +from pandas import Timestamp, date_range, Series + + +class TestSeriesTimestamps(object): + def test_series_box_timestamp(self): + rng = date_range('20090415', '20090519', freq='B') + s = Series(rng) + + assert isinstance(s[5], Timestamp) + + rng = date_range('20090415', '20090519', freq='B') + s = Series(rng, index=rng) + assert isinstance(s[5], Timestamp) + + assert isinstance(s.iat[5], Timestamp) + + def test_timestamp_equality(self): + # GH 11034 + s = Series([Timestamp('2000-01-29 01:59:00'), 'NaT']) + result = s != s + assert_series_equal(result, Series([False, True])) + result = s != s[0] + assert_series_equal(result, Series([False, True])) + result = s != s[1] + assert_series_equal(result, Series([True, True])) + + result = s == s + assert_series_equal(result, Series([True, False])) + result = s == s[0] + assert_series_equal(result, Series([True, False])) + result = s == s[1] + assert_series_equal(result, Series([False, False])) + + def test_timestamp_and_series(self): + timestamp_series = 
Series(date_range('2014-03-17', periods=2, freq='D', + tz='US/Eastern')) + first_timestamp = timestamp_series[0] + + delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')]) + assert_series_equal(timestamp_series - first_timestamp, delta_series) + assert_series_equal(first_timestamp - timestamp_series, -delta_series) + + def test_timestamp_compare_series(self): + # make sure we can compare Timestamps on the right AND left hand side + # GH4982 + s = Series(date_range('20010101', periods=10), name='dates') + s_nat = s.copy(deep=True) + + s[0] = Timestamp('nat') + s[3] = Timestamp('nat') + + ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} + + for left, right in ops.items(): + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # no nats + expected = left_f(s, Timestamp('20010109')) + result = right_f(Timestamp('20010109'), s) + tm.assert_series_equal(result, expected) + + # nats + expected = left_f(s, Timestamp('nat')) + result = right_f(Timestamp('nat'), s) + tm.assert_series_equal(result, expected) + + # compare to timestamp with series containing nats + expected = left_f(s_nat, Timestamp('20010109')) + result = right_f(Timestamp('20010109'), s_nat) + tm.assert_series_equal(result, expected) + + # compare to nat with series containing nats + expected = left_f(s_nat, Timestamp('nat')) + result = right_f(Timestamp('nat'), s_nat) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 3dfad2d4af75e..36709f1eba15e 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # pylint: disable-msg=E1101,W0612 import pytest @@ -45,8 +46,7 @@ def dst(self, dt): fixed_off_no_name = FixedOffset(-330, None) -class TestTimeZoneSupportPytz(object): - +class TimeZoneSupportPytz(object): def tz(self, tz): # Construct a timezone object from a string. 
Overridden in subclass to # parameterize tests. @@ -65,6 +65,145 @@ def cmptz(self, tz1, tz2): # tests. return tz1.zone == tz2.zone + +class TestSeriesTimeZoneSupportPytz(TimeZoneSupportPytz): + def test_tz_localize_empty_series(self): + # GH#2248 + ts = Series() + + ts2 = ts.tz_localize('utc') + assert ts2.index.tz == pytz.utc + + ts2 = ts.tz_localize(self.tzstr('US/Eastern')) + assert self.cmptz(ts2.index.tz, self.tz('US/Eastern')) + + def test_ambiguous_bool(self): + # make sure that we are correctly accepting bool values as ambiguous + + # GH#14402 + ts = Timestamp('2015-11-01 01:00:03') + expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central') + expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central') + + ser = Series([ts]) + expected0 = Series([expected0]) + expected1 = Series([expected1]) + + with pytest.raises(pytz.AmbiguousTimeError): + ser.dt.tz_localize('US/Central') + + result = ser.dt.tz_localize('US/Central', ambiguous=True) + assert_series_equal(result, expected0) + + result = ser.dt.tz_localize('US/Central', ambiguous=[True]) + assert_series_equal(result, expected0) + + result = ser.dt.tz_localize('US/Central', ambiguous=False) + assert_series_equal(result, expected1) + + result = ser.dt.tz_localize('US/Central', ambiguous=[False]) + assert_series_equal(result, expected1) + + def test_localized_at_time_between_time(self): + from datetime import time + + rng = date_range('4/16/2012', '5/1/2012', freq='H') + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_local = ts.tz_localize(self.tzstr('US/Eastern')) + + result = ts_local.at_time(time(10, 0)) + expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr( + 'US/Eastern')) + assert_series_equal(result, expected) + assert self.cmptz(result.index.tz, self.tz('US/Eastern')) + + t1, t2 = time(10, 0), time(11, 0) + result = ts_local.between_time(t1, t2) + expected = ts.between_time(t1, + t2).tz_localize(self.tzstr('US/Eastern')) + assert_series_equal(result, expected) + assert 
self.cmptz(result.index.tz, self.tz('US/Eastern')) + + def test_string_index_alias_tz_aware(self): + rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern')) + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts['1/3/2000'] + tm.assert_almost_equal(result, ts[2]) + + def test_tz_aware_asfreq(self): + dr = date_range('2011-12-01', '2012-07-20', freq='D', + tz=self.tzstr('US/Eastern')) + + s = Series(np.random.randn(len(dr)), index=dr) + + # it works! + s.asfreq('T') + + def test_dateutil_tzoffset_support(self): + values = [188.5, 328.25] + tzinfo = tzoffset(None, 7200) + index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo), + datetime(2012, 5, 11, 12, tzinfo=tzinfo)] + series = Series(data=values, index=index) + + assert series.index.tz == tzinfo + + # it works! #2443 + repr(series.index[0]) + + def test_getitem_pydatetime_tz(self): + index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00', + freq='H', tz=self.tzstr('Europe/Berlin')) + ts = Series(index=index, data=index.hour) + time_pandas = Timestamp('2012-12-24 17:00', + tz=self.tzstr('Europe/Berlin')) + time_datetime = self.localize( + self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0)) + assert ts[time_pandas] == ts[time_datetime] + + +class TestDataFrameTimeZoneSupportPytz(TimeZoneSupportPytz): + def test_frame_no_datetime64_dtype(self): + # after GH#7822 + # these retain the timezones on dict construction + + dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') + dr_tz = dr.tz_localize(self.tzstr('US/Eastern')) + e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr) + tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo) + assert e['B'].dtype == tz_expected + + # GH 2810 (with timezones) + datetimes_naive = [ts.to_pydatetime() for ts in dr] + datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz] + df = DataFrame({'dr': dr, + 'dr_tz': dr_tz, + 'datetimes_naive': datetimes_naive, + 'datetimes_with_tz': datetimes_with_tz}) + result = df.get_dtype_counts().sort_index() + 
expected = Series({'datetime64[ns]': 2, + str(tz_expected): 2}).sort_index() + assert_series_equal(result, expected) + + def test_frame_from_records_utc(self): + rec = {'datum': 1.5, + 'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)} + + # it works + DataFrame.from_records([rec], index='begin_time') + + def test_frame_reset_index(self): + dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern')) + df = DataFrame(np.random.randn(len(dr)), dr) + roundtripped = df.reset_index().set_index('index') + xp = df.index.tz + rs = roundtripped.index.tz + assert xp == rs + + +class TestTimeZoneSupportPytz(TimeZoneSupportPytz): def test_utc_to_local_no_modify(self): rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) @@ -253,17 +392,6 @@ def test_tz_localize_dti(self): pytest.raises(pytz.NonExistentTimeError, dti.tz_localize, self.tzstr('US/Eastern')) - def test_tz_localize_empty_series(self): - # #2248 - - ts = Series() - - ts2 = ts.tz_localize('utc') - assert ts2.index.tz == pytz.utc - - ts2 = ts.tz_localize(self.tzstr('US/Eastern')) - assert self.cmptz(ts2.index.tz, self.tz('US/Eastern')) - def test_astimezone(self): utc = Timestamp('3/11/2012 22:00', tz='UTC') expected = utc.tz_convert(self.tzstr('US/Eastern')) @@ -598,26 +726,6 @@ def f(): result = t.tz_localize('US/Central', ambiguous=False) assert result == expected1 - s = Series([t]) - expected0 = Series([expected0]) - expected1 = Series([expected1]) - - def f(): - s.dt.tz_localize('US/Central') - pytest.raises(pytz.AmbiguousTimeError, f) - - result = s.dt.tz_localize('US/Central', ambiguous=True) - assert_series_equal(result, expected0) - - result = s.dt.tz_localize('US/Central', ambiguous=[True]) - assert_series_equal(result, expected0) - - result = s.dt.tz_localize('US/Central', ambiguous=False) - assert_series_equal(result, expected1) - - result = s.dt.tz_localize('US/Central', ambiguous=[False]) - assert_series_equal(result, 
expected1) - def test_nonexistent_raise_coerce(self): # See issue 13057 from pytz.exceptions import NonExistentTimeError @@ -700,34 +808,6 @@ def test_index_astype_asobject_tzinfos(self): assert x == exval assert x.tzinfo == exval.tzinfo - def test_localized_at_time_between_time(self): - from datetime import time - - rng = date_range('4/16/2012', '5/1/2012', freq='H') - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_local = ts.tz_localize(self.tzstr('US/Eastern')) - - result = ts_local.at_time(time(10, 0)) - expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr( - 'US/Eastern')) - assert_series_equal(result, expected) - assert self.cmptz(result.index.tz, self.tz('US/Eastern')) - - t1, t2 = time(10, 0), time(11, 0) - result = ts_local.between_time(t1, t2) - expected = ts.between_time(t1, - t2).tz_localize(self.tzstr('US/Eastern')) - assert_series_equal(result, expected) - assert self.cmptz(result.index.tz, self.tz('US/Eastern')) - - def test_string_index_alias_tz_aware(self): - rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern')) - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts['1/3/2000'] - tm.assert_almost_equal(result, ts[2]) - def test_fixed_offset(self): dates = [datetime(2000, 1, 1, tzinfo=fixed_off), datetime(2000, 1, 2, tzinfo=fixed_off), @@ -781,29 +861,6 @@ def test_to_datetime_tzlocal(self): result = to_datetime(arr, utc=True) assert result.tz is pytz.utc - def test_frame_no_datetime64_dtype(self): - - # after 7822 - # these retain the timezones on dict construction - - dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') - dr_tz = dr.tz_localize(self.tzstr('US/Eastern')) - e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr) - tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo) - assert e['B'].dtype == tz_expected - - # GH 2810 (with timezones) - datetimes_naive = [ts.to_pydatetime() for ts in dr] - datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz] - df = DataFrame({'dr': dr, - 'dr_tz': dr_tz, - 
'datetimes_naive': datetimes_naive, - 'datetimes_with_tz': datetimes_with_tz}) - result = df.get_dtype_counts().sort_index() - expected = Series({'datetime64[ns]': 2, - str(tz_expected): 2}).sort_index() - assert_series_equal(result, expected) - def test_hongkong_tz_convert(self): # #1673 dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong') @@ -826,15 +883,6 @@ def test_shift_localized(self): result = dr_tz.shift(1, '10T') assert result.tz == dr_tz.tz - def test_tz_aware_asfreq(self): - dr = date_range('2011-12-01', '2012-07-20', freq='D', - tz=self.tzstr('US/Eastern')) - - s = Series(np.random.randn(len(dr)), index=dr) - - # it works! - s.asfreq('T') - def test_static_tzinfo(self): # it works! index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST')) @@ -867,43 +915,6 @@ def test_convert_datetime_list(self): assert dr.tz == dr2.tz assert dr2.name == 'foo' - def test_frame_from_records_utc(self): - rec = {'datum': 1.5, - 'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)} - - # it works - DataFrame.from_records([rec], index='begin_time') - - def test_frame_reset_index(self): - dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern')) - df = DataFrame(np.random.randn(len(dr)), dr) - roundtripped = df.reset_index().set_index('index') - xp = df.index.tz - rs = roundtripped.index.tz - assert xp == rs - - def test_dateutil_tzoffset_support(self): - values = [188.5, 328.25] - tzinfo = tzoffset(None, 7200) - index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo), - datetime(2012, 5, 11, 12, tzinfo=tzinfo)] - series = Series(data=values, index=index) - - assert series.index.tz == tzinfo - - # it works! 
#2443 - repr(series.index[0]) - - def test_getitem_pydatetime_tz(self): - index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00', - freq='H', tz=self.tzstr('Europe/Berlin')) - ts = Series(index=index, data=index.hour) - time_pandas = Timestamp('2012-12-24 17:00', - tz=self.tzstr('Europe/Berlin')) - time_datetime = self.localize( - self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0)) - assert ts[time_pandas] == ts[time_datetime] - def test_index_drop_dont_lose_tz(self): # #2621 ind = date_range("2012-12-01", periods=10, tz="utc") @@ -1330,60 +1341,11 @@ def test_tz_localize_roundtrip(self): tm.assert_index_equal(reset, idx) assert reset.tzinfo is None - def test_series_frame_tz_localize(self): - - rng = date_range('1/1/2011', periods=100, freq='H') - ts = Series(1, index=rng) - - result = ts.tz_localize('utc') - assert result.index.tz.zone == 'UTC' - - df = DataFrame({'a': 1}, index=rng) - result = df.tz_localize('utc') - expected = DataFrame({'a': 1}, rng.tz_localize('UTC')) - assert result.index.tz.zone == 'UTC' - assert_frame_equal(result, expected) - - df = df.T - result = df.tz_localize('utc', axis=1) - assert result.columns.tz.zone == 'UTC' - assert_frame_equal(result, expected.T) - - # Can't localize if already tz-aware - rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') - ts = Series(1, index=rng) - tm.assert_raises_regex(TypeError, 'Already tz-aware', - ts.tz_localize, 'US/Eastern') - - def test_series_frame_tz_convert(self): - rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern') - ts = Series(1, index=rng) - - result = ts.tz_convert('Europe/Berlin') - assert result.index.tz.zone == 'Europe/Berlin' - - df = DataFrame({'a': 1}, index=rng) - result = df.tz_convert('Europe/Berlin') - expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin')) - assert result.index.tz.zone == 'Europe/Berlin' - assert_frame_equal(result, expected) - - df = df.T - result = df.tz_convert('Europe/Berlin', axis=1) - assert 
result.columns.tz.zone == 'Europe/Berlin' - assert_frame_equal(result, expected.T) - - # can't convert tz-naive - rng = date_range('1/1/2011', periods=200, freq='D') - ts = Series(1, index=rng) - tm.assert_raises_regex(TypeError, "Cannot convert tz-naive", - ts.tz_convert, 'US/Eastern') - - def test_tz_convert_roundtrip(self): - for tz in self.timezones: - idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M', - tz='UTC') - exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M') + def test_tz_convert_roundtrip(self): + for tz in self.timezones: + idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M', + tz='UTC') + exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M') idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D', tz='UTC') @@ -1421,189 +1383,6 @@ def test_join_utc_convert(self): assert isinstance(result, DatetimeIndex) assert result.tz.zone == 'UTC' - def test_join_aware(self): - rng = date_range('1/1/2011', periods=10, freq='H') - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_utc = ts.tz_localize('utc') - - pytest.raises(Exception, ts.__add__, ts_utc) - pytest.raises(Exception, ts_utc.__add__, ts) - - test1 = DataFrame(np.zeros((6, 3)), - index=date_range("2012-11-15 00:00:00", periods=6, - freq="100L", tz="US/Central")) - test2 = DataFrame(np.zeros((3, 3)), - index=date_range("2012-11-15 00:00:00", periods=3, - freq="250L", tz="US/Central"), - columns=lrange(3, 6)) - - result = test1.join(test2, how='outer') - ex_index = test1.index.union(test2.index) - - tm.assert_index_equal(result.index, ex_index) - assert result.index.tz.zone == 'US/Central' - - # non-overlapping - rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", - tz="US/Central") - - rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", - tz="US/Eastern") - - result = rng.union(rng2) - assert result.tz.zone == 'UTC' - - def test_align_aware(self): - idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern') - 
idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern') - df1 = DataFrame(np.random.randn(len(idx1), 3), idx1) - df2 = DataFrame(np.random.randn(len(idx2), 3), idx2) - new1, new2 = df1.align(df2) - assert df1.index.tz == new1.index.tz - assert df2.index.tz == new2.index.tz - - # # different timezones convert to UTC - - # frame - df1_central = df1.tz_convert('US/Central') - new1, new2 = df1.align(df1_central) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - # series - new1, new2 = df1[0].align(df1_central[0]) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - # combination - new1, new2 = df1.align(df1_central[0], axis=0) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - df1[0].align(df1_central, axis=0) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - def test_append_aware(self): - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', - tz='US/Eastern') - rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', - tz='US/Eastern') - ts1 = Series([1], index=rng1) - ts2 = Series([2], index=rng2) - ts_result = ts1.append(ts2) - - exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], - tz='US/Eastern') - exp = Series([1, 2], index=exp_index) - assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC') - rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC') - ts1 = Series([1], index=rng1) - ts2 = Series([2], index=rng2) - ts_result = ts1.append(ts2) - - exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], - tz='UTC') - exp = Series([1, 2], index=exp_index) - assert_series_equal(ts_result, exp) - utc = rng1.tz - assert utc == ts_result.index.tz - - # GH 7795 - # different tz coerces to object dtype, not UTC - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', - tz='US/Eastern') - rng2 = date_range('1/1/2011 02:00', periods=1, 
freq='H', - tz='US/Central') - ts1 = Series([1], index=rng1) - ts2 = Series([2], index=rng2) - ts_result = ts1.append(ts2) - exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'), - Timestamp('1/1/2011 02:00', tz='US/Central')]) - exp = Series([1, 2], index=exp_index) - assert_series_equal(ts_result, exp) - - def test_append_dst(self): - rng1 = date_range('1/1/2016 01:00', periods=3, freq='H', - tz='US/Eastern') - rng2 = date_range('8/1/2016 01:00', periods=3, freq='H', - tz='US/Eastern') - ts1 = Series([1, 2, 3], index=rng1) - ts2 = Series([10, 11, 12], index=rng2) - ts_result = ts1.append(ts2) - - exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00', - '2016-01-01 03:00', '2016-08-01 01:00', - '2016-08-01 02:00', '2016-08-01 03:00'], - tz='US/Eastern') - exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) - assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - def test_append_aware_naive(self): - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') - rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', - tz='US/Eastern') - ts1 = Series(np.random.randn(len(rng1)), index=rng1) - ts2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ts1.append(ts2) - - assert ts_result.index.equals(ts1.index.asobject.append( - ts2.index.asobject)) - - # mixed - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') - rng2 = lrange(100) - ts1 = Series(np.random.randn(len(rng1)), index=rng1) - ts2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ts1.append(ts2) - assert ts_result.index.equals(ts1.index.asobject.append( - ts2.index)) - - def test_equal_join_ensure_utc(self): - rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern') - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_moscow = ts.tz_convert('Europe/Moscow') - - result = ts + ts_moscow - assert result.index.tz is pytz.utc - - result = ts_moscow + ts - assert result.index.tz is pytz.utc - - df = DataFrame({'a': ts}) - 
df_moscow = df.tz_convert('Europe/Moscow') - result = df + df_moscow - assert result.index.tz is pytz.utc - - result = df_moscow + df - assert result.index.tz is pytz.utc - - def test_arith_utc_convert(self): - rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') - - perm = np.random.permutation(100)[:90] - ts1 = Series(np.random.randn(90), - index=rng.take(perm).tz_convert('US/Eastern')) - - perm = np.random.permutation(100)[:90] - ts2 = Series(np.random.randn(90), - index=rng.take(perm).tz_convert('Europe/Berlin')) - - result = ts1 + ts2 - - uts1 = ts1.tz_convert('utc') - uts2 = ts2.tz_convert('utc') - expected = uts1 + uts2 - - assert result.index.tz == pytz.UTC - assert_series_equal(result, expected) - def test_intersection(self): rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') @@ -1734,6 +1513,245 @@ def test_nat(self): tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern')) +class TestSeriesTimeZones(object): + timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific'] + + def test_arith_utc_convert(self): + rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') + + perm = np.random.permutation(100)[:90] + ts1 = Series(np.random.randn(90), + index=rng.take(perm).tz_convert('US/Eastern')) + + perm = np.random.permutation(100)[:90] + ts2 = Series(np.random.randn(90), + index=rng.take(perm).tz_convert('Europe/Berlin')) + + result = ts1 + ts2 + + uts1 = ts1.tz_convert('utc') + uts2 = ts2.tz_convert('utc') + expected = uts1 + uts2 + + assert result.index.tz == pytz.UTC + assert_series_equal(result, expected) + + def test_append_aware_naive(self): + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') + rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', + tz='US/Eastern') + ts1 = Series(np.random.randn(len(rng1)), index=rng1) + ts2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ts1.append(ts2) + + assert ts_result.index.equals(ts1.index.asobject.append( + ts2.index.asobject)) + + # 
mixed + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') + rng2 = lrange(100) + ts1 = Series(np.random.randn(len(rng1)), index=rng1) + ts2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ts1.append(ts2) + assert ts_result.index.equals(ts1.index.asobject.append( + ts2.index)) + + def test_append_dst(self): + rng1 = date_range('1/1/2016 01:00', periods=3, freq='H', + tz='US/Eastern') + rng2 = date_range('8/1/2016 01:00', periods=3, freq='H', + tz='US/Eastern') + ts1 = Series([1, 2, 3], index=rng1) + ts2 = Series([10, 11, 12], index=rng2) + ts_result = ts1.append(ts2) + + exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00', + '2016-01-01 03:00', '2016-08-01 01:00', + '2016-08-01 02:00', '2016-08-01 03:00'], + tz='US/Eastern') + exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) + assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz + + def test_append_aware(self): + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', + tz='US/Eastern') + rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', + tz='US/Eastern') + ts1 = Series([1], index=rng1) + ts2 = Series([2], index=rng2) + ts_result = ts1.append(ts2) + + exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], + tz='US/Eastern') + exp = Series([1, 2], index=exp_index) + assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz + + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC') + rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC') + ts1 = Series([1], index=rng1) + ts2 = Series([2], index=rng2) + ts_result = ts1.append(ts2) + + exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], + tz='UTC') + exp = Series([1, 2], index=exp_index) + assert_series_equal(ts_result, exp) + utc = rng1.tz + assert utc == ts_result.index.tz + + # GH 7795 + # different tz coerces to object dtype, not UTC + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', + tz='US/Eastern') + rng2 = 
date_range('1/1/2011 02:00', periods=1, freq='H', + tz='US/Central') + ts1 = Series([1], index=rng1) + ts2 = Series([2], index=rng2) + ts_result = ts1.append(ts2) + exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'), + Timestamp('1/1/2011 02:00', tz='US/Central')]) + exp = Series([1, 2], index=exp_index) + assert_series_equal(ts_result, exp) + + +class TestDataFrameTimeZones(object): + timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific'] + + def test_series_frame_tz_localize(self): + rng = date_range('1/1/2011', periods=100, freq='H') + ts = Series(1, index=rng) + + result = ts.tz_localize('utc') + assert result.index.tz.zone == 'UTC' + + df = DataFrame({'a': 1}, index=rng) + result = df.tz_localize('utc') + expected = DataFrame({'a': 1}, rng.tz_localize('UTC')) + assert result.index.tz.zone == 'UTC' + assert_frame_equal(result, expected) + + df = df.T + result = df.tz_localize('utc', axis=1) + assert result.columns.tz.zone == 'UTC' + assert_frame_equal(result, expected.T) + + # Can't localize if already tz-aware + rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') + ts = Series(1, index=rng) + tm.assert_raises_regex(TypeError, 'Already tz-aware', + ts.tz_localize, 'US/Eastern') + + def test_series_frame_tz_convert(self): + rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern') + ts = Series(1, index=rng) + + result = ts.tz_convert('Europe/Berlin') + assert result.index.tz.zone == 'Europe/Berlin' + + df = DataFrame({'a': 1}, index=rng) + result = df.tz_convert('Europe/Berlin') + expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin')) + assert result.index.tz.zone == 'Europe/Berlin' + assert_frame_equal(result, expected) + + df = df.T + result = df.tz_convert('Europe/Berlin', axis=1) + assert result.columns.tz.zone == 'Europe/Berlin' + assert_frame_equal(result, expected.T) + + # can't convert tz-naive + rng = date_range('1/1/2011', periods=200, freq='D') + ts = Series(1, index=rng) + 
tm.assert_raises_regex(TypeError, "Cannot convert tz-naive", + ts.tz_convert, 'US/Eastern') + + def test_join_aware(self): + rng = date_range('1/1/2011', periods=10, freq='H') + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_utc = ts.tz_localize('utc') + + pytest.raises(Exception, ts.__add__, ts_utc) + pytest.raises(Exception, ts_utc.__add__, ts) + + test1 = DataFrame(np.zeros((6, 3)), + index=date_range("2012-11-15 00:00:00", periods=6, + freq="100L", tz="US/Central")) + test2 = DataFrame(np.zeros((3, 3)), + index=date_range("2012-11-15 00:00:00", periods=3, + freq="250L", tz="US/Central"), + columns=lrange(3, 6)) + + result = test1.join(test2, how='outer') + ex_index = test1.index.union(test2.index) + + tm.assert_index_equal(result.index, ex_index) + assert result.index.tz.zone == 'US/Central' + + # non-overlapping + rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", + tz="US/Central") + + rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", + tz="US/Eastern") + + result = rng.union(rng2) + assert result.tz.zone == 'UTC' + + def test_align_aware(self): + idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern') + idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern') + df1 = DataFrame(np.random.randn(len(idx1), 3), idx1) + df2 = DataFrame(np.random.randn(len(idx2), 3), idx2) + new1, new2 = df1.align(df2) + assert df1.index.tz == new1.index.tz + assert df2.index.tz == new2.index.tz + + # # different timezones convert to UTC + + # frame + df1_central = df1.tz_convert('US/Central') + new1, new2 = df1.align(df1_central) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + # series + new1, new2 = df1[0].align(df1_central[0]) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + # combination + new1, new2 = df1.align(df1_central[0], axis=0) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + df1[0].align(df1_central, axis=0) + assert new1.index.tz == pytz.UTC + 
assert new2.index.tz == pytz.UTC + + def test_equal_join_ensure_utc(self): + rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern') + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_moscow = ts.tz_convert('Europe/Moscow') + + result = ts + ts_moscow + assert result.index.tz is pytz.utc + + result = ts_moscow + ts + assert result.index.tz is pytz.utc + + df = DataFrame({'a': ts}) + df_moscow = df.tz_convert('Europe/Moscow') + result = df + df_moscow + assert result.index.tz is pytz.utc + + result = df_moscow + df + assert result.index.tz is pytz.utc + + class TestTslib(object): def test_tslib_tz_convert(self):
The goal I have in mind is getting to the point where we can test (and measure coverage) tslibs in isolation. That means isolating DataFrame and Series tests from everything else. This PR doesn't change any tests and doesn't move anything across modules, just puts DataFrame and Series tests in their own classes. Exactly what modules those belong in I haven't thought out. If I did this right, it shouldn't have any overlap with other outstanding PRs.
https://api.github.com/repos/pandas-dev/pandas/pulls/18142
2017-11-06T16:32:58Z
2017-11-20T11:18:21Z
null
2017-12-08T19:38:37Z
TST: Fixed failing unittests on python3 (GH18037).
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 5c64b0a55c09b..459cf40e9e7d5 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -89,6 +89,7 @@ Bug Fixes - Bug in ``pd.read_msgpack()`` with a non existent file is passed in Python 2 (:issue:`15296`) - Bug in ``DataFrame.groupby`` where key as tuple in a ``MultiIndex`` were interpreted as a list of keys (:issue:`17979`) +- .timestamp() unittests fail on python3 (:issue:`18037`) Conversion ^^^^^^^^^^ diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 4cd9a2fadeb32..bb046348be53c 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -1106,8 +1106,8 @@ def test_timestamp(self): if PY3: # should agree with datetime.timestamp method - dt = ts.to_pydatetime() - assert dt.timestamp() == ts.timestamp() + dt = uts.to_pydatetime() + assert dt.timestamp() == uts.timestamp() class TestTimestampNsOperations(object): diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index ddcf1bb7d8b7b..e755d9b654eb9 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -1283,8 +1283,10 @@ def test_replace_tzinfo(self): assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() - result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None) - result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None) + result_dt = (dt.replace(tzinfo=tzinfo) + .replace(tzinfo=pytz.timezone('UTC'))) + result_pd = (Timestamp(dt).replace(tzinfo=tzinfo) + .replace(tzinfo=pytz.timezone('UTC'))) if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 assert result_dt.timestamp() == result_pd.timestamp()
- [x] closes #18037 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18140
2017-11-06T15:24:19Z
2017-11-06T15:24:28Z
null
2017-11-06T15:24:28Z
TST: Check lossiness of floats with parse_dates
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index 90103e7bf26b0..4c0f67fa6876a 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -656,3 +656,21 @@ def test_parse_date_column_with_empty_string(self): [621, ' ']] expected = DataFrame(expected_data, columns=['case', 'opdate']) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("data,expected", [ + ("a\n135217135789158401\n1352171357E+5", + DataFrame({"a": [135217135789158401, + 135217135700000]}, dtype="float64")), + ("a\n99999999999\n123456789012345\n1234E+0", + DataFrame({"a": [99999999999, + 123456789012345, + 1234]}, dtype="float64")) + ]) + @pytest.mark.parametrize("parse_dates", [True, False]) + def test_parse_date_float(self, data, expected, parse_dates): + # see gh-2697 + # + # Date parsing should fail, so we leave the data untouched + # (i.e. float precision should remain unchanged). + result = self.read_csv(StringIO(data), parse_dates=parse_dates) + tm.assert_frame_equal(result, expected)
The examples don't fail anymore with `read_csv`, so let's add them as tests. Closes #2697.
https://api.github.com/repos/pandas-dev/pandas/pulls/18136
2017-11-06T08:46:36Z
2017-11-06T18:18:29Z
2017-11-06T18:18:29Z
2017-12-11T20:24:39Z
ENH: add Bland-Altman plot
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 54f87febdc214..d58b8213b5939 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -571,3 +571,112 @@ def r(h): ax.legend() ax.grid() return ax + + +def bland_altman_plot(m1, m2, + sd_limit=1.96, + ax=None, + scatter_kwds=None, + mean_line_kwds=None, + limit_lines_kwds=None): + """ + Bland-Altman Plot. + + A Bland-Altman plot is a graphical method to analyze the differences + between two methods of measurement. The mean of the measures is plotted + against their difference. + + Parameters + ---------- + m1, m2: pandas Series or array-like + + sd_limit : float, default 1.96 + The limit of agreements expressed in terms of the standard deviation of + the differences. If `md` is the mean of the differences, and `sd` is + the standard deviation of those differences, then the limits of + agreement that will be plotted will be + md - (sd_limit * sd), md + (sd_limit * sd) + The default of 1.96 will produce 95% confidence intervals for the means + of the differences. + + ax: matplotlib.axis, optional + matplotlib axis object to plot on. + + scatter_kwargs: keywords + Options to to style the scatter plot. Accepts any keywords for the + matplotlib Axes.scatter plotting method + + mean_line_kwds: keywords + Options to to style the scatter plot. Accepts any keywords for the + matplotlib Axes.axhline plotting method + + limit_lines_kwds: keywords + Options to to style the scatter plot. 
Accepts any keywords for the + matplotlib Axes.axhline plotting method + + Returns + ------- + ax: matplotlib Axis object + """ + + import matplotlib.pyplot as plt + from pandas import Series + + if len(m1) != len(m2): + raise ValueError('m1 does not have the same length as m2.') + if sd_limit < 0: + raise ValueError('sd_limit ({}) is less than 0.'.format(sd_limit)) + m1 = Series(m1) + m2 = Series(m2) + means = np.mean([m1, m2], axis=0) + diffs = m1 - m2 + mean_diff = np.mean(diffs) + std_diff = np.std(diffs, axis=0) + + if ax is None: + ax = plt.gca() + + scatter_kwds = scatter_kwds or {} + if 's' not in scatter_kwds: + scatter_kwds['s'] = 20 + mean_line_kwds = mean_line_kwds or {} + limit_lines_kwds = limit_lines_kwds or {} + for kwds in [mean_line_kwds, limit_lines_kwds]: + if 'color' not in kwds: + kwds['color'] = 'gray' + if 'linewidth' not in kwds: + kwds['linewidth'] = 1 + if 'linestyle' not in mean_line_kwds: + kwds['linestyle'] = '--' + if 'linestyle' not in limit_lines_kwds: + kwds['linestyle'] = ':' + + ax.scatter(means, diffs, **scatter_kwds) + half_ylim = np.max([np.min(diffs), np.max(diffs)]) * 1.5 + ax.set_ylim(-half_ylim, half_ylim) + ax.axhline(mean_diff, **mean_line_kwds) # draw mean line + if sd_limit > 0: + limit_of_agreement = sd_limit * std_diff + lower = mean_diff - limit_of_agreement + upper = mean_diff + limit_of_agreement + for j, lim in enumerate([lower, upper]): + ax.axhline(lim, **limit_lines_kwds) + ax.annotate('{} (-{}SD)'.format(np.round(lower, 2), sd_limit), + xy=(0.99, 0.05), + horizontalalignment='right', + verticalalignment='bottom', + fontsize=14, + xycoords='axes fraction') + ax.annotate('{} (+{}SD)'.format(np.round(upper, 2), sd_limit), + xy=(0.99, 0.925), + horizontalalignment='right', + fontsize=14, + xycoords='axes fraction') + elif sd_limit == 0: + pass + + ax.set_ylabel('Difference', fontsize=15) + ax.set_xlabel('Means', fontsize=15) + ax.tick_params(labelsize=13) + plt.tight_layout() + return ax
A new (and small function) `bland_altman_plot()` to produce a [Bland-Altman plot](https://en.wikipedia.org/wiki/Bland%E2%80%93Altman_plot).
https://api.github.com/repos/pandas-dev/pandas/pulls/18135
2017-11-06T07:46:51Z
2017-11-06T16:05:55Z
null
2017-11-06T16:05:55Z
ENH: add Bland-Altman plot
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 54f87febdc214..f65cdca0fff32 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -571,3 +571,113 @@ def r(h): ax.legend() ax.grid() return ax + +def bland_altman_plot(m1, m2, + sd_limit=1.96, + ax=None, + scatter_kwds=None, + mean_line_kwds=None, + limit_lines_kwds=None): + """ + Bland-Altman Plot. + + A Bland-Altman plot is a graphical method to analyze the differences between + two methods of measurement. The mean of the measures is plotted against their + difference. + + Parameters + ---------- + m1, m2: pandas Series or array-like + + sd_limit : float, default 1.96 + The limit of agreements expressed in terms of the standard deviation of + the differences. If `md` is the mean of the differences, and `sd` is the + standard deviation of those differences, then the limits of agreement + that will be plotted will be + md ± sd_limit * sd + The default of 1.96 will produce 95% confidence intervals for the means + of the differences. + + ax: matplotlib.axis, optional + matplotlib axis object to plot on. + + scatter_kwargs: keywords + Options to to style the scatter plot. Accepts any keywords for the + matplotlib Axes.scatter plotting method + + mean_line_kwds: keywords + Options to to style the scatter plot. Accepts any keywords for the + matplotlib Axes.axhline plotting method + + limit_lines_kwds: keywords + Options to to style the scatter plot. 
Accepts any keywords for the + matplotlib Axes.axhline plotting method + + Returns + ------- + ax: matplotlib Axis object + """ + + import numpy as np + import matplotlib as mpl + import matplotlib.pyplot as plt + + if len(m1) != len(m2): + raise ValueError('m1 does not have the same length as m2.') + if sd_limit < 0: + raise ValueError('sd_limit ({}) cannot be less than 0.'.format(sd_limit)) + m1 = pd.Series(m1) + m2 = pd.Series(m2) + n_obs = len(m1) + means = np.mean([m1, m2], axis=0) + diffs = m1 - m2 + mean_diff = np.mean(diffs) + std_diff = np.std(diffs, axis=0) + + if ax == None: + ax = plt.gca() + + scatter_kwds = scatter_kwds or {} + if 's' not in scatter_kwds: + scatter_kwds['s'] = 20 + mean_line_kwds = mean_line_kwds or {} + limit_lines_kwds = limit_lines_kwds or {} + for kwds in [mean_line_kwds, limit_lines_kwds]: + if 'color' not in kwds: + kwds['color'] = 'gray' + if 'linewidth' not in kwds: + kwds['linewidth'] = 1 + if 'linestyle' not in mean_line_kwds: + kwds['linestyle'] = '--' + if 'linestyle' not in limit_lines_kwds: + kwds['linestyle'] = ':' + + ax.scatter(means, diffs, **scatter_kwds) + half_ylim = np.max( [np.min(diffs), np.max(diffs)] ) * 1.5 + ax.set_ylim (-half_ylim, half_ylim) + ax.axhline(mean_diff, **mean_line_kwds) # mean line + if sd_limit > 0: + limit_of_agreement = sd_limit * std_diff + lower = mean_diff - limit_of_agreement + upper = mean_diff + limit_of_agreement + for j, lim in enumerate( [lower, upper] ): + ax.axhline(lim, **limit_lines_kwds) + ax.annotate('{} (-{}SD)'.format(np.round(lower,2), sd_limit), + xy=(0.99, 0.05), + horizontalalignment='right', + verticalalignment='bottom', + fontsize = 14, + xycoords='axes fraction') + ax.annotate('{} (+{}SD)'.format(np.round(upper,2), sd_limit), + xy=(0.99, 0.925), + horizontalalignment='right', + fontsize = 14, + xycoords='axes fraction') + elif sd_limit == 0: + pass + + ax.set_ylabel('Difference', fontsize = 15) + ax.set_xlabel('Means', fontsize = 15) + ax.tick_params(labelsize=13) + 
plt.tight_layout() + return ax
I have written a small function to produce a Bland-Altman plot. More info re: Bland-Altman plot. https://en.wikipedia.org/wiki/Bland%E2%80%93Altman_plot
https://api.github.com/repos/pandas-dev/pandas/pulls/18134
2017-11-06T07:32:41Z
2017-11-06T07:33:41Z
null
2017-11-06T07:33:41Z
parametrize the first few arithmetic tests
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index e078413c9398c..2f788a116c0e5 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -14,77 +14,118 @@ date_range) +@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', + 'US/Eastern', 'dateutil/Asia/Singapore', + 'dateutil/US/Pacific']) +def tz(request): + return request.param + + +@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2), + np.timedelta64(2, 'h'), Timedelta(hours=2)], + ids=str) +def delta(request): + # Several ways of representing two hours + return request.param + + +@pytest.fixture( + params=[ + datetime(2011, 1, 1), + DatetimeIndex(['2011-01-01', '2011-01-02']), + DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'), + np.datetime64('2011-01-01'), + Timestamp('2011-01-01')], + ids=lambda x: type(x).__name__) +def addend(request): + return request.param + + class TestDatetimeIndexArithmetic(object): - tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore', - 'dateutil/US/Pacific'] - - def test_add_iadd(self): - for tz in self.tz: - - # offset - offsets = [pd.offsets.Hour(2), timedelta(hours=2), - np.timedelta64(2, 'h'), Timedelta(hours=2)] - - for delta in offsets: - rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) - result = rng + delta - expected = pd.date_range('2000-01-01 02:00', - '2000-02-01 02:00', tz=tz) - tm.assert_index_equal(result, expected) - rng += delta - tm.assert_index_equal(rng, expected) - - # int - rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, - tz=tz) - result = rng + 1 - expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, - tz=tz) - tm.assert_index_equal(result, expected) - rng += 1 - tm.assert_index_equal(rng, expected) + def test_dti_add_timestamp_raises(self): idx = DatetimeIndex(['2011-01-01', '2011-01-02']) msg = "cannot add DatetimeIndex and Timestamp" with 
tm.assert_raises_regex(TypeError, msg): idx + Timestamp('2011-01-01') + def test_dti_radd_timestamp_raises(self): + idx = DatetimeIndex(['2011-01-01', '2011-01-02']) + msg = "cannot add DatetimeIndex and Timestamp" with tm.assert_raises_regex(TypeError, msg): Timestamp('2011-01-01') + idx - def test_sub_isub(self): - for tz in self.tz: - - # offset - offsets = [pd.offsets.Hour(2), timedelta(hours=2), - np.timedelta64(2, 'h'), Timedelta(hours=2)] - - for delta in offsets: - rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) - expected = pd.date_range('1999-12-31 22:00', - '2000-01-31 22:00', tz=tz) - - result = rng - delta - tm.assert_index_equal(result, expected) - rng -= delta - tm.assert_index_equal(rng, expected) - - # int - rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, - tz=tz) - result = rng - 1 - expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, - tz=tz) - tm.assert_index_equal(result, expected) - rng -= 1 - tm.assert_index_equal(rng, expected) - - @pytest.mark.parametrize('addend', [ - datetime(2011, 1, 1), - DatetimeIndex(['2011-01-01', '2011-01-02']), - DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'), - np.datetime64('2011-01-01'), - Timestamp('2011-01-01')]) + # ------------------------------------------------------------- + # Binary operations DatetimeIndex and int + + def test_dti_add_int(self, tz): + rng = pd.date_range('2000-01-01 09:00', freq='H', + periods=10, tz=tz) + result = rng + 1 + expected = pd.date_range('2000-01-01 10:00', freq='H', + periods=10, tz=tz) + tm.assert_index_equal(result, expected) + + def test_dti_iadd_int(self, tz): + rng = pd.date_range('2000-01-01 09:00', freq='H', + periods=10, tz=tz) + expected = pd.date_range('2000-01-01 10:00', freq='H', + periods=10, tz=tz) + rng += 1 + tm.assert_index_equal(rng, expected) + + def test_dti_sub_int(self, tz): + rng = pd.date_range('2000-01-01 09:00', freq='H', + periods=10, tz=tz) + result = rng - 1 + expected = 
pd.date_range('2000-01-01 08:00', freq='H', + periods=10, tz=tz) + tm.assert_index_equal(result, expected) + + def test_dti_isub_int(self, tz): + rng = pd.date_range('2000-01-01 09:00', freq='H', + periods=10, tz=tz) + expected = pd.date_range('2000-01-01 08:00', freq='H', + periods=10, tz=tz) + rng -= 1 + tm.assert_index_equal(rng, expected) + + # ------------------------------------------------------------- + # Binary operations DatetimeIndex and timedelta-like + + def test_dti_add_timedeltalike(self, tz, delta): + rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) + result = rng + delta + expected = pd.date_range('2000-01-01 02:00', + '2000-02-01 02:00', tz=tz) + tm.assert_index_equal(result, expected) + + def test_dti_iadd_timedeltalike(self, tz, delta): + rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) + expected = pd.date_range('2000-01-01 02:00', + '2000-02-01 02:00', tz=tz) + rng += delta + tm.assert_index_equal(rng, expected) + + def test_dti_sub_timedeltalike(self, tz, delta): + rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) + expected = pd.date_range('1999-12-31 22:00', + '2000-01-31 22:00', tz=tz) + result = rng - delta + tm.assert_index_equal(result, expected) + + def test_dti_isub_timedeltalike(self, tz, delta): + rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) + expected = pd.date_range('1999-12-31 22:00', + '2000-01-31 22:00', tz=tz) + rng -= delta + tm.assert_index_equal(rng, expected) + + # ------------------------------------------------------------- + # Binary Operations DatetimeIndex and datetime-like + # TODO: A couple other tests belong in this section. Move them in + # A PR where there isn't already a giant diff. 
+ def test_add_datetimelike_and_dti(self, addend): # GH#9631 dti = DatetimeIndex(['2011-01-01', '2011-01-02']) @@ -95,12 +136,6 @@ def test_add_datetimelike_and_dti(self, addend): with tm.assert_raises_regex(TypeError, msg): addend + dti - @pytest.mark.parametrize('addend', [ - datetime(2011, 1, 1), - DatetimeIndex(['2011-01-01', '2011-01-02']), - DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'), - np.datetime64('2011-01-01'), - Timestamp('2011-01-01')]) def test_add_datetimelike_and_dti_tz(self, addend): # GH#9631 dti_tz = DatetimeIndex(['2011-01-01', @@ -112,6 +147,8 @@ def test_add_datetimelike_and_dti_tz(self, addend): with tm.assert_raises_regex(TypeError, msg): addend + dti_tz + # ------------------------------------------------------------- + def test_sub_dti_dti(self): # previously performed setop (deprecated in 0.16.0), now changed to # return subtraction -> TimeDeltaIndex (GH ...) diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 514702e15f7e1..0b60ca6e8a720 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -12,9 +12,21 @@ Timestamp, Timedelta) +@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2), + np.timedelta64(2, 'h'), Timedelta(hours=2)], + ids=str) +def delta(request): + # Several ways of representing two hours + return request.param + + +@pytest.fixture(params=['B', 'D']) +def freq(request): + return request.param + + class TestTimedeltaIndexArithmetic(object): _holder = TimedeltaIndex - _multiprocess_can_split_ = True # TODO: Split by ops, better name def test_numeric_compat(self): @@ -88,62 +100,91 @@ def test_ufunc_coercions(self): tm.assert_index_equal(result, exp) assert result.freq is None - def test_add_iadd(self): - # only test adding/sub offsets as + is now numeric - - # offset - offsets = [pd.offsets.Hour(2), timedelta(hours=2), - np.timedelta64(2, 'h'), 
Timedelta(hours=2)] - - for delta in offsets: - rng = timedelta_range('1 days', '10 days') - result = rng + delta - expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00', - freq='D') - tm.assert_index_equal(result, expected) - rng += delta - tm.assert_index_equal(rng, expected) + # ------------------------------------------------------------- + # Binary operations TimedeltaIndex and integer - # int + def test_tdi_add_int(self): rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = rng + 1 expected = timedelta_range('1 days 10:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) + + def test_tdi_iadd_int(self): + rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) + expected = timedelta_range('1 days 10:00:00', freq='H', periods=10) rng += 1 tm.assert_index_equal(rng, expected) - def test_sub_isub(self): - # only test adding/sub offsets as - is now numeric - - # offset - offsets = [pd.offsets.Hour(2), timedelta(hours=2), - np.timedelta64(2, 'h'), Timedelta(hours=2)] - - for delta in offsets: - rng = timedelta_range('1 days', '10 days') - result = rng - delta - expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00') - tm.assert_index_equal(result, expected) - rng -= delta - tm.assert_index_equal(rng, expected) - - # int + def test_tdi_sub_int(self): rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = rng - 1 expected = timedelta_range('1 days 08:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) + + def test_tdi_isub_int(self): + rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) + expected = timedelta_range('1 days 08:00:00', freq='H', periods=10) rng -= 1 tm.assert_index_equal(rng, expected) + # ------------------------------------------------------------- + # Binary operations TimedeltaIndex and timedelta-like + + def test_tdi_add_timedeltalike(self, delta): + # only test adding/sub offsets as + is now numeric + rng = timedelta_range('1 days', '10 
days') + result = rng + delta + expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00', + freq='D') + tm.assert_index_equal(result, expected) + + def test_tdi_iadd_timedeltalike(self, delta): + # only test adding/sub offsets as + is now numeric + rng = timedelta_range('1 days', '10 days') + expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00', + freq='D') + rng += delta + tm.assert_index_equal(rng, expected) + + def test_tdi_sub_timedeltalike(self, delta): + # only test adding/sub offsets as - is now numeric + rng = timedelta_range('1 days', '10 days') + result = rng - delta + expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00') + tm.assert_index_equal(result, expected) + + def test_tdi_isub_timedeltalike(self, delta): + # only test adding/sub offsets as - is now numeric + rng = timedelta_range('1 days', '10 days') + expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00') + rng -= delta + tm.assert_index_equal(rng, expected) + + # ------------------------------------------------------------- + # Binary operations TimedeltaIndex and datetime-like + + def test_tdi_sub_timestamp_raises(self): idx = TimedeltaIndex(['1 day', '2 day']) msg = "cannot subtract a datelike from a TimedeltaIndex" with tm.assert_raises_regex(TypeError, msg): idx - Timestamp('2011-01-01') + def test_tdi_add_timestamp(self): + idx = TimedeltaIndex(['1 day', '2 day']) + + result = idx + Timestamp('2011-01-01') + expected = DatetimeIndex(['2011-01-02', '2011-01-03']) + tm.assert_index_equal(result, expected) + + def test_tdi_radd_timestamp(self): + idx = TimedeltaIndex(['1 day', '2 day']) + result = Timestamp('2011-01-01') + idx expected = DatetimeIndex(['2011-01-02', '2011-01-03']) tm.assert_index_equal(result, expected) + # ------------------------------------------------------------- + # TODO: Split by operation, better name def test_ops_compat(self): @@ -634,7 +675,6 @@ def test_tdi_ops_attributes(self): # TODO: Needs more informative name, probably 
split up into # more targeted tests - @pytest.mark.parametrize('freq', ['B', 'D']) def test_timedelta(self, freq): index = date_range('1/1/2000', periods=50, freq=freq) diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 61f0c992225c6..56bc29a7bca1e 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """ Testing that we work in the downstream packages """
Start parametrizing arithmetic tests for DatetimeIndex and TimedeltaIndex, with an eye towards re-usability. Labelling tests with the types of operands and the operations, e.g. "test_dti_add_int" for DatetimeIndex + integer. This make it really easy to tell which combinations are and aren't implemented.
https://api.github.com/repos/pandas-dev/pandas/pulls/18133
2017-11-06T05:03:25Z
2017-11-09T18:25:42Z
2017-11-09T18:25:42Z
2017-12-08T19:38:54Z
Tslibs testing6
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index e078413c9398c..7de56bd460f90 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -14,6 +14,35 @@ date_range) +dtinat = pd.to_datetime(['now', 'NaT']) +dtimax = pd.to_datetime(['now', Timestamp.max]) +dtimin = pd.to_datetime(['now', Timestamp.min]) + +tspos = Timestamp('1980-01-01') +ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + +tsneg = Timestamp('1950-01-01') +ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + +tdpos = Timedelta('1h') +td_pos_variants = [tdpos, + tdpos.to_pytimedelta(), + tdpos.to_timedelta64().astype('timedelta64[ns]'), + tdpos.to_timedelta64().astype('timedelta64[h]')] + +tdneg = Timedelta('-1h') +td_neg_variants = [tdneg, + tdneg.to_pytimedelta(), + tdneg.to_timedelta64().astype('timedelta64[ns]'), + tdneg.to_timedelta64().astype('timedelta64[h]')] + + class TestDatetimeIndexArithmetic(object): tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore', 'dateutil/US/Pacific'] @@ -199,35 +228,49 @@ def test_ufunc_coercions(self): tm.assert_index_equal(result, exp) assert result.freq == 'D' - def test_datetimeindex_sub_timestamp_overflow(self): - dtimax = pd.to_datetime(['now', pd.Timestamp.max]) - dtimin = pd.to_datetime(['now', pd.Timestamp.min]) - - tsneg = Timestamp('1950-01-01') - ts_neg_variants = [tsneg, - tsneg.to_pydatetime(), - tsneg.to_datetime64().astype('datetime64[ns]'), - tsneg.to_datetime64().astype('datetime64[D]')] + # ------------------------------------------------------------------ + # GH17991 checking for overflows and NaT masking on arithmetic ops + + def test_dti_add_timedelta_nat_masking(self): + # Checking for NaTs 
and checking that we don't get an OverflowError + for variant in td_pos_variants + td_neg_variants: + res = dtinat + variant + assert res[1] is NaT + + def test_dti_sub_timedelta_nat_masking(self): + # Checking for NaTs and checking that we don't get an OverflowError + for variant in td_pos_variants + td_neg_variants: + res = dtinat - variant + assert res[1] is NaT + + def test_dti_sub_timestamp_nat_masking(self): + # Checking for NaTs and checking that we don't get an OverflowError + for variant in ts_pos_variants + ts_neg_variants: + res = dtinat - variant + assert res[1] is NaT + + def test_dti_add_timedelta_overflow(self): + for variant in td_pos_variants: + with pytest.raises(OverflowError): + dtimax + variant - tspos = Timestamp('1980-01-01') - ts_pos_variants = [tspos, - tspos.to_pydatetime(), - tspos.to_datetime64().astype('datetime64[ns]'), - tspos.to_datetime64().astype('datetime64[D]')] + for variant in td_neg_variants: + with pytest.raises(OverflowError): + dtimin + variant - for variant in ts_neg_variants: + def test_dti_sub_timedelta_overflow(self): + for variant in td_neg_variants: with pytest.raises(OverflowError): dtimax - variant - expected = pd.Timestamp.max.value - tspos.value - for variant in ts_pos_variants: - res = dtimax - variant - assert res[1].value == expected + for variant in td_pos_variants: + with pytest.raises(OverflowError): + dtimin - variant - expected = pd.Timestamp.min.value - tsneg.value + def test_dti_sub_timestamp_overflow(self): for variant in ts_neg_variants: - res = dtimin - variant - assert res[1].value == expected + with pytest.raises(OverflowError): + dtimax - variant for variant in ts_pos_variants: with pytest.raises(OverflowError): diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 66aa5d2db6569..a0201bbe5509d 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -12,18 +12,47 @@ class 
TestPeriodIndexArithmetic(object): - def test_add_iadd(self): + + # ------------------------------------------------------------------ + # PeriodIndex __add__ PeriodIndex operations + + # Note: This test also covers __radd__ + def test_pi_add_pi_raises(self): rng = pd.period_range('1/1/2000', freq='D', periods=5) other = pd.period_range('1/6/2000', freq='D', periods=5) - # previously performed setop union, now raises TypeError (GH14164) with pytest.raises(TypeError): rng + other + def test_pi_add_pi_raises(self): + rng = pd.period_range('1/1/2000', freq='D', periods=5) + other = pd.period_range('1/6/2000', freq='D', periods=5) + # previously performed setop union, now raises TypeError (GH14164) with pytest.raises(TypeError): rng += other + # TODO: Follow-up assertion that rng was not altered in-place? + + # Note: This test also covers __rsub__ + def test_pi_sub_pi_raises(self): + # previously performed setop, now raises TypeError (GH14164) + # TODO needs to wait on #13077 for decision on result type + rng = pd.period_range('1/1/2000', freq='D', periods=5) + other = pd.period_range('1/6/2000', freq='D', periods=5) + with pytest.raises(TypeError): + rng - other - # offset + def test_pi_isub_pi_raises(self): + # previously performed setop, now raises TypeError (GH14164) + # TODO needs to wait on #13077 for decision on result type + rng = pd.period_range('1/1/2000', freq='D', periods=5) + other = pd.period_range('1/6/2000', freq='D', periods=5) + with pytest.raises(TypeError): + rng -= other + # TODO: Follow-up assertion that rng was not altered in-place? 
+ + # ------------------------------------------------------------------ + + def test_add_iadd(self): # DateOffset rng = pd.period_range('2014', '2024', freq='A') result = rng + pd.offsets.YearEnd(5) @@ -121,19 +150,6 @@ def test_sub(self): tm.assert_index_equal(result, exp) def test_sub_isub(self): - - # previously performed setop, now raises TypeError (GH14164) - # TODO needs to wait on #13077 for decision on result type - rng = pd.period_range('1/1/2000', freq='D', periods=5) - other = pd.period_range('1/6/2000', freq='D', periods=5) - - with pytest.raises(TypeError): - rng - other - - with pytest.raises(TypeError): - rng -= other - - # offset # DateOffset rng = pd.period_range('2014', '2024', freq='A') result = rng - pd.offsets.YearEnd(5) diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 514702e15f7e1..01a002a5b9f0b 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -12,6 +12,35 @@ Timestamp, Timedelta) +tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT']) +tdimax = pd.to_timedelta(['24658 days 11:15:00', Timedelta.max]) +tdimin = pd.to_timedelta(['24658 days 11:15:00', Timedelta.min]) + +tspos = Timestamp('1980-01-01') +ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + +tsneg = Timestamp('1950-01-01') +ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + +tdpos = Timedelta('1h') +td_pos_variants = [tdpos, + tdpos.to_pytimedelta(), + tdpos.to_timedelta64().astype('timedelta64[ns]'), + tdpos.to_timedelta64().astype('timedelta64[h]')] + +tdneg = Timedelta('-1h') +td_neg_variants = [tdneg, + tdneg.to_pytimedelta(), + tdneg.to_timedelta64().astype('timedelta64[ns]'), + tdneg.to_timedelta64().astype('timedelta64[h]')] + + 
class TestTimedeltaIndexArithmetic(object): _holder = TimedeltaIndex _multiprocess_can_split_ = True @@ -576,25 +605,54 @@ def test_add_overflow(self): to_timedelta(['7 seconds', pd.NaT, '4 hours'])) tm.assert_index_equal(result, exp) - def test_timedeltaindex_add_timestamp_nat_masking(self): - # GH17991 checking for overflow-masking with NaT - tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + # ------------------------------------------------------------- + # GH17991 checking for overflows and NaT masking on arithmetic ops - tsneg = Timestamp('1950-01-01') - ts_neg_variants = [tsneg, - tsneg.to_pydatetime(), - tsneg.to_datetime64().astype('datetime64[ns]'), - tsneg.to_datetime64().astype('datetime64[D]')] + def test_tdi_add_timedelta_nat_masking(self): + # Checking for NaTs and checking that we don't get an OverflowError + for variant in td_pos_variants + td_neg_variants: + res = tdinat + variant + assert res[1] is NaT - tspos = Timestamp('1980-01-01') - ts_pos_variants = [tspos, - tspos.to_pydatetime(), - tspos.to_datetime64().astype('datetime64[ns]'), - tspos.to_datetime64().astype('datetime64[D]')] + def test_tdi_sub_timedelta_nat_masking(self): + # Checking for NaTs and checking that we don't get an OverflowError + for variant in td_pos_variants + td_neg_variants: + res = tdinat - variant + assert res[1] is NaT + def test_tdi_add_timestamp_nat_masking(self): for variant in ts_neg_variants + ts_pos_variants: res = tdinat + variant - assert res[1] is pd.NaT + assert res[1] is NaT + + def test_tdi_add_timestamp_overflow(self): + for variant in ts_pos_variants: + with pytest.raises(OverflowError): + tdimax + variant + + for variant in ts_neg_variants: + with pytest.raises(OverflowError): + tdimin + variant + + def test_tdi_add_timedelta_overflow(self): + for variant in td_pos_variants: + with pytest.raises(OverflowError): + tdimax + variant + + for variant in td_neg_variants: + with pytest.raises(OverflowError): + tdimin + variant + + def 
test_tdi_sub_timedelta_overflow(self): + for variant in td_neg_variants: + with pytest.raises(OverflowError): + tdimax - variant + + for variant in td_pos_variants: + with pytest.raises(OverflowError): + tdimin - variant + + # ------------------------------------------------------------- def test_tdi_ops_attributes(self): rng = timedelta_range('2 days', periods=5, freq='2D', name='x') diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 17c818779c76d..d791d1edb47c7 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -125,6 +125,49 @@ def test_ops(self): pytest.raises(TypeError, lambda: td + 2) pytest.raises(TypeError, lambda: td - 2) + def test_timedelta_arithmetic(self): + data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]') + deltas = [timedelta(days=1), Timedelta(1, unit='D')] + for delta in deltas: + result_method = data.add(delta) + result_operator = data + delta + expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + + result_method = data.sub(delta) + result_operator = data - delta + expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + # GH 9396 + result_method = data.div(delta) + result_operator = data / delta + expected = pd.Series([np.nan, 32.], dtype='float64') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + + def test_arithmetic_overflow(self): + with pytest.raises(OverflowError): + pd.Timestamp('1700-01-01') + Timedelta(13 * 19999, unit='D') + + with pytest.raises(OverflowError): + pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999) + + def test_ops_error_str(self): + # GH 13624 + td = Timedelta('1 day') + + for left, right in [(td, 'a'), ('a', td)]: + with 
pytest.raises(TypeError): + left + right + + with pytest.raises(TypeError): + left > right + + assert not left == right + assert left != right + class TestTimedeltas(object): _multiprocess_can_split_ = True @@ -415,42 +458,6 @@ def check(value): assert tup.microseconds == 999 assert tup.nanoseconds == 0 - def test_nat_converters(self): - assert to_timedelta('nat', box=False).astype('int64') == iNaT - assert to_timedelta('nan', box=False).astype('int64') == iNaT - - def testit(unit, transform): - - # array - result = to_timedelta(np.arange(5), unit=unit) - expected = TimedeltaIndex([np.timedelta64(i, transform(unit)) - for i in np.arange(5).tolist()]) - tm.assert_index_equal(result, expected) - - # scalar - result = to_timedelta(2, unit=unit) - expected = Timedelta(np.timedelta64(2, transform(unit)).astype( - 'timedelta64[ns]')) - assert result == expected - - # validate all units - # GH 6855 - for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']: - testit(unit, lambda x: x.upper()) - for unit in ['days', 'day', 'Day', 'Days']: - testit(unit, lambda x: 'D') - for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US', - 'NS']: - testit(unit, lambda x: x.lower()) - - # offsets - - # m - testit('T', lambda x: 'm') - - # ms - testit('L', lambda x: 'ms') - def test_numeric_conversions(self): assert ct(0) == np.timedelta64(0, 'ns') assert ct(10) == np.timedelta64(10, 'ns') @@ -502,55 +509,6 @@ def test_round(self): for freq in ['Y', 'M', 'foobar']: pytest.raises(ValueError, lambda: t1.round(freq)) - t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us') - t2 = -1 * t1 - t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s') - t1c = pd.TimedeltaIndex([1, 1, 1], unit='D') - - # note that negative times round DOWN! 
so don't give whole numbers - for (freq, s1, s2) in [('N', t1, t2), - ('U', t1, t2), - ('L', t1a, - TimedeltaIndex(['-1 days +00:00:00', - '-2 days +23:58:58', - '-2 days +23:57:56'], - dtype='timedelta64[ns]', - freq=None) - ), - ('S', t1a, - TimedeltaIndex(['-1 days +00:00:00', - '-2 days +23:58:58', - '-2 days +23:57:56'], - dtype='timedelta64[ns]', - freq=None) - ), - ('12T', t1c, - TimedeltaIndex(['-1 days', - '-1 days', - '-1 days'], - dtype='timedelta64[ns]', - freq=None) - ), - ('H', t1c, - TimedeltaIndex(['-1 days', - '-1 days', - '-1 days'], - dtype='timedelta64[ns]', - freq=None) - ), - ('d', t1c, - pd.TimedeltaIndex([-1, -1, -1], unit='D') - )]: - - r1 = t1.round(freq) - tm.assert_index_equal(r1, s1) - r2 = t2.round(freq) - tm.assert_index_equal(r2, s2) - - # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: t1.round(freq)) - def test_contains(self): # Checking for any NaT-like objects # GH 13603 @@ -676,13 +634,6 @@ def test_timedelta_hash_equality(self): d = {td: 2} assert d[v] == 2 - tds = timedelta_range('1 second', periods=20) - assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds) - - # python timedeltas drop ns resolution - ns_td = Timedelta(1, 'ns') - assert hash(ns_td) != hash(ns_td.to_pytimedelta()) - def test_implementation_limits(self): min_td = Timedelta(Timedelta.min) max_td = Timedelta(Timedelta.max) @@ -711,36 +662,6 @@ def test_implementation_limits(self): with pytest.raises(OverflowError): Timedelta(max_td.value + 1, 'ns') - def test_timedelta_arithmetic(self): - data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]') - deltas = [timedelta(days=1), Timedelta(1, unit='D')] - for delta in deltas: - result_method = data.add(delta) - result_operator = data + delta - expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]') - tm.assert_series_equal(result_operator, expected) - tm.assert_series_equal(result_method, expected) - - result_method = data.sub(delta) - result_operator = data - 
delta - expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]') - tm.assert_series_equal(result_operator, expected) - tm.assert_series_equal(result_method, expected) - # GH 9396 - result_method = data.div(delta) - result_operator = data / delta - expected = pd.Series([np.nan, 32.], dtype='float64') - tm.assert_series_equal(result_operator, expected) - tm.assert_series_equal(result_method, expected) - - def test_arithmetic_overflow(self): - - with pytest.raises(OverflowError): - pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D') - - with pytest.raises(OverflowError): - pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999) - def test_apply_to_timedelta(self): timedelta_NaT = pd.to_timedelta('NaT') @@ -758,18 +679,6 @@ def test_apply_to_timedelta(self): # Can't compare until apply on a Series gives the correct dtype # assert_series_equal(a, b) - def test_components(self): - rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s') - rng.components - - # with nat - s = Series(rng) - s[1] = np.nan - - result = s.dt.components - assert not result.iloc[0].isna().all() - assert result.iloc[1].isna().all() - def test_isoformat(self): td = Timedelta(days=6, minutes=50, seconds=3, milliseconds=10, microseconds=10, nanoseconds=12) @@ -804,17 +713,114 @@ def test_isoformat(self): expected = 'P0DT0H1M0S' assert result == expected - def test_ops_error_str(self): - # GH 13624 - td = Timedelta('1 day') - for l, r in [(td, 'a'), ('a', td)]: +# TODO: Move these to non-scalar tests +class TestTimedeltaIndex(object): + _multiprocess_can_split_ = True - with pytest.raises(TypeError): - l + r + def test_nat_converters(self): + assert to_timedelta('nat', box=False).astype('int64') == iNaT + assert to_timedelta('nan', box=False).astype('int64') == iNaT - with pytest.raises(TypeError): - l > r + def testit(unit, transform): + + # array + result = to_timedelta(np.arange(5), unit=unit) + expected = TimedeltaIndex([np.timedelta64(i, transform(unit)) + for i in 
np.arange(5).tolist()]) + tm.assert_index_equal(result, expected) + + # scalar + result = to_timedelta(2, unit=unit) + expected = Timedelta(np.timedelta64(2, transform(unit)).astype( + 'timedelta64[ns]')) + assert result == expected + + # validate all units + # GH 6855 + for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']: + testit(unit, lambda x: x.upper()) + for unit in ['days', 'day', 'Day', 'Days']: + testit(unit, lambda x: 'D') + for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US', + 'NS']: + testit(unit, lambda x: x.lower()) + + # offsets + + # m + testit('T', lambda x: 'm') + + # ms + testit('L', lambda x: 'ms') - assert not l == r - assert l != r + def test_timedelta_hash_equality(self): + # GH 11129 + tds = timedelta_range('1 second', periods=20) + assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds) + + # python timedeltas drop ns resolution + ns_td = Timedelta(1, 'ns') + assert hash(ns_td) != hash(ns_td.to_pytimedelta()) + + def test_components(self): + rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s') + rng.components + + # with nat + s = Series(rng) + s[1] = np.nan + + result = s.dt.components + assert not result.iloc[0].isna().all() + assert result.iloc[1].isna().all() + + def test_round(self): + t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us') + t2 = -1 * t1 + t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s') + t1c = pd.TimedeltaIndex([1, 1, 1], unit='D') + + # note that negative times round DOWN! 
so don't give whole numbers + for (freq, s1, s2) in [('N', t1, t2), + ('U', t1, t2), + ('L', t1a, + TimedeltaIndex(['-1 days +00:00:00', + '-2 days +23:58:58', + '-2 days +23:57:56'], + dtype='timedelta64[ns]', + freq=None) + ), + ('S', t1a, + TimedeltaIndex(['-1 days +00:00:00', + '-2 days +23:58:58', + '-2 days +23:57:56'], + dtype='timedelta64[ns]', + freq=None) + ), + ('12T', t1c, + TimedeltaIndex(['-1 days', + '-1 days', + '-1 days'], + dtype='timedelta64[ns]', + freq=None) + ), + ('H', t1c, + TimedeltaIndex(['-1 days', + '-1 days', + '-1 days'], + dtype='timedelta64[ns]', + freq=None) + ), + ('d', t1c, + pd.TimedeltaIndex([-1, -1, -1], unit='D') + )]: + + r1 = t1.round(freq) + tm.assert_index_equal(r1, s1) + r2 = t2.round(freq) + tm.assert_index_equal(r2, s2) + + # invalid + for freq in ['Y', 'M', 'foobar']: + pytest.raises(ValueError, lambda: t1.round(freq)) diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 4cd9a2fadeb32..141fc43ddeaa9 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -936,13 +936,6 @@ def test_compare_invalid(self): assert val != np.float64(1) assert val != np.int64(1) - # ops testing - df = DataFrame(np.random.randn(5, 2)) - a = df[0] - b = Series(np.random.randn(5)) - b.name = Timestamp('2000-01-01') - tm.assert_series_equal(a / b, 1 / (b / a)) - def test_cant_compare_tz_naive_w_aware(self): # see gh-1404 a = Timestamp('3/12/2012') @@ -1041,41 +1034,6 @@ def test_timestamp_compare_scalars(self): result = right_f(nat, rhs) assert result == expected - def test_timestamp_compare_series(self): - # make sure we can compare Timestamps on the right AND left hand side - # GH4982 - s = Series(date_range('20010101', periods=10), name='dates') - s_nat = s.copy(deep=True) - - s[0] = Timestamp('nat') - s[3] = Timestamp('nat') - - ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} - - for left, right in ops.items(): - left_f = getattr(operator, left) - 
right_f = getattr(operator, right) - - # no nats - expected = left_f(s, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s) - tm.assert_series_equal(result, expected) - - # nats - expected = left_f(s, Timestamp('nat')) - result = right_f(Timestamp('nat'), s) - tm.assert_series_equal(result, expected) - - # compare to timestamp with series containing nats - expected = left_f(s_nat, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s_nat) - tm.assert_series_equal(result, expected) - - # compare to nat with series containing nats - expected = left_f(s_nat, Timestamp('nat')) - result = right_f(Timestamp('nat'), s_nat) - tm.assert_series_equal(result, expected) - def test_is_leap_year(self): # GH 13727 for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: @@ -1205,15 +1163,6 @@ def test_timestamp_and_datetime(self): assert ((datetime(2013, 10, 12) - Timestamp(datetime(2013, 10, 13))).days == -1) - def test_timestamp_and_series(self): - timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', - tz='US/Eastern')) - first_timestamp = timestamp_series[0] - - delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')]) - assert_series_equal(timestamp_series - first_timestamp, delta_series) - assert_series_equal(first_timestamp - timestamp_series, -delta_series) - def test_addition_subtraction_types(self): # Assert on the types resulting from Timestamp +/- various date/time # objects @@ -1324,30 +1273,10 @@ def test_timestamp_to_datetime_explicit_dateutil(self): assert stamp == dtval assert stamp.tzinfo == dtval.tzinfo - def test_timestamp_fields(self): - # extra fields from DatetimeIndex like quarter and week - idx = tm.makeDateIndex(100) - - fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', - 'days_in_month', 'is_month_start', 'is_month_end', - 'is_quarter_start', 'is_quarter_end', 'is_year_start', - 'is_year_end', 'weekday_name'] - for f in fields: - expected = getattr(idx, f)[-1] - result = 
getattr(Timestamp(idx[-1]), f) - assert result == expected - - assert idx.freq == Timestamp(idx[-1], idx.freq).freq - assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr - def test_timestamp_date_out_of_range(self): pytest.raises(ValueError, Timestamp, '1676-01-01') pytest.raises(ValueError, Timestamp, '2263-01-01') - # see gh-1475 - pytest.raises(ValueError, DatetimeIndex, ['1400-01-01']) - pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) - def test_timestamp_repr(self): # pre-1900 stamp = Timestamp('1850-01-01', tz='US/Eastern') @@ -1386,80 +1315,6 @@ def test_timestamp_compare_with_early_datetime(self): assert stamp < datetime(2700, 1, 1) assert stamp <= datetime(2700, 1, 1) - def test_timestamp_equality(self): - - # GH 11034 - s = Series([Timestamp('2000-01-29 01:59:00'), 'NaT']) - result = s != s - assert_series_equal(result, Series([False, True])) - result = s != s[0] - assert_series_equal(result, Series([False, True])) - result = s != s[1] - assert_series_equal(result, Series([True, True])) - - result = s == s - assert_series_equal(result, Series([True, False])) - result = s == s[0] - assert_series_equal(result, Series([True, False])) - result = s == s[1] - assert_series_equal(result, Series([False, False])) - - def test_series_box_timestamp(self): - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng) - - assert isinstance(s[5], Timestamp) - - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng, index=rng) - assert isinstance(s[5], Timestamp) - - assert isinstance(s.iat[5], Timestamp) - - def test_frame_setitem_timestamp(self): - # 2155 - columns = DatetimeIndex(start='1/1/2012', end='2/1/2012', - freq=offsets.BDay()) - index = lrange(10) - data = DataFrame(columns=columns, index=index) - t = datetime(2012, 11, 1) - ts = Timestamp(t) - data[ts] = np.nan # works - - def test_to_html_timestamp(self): - rng = date_range('2000-01-01', periods=10) - df = DataFrame(np.random.randn(10, 4), index=rng) - - 
result = df.to_html() - assert '2000-01-01' in result - - def test_series_map_box_timestamps(self): - # #2689, #2627 - s = Series(date_range('1/1/2000', periods=10)) - - def f(x): - return (x.hour, x.day, x.month) - - # it works! - s.map(f) - s.apply(f) - DataFrame(s).applymap(f) - - def test_dti_slicing(self): - dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') - dti2 = dti[[1, 3, 5]] - - v1 = dti2[0] - v2 = dti2[1] - v3 = dti2[2] - - assert v1 == Timestamp('2/28/2005') - assert v2 == Timestamp('4/30/2005') - assert v3 == Timestamp('6/30/2005') - - # don't carry freq through irregular slicing - assert dti2.freq is None - def test_woy_boundary(self): # make sure weeks at year boundaries are correct d = datetime(2013, 12, 31) @@ -1570,3 +1425,158 @@ def test_date_range_timestamp_equiv_preserve_frequency(self): ts = Timestamp('2014-03-05', freq='D') assert timestamp_instance == ts + + +# TODO: Move up to non-scalar test file +class TestTimestampInSeries(object): + def test_timestamp_and_series(self): + timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', + tz='US/Eastern')) + first_timestamp = timestamp_series[0] + + delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')]) + assert_series_equal(timestamp_series - first_timestamp, delta_series) + assert_series_equal(first_timestamp - timestamp_series, -delta_series) + + def test_timestamp_compare_series(self): + # make sure we can compare Timestamps on the right AND left hand side + # GH4982 + s = Series(date_range('20010101', periods=10), name='dates') + s_nat = s.copy(deep=True) + + s[0] = Timestamp('nat') + s[3] = Timestamp('nat') + + ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} + + for left, right in ops.items(): + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # no nats + expected = left_f(s, Timestamp('20010109')) + result = right_f(Timestamp('20010109'), s) + tm.assert_series_equal(result, expected) + + # nats + expected = 
left_f(s, Timestamp('nat')) + result = right_f(Timestamp('nat'), s) + tm.assert_series_equal(result, expected) + + # compare to timestamp with series containing nats + expected = left_f(s_nat, Timestamp('20010109')) + result = right_f(Timestamp('20010109'), s_nat) + tm.assert_series_equal(result, expected) + + # compare to nat with series containing nats + expected = left_f(s_nat, Timestamp('nat')) + result = right_f(Timestamp('nat'), s_nat) + tm.assert_series_equal(result, expected) + + def test_timestamp_equality(self): + # GH 11034 + s = Series([Timestamp('2000-01-29 01:59:00'), 'NaT']) + result = s != s + assert_series_equal(result, Series([False, True])) + result = s != s[0] + assert_series_equal(result, Series([False, True])) + result = s != s[1] + assert_series_equal(result, Series([True, True])) + + result = s == s + assert_series_equal(result, Series([True, False])) + result = s == s[0] + assert_series_equal(result, Series([True, False])) + result = s == s[1] + assert_series_equal(result, Series([False, False])) + + def test_series_box_timestamp(self): + rng = date_range('20090415', '20090519', freq='B') + s = Series(rng) + + assert isinstance(s[5], Timestamp) + + rng = date_range('20090415', '20090519', freq='B') + s = Series(rng, index=rng) + assert isinstance(s[5], Timestamp) + + assert isinstance(s.iat[5], Timestamp) + + +# TODO: Move up to non-scalar test file +class TestTimestampInDataFrame(object): + def test_compare_invalid(self): + # ops testing + df = DataFrame(np.random.randn(5, 2)) + a = df[0] + b = Series(np.random.randn(5)) + b.name = Timestamp('2000-01-01') + tm.assert_series_equal(a / b, 1 / (b / a)) + + def test_frame_setitem_timestamp(self): + # GH#2155 + columns = DatetimeIndex(start='1/1/2012', end='2/1/2012', + freq=offsets.BDay()) + index = lrange(10) + data = DataFrame(columns=columns, index=index) + t = datetime(2012, 11, 1) + ts = Timestamp(t) + data[ts] = np.nan # works + + def test_to_html_timestamp(self): + rng = 
date_range('2000-01-01', periods=10) + df = DataFrame(np.random.randn(10, 4), index=rng) + + result = df.to_html() + assert '2000-01-01' in result + + def test_series_map_box_timestamps(self): + # #2689, #2627 + s = Series(date_range('1/1/2000', periods=10)) + + def f(x): + return (x.hour, x.day, x.month) + + # it works! + s.map(f) + s.apply(f) + DataFrame(s).applymap(f) + + +# TODO: Move up to non-scalar test file +class TestDatetimeIndex(object): + def test_dti_slicing(self): + dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') + dti2 = dti[[1, 3, 5]] + + v1 = dti2[0] + v2 = dti2[1] + v3 = dti2[2] + + assert v1 == Timestamp('2/28/2005') + assert v2 == Timestamp('4/30/2005') + assert v3 == Timestamp('6/30/2005') + + # don't carry freq through irregular slicing + assert dti2.freq is None + + def test_timestamp_date_out_of_range(self): + # see gh-1475 + pytest.raises(ValueError, DatetimeIndex, ['1400-01-01']) + pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) + + def test_timestamp_fields(self): + # extra fields from DatetimeIndex like quarter and week + idx = tm.makeDateIndex(100) + + fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', + 'days_in_month', 'is_month_start', 'is_month_end', + 'is_quarter_start', 'is_quarter_end', 'is_year_start', + 'is_year_end', 'weekday_name'] + for f in fields: + expected = getattr(idx, f)[-1] + result = getattr(Timestamp(idx[-1]), f) + assert result == expected + + assert idx.freq == Timestamp(idx[-1], idx.freq).freq + assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
Kind of all over the place. There's a lot to be done in terms of making tests.scalars isolate scalar tests, etc. The added tests in two test_arithmetic files fill out the test matrix started in 17991
https://api.github.com/repos/pandas-dev/pandas/pulls/18132
2017-11-06T03:47:22Z
2017-11-06T04:17:49Z
null
2023-05-11T01:16:42Z
small fix in Doku
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c6f7f9ea2eb37..5f5f785111fb4 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5105,7 +5105,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): - ... df = df.append({'A'}: i}, ignore_index=True) + ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0
- [x] closes #18130 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18131
2017-11-06T03:25:31Z
2017-11-06T04:39:46Z
2017-11-06T04:39:45Z
2017-11-06T04:39:52Z
TST: Add another test for segfault in C engine
diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py index c68b2bf064d97..6d476e326213e 100644 --- a/pandas/tests/io/parser/c_parser_only.py +++ b/pandas/tests/io/parser/c_parser_only.py @@ -290,11 +290,11 @@ def test_empty_header_read(count): test_empty_header_read(count) def test_parse_trim_buffers(self): - # This test is part of a bugfix for issue #13703. It attmepts to + # This test is part of a bugfix for issue #13703. It attempts to # to stress the system memory allocator, to cause it to move the # stream buffer and either let the OS reclaim the region, or let # other memory requests of parser otherwise modify the contents - # of memory space, where it was formely located. + # of memory space, where it was formally located. # This test is designed to cause a `segfault` with unpatched # `tokenizer.c`. Sometimes the test fails on `segfault`, other # times it fails due to memory corruption, which causes the @@ -346,7 +346,7 @@ def test_parse_trim_buffers(self): # Generate the expected output: manually create the dataframe # by splitting by comma and repeating the `n_lines` times. - row = tuple(val_ if val_ else float("nan") + row = tuple(val_ if val_ else np.nan for val_ in record_.split(",")) expected = pd.DataFrame([row for _ in range(n_lines)], dtype=object, columns=None, index=None) @@ -359,6 +359,15 @@ def test_parse_trim_buffers(self): # Check for data corruption if there was no segfault tm.assert_frame_equal(result, expected) + # This extra test was added to replicate the fault in gh-5291. + # Force 'utf-8' encoding, so that `_string_convert` would take + # a different execution branch. + chunks_ = self.read_csv(StringIO(csv_data), header=None, + dtype=object, chunksize=chunksize, + encoding='utf_8') + result = pd.concat(chunks_, axis=0, ignore_index=True) + tm.assert_frame_equal(result, expected) + def test_internal_null_byte(self): # see gh-14012 #
Not sure why the original PR was closed, but let's bring it to completion. xref #13833. Closes #5291.
https://api.github.com/repos/pandas-dev/pandas/pulls/18128
2017-11-06T02:02:00Z
2017-11-06T17:42:09Z
2017-11-06T17:42:08Z
2017-11-06T17:42:52Z
BUG: Don't parse NA-values in index when requested
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 088168fd8a008..6044f25ca5147 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -79,6 +79,7 @@ I/O - Bug in class:`~pandas.io.stata.StataReader` not converting date/time columns with display formatting addressed (:issue:`17990`). Previously columns with display formatting were normally left as ordinal numbers and not converted to datetime objects. - Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`) +- Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) Plotting diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 438192b4b0a59..ae79d70d4cf0a 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1249,6 +1249,8 @@ def __init__(self, kwds): self.na_values = kwds.get('na_values') self.na_fvalues = kwds.get('na_fvalues') + self.na_filter = kwds.get('na_filter', False) + self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') self.as_recarray = kwds.get('as_recarray', False) @@ -1424,7 +1426,6 @@ def _make_index(self, data, alldata, columns, indexnamerow=False): elif not self._has_complex_date_col: index = self._get_simple_index(alldata, columns) index = self._agg_index(index) - elif self._has_complex_date_col: if not self._name_processed: (self.index_names, _, @@ -1504,8 +1505,12 @@ def _agg_index(self, index, try_parse_dates=True): if try_parse_dates and self._should_parse_dates(i): arr = self._date_conv(arr) - col_na_values = self.na_values - col_na_fvalues = self.na_fvalues + if self.na_filter: + col_na_values = self.na_values + col_na_fvalues = self.na_fvalues + else: + col_na_values = set() + col_na_fvalues = set() if isinstance(self.na_values, dict): 
col_name = self.index_names[i] @@ -2060,8 +2065,6 @@ def __init__(self, f, **kwds): self.names_passed = kwds['names'] or None - self.na_filter = kwds['na_filter'] - self.has_index_names = False if 'has_index_names' in kwds: self.has_index_names = kwds['has_index_names'] diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py index 7fbf174e19eee..8dc599b42ddc7 100644 --- a/pandas/tests/io/parser/na_values.py +++ b/pandas/tests/io/parser/na_values.py @@ -312,3 +312,21 @@ def test_empty_na_values_no_default_with_index(self): out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0) tm.assert_frame_equal(out, expected) + + def test_no_na_filter_on_index(self): + # see gh-5239 + data = "a,b,c\n1,,3\n4,5,6" + + # Don't parse NA-values in index when na_filter=False. + out = self.read_csv(StringIO(data), index_col=[1], na_filter=False) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index(["", "5"], name="b")) + tm.assert_frame_equal(out, expected) + + # Parse NA-values in index when na_filter=True. + out = self.read_csv(StringIO(data), index_col=[1], na_filter=True) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index([np.nan, 5.0], name="b")) + tm.assert_frame_equal(out, expected)
The `na_filter` parameter wasn't being respected when being applied on values in the index. Closes #5239.
https://api.github.com/repos/pandas-dev/pandas/pulls/18127
2017-11-05T23:33:24Z
2017-11-06T20:33:44Z
2017-11-06T20:33:44Z
2017-12-11T20:24:42Z
demonstrate similarity for discussion purposes
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 045580d393b26..1d73958b3202e 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2913,37 +2913,58 @@ def sparse_reindex(self, new_index): placement=self.mgr_locs) +_block_types_klasses = { + 'sparse': SparseBlock, + 'float': FloatBlock, + 'complex': ComplexBlock, + 'int': IntBlock, + 'bool': BoolBlock, + 'cat': CategoricalBlock, + 'object': ObjectBlock, + 'timedelta': TimeDeltaBlock, + 'datetime': DatetimeBlock, + 'datetime_tz': DatetimeTZBlock} + + +def _get_block_klass(dtype): + dtype = dtype or values.dtype + vtype = dtype.type + + if isinstance(values, SparseArray): + block_type = 'sparse' + elif issubclass(vtype, np.floating): + klass = FloatBlock + block_type = 'float' + elif (issubclass(vtype, np.integer) and + issubclass(vtype, np.timedelta64)): + block_type = 'timedelta' + elif (issubclass(vtype, np.integer) and + not issubclass(vtype, np.datetime64)): + block_type = 'int' + elif dtype == np.bool_: + block_type = 'bool' + elif issubclass(vtype, np.datetime64): + if hasattr(values, 'tz'): + block_type = 'datetime_tz' + else: + block_type = 'datetime' + elif is_datetimetz(values): + block_type = 'datetime_tz' + elif issubclass(vtype, np.complexfloating): + block_type = 'complex' + elif is_categorical(values): + block_type = 'cat' + else: + block_type = 'object' + + klass = _block_types_klasses[block_type] + return klass + + def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=False): if klass is None: - dtype = dtype or values.dtype - vtype = dtype.type - - if isinstance(values, SparseArray): - klass = SparseBlock - elif issubclass(vtype, np.floating): - klass = FloatBlock - elif (issubclass(vtype, np.integer) and - issubclass(vtype, np.timedelta64)): - klass = TimeDeltaBlock - elif (issubclass(vtype, np.integer) and - not issubclass(vtype, np.datetime64)): - klass = IntBlock - elif dtype == np.bool_: - klass = BoolBlock - elif 
issubclass(vtype, np.datetime64): - if hasattr(values, 'tz'): - klass = DatetimeTZBlock - else: - klass = DatetimeBlock - elif is_datetimetz(values): - klass = DatetimeTZBlock - elif issubclass(vtype, np.complexfloating): - klass = ComplexBlock - elif is_categorical(values): - klass = CategoricalBlock - else: - klass = ObjectBlock + klass = _get_block_klass(dtype) elif klass is DatetimeTZBlock and not is_datetimetz(values): return klass(values, ndim=ndim, fastpath=fastpath, @@ -4637,18 +4658,48 @@ def create_block_manager_from_arrays(arrays, names, axes): construction_error(len(arrays), arrays[0].shape, axes, e) +# TODO: no Timedelta? +def get_block_type(values, dtype=None): + # dtype=None for compat with `make_block` + dtype = dtype or values.dtype + vtype = dtype.type + + if is_sparse(values): + block_type = 'sparse' + elif issubclass(vtype, np.floating): + block_type = 'float' + elif issubclass(vtype, np.complexfloating): + block_type = 'complex' + + elif issubclass(vtype, np.datetime64): + if dtype != _NS_DTYPE: + values = tslib.cast_to_nanoseconds(values) + + if is_datetimetz(values): + block_type = 'datetime_tz' + else: + block_type = 'datetime' + + elif is_datetimetz(values): + block_type = 'datetime_tz' + + elif issubclass(vtype, np.integer): + block_type = 'int' + + elif dtype == np.bool_: + block_type = 'bool' + elif is_categorical(values): + block_type = 'cat' + else: + block_type = 'object' + + return (block_type, values) + + def form_blocks(arrays, names, axes): # put "leftover" items in float bucket, where else? # generalize? 
- float_items = [] - complex_items = [] - int_items = [] - bool_items = [] - object_items = [] - sparse_items = [] - datetime_items = [] - datetime_tz_items = [] - cat_items = [] + items_dict = {key: [] for key in _block_type_klasses} extra_locs = [] names_idx = Index(names) @@ -4665,74 +4716,42 @@ def form_blocks(arrays, names, axes): k = names[name_idx] v = arrays[name_idx] - - if is_sparse(v): - sparse_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.floating): - float_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.complexfloating): - complex_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.datetime64): - if v.dtype != _NS_DTYPE: - v = tslib.cast_to_nanoseconds(v) - - if is_datetimetz(v): - datetime_tz_items.append((i, k, v)) - else: - datetime_items.append((i, k, v)) - elif is_datetimetz(v): - datetime_tz_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.integer): - int_items.append((i, k, v)) - elif v.dtype == np.bool_: - bool_items.append((i, k, v)) - elif is_categorical(v): - cat_items.append((i, k, v)) - else: - object_items.append((i, k, v)) + (block_type, v) = get_block_type(v) + items_dict[block_type].append((i, k, v)) blocks = [] - if len(float_items): - float_blocks = _multi_blockify(float_items) - blocks.extend(float_blocks) - - if len(complex_items): - complex_blocks = _multi_blockify(complex_items) - blocks.extend(complex_blocks) - - if len(int_items): - int_blocks = _multi_blockify(int_items) - blocks.extend(int_blocks) - - if len(datetime_items): - datetime_blocks = _simple_blockify(datetime_items, _NS_DTYPE) - blocks.extend(datetime_blocks) - - if len(datetime_tz_items): - dttz_blocks = [make_block(array, - klass=DatetimeTZBlock, - fastpath=True, - placement=[i], ) - for i, _, array in datetime_tz_items] - blocks.extend(dttz_blocks) - - if len(bool_items): - bool_blocks = _simple_blockify(bool_items, np.bool_) - blocks.extend(bool_blocks) - - if len(object_items) > 0: - object_blocks = 
_simple_blockify(object_items, np.object_) - blocks.extend(object_blocks) - - if len(sparse_items) > 0: - sparse_blocks = _sparse_blockify(sparse_items) - blocks.extend(sparse_blocks) - - if len(cat_items) > 0: - cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True, - placement=[i]) - for i, _, array in cat_items] - blocks.extend(cat_blocks) + float_blocks = _multi_blockify(items_dict['float']) + blocks.extend(float_blocks) + + complex_blocks = _multi_blockify(items_dict['complex']) + blocks.extend(complex_blocks) + + int_blocks = _multi_blockify(items_dict['int']) + blocks.extend(int_blocks) + + datetime_blocks = _simple_blockify(items_dict['datetime'], _NS_DTYPE) + blocks.extend(datetime_blocks) + + dttz_blocks = [make_block(array, + klass=DatetimeTZBlock, + fastpath=True, + placement=[i], ) + for i, _, array in items_dict['datetime_tz']] + blocks.extend(dttz_blocks) + + bool_blocks = _simple_blockify(items_dict['bool'], np.bool_) + blocks.extend(bool_blocks) + + object_blocks = _simple_blockify(items_dict['object'], np.object_) + blocks.extend(object_blocks) + + sparse_blocks = _sparse_blockify(items_dict['sparse']) + blocks.extend(sparse_blocks) + + cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True, + placement=[i]) + for i, _, array in items_dict['cat']] + blocks.extend(cat_blocks) if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) @@ -4751,6 +4770,8 @@ def _simple_blockify(tuples, dtype): """ return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """ + if not tuples: + return [] values, placement = _stack_arrays(tuples, dtype) # CHECK DTYPE?
This is a demonstration rather than an actual PR. There is some very similar logic in `internals.make_block` and `internals.form_blocks`. For demonstration this PR separates the relevant bits out into `_get_block_klass` and `get_block_type`, respectively. Suppose that the small differences in these two new functions were reconciled. Then adding a custom block type would be a matter of adding it to `_block_type_klasses` along with registering some kind of hook in `_get_block_klass`. There would need to be some cleanup at the end of `form_blocks` to map block types to `_foo_blockify` functions. ref #17144 noci
https://api.github.com/repos/pandas-dev/pandas/pulls/18126
2017-11-05T17:49:26Z
2017-11-06T16:38:08Z
null
2017-12-08T19:40:55Z
COMPAT: 32-bit indexers compat
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index d99eba3e2d5e9..3d6fba982f560 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -499,8 +499,8 @@ def test_get_indexer_non_unique(self): idx2 = pd.PeriodIndex([p2, p1, p3, p4]) result = idx1.get_indexer_non_unique(idx2) - expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.int64) - expected_missing = np.array([2, 3], dtype=np.int64) + expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp) + expected_missing = np.array([2, 3], dtype=np.intp) tm.assert_numpy_array_equal(result[0], expected_indexer) tm.assert_numpy_array_equal(result[1], expected_missing)
xref #17755
https://api.github.com/repos/pandas-dev/pandas/pulls/18122
2017-11-05T12:02:23Z
2017-11-05T12:49:36Z
2017-11-05T12:49:36Z
2017-12-12T02:38:30Z
cleanup unused imports, constants
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index d012e169d1e45..c8c5f86820b76 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -8,7 +8,9 @@ cdef bint PY3 = (sys.version_info[0] >= 3) from numpy cimport * +# initialize numpy np.import_array() +np.import_ufunc() from libc.stdlib cimport malloc, free @@ -52,9 +54,7 @@ PyDateTime_IMPORT from tslibs.np_datetime cimport get_timedelta64_value, get_datetime64_value from tslib cimport _check_all_nulls -import tslib -from tslib import NaT, Timestamp, Timedelta -import interval +from tslib import NaT, Timestamp, Timedelta, array_to_datetime from interval import Interval cdef int64_t NPY_NAT = util.get_nat() @@ -62,13 +62,7 @@ cdef int64_t NPY_NAT = util.get_nat() cimport util from util cimport is_array, _checknull, _checknan -cdef extern from "math.h": - double sqrt(double x) - double fabs(double) - -# initialize numpy -import_array() -import_ufunc() +from libc.math cimport sqrt, fabs def values_from_object(object o): @@ -151,7 +145,7 @@ cpdef bint checknull_old(object val): elif is_array(val): return False else: - return util._checknull(val) + return _checknull(val) cpdef bint isposinf_scalar(object val): @@ -787,13 +781,13 @@ def scalar_binop(ndarray[object] values, object val, object op): object x result = np.empty(n, dtype=object) - if util._checknull(val): + if _checknull(val): result.fill(val) return result for i in range(n): x = values[i] - if util._checknull(x): + if _checknull(x): result[i] = x else: result[i] = op(x, val) @@ -820,9 +814,9 @@ def vec_binop(ndarray[object] left, ndarray[object] right, object op): try: result[i] = op(x, y) except TypeError: - if util._checknull(x): + if _checknull(x): result[i] = x - elif util._checknull(y): + elif _checknull(y): result[i] = y else: raise diff --git a/pandas/_libs/properties.pyx b/pandas/_libs/properties.pyx index 22d66356ebdc3..374da8067eedd 100644 --- a/pandas/_libs/properties.pyx +++ b/pandas/_libs/properties.pyx @@ -1,3 +1,4 @@ +# -*- 
coding: utf-8 -*- from cython cimport Py_ssize_t diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd index ac975a3bf3537..a5ba610dc89dc 100644 --- a/pandas/_libs/src/datetime.pxd +++ b/pandas/_libs/src/datetime.pxd @@ -5,7 +5,6 @@ from cpython cimport PyUnicode_Check, PyUnicode_AsASCIIString cdef extern from "numpy/ndarrayobject.h": - ctypedef int64_t npy_timedelta ctypedef int64_t npy_datetime @@ -16,13 +15,11 @@ cdef extern from "numpy/ndarrayobject.h": NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING - cdef extern from "numpy_helper.h": npy_datetime get_datetime64_value(object o) npy_timedelta get_timedelta64_value(object o) cdef extern from "numpy/npy_common.h": - ctypedef unsigned char npy_bool cdef extern from "datetime/np_datetime.h": @@ -53,7 +50,6 @@ cdef extern from "datetime/np_datetime.h": PANDAS_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil int days_per_month_table[2][12] - pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS int dayofweek(int y, int m, int d) nogil int is_leapyear(int64_t year) nogil @@ -68,8 +64,6 @@ cdef extern from "datetime/np_datetime_strings.h": npy_bool *out_special) - - cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts, int* out_local, int* out_tzoffset) except? 
-1: cdef int result diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 5d550148b10bc..ec060335c220e 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -521,7 +521,7 @@ cpdef object infer_datetimelike_array(object arr): # convert *every* string array if len(objs): try: - tslib.array_to_datetime(objs, errors='raise') + array_to_datetime(objs, errors='raise') return 'datetime' except: pass diff --git a/pandas/_libs/src/reduce.pyx b/pandas/_libs/src/reduce.pyx index d1761384114ef..f0ec8d284ef0e 100644 --- a/pandas/_libs/src/reduce.pyx +++ b/pandas/_libs/src/reduce.pyx @@ -6,9 +6,8 @@ from distutils.version import LooseVersion is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2' -cdef _get_result_array(object obj, - Py_ssize_t size, - Py_ssize_t cnt): + +cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt): if isinstance(obj, np.ndarray) \ or isinstance(obj, list) and len(obj) == cnt \
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18119
2017-11-05T02:21:52Z
2017-11-07T13:36:18Z
2017-11-07T13:36:18Z
2017-11-07T15:12:39Z
BLD: list proper deps for tslib
diff --git a/setup.py b/setup.py index 3464169e8d8d1..bd7c8f175607c 100755 --- a/setup.py +++ b/setup.py @@ -474,7 +474,8 @@ def pxd(name): 'pandas/_libs/src/datetime/np_datetime_strings.h'] np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c'] -tseries_depends = np_datetime_headers + ['pandas/_libs/src/datetime.pxd'] +tseries_depends = np_datetime_headers + ['pandas/_libs/src/datetime.pxd', + 'pandas/_libs/tslibs/np_datetime.pxd'] # some linux distros require it libraries = ['m'] if not is_platform_windows() else [] @@ -522,6 +523,10 @@ def pxd(name): 'pandas/_libs/src/parser/io.c']}, '_libs.period': { 'pyxfile': '_libs/period', + 'pxdfiles': ['_libs/src/util', + '_libs/lib', + '_libs/tslibs/timezones', + '_libs/tslibs/nattype'], 'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'], 'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']}, '_libs.properties': { @@ -535,15 +540,24 @@ def pxd(name): 'depends': _pxi_dep['sparse']}, '_libs.tslib': { 'pyxfile': '_libs/tslib', - 'pxdfiles': ['_libs/src/util'], + 'pxdfiles': ['_libs/src/util', + '_libs/src/khash', + '_libs/tslibs/conversion', + '_libs/tslibs/timedeltas', + '_libs/tslibs/timezones', + '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.conversion': { 'pyxfile': '_libs/tslibs/conversion', + 'pxdfiles': ['_libs/src/util', + '_libs/tslibs/timezones', + '_libs/tslibs/timedeltas'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.fields': { 'pyxfile': '_libs/tslibs/fields', + 'pxdfiles': ['_libs/src/util'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.frequencies': { @@ -557,18 +571,27 @@ def pxd(name): 'depends': np_datetime_headers, 'sources': np_datetime_sources}, '_libs.tslibs.offsets': { - 'pyxfile': '_libs/tslibs/offsets'}, + 'pyxfile': '_libs/tslibs/offsets', + 'pxdfiles': ['_libs/src/util', + 
'_libs/tslibs/conversion']}, '_libs.tslibs.parsing': { 'pyxfile': '_libs/tslibs/parsing', - 'pxdfiles': ['_libs/src/util']}, + 'pxdfiles': ['_libs/src/util', + '_libs/src/khash']}, '_libs.tslibs.strptime': { 'pyxfile': '_libs/tslibs/strptime', + 'pxdfiles': ['_libs/src/util', + '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.timedeltas': { - 'pyxfile': '_libs/tslibs/timedeltas'}, + 'pyxfile': '_libs/tslibs/timedeltas', + 'pxdfiles': ['_libs/src/util'], + 'depends': np_datetime_headers, + 'sources': np_datetime_sources}, '_libs.tslibs.timezones': { - 'pyxfile': '_libs/tslibs/timezones'}, + 'pyxfile': '_libs/tslibs/timezones', + 'pxdfiles': ['_libs/src/util']}, '_libs.testing': { 'pyxfile': '_libs/testing'}, '_libs.window': {
closes #18089
https://api.github.com/repos/pandas-dev/pandas/pulls/18117
2017-11-04T21:40:38Z
2017-11-05T10:52:07Z
2017-11-05T10:52:06Z
2017-11-05T10:53:12Z
BUG: Let CategoricalIndex take CategoricalDtype as dtype argument
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index e19f09b195ce0..579f1ec1e3eee 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -119,7 +119,7 @@ Categorical - Bug in :meth:`DataFrame.astype` where casting to 'category' on an empty ``DataFrame`` causes a segmentation fault (:issue:`18004`) - Error messages in the testing module have been improved when items have different ``CategoricalDtype`` (:issue:`18069`) -- +- ``CategoricalIndex`` can now correctly take a ``pd.api.types.CategoricalDtype`` as its dtype (:issue:`18116`) Other ^^^^^ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 8b680127723c3..70b531ffb0ec4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -79,7 +79,8 @@ def __new__(cls, data=None, categories=None, ordered=None, dtype=None, if data is not None or categories is None: cls._scalar_data_error(data) data = [] - data = cls._create_categorical(cls, data, categories, ordered) + data = cls._create_categorical(cls, data, categories, ordered, + dtype) if copy: data = data.copy() diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index d8ec23b9c7e0e..5e40e06d57413 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -4,6 +4,7 @@ import pandas.util.testing as tm from pandas.core.indexes.api import Index, CategoricalIndex +from pandas.core.dtypes.dtypes import CategoricalDtype from .common import Base from pandas.compat import range, PY3 @@ -95,6 +96,11 @@ def test_construction(self): 1, -1, 0], dtype='int8')) assert result.ordered + result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True) + expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True, + dtype='category') + tm.assert_index_equal(result, expected, exact=True) + # turn me to an Index result = Index(np.array(ci)) assert isinstance(result, Index) @@ 
-125,6 +131,25 @@ def test_construction_with_dtype(self): result = CategoricalIndex(idx, categories=idx, ordered=True) tm.assert_index_equal(result, expected, exact=True) + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH18109 + data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = pd.CategoricalIndex(data, dtype=dtype) + expected = pd.CategoricalIndex(data, categories=cats, + ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # error to combine categories or ordered and dtype keywords args + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, categories=cats, dtype=dtype) + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, ordered=ordered, dtype=dtype) + def test_create_categorical(self): # https://github.com/pandas-dev/pandas/pull/17513 # The public CI constructor doesn't hit this code path with
- [x ] closes #18109 - [x ] tests added / passed - [ x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ x] whatsnew entry This PR allows CategoricalIndex to take CategoricalDtype as its dtype argument, see #18109 for details.
https://api.github.com/repos/pandas-dev/pandas/pulls/18116
2017-11-04T21:37:58Z
2017-11-05T10:54:50Z
2017-11-05T10:54:50Z
2017-11-05T11:26:41Z
TST: add back test_generic.py, accid removed in the big reorg
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 14bf9710fca6a..48e6f8d4d50d3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -352,7 +352,7 @@ def _get_axis_number(self, axis): else: try: return self._AXIS_NUMBERS[axis] - except: + except KeyError: pass raise ValueError('No axis named {0} for object type {1}' .format(axis, type(self))) @@ -365,7 +365,7 @@ def _get_axis_name(self, axis): else: try: return self._AXIS_NAMES[axis] - except: + except KeyError: pass raise ValueError('No axis named {0} for object type {1}' .format(axis, type(self))) @@ -701,7 +701,7 @@ def squeeze(self, axis=None): return self.iloc[ tuple([0 if i in axis and len(a) == 1 else slice(None) for i, a in enumerate(self.axes)])] - except: + except Exception: return self def swaplevel(self, i=-2, j=-1, axis=0): @@ -1021,7 +1021,7 @@ def __invert__(self): try: arr = operator.inv(_values_from_object(self)) return self.__array_wrap__(arr) - except: + except Exception: # inv fails with 0 len if not np.prod(self.shape): @@ -1907,7 +1907,7 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True): else: try: ref._maybe_cache_changed(cacher[0], self) - except: + except Exception: pass if verify_is_copy: @@ -2016,7 +2016,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): if not gc.get_referents(self.is_copy()): self.is_copy = None return - except: + except Exception: pass # we might be a false positive @@ -2024,7 +2024,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): if self.is_copy().shape == self.shape: self.is_copy = None return - except: + except Exception: pass # a custom message @@ -2999,7 +2999,7 @@ def reindex(self, *args, **kwargs): if self._needs_reindex_multi(axes, method, level): try: return self._reindex_multi(axes, copy, fill_value) - except: + except Exception: pass # perform the reindex on the axes @@ -3715,7 +3715,7 @@ def _check_inplace_setting(self, value): try: if np.isnan(value): 
return True - except: + except Exception: pass raise TypeError('Cannot do inplace boolean setting on ' @@ -5005,6 +5005,8 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, inplace = validate_bool_kwarg(inplace, 'inplace') axis = nv.validate_clip_with_axis(axis, args, kwargs) + if axis is not None: + axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value @@ -5916,7 +5918,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, new_other = _values_from_object(self).copy() new_other[icond] = other other = new_other - except: + except Exception: try_quick = False # let's create a new (if we failed at the above diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 1bac4037e99c9..cfdb18cefee64 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1822,6 +1822,24 @@ def test_built_in_round(self): {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]}) tm.assert_frame_equal(round(df), expected_rounded) + def test_pct_change(self): + # GH 11150 + pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange( + 0, 40, 10)]).astype(np.float64) + pnl.iat[1, 0] = np.nan + pnl.iat[1, 1] = np.nan + pnl.iat[2, 3] = 60 + + mask = pnl.isnull() + + for axis in range(2): + expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift( + axis=axis) - 1 + expected[mask] = np.nan + result = pnl.pct_change(axis=axis, fill_method='pad') + + tm.assert_frame_equal(result, expected) + # Clip def test_clip(self): diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 26a2c6f9a5045..5cd5a3793ab46 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -11,12 +11,15 @@ import numpy as np from pandas import (DataFrame, Series, Index, - Timestamp, DatetimeIndex, - to_datetime, date_range) + Timestamp, DatetimeIndex, MultiIndex, + to_datetime, date_range, 
period_range) import pandas as pd import pandas.tseries.offsets as offsets -from pandas.util.testing import assert_series_equal, assert_frame_equal +from pandas.util.testing import (assert_series_equal, + assert_frame_equal, + assert_index_equal, + assert_raises_regex) import pandas.util.testing as tm from pandas.compat import product @@ -601,3 +604,76 @@ def test_frame_to_period(self): tm.assert_index_equal(pts.columns, exp.columns.asfreq('M')) pytest.raises(ValueError, df.to_period, axis=2) + + @pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert']) + def test_tz_convert_and_localize(self, fn): + l0 = date_range('20140701', periods=5, freq='D') + + # TODO: l1 should be a PeriodIndex for testing + # after GH2106 is addressed + with pytest.raises(NotImplementedError): + period_range('20140701', periods=1).tz_convert('UTC') + with pytest.raises(NotImplementedError): + period_range('20140701', periods=1).tz_localize('UTC') + # l1 = period_range('20140701', periods=5, freq='D') + l1 = date_range('20140701', periods=5, freq='D') + + int_idx = Index(range(5)) + + if fn == 'tz_convert': + l0 = l0.tz_localize('UTC') + l1 = l1.tz_localize('UTC') + + for idx in [l0, l1]: + + l0_expected = getattr(idx, fn)('US/Pacific') + l1_expected = getattr(idx, fn)('US/Pacific') + + df1 = DataFrame(np.ones(5), index=l0) + df1 = getattr(df1, fn)('US/Pacific') + assert_index_equal(df1.index, l0_expected) + + # MultiIndex + # GH7846 + df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) + + df3 = getattr(df2, fn)('US/Pacific', level=0) + assert not df3.index.levels[0].equals(l0) + assert_index_equal(df3.index.levels[0], l0_expected) + assert_index_equal(df3.index.levels[1], l1) + assert not df3.index.levels[1].equals(l1_expected) + + df3 = getattr(df2, fn)('US/Pacific', level=1) + assert_index_equal(df3.index.levels[0], l0) + assert not df3.index.levels[0].equals(l0_expected) + assert_index_equal(df3.index.levels[1], l1_expected) + assert not df3.index.levels[1].equals(l1) + 
+ df4 = DataFrame(np.ones(5), + MultiIndex.from_arrays([int_idx, l0])) + + # TODO: untested + df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa + + assert_index_equal(df3.index.levels[0], l0) + assert not df3.index.levels[0].equals(l0_expected) + assert_index_equal(df3.index.levels[1], l1_expected) + assert not df3.index.levels[1].equals(l1) + + # Bad Inputs + + # Not DatetimeIndex / PeriodIndex + with assert_raises_regex(TypeError, 'DatetimeIndex'): + df = DataFrame(index=int_idx) + df = getattr(df, fn)('US/Pacific') + + # Not DatetimeIndex / PeriodIndex + with assert_raises_regex(TypeError, 'DatetimeIndex'): + df = DataFrame(np.ones(5), + MultiIndex.from_arrays([int_idx, l0])) + df = getattr(df, fn)('US/Pacific', level=0) + + # Invalid level + with assert_raises_regex(ValueError, 'not valid'): + df = DataFrame(index=l0) + df = getattr(df, fn)('US/Pacific', level=1) diff --git a/pandas/tests/generic/__init__.py b/pandas/tests/generic/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py new file mode 100644 index 0000000000000..ae73664e224cf --- /dev/null +++ b/pandas/tests/generic/test_frame.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- +# pylint: disable-msg=E1101,W0612 + +from operator import methodcaller +from copy import deepcopy +from distutils.version import LooseVersion + +import pytest +import numpy as np +import pandas as pd + +from pandas import Series, DataFrame, date_range, MultiIndex + +from pandas.compat import range +from pandas.util.testing import (assert_series_equal, + assert_frame_equal, + assert_almost_equal) + +import pandas.util.testing as tm +from .test_generic import Generic + +try: + import xarray + _XARRAY_INSTALLED = True +except ImportError: + _XARRAY_INSTALLED = False + + +class TestDataFrame(Generic): + _typ = DataFrame + _comparator = lambda self, x, y: assert_frame_equal(x, y) + + def test_rename_mi(self): + df = DataFrame([ + 11, 
21, 31 + ], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]])) + df.rename(str.lower) + + def test_set_axis_name(self): + df = pd.DataFrame([[1, 2], [3, 4]]) + funcs = ['_set_axis_name', 'rename_axis'] + for func in funcs: + result = methodcaller(func, 'foo')(df) + assert df.index.name is None + assert result.index.name == 'foo' + + result = methodcaller(func, 'cols', axis=1)(df) + assert df.columns.name is None + assert result.columns.name == 'cols' + + def test_set_axis_name_mi(self): + df = DataFrame( + np.empty((3, 3)), + index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]), + columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')]) + ) + + level_names = ['L1', 'L2'] + funcs = ['_set_axis_name', 'rename_axis'] + for func in funcs: + result = methodcaller(func, level_names)(df) + assert result.index.names == level_names + assert result.columns.names == [None, None] + + result = methodcaller(func, level_names, axis=1)(df) + assert result.columns.names == ["L1", "L2"] + assert result.index.names == [None, None] + + def test_nonzero_single_element(self): + + # allow single item via bool method + df = DataFrame([[True]]) + assert df.bool() + + df = DataFrame([[False]]) + assert not df.bool() + + df = DataFrame([[False, False]]) + pytest.raises(ValueError, lambda: df.bool()) + pytest.raises(ValueError, lambda: bool(df)) + + def test_get_numeric_data_preserve_dtype(self): + + # get the numeric data + o = DataFrame({'A': [1, '2', 3.]}) + result = o._get_numeric_data() + expected = DataFrame(index=[0, 1, 2], dtype=object) + self._compare(result, expected) + + def test_metadata_propagation_indiv(self): + + # groupby + df = DataFrame( + {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], + 'C': np.random.randn(8), + 'D': np.random.randn(8)}) + result = df.groupby('A').sum() + self.check_metadata(df, result) + + # resample + df = DataFrame(np.random.randn(1000, 
2), + index=date_range('20130101', periods=1000, freq='s')) + result = df.resample('1T') + self.check_metadata(df, result) + + # merging with override + # GH 6923 + _metadata = DataFrame._metadata + _finalize = DataFrame.__finalize__ + + np.random.seed(10) + df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b']) + df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd']) + DataFrame._metadata = ['filename'] + df1.filename = 'fname1.csv' + df2.filename = 'fname2.csv' + + def finalize(self, other, method=None, **kwargs): + + for name in self._metadata: + if method == 'merge': + left, right = other.left, other.right + value = getattr(left, name, '') + '|' + getattr(right, + name, '') + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, '')) + + return self + + DataFrame.__finalize__ = finalize + result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') + assert result.filename == 'fname1.csv|fname2.csv' + + # concat + # GH 6927 + DataFrame._metadata = ['filename'] + df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab')) + df1.filename = 'foo' + + def finalize(self, other, method=None, **kwargs): + for name in self._metadata: + if method == 'concat': + value = '+'.join([getattr( + o, name) for o in other.objs if getattr(o, name, None) + ]) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, None)) + + return self + + DataFrame.__finalize__ = finalize + + result = pd.concat([df1, df1]) + assert result.filename == 'foo+foo' + + # reset + DataFrame._metadata = _metadata + DataFrame.__finalize__ = _finalize + + def test_set_attribute(self): + # Test for consistent setattr behavior when an attribute and a column + # have the same name (Issue #8994) + df = DataFrame({'x': [1, 2, 3]}) + + df.y = 2 + df['y'] = [2, 4, 6] + df.y = 5 + + assert df.y == 5 + assert_series_equal(df['y'], Series([2, 4, 6], name='y')) + + 
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and + LooseVersion(xarray.__version__) < '0.10.0', + reason='xarray >= 0.10.0 required') + @pytest.mark.parametrize( + "index", ['FloatIndex', 'IntIndex', + 'StringIndex', 'UnicodeIndex', + 'DateIndex', 'PeriodIndex', + 'CategoricalIndex', 'TimedeltaIndex']) + def test_to_xarray_index_types(self, index): + from xarray import Dataset + + index = getattr(tm, 'make{}'.format(index)) + df = DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0, dtype='float64'), + 'e': [True, False, True], + 'f': pd.Categorical(list('abc')), + 'g': pd.date_range('20130101', periods=3), + 'h': pd.date_range('20130101', + periods=3, + tz='US/Eastern')} + ) + + df.index = index(3) + df.index.name = 'foo' + df.columns.name = 'bar' + result = df.to_xarray() + assert result.dims['foo'] == 3 + assert len(result.coords) == 1 + assert len(result.data_vars) == 8 + assert_almost_equal(list(result.coords.keys()), ['foo']) + assert isinstance(result, Dataset) + + # idempotency + # categoricals are not preserved + # datetimes w/tz are not preserved + # column names are lost + expected = df.copy() + expected['f'] = expected['f'].astype(object) + expected['h'] = expected['h'].astype('datetime64[ns]') + expected.columns.name = None + assert_frame_equal(result.to_dataframe(), expected, + check_index_type=False, check_categorical=False) + + def test_to_xarray(self): + tm._skip_if_no_xarray() + from xarray import Dataset + + df = DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0, dtype='float64'), + 'e': [True, False, True], + 'f': pd.Categorical(list('abc')), + 'g': pd.date_range('20130101', periods=3), + 'h': pd.date_range('20130101', + periods=3, + tz='US/Eastern')} + ) + + df.index.name = 'foo' + result = df[0:0].to_xarray() + assert result.dims['foo'] == 0 + assert isinstance(result, Dataset) + + # available 
in 0.7.1 + # MultiIndex + df.index = pd.MultiIndex.from_product([['a'], range(3)], + names=['one', 'two']) + result = df.to_xarray() + assert result.dims['one'] == 1 + assert result.dims['two'] == 3 + assert len(result.coords) == 2 + assert len(result.data_vars) == 8 + assert_almost_equal(list(result.coords.keys()), ['one', 'two']) + assert isinstance(result, Dataset) + + result = result.to_dataframe() + expected = df.copy() + expected['f'] = expected['f'].astype(object) + expected['h'] = expected['h'].astype('datetime64[ns]') + expected.columns.name = None + assert_frame_equal(result, + expected, + check_index_type=False) + + def test_deepcopy_empty(self): + # This test covers empty frame copying with non-empty column sets + # as reported in issue GH15370 + empty_frame = DataFrame(data=[], index=[], columns=['A']) + empty_frame_copy = deepcopy(empty_frame) + + self._compare(empty_frame_copy, empty_frame) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py new file mode 100644 index 0000000000000..a37c1649e5677 --- /dev/null +++ b/pandas/tests/generic/test_generic.py @@ -0,0 +1,1026 @@ +# -*- coding: utf-8 -*- +# pylint: disable-msg=E1101,W0612 + +from copy import copy, deepcopy +from warnings import catch_warnings + +import pytest +import numpy as np +import pandas as pd + +from pandas.core.dtypes.common import is_scalar +from pandas import (Series, DataFrame, Panel, + date_range, Panel4D, + MultiIndex) + +import pandas.io.formats.printing as printing + +from pandas.compat import range, zip, PY3 +from pandas.util.testing import (assert_raises_regex, + assert_series_equal, + assert_panel_equal, + assert_frame_equal) + +import pandas.util.testing as tm + + +# ---------------------------------------------------------------------- +# Generic types test cases + +class Generic(object): + + @property + def _ndim(self): + return self._typ._AXIS_LEN + + def _axes(self): + """ return the axes for my object typ """ + return 
self._typ._AXIS_ORDERS + + def _construct(self, shape, value=None, dtype=None, **kwargs): + """ construct an object for the given shape + if value is specified use that if its a scalar + if value is an array, repeat it as needed """ + + if isinstance(shape, int): + shape = tuple([shape] * self._ndim) + if value is not None: + if is_scalar(value): + if value == 'empty': + arr = None + + # remove the info axis + kwargs.pop(self._typ._info_axis_name, None) + else: + arr = np.empty(shape, dtype=dtype) + arr.fill(value) + else: + fshape = np.prod(shape) + arr = value.ravel() + new_shape = fshape / arr.shape[0] + if fshape % arr.shape[0] != 0: + raise Exception("invalid value passed in _construct") + + arr = np.repeat(arr, new_shape).reshape(shape) + else: + arr = np.random.randn(*shape) + return self._typ(arr, dtype=dtype, **kwargs) + + def _compare(self, result, expected): + self._comparator(result, expected) + + def test_rename(self): + + # single axis + idx = list('ABCD') + # relabeling values passed into self.rename + args = [ + str.lower, + {x: x.lower() for x in idx}, + Series({x: x.lower() for x in idx}), + ] + + for axis in self._axes(): + kwargs = {axis: idx} + obj = self._construct(4, **kwargs) + + for arg in args: + # rename a single axis + result = obj.rename(**{axis: arg}) + expected = obj.copy() + setattr(expected, axis, list('abcd')) + self._compare(result, expected) + + # multiple axes at once + + def test_get_numeric_data(self): + + n = 4 + kwargs = {} + for i in range(self._ndim): + kwargs[self._typ._AXIS_NAMES[i]] = list(range(n)) + + # get the numeric data + o = self._construct(n, **kwargs) + result = o._get_numeric_data() + self._compare(result, o) + + # non-inclusion + result = o._get_bool_data() + expected = self._construct(n, value='empty', **kwargs) + self._compare(result, expected) + + # get the bool data + arr = np.array([True, True, False, True]) + o = self._construct(n, value=arr, **kwargs) + result = o._get_numeric_data() + 
self._compare(result, o) + + # _get_numeric_data is includes _get_bool_data, so can't test for + # non-inclusion + + def test_get_default(self): + + # GH 7725 + d0 = "a", "b", "c", "d" + d1 = np.arange(4, dtype='int64') + others = "e", 10 + + for data, index in ((d0, d1), (d1, d0)): + s = Series(data, index=index) + for i, d in zip(index, data): + assert s.get(i) == d + assert s.get(i, d) == d + assert s.get(i, "z") == d + for other in others: + assert s.get(other, "z") == "z" + assert s.get(other, other) == other + + def test_nonzero(self): + + # GH 4633 + # look at the boolean/nonzero behavior for objects + obj = self._construct(shape=4) + pytest.raises(ValueError, lambda: bool(obj == 0)) + pytest.raises(ValueError, lambda: bool(obj == 1)) + pytest.raises(ValueError, lambda: bool(obj)) + + obj = self._construct(shape=4, value=1) + pytest.raises(ValueError, lambda: bool(obj == 0)) + pytest.raises(ValueError, lambda: bool(obj == 1)) + pytest.raises(ValueError, lambda: bool(obj)) + + obj = self._construct(shape=4, value=np.nan) + pytest.raises(ValueError, lambda: bool(obj == 0)) + pytest.raises(ValueError, lambda: bool(obj == 1)) + pytest.raises(ValueError, lambda: bool(obj)) + + # empty + obj = self._construct(shape=0) + pytest.raises(ValueError, lambda: bool(obj)) + + # invalid behaviors + + obj1 = self._construct(shape=4, value=1) + obj2 = self._construct(shape=4, value=1) + + def f(): + if obj1: + printing.pprint_thing("this works and shouldn't") + + pytest.raises(ValueError, f) + pytest.raises(ValueError, lambda: obj1 and obj2) + pytest.raises(ValueError, lambda: obj1 or obj2) + pytest.raises(ValueError, lambda: not obj1) + + def test_downcast(self): + # test close downcasting + + o = self._construct(shape=4, value=9, dtype=np.int64) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + self._compare(result, o) + + o = self._construct(shape=4, value=9.) 
+ expected = o.astype(np.int64) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + self._compare(result, expected) + + o = self._construct(shape=4, value=9.5) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + self._compare(result, o) + + # are close + o = self._construct(shape=4, value=9.000000000005) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + expected = o.astype(np.int64) + self._compare(result, expected) + + def test_constructor_compound_dtypes(self): + # GH 5191 + # compound dtypes should raise not-implementederror + + def f(dtype): + return self._construct(shape=3, dtype=dtype) + + pytest.raises(NotImplementedError, f, [("A", "datetime64[h]"), + ("B", "str"), + ("C", "int32")]) + + # these work (though results may be unexpected) + f('int64') + f('float64') + f('M8[ns]') + + def check_metadata(self, x, y=None): + for m in x._metadata: + v = getattr(x, m, None) + if y is None: + assert v is None + else: + assert v == getattr(y, m, None) + + def test_metadata_propagation(self): + # check that the metadata matches up on the resulting ops + + o = self._construct(shape=3) + o.name = 'foo' + o2 = self._construct(shape=3) + o2.name = 'bar' + + # TODO + # Once panel can do non-trivial combine operations + # (currently there is an a raise in the Panel arith_ops to prevent + # this, though it actually does work) + # can remove all of these try: except: blocks on the actual operations + + # ---------- + # preserving + # ---------- + + # simple ops with scalars + for op in ['__add__', '__sub__', '__truediv__', '__mul__']: + result = getattr(o, op)(1) + self.check_metadata(o, result) + + # ops with like + for op in ['__add__', '__sub__', '__truediv__', '__mul__']: + try: + result = getattr(o, op)(o) + self.check_metadata(o, result) + except (ValueError, AttributeError): + pass + + # simple boolean + for op in ['__eq__', '__le__', '__ge__']: + v1 = getattr(o, op)(o) + self.check_metadata(o, v1) + + try: + 
self.check_metadata(o, v1 & v1) + except (ValueError): + pass + + try: + self.check_metadata(o, v1 | v1) + except (ValueError): + pass + + # combine_first + try: + result = o.combine_first(o2) + self.check_metadata(o, result) + except (AttributeError): + pass + + # --------------------------- + # non-preserving (by default) + # --------------------------- + + # add non-like + try: + result = o + o2 + self.check_metadata(result) + except (ValueError, AttributeError): + pass + + # simple boolean + for op in ['__eq__', '__le__', '__ge__']: + + # this is a name matching op + v1 = getattr(o, op)(o) + + v2 = getattr(o, op)(o2) + self.check_metadata(v2) + + try: + self.check_metadata(v1 & v2) + except (ValueError): + pass + + try: + self.check_metadata(v1 | v2) + except (ValueError): + pass + + def test_head_tail(self): + # GH5370 + + o = self._construct(shape=10) + + # check all index types + for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex, + tm.makeUnicodeIndex, tm.makeDateIndex, + tm.makePeriodIndex]: + axis = o._get_axis_name(0) + setattr(o, axis, index(len(getattr(o, axis)))) + + # Panel + dims + try: + o.head() + except (NotImplementedError): + pytest.skip('not implemented on {0}'.format( + o.__class__.__name__)) + + self._compare(o.head(), o.iloc[:5]) + self._compare(o.tail(), o.iloc[-5:]) + + # 0-len + self._compare(o.head(0), o.iloc[0:0]) + self._compare(o.tail(0), o.iloc[0:0]) + + # bounded + self._compare(o.head(len(o) + 1), o) + self._compare(o.tail(len(o) + 1), o) + + # neg index + self._compare(o.head(-3), o.head(7)) + self._compare(o.tail(-3), o.tail(7)) + + def test_sample(self): + # Fixes issue: 2419 + + o = self._construct(shape=10) + + ### + # Check behavior of random_state argument + ### + + # Check for stability when receives seed or random state -- run 10 + # times. 
+ for test in range(10): + seed = np.random.randint(0, 100) + self._compare( + o.sample(n=4, random_state=seed), o.sample(n=4, + random_state=seed)) + self._compare( + o.sample(frac=0.7, random_state=seed), o.sample( + frac=0.7, random_state=seed)) + + self._compare( + o.sample(n=4, random_state=np.random.RandomState(test)), + o.sample(n=4, random_state=np.random.RandomState(test))) + + self._compare( + o.sample(frac=0.7, random_state=np.random.RandomState(test)), + o.sample(frac=0.7, random_state=np.random.RandomState(test))) + + os1, os2 = [], [] + for _ in range(2): + np.random.seed(test) + os1.append(o.sample(n=4)) + os2.append(o.sample(frac=0.7)) + self._compare(*os1) + self._compare(*os2) + + # Check for error when random_state argument invalid. + with pytest.raises(ValueError): + o.sample(random_state='astring!') + + ### + # Check behavior of `frac` and `N` + ### + + # Giving both frac and N throws error + with pytest.raises(ValueError): + o.sample(n=3, frac=0.3) + + # Check that raises right error for negative lengths + with pytest.raises(ValueError): + o.sample(n=-3) + with pytest.raises(ValueError): + o.sample(frac=-0.3) + + # Make sure float values of `n` give error + with pytest.raises(ValueError): + o.sample(n=3.2) + + # Check lengths are right + assert len(o.sample(n=4) == 4) + assert len(o.sample(frac=0.34) == 3) + assert len(o.sample(frac=0.36) == 4) + + ### + # Check weights + ### + + # Weight length must be right + with pytest.raises(ValueError): + o.sample(n=3, weights=[0, 1]) + + with pytest.raises(ValueError): + bad_weights = [0.5] * 11 + o.sample(n=3, weights=bad_weights) + + with pytest.raises(ValueError): + bad_weight_series = Series([0, 0, 0.2]) + o.sample(n=4, weights=bad_weight_series) + + # Check won't accept negative weights + with pytest.raises(ValueError): + bad_weights = [-0.1] * 10 + o.sample(n=3, weights=bad_weights) + + # Check inf and -inf throw errors: + with pytest.raises(ValueError): + weights_with_inf = [0.1] * 10 + 
weights_with_inf[0] = np.inf + o.sample(n=3, weights=weights_with_inf) + + with pytest.raises(ValueError): + weights_with_ninf = [0.1] * 10 + weights_with_ninf[0] = -np.inf + o.sample(n=3, weights=weights_with_ninf) + + # All zeros raises errors + zero_weights = [0] * 10 + with pytest.raises(ValueError): + o.sample(n=3, weights=zero_weights) + + # All missing weights + nan_weights = [np.nan] * 10 + with pytest.raises(ValueError): + o.sample(n=3, weights=nan_weights) + + # Check np.nan are replaced by zeros. + weights_with_nan = [np.nan] * 10 + weights_with_nan[5] = 0.5 + self._compare( + o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6]) + + # Check None are also replaced by zeros. + weights_with_None = [None] * 10 + weights_with_None[5] = 0.5 + self._compare( + o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6]) + + def test_size_compat(self): + # GH8846 + # size property should be defined + + o = self._construct(shape=10) + assert o.size == np.prod(o.shape) + assert o.size == 10 ** len(o.axes) + + def test_split_compat(self): + # xref GH8846 + o = self._construct(shape=10) + assert len(np.array_split(o, 5)) == 5 + assert len(np.array_split(o, 2)) == 2 + + def test_unexpected_keyword(self): # GH8597 + df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe']) + ca = pd.Categorical([0, 0, 2, 2, 3, np.nan]) + ts = df['joe'].copy() + ts[2] = np.nan + + with assert_raises_regex(TypeError, 'unexpected keyword'): + df.drop('joe', axis=1, in_place=True) + + with assert_raises_regex(TypeError, 'unexpected keyword'): + df.reindex([1, 0], inplace=True) + + with assert_raises_regex(TypeError, 'unexpected keyword'): + ca.fillna(0, inplace=True) + + with assert_raises_regex(TypeError, 'unexpected keyword'): + ts.fillna(0, in_place=True) + + # See gh-12301 + def test_stat_unexpected_keyword(self): + obj = self._construct(5) + starwars = 'Star Wars' + errmsg = 'unexpected keyword' + + with assert_raises_regex(TypeError, errmsg): + obj.max(epic=starwars) 
# stat_function + with assert_raises_regex(TypeError, errmsg): + obj.var(epic=starwars) # stat_function_ddof + with assert_raises_regex(TypeError, errmsg): + obj.sum(epic=starwars) # cum_function + with assert_raises_regex(TypeError, errmsg): + obj.any(epic=starwars) # logical_function + + def test_api_compat(self): + + # GH 12021 + # compat for __name__, __qualname__ + + obj = self._construct(5) + for func in ['sum', 'cumsum', 'any', 'var']: + f = getattr(obj, func) + assert f.__name__ == func + if PY3: + assert f.__qualname__.endswith(func) + + def test_stat_non_defaults_args(self): + obj = self._construct(5) + out = np.array([0]) + errmsg = "the 'out' parameter is not supported" + + with assert_raises_regex(ValueError, errmsg): + obj.max(out=out) # stat_function + with assert_raises_regex(ValueError, errmsg): + obj.var(out=out) # stat_function_ddof + with assert_raises_regex(ValueError, errmsg): + obj.sum(out=out) # cum_function + with assert_raises_regex(ValueError, errmsg): + obj.any(out=out) # logical_function + + def test_truncate_out_of_bounds(self): + # GH11382 + + # small + shape = [int(2e3)] + ([1] * (self._ndim - 1)) + small = self._construct(shape, dtype='int8') + self._compare(small.truncate(), small) + self._compare(small.truncate(before=0, after=3e3), small) + self._compare(small.truncate(before=-1, after=2e3), small) + + # big + shape = [int(2e6)] + ([1] * (self._ndim - 1)) + big = self._construct(shape, dtype='int8') + self._compare(big.truncate(), big) + self._compare(big.truncate(before=0, after=3e6), big) + self._compare(big.truncate(before=-1, after=2e6), big) + + def test_validate_bool_args(self): + df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) + invalid_values = [1, "True", [1, 2, 3], 5.0] + + for value in invalid_values: + with pytest.raises(ValueError): + super(DataFrame, df).rename_axis(mapper={'a': 'x', 'b': 'y'}, + axis=1, inplace=value) + + with pytest.raises(ValueError): + super(DataFrame, df).drop('a', axis=1, inplace=value) + + 
with pytest.raises(ValueError): + super(DataFrame, df).sort_index(inplace=value) + + with pytest.raises(ValueError): + super(DataFrame, df)._consolidate(inplace=value) + + with pytest.raises(ValueError): + super(DataFrame, df).fillna(value=0, inplace=value) + + with pytest.raises(ValueError): + super(DataFrame, df).replace(to_replace=1, value=7, + inplace=value) + + with pytest.raises(ValueError): + super(DataFrame, df).interpolate(inplace=value) + + with pytest.raises(ValueError): + super(DataFrame, df)._where(cond=df.a > 2, inplace=value) + + with pytest.raises(ValueError): + super(DataFrame, df).mask(cond=df.a > 2, inplace=value) + + def test_copy_and_deepcopy(self): + # GH 15444 + for shape in [0, 1, 2]: + obj = self._construct(shape) + for func in [copy, + deepcopy, + lambda x: x.copy(deep=False), + lambda x: x.copy(deep=True)]: + obj_copy = func(obj) + assert obj_copy is not obj + self._compare(obj_copy, obj) + + +class TestNDFrame(object): + # tests that don't fit elsewhere + + def test_sample(sel): + # Fixes issue: 2419 + # additional specific object based tests + + # A few dataframe test with degenerate weights. + easy_weight_list = [0] * 10 + easy_weight_list[5] = 1 + + df = pd.DataFrame({'col1': range(10, 20), + 'col2': range(20, 30), + 'colString': ['a'] * 10, + 'easyweights': easy_weight_list}) + sample1 = df.sample(n=1, weights='easyweights') + assert_frame_equal(sample1, df.iloc[5:6]) + + # Ensure proper error if string given as weight for Series, panel, or + # DataFrame with axis = 1. 
+ s = Series(range(10)) + with pytest.raises(ValueError): + s.sample(n=3, weights='weight_column') + + with catch_warnings(record=True): + panel = Panel(items=[0, 1, 2], major_axis=[2, 3, 4], + minor_axis=[3, 4, 5]) + with pytest.raises(ValueError): + panel.sample(n=1, weights='weight_column') + + with pytest.raises(ValueError): + df.sample(n=1, weights='weight_column', axis=1) + + # Check weighting key error + with pytest.raises(KeyError): + df.sample(n=3, weights='not_a_real_column_name') + + # Check that re-normalizes weights that don't sum to one. + weights_less_than_1 = [0] * 10 + weights_less_than_1[0] = 0.5 + tm.assert_frame_equal( + df.sample(n=1, weights=weights_less_than_1), df.iloc[:1]) + + ### + # Test axis argument + ### + + # Test axis argument + df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10}) + second_column_weight = [0, 1] + assert_frame_equal( + df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']]) + + # Different axis arg types + assert_frame_equal(df.sample(n=1, axis='columns', + weights=second_column_weight), + df[['col2']]) + + weight = [0] * 10 + weight[5] = 0.5 + assert_frame_equal(df.sample(n=1, axis='rows', weights=weight), + df.iloc[5:6]) + assert_frame_equal(df.sample(n=1, axis='index', weights=weight), + df.iloc[5:6]) + + # Check out of range axis values + with pytest.raises(ValueError): + df.sample(n=1, axis=2) + + with pytest.raises(ValueError): + df.sample(n=1, axis='not_a_name') + + with pytest.raises(ValueError): + s = pd.Series(range(10)) + s.sample(n=1, axis=1) + + # Test weight length compared to correct axis + with pytest.raises(ValueError): + df.sample(n=1, axis=1, weights=[0.5] * 10) + + # Check weights with axis = 1 + easy_weight_list = [0] * 3 + easy_weight_list[2] = 1 + + df = pd.DataFrame({'col1': range(10, 20), + 'col2': range(20, 30), + 'colString': ['a'] * 10}) + sample1 = df.sample(n=1, axis=1, weights=easy_weight_list) + assert_frame_equal(sample1, df[['colString']]) + + # Test default axes + 
with catch_warnings(record=True): + p = Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6], + minor_axis=[1, 3, 5]) + assert_panel_equal( + p.sample(n=3, random_state=42), p.sample(n=3, axis=1, + random_state=42)) + assert_frame_equal( + df.sample(n=3, random_state=42), df.sample(n=3, axis=0, + random_state=42)) + + # Test that function aligns weights with frame + df = DataFrame( + {'col1': [5, 6, 7], + 'col2': ['a', 'b', 'c'], }, index=[9, 5, 3]) + s = Series([1, 0, 0], index=[3, 5, 9]) + assert_frame_equal(df.loc[[3]], df.sample(1, weights=s)) + + # Weights have index values to be dropped because not in + # sampled DataFrame + s2 = Series([0.001, 0, 10000], index=[3, 5, 10]) + assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2)) + + # Weights have empty values to be filed with zeros + s3 = Series([0.01, 0], index=[3, 5]) + assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3)) + + # No overlap in weight and sampled DataFrame indices + s4 = Series([1, 0], index=[1, 2]) + with pytest.raises(ValueError): + df.sample(1, weights=s4) + + def test_squeeze(self): + # noop + for s in [tm.makeFloatSeries(), tm.makeStringSeries(), + tm.makeObjectSeries()]: + tm.assert_series_equal(s.squeeze(), s) + for df in [tm.makeTimeDataFrame()]: + tm.assert_frame_equal(df.squeeze(), df) + with catch_warnings(record=True): + for p in [tm.makePanel()]: + tm.assert_panel_equal(p.squeeze(), p) + with catch_warnings(record=True): + for p4d in [tm.makePanel4D()]: + tm.assert_panel4d_equal(p4d.squeeze(), p4d) + + # squeezing + df = tm.makeTimeDataFrame().reindex(columns=['A']) + tm.assert_series_equal(df.squeeze(), df['A']) + + with catch_warnings(record=True): + p = tm.makePanel().reindex(items=['ItemA']) + tm.assert_frame_equal(p.squeeze(), p['ItemA']) + + p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A']) + tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A']) + + with catch_warnings(record=True): + p4d = tm.makePanel4D().reindex(labels=['label1']) + 
tm.assert_panel_equal(p4d.squeeze(), p4d['label1']) + + with catch_warnings(record=True): + p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA']) + tm.assert_frame_equal(p4d.squeeze(), p4d.loc['label1', 'ItemA']) + + # don't fail with 0 length dimensions GH11229 & GH8999 + empty_series = Series([], name='five') + empty_frame = DataFrame([empty_series]) + with catch_warnings(record=True): + empty_panel = Panel({'six': empty_frame}) + + [tm.assert_series_equal(empty_series, higher_dim.squeeze()) + for higher_dim in [empty_series, empty_frame, empty_panel]] + + # axis argument + df = tm.makeTimeDataFrame(nper=1).iloc[:, :1] + assert df.shape == (1, 1) + tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0]) + tm.assert_series_equal(df.squeeze(axis='index'), df.iloc[0]) + tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0]) + tm.assert_series_equal(df.squeeze(axis='columns'), df.iloc[:, 0]) + assert df.squeeze() == df.iloc[0, 0] + pytest.raises(ValueError, df.squeeze, axis=2) + pytest.raises(ValueError, df.squeeze, axis='x') + + df = tm.makeTimeDataFrame(3) + tm.assert_frame_equal(df.squeeze(axis=0), df) + + def test_numpy_squeeze(self): + s = tm.makeFloatSeries() + tm.assert_series_equal(np.squeeze(s), s) + + df = tm.makeTimeDataFrame().reindex(columns=['A']) + tm.assert_series_equal(np.squeeze(df), df['A']) + + def test_transpose(self): + msg = (r"transpose\(\) got multiple values for " + r"keyword argument 'axes'") + for s in [tm.makeFloatSeries(), tm.makeStringSeries(), + tm.makeObjectSeries()]: + # calls implementation in pandas/core/base.py + tm.assert_series_equal(s.transpose(), s) + for df in [tm.makeTimeDataFrame()]: + tm.assert_frame_equal(df.transpose().transpose(), df) + + with catch_warnings(record=True): + for p in [tm.makePanel()]: + tm.assert_panel_equal(p.transpose(2, 0, 1) + .transpose(1, 2, 0), p) + tm.assert_raises_regex(TypeError, msg, p.transpose, + 2, 0, 1, axes=(2, 0, 1)) + + with catch_warnings(record=True): + for p4d in 
[tm.makePanel4D()]: + tm.assert_panel4d_equal(p4d.transpose(2, 0, 3, 1) + .transpose(1, 3, 0, 2), p4d) + tm.assert_raises_regex(TypeError, msg, p4d.transpose, + 2, 0, 3, 1, axes=(2, 0, 3, 1)) + + def test_numpy_transpose(self): + msg = "the 'axes' parameter is not supported" + + s = tm.makeFloatSeries() + tm.assert_series_equal( + np.transpose(s), s) + tm.assert_raises_regex(ValueError, msg, + np.transpose, s, axes=1) + + df = tm.makeTimeDataFrame() + tm.assert_frame_equal(np.transpose( + np.transpose(df)), df) + tm.assert_raises_regex(ValueError, msg, + np.transpose, df, axes=1) + + with catch_warnings(record=True): + p = tm.makePanel() + tm.assert_panel_equal(np.transpose( + np.transpose(p, axes=(2, 0, 1)), + axes=(1, 2, 0)), p) + + with catch_warnings(record=True): + p4d = tm.makePanel4D() + tm.assert_panel4d_equal(np.transpose( + np.transpose(p4d, axes=(2, 0, 3, 1)), + axes=(1, 3, 0, 2)), p4d) + + def test_take(self): + indices = [1, 5, -2, 6, 3, -1] + for s in [tm.makeFloatSeries(), tm.makeStringSeries(), + tm.makeObjectSeries()]: + out = s.take(indices) + expected = Series(data=s.values.take(indices), + index=s.index.take(indices), dtype=s.dtype) + tm.assert_series_equal(out, expected) + for df in [tm.makeTimeDataFrame()]: + out = df.take(indices) + expected = DataFrame(data=df.values.take(indices, axis=0), + index=df.index.take(indices), + columns=df.columns) + tm.assert_frame_equal(out, expected) + + indices = [-3, 2, 0, 1] + with catch_warnings(record=True): + for p in [tm.makePanel()]: + out = p.take(indices) + expected = Panel(data=p.values.take(indices, axis=0), + items=p.items.take(indices), + major_axis=p.major_axis, + minor_axis=p.minor_axis) + tm.assert_panel_equal(out, expected) + + with catch_warnings(record=True): + for p4d in [tm.makePanel4D()]: + out = p4d.take(indices) + expected = Panel4D(data=p4d.values.take(indices, axis=0), + labels=p4d.labels.take(indices), + major_axis=p4d.major_axis, + minor_axis=p4d.minor_axis, + items=p4d.items) + 
tm.assert_panel4d_equal(out, expected) + + def test_take_invalid_kwargs(self): + indices = [-3, 2, 0, 1] + s = tm.makeFloatSeries() + df = tm.makeTimeDataFrame() + + with catch_warnings(record=True): + p = tm.makePanel() + p4d = tm.makePanel4D() + + for obj in (s, df, p, p4d): + msg = r"take\(\) got an unexpected keyword argument 'foo'" + tm.assert_raises_regex(TypeError, msg, obj.take, + indices, foo=2) + + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, obj.take, + indices, out=indices) + + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, obj.take, + indices, mode='clip') + + def test_equals(self): + s1 = pd.Series([1, 2, 3], index=[0, 2, 1]) + s2 = s1.copy() + assert s1.equals(s2) + + s1[1] = 99 + assert not s1.equals(s2) + + # NaNs compare as equal + s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3]) + s2 = s1.copy() + assert s1.equals(s2) + + s2[0] = 9.9 + assert not s1.equals(s2) + + idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')]) + s1 = Series([1, 2, np.nan], index=idx) + s2 = s1.copy() + assert s1.equals(s2) + + # Add object dtype column with nans + index = np.random.random(10) + df1 = DataFrame( + np.random.random(10, ), index=index, columns=['floats']) + df1['text'] = 'the sky is so blue. 
we could use more chocolate.'.split( + ) + df1['start'] = date_range('2000-1-1', periods=10, freq='T') + df1['end'] = date_range('2000-1-1', periods=10, freq='D') + df1['diff'] = df1['end'] - df1['start'] + df1['bool'] = (np.arange(10) % 3 == 0) + df1.loc[::2] = np.nan + df2 = df1.copy() + assert df1['text'].equals(df2['text']) + assert df1['start'].equals(df2['start']) + assert df1['end'].equals(df2['end']) + assert df1['diff'].equals(df2['diff']) + assert df1['bool'].equals(df2['bool']) + assert df1.equals(df2) + assert not df1.equals(object) + + # different dtype + different = df1.copy() + different['floats'] = different['floats'].astype('float32') + assert not df1.equals(different) + + # different index + different_index = -index + different = df2.set_index(different_index) + assert not df1.equals(different) + + # different columns + different = df2.copy() + different.columns = df2.columns[::-1] + assert not df1.equals(different) + + # DatetimeIndex + index = pd.date_range('2000-1-1', periods=10, freq='T') + df1 = df1.set_index(index) + df2 = df1.copy() + assert df1.equals(df2) + + # MultiIndex + df3 = df1.set_index(['text'], append=True) + df2 = df1.set_index(['text'], append=True) + assert df3.equals(df2) + + df2 = df1.set_index(['floats'], append=True) + assert not df3.equals(df2) + + # NaN in index + df3 = df1.set_index(['floats'], append=True) + df2 = df1.set_index(['floats'], append=True) + assert df3.equals(df2) + + # GH 8437 + a = pd.Series([False, np.nan]) + b = pd.Series([False, np.nan]) + c = pd.Series(index=range(2)) + d = pd.Series(index=range(2)) + e = pd.Series(index=range(2)) + f = pd.Series(index=range(2)) + c[:-1] = d[:-1] = e[0] = f[0] = False + assert a.equals(a) + assert a.equals(b) + assert a.equals(c) + assert a.equals(d) + assert a.equals(e) + assert e.equals(f) + + def test_describe_raises(self): + with catch_warnings(record=True): + with pytest.raises(NotImplementedError): + tm.makePanel().describe() + + def test_pipe(self): + df = 
DataFrame({'A': [1, 2, 3]}) + f = lambda x, y: x ** y + result = df.pipe(f, 2) + expected = DataFrame({'A': [1, 4, 9]}) + assert_frame_equal(result, expected) + + result = df.A.pipe(f, 2) + assert_series_equal(result, expected.A) + + def test_pipe_tuple(self): + df = DataFrame({'A': [1, 2, 3]}) + f = lambda x, y: y + result = df.pipe((f, 'y'), 0) + assert_frame_equal(result, df) + + result = df.A.pipe((f, 'y'), 0) + assert_series_equal(result, df.A) + + def test_pipe_tuple_error(self): + df = DataFrame({"A": [1, 2, 3]}) + f = lambda x, y: y + with pytest.raises(ValueError): + df.pipe((f, 'y'), x=1, y=0) + + with pytest.raises(ValueError): + df.A.pipe((f, 'y'), x=1, y=0) + + def test_pipe_panel(self): + with catch_warnings(record=True): + wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})}) + f = lambda x, y: x + y + result = wp.pipe(f, 2) + expected = wp + 2 + assert_panel_equal(result, expected) + + result = wp.pipe((f, 'y'), x=1) + expected = wp + 1 + assert_panel_equal(result, expected) + + with pytest.raises(ValueError): + result = wp.pipe((f, 'y'), x=1, y=1) diff --git a/pandas/tests/generic/test_panel.py b/pandas/tests/generic/test_panel.py new file mode 100644 index 0000000000000..b1d9af9c8b0af --- /dev/null +++ b/pandas/tests/generic/test_panel.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# pylint: disable-msg=E1101,W0612 + +from warnings import catch_warnings + +import pytest + +from pandas import Panel, Panel4D +from pandas.util.testing import (assert_panel_equal, + assert_panel4d_equal, + assert_almost_equal) + +import pandas.util.testing as tm +from .test_generic import Generic + + +class TestPanel(Generic): + _typ = Panel + _comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True) + + def test_to_xarray(self): + + tm._skip_if_no_xarray() + from xarray import DataArray + + with catch_warnings(record=True): + p = tm.makePanel() + + result = p.to_xarray() + assert isinstance(result, DataArray) + assert len(result.coords) == 3 + 
assert_almost_equal(list(result.coords.keys()), + ['items', 'major_axis', 'minor_axis']) + assert len(result.dims) == 3 + + # idempotency + assert_panel_equal(result.to_pandas(), p) + + +class TestPanel4D(Generic): + _typ = Panel4D + _comparator = lambda self, x, y: assert_panel4d_equal(x, y, by_blocks=True) + + def test_sample(self): + pytest.skip("sample on Panel4D") + + def test_to_xarray(self): + + tm._skip_if_no_xarray() + from xarray import DataArray + + with catch_warnings(record=True): + p = tm.makePanel4D() + + result = p.to_xarray() + assert isinstance(result, DataArray) + assert len(result.coords) == 4 + assert_almost_equal(list(result.coords.keys()), + ['labels', 'items', 'major_axis', + 'minor_axis']) + assert len(result.dims) == 4 + + # non-convertible + pytest.raises(ValueError, lambda: result.to_pandas()) + + +# run all the tests, but wrap each in a warning catcher +for t in ['test_rename', 'test_get_numeric_data', + 'test_get_default', 'test_nonzero', + 'test_downcast', 'test_constructor_compound_dtypes', + 'test_head_tail', + 'test_size_compat', 'test_split_compat', + 'test_unexpected_keyword', + 'test_stat_unexpected_keyword', 'test_api_compat', + 'test_stat_non_defaults_args', + 'test_truncate_out_of_bounds', + 'test_metadata_propagation', 'test_copy_and_deepcopy', + 'test_sample']: + + def f(): + def tester(self): + f = getattr(super(TestPanel, self), t) + with catch_warnings(record=True): + f() + return tester + + setattr(TestPanel, t, f()) + + def f(): + def tester(self): + f = getattr(super(TestPanel4D, self), t) + with catch_warnings(record=True): + f() + return tester + + setattr(TestPanel4D, t, f()) diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py new file mode 100644 index 0000000000000..4773ff69e0982 --- /dev/null +++ b/pandas/tests/generic/test_series.py @@ -0,0 +1,223 @@ +# -*- coding: utf-8 -*- +# pylint: disable-msg=E1101,W0612 + +from operator import methodcaller + +import pytest +import numpy 
as np +import pandas as pd + +from distutils.version import LooseVersion +from pandas import Series, date_range, MultiIndex + +from pandas.compat import range +from pandas.util.testing import (assert_series_equal, + assert_almost_equal) + +import pandas.util.testing as tm +from .test_generic import Generic + +try: + import xarray + _XARRAY_INSTALLED = True +except ImportError: + _XARRAY_INSTALLED = False + + +class TestSeries(Generic): + _typ = Series + _comparator = lambda self, x, y: assert_series_equal(x, y) + + def setup_method(self): + self.ts = tm.makeTimeSeries() # Was at top level in test_series + self.ts.name = 'ts' + + self.series = tm.makeStringSeries() + self.series.name = 'series' + + def test_rename_mi(self): + s = Series([11, 21, 31], + index=MultiIndex.from_tuples( + [("A", x) for x in ["a", "B", "c"]])) + s.rename(str.lower) + + def test_set_axis_name(self): + s = Series([1, 2, 3], index=['a', 'b', 'c']) + funcs = ['rename_axis', '_set_axis_name'] + name = 'foo' + for func in funcs: + result = methodcaller(func, name)(s) + assert s.index.name is None + assert result.index.name == name + + def test_set_axis_name_mi(self): + s = Series([11, 21, 31], index=MultiIndex.from_tuples( + [("A", x) for x in ["a", "B", "c"]], + names=['l1', 'l2']) + ) + funcs = ['rename_axis', '_set_axis_name'] + for func in funcs: + result = methodcaller(func, ['L1', 'L2'])(s) + assert s.index.name is None + assert s.index.names == ['l1', 'l2'] + assert result.index.name is None + assert result.index.names, ['L1', 'L2'] + + def test_set_axis_name_raises(self): + s = pd.Series([1]) + with pytest.raises(ValueError): + s._set_axis_name(name='a', axis=1) + + def test_get_numeric_data_preserve_dtype(self): + + # get the numeric data + o = Series([1, 2, 3]) + result = o._get_numeric_data() + self._compare(result, o) + + o = Series([1, '2', 3.]) + result = o._get_numeric_data() + expected = Series([], dtype=object, index=pd.Index([], dtype=object)) + self._compare(result, expected) 
+ + o = Series([True, False, True]) + result = o._get_numeric_data() + self._compare(result, o) + + o = Series([True, False, True]) + result = o._get_bool_data() + self._compare(result, o) + + o = Series(date_range('20130101', periods=3)) + result = o._get_numeric_data() + expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object)) + self._compare(result, expected) + + def test_nonzero_single_element(self): + + # allow single item via bool method + s = Series([True]) + assert s.bool() + + s = Series([False]) + assert not s.bool() + + # single item nan to raise + for s in [Series([np.nan]), Series([pd.NaT]), Series([True]), + Series([False])]: + pytest.raises(ValueError, lambda: bool(s)) + + for s in [Series([np.nan]), Series([pd.NaT])]: + pytest.raises(ValueError, lambda: s.bool()) + + # multiple bool are still an error + for s in [Series([True, True]), Series([False, False])]: + pytest.raises(ValueError, lambda: bool(s)) + pytest.raises(ValueError, lambda: s.bool()) + + # single non-bool are an error + for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]: + pytest.raises(ValueError, lambda: bool(s)) + pytest.raises(ValueError, lambda: s.bool()) + + def test_metadata_propagation_indiv(self): + # check that the metadata matches up on the resulting ops + + o = Series(range(3), range(3)) + o.name = 'foo' + o2 = Series(range(3), range(3)) + o2.name = 'bar' + + result = o.T + self.check_metadata(o, result) + + # resample + ts = Series(np.random.rand(1000), + index=date_range('20130101', periods=1000, freq='s'), + name='foo') + result = ts.resample('1T').mean() + self.check_metadata(ts, result) + + result = ts.resample('1T').min() + self.check_metadata(ts, result) + + result = ts.resample('1T').apply(lambda x: x.sum()) + self.check_metadata(ts, result) + + _metadata = Series._metadata + _finalize = Series.__finalize__ + Series._metadata = ['name', 'filename'] + o.filename = 'foo' + o2.filename = 'bar' + + def finalize(self, other, method=None, 
**kwargs): + for name in self._metadata: + if method == 'concat' and name == 'filename': + value = '+'.join([getattr( + o, name) for o in other.objs if getattr(o, name, None) + ]) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, None)) + + return self + + Series.__finalize__ = finalize + + result = pd.concat([o, o2]) + assert result.filename == 'foo+bar' + assert result.name is None + + # reset + Series._metadata = _metadata + Series.__finalize__ = _finalize + + @pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and + LooseVersion(xarray.__version__) < '0.10.0', + reason='xarray >= 0.10.0 required') + @pytest.mark.parametrize( + "index", + ['FloatIndex', 'IntIndex', + 'StringIndex', 'UnicodeIndex', + 'DateIndex', 'PeriodIndex', + 'TimedeltaIndex', 'CategoricalIndex']) + def test_to_xarray_index_types(self, index): + from xarray import DataArray + + index = getattr(tm, 'make{}'.format(index)) + s = Series(range(6), index=index(6)) + s.index.name = 'foo' + result = s.to_xarray() + repr(result) + assert len(result) == 6 + assert len(result.coords) == 1 + assert_almost_equal(list(result.coords.keys()), ['foo']) + assert isinstance(result, DataArray) + + # idempotency + assert_series_equal(result.to_series(), s, + check_index_type=False, + check_categorical=True) + + def test_to_xarray(self): + + tm._skip_if_no_xarray() + from xarray import DataArray + + s = Series([]) + s.index.name = 'foo' + result = s.to_xarray() + assert len(result) == 0 + assert len(result.coords) == 1 + assert_almost_equal(list(result.coords.keys()), ['foo']) + assert isinstance(result, DataArray) + + s = Series(range(6)) + s.index.name = 'foo' + s.index = pd.MultiIndex.from_product([['a', 'b'], range(3)], + names=['one', 'two']) + result = s.to_xarray() + assert len(result) == 2 + assert_almost_equal(list(result.coords.keys()), ['one', 'two']) + assert isinstance(result, DataArray) + assert_series_equal(result.to_series(), s) diff 
--git a/pandas/util/testing.py b/pandas/util/testing.py index dec67bbea854f..a13ecef5dd1bf 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -526,7 +526,7 @@ def get_locales(prefix=None, normalize=True, """ try: raw_locales = locale_getter() - except: + except Exception: return None try: @@ -757,7 +757,7 @@ def set_trace(): from IPython.core.debugger import Pdb try: Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back) - except: + except Exception: from pdb import Pdb as OldPdb OldPdb().set_trace(sys._getframe().f_back) @@ -1265,9 +1265,9 @@ def assert_series_equal(left, right, check_dtype=True, check_dtype=check_dtype) elif is_interval_dtype(left) or is_interval_dtype(right): # TODO: big hack here - l = pd.IntervalIndex(left) - r = pd.IntervalIndex(right) - assert_index_equal(l, r, obj='{obj}.index'.format(obj=obj)) + left = pd.IntervalIndex(left) + right = pd.IntervalIndex(right) + assert_index_equal(left, right, obj='{obj}.index'.format(obj=obj)) else: _testing.assert_almost_equal(left.get_values(), right.get_values(), @@ -1439,8 +1439,8 @@ def assert_panelnd_equal(left, right, assert_index_equal(left_ind, right_ind, check_names=check_names) if by_blocks: - rblocks = right.blocks - lblocks = left.blocks + rblocks = right._to_dict_of_blocks() + lblocks = left._to_dict_of_blocks() for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): assert dtype in lblocks assert dtype in rblocks @@ -2345,7 +2345,7 @@ def wrapper(*args, **kwargs): try: e_str = traceback.format_exc(e) - except: + except Exception: e_str = str(e) if any([m.lower() in e_str.lower() for m in _skip_on_messages]): @@ -2582,7 +2582,7 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always", for m in clear: try: m.__warningregistry__.clear() - except: + except Exception: pass saw_warning = False @@ -2849,7 +2849,7 @@ def setTZ(tz): if tz is None: try: del os.environ['TZ'] - except: + except KeyError: pass else: os.environ['TZ'] = tz diff --git 
a/setup.py b/setup.py index bd7c8f175607c..5531256387e49 100755 --- a/setup.py +++ b/setup.py @@ -740,6 +740,7 @@ def pxd(name): 'pandas.tests.computation', 'pandas.tests.sparse', 'pandas.tests.frame', + 'pandas.tests.generic', 'pandas.tests.indexing', 'pandas.tests.indexes', 'pandas.tests.indexes.datetimes',
https://api.github.com/repos/pandas-dev/pandas/pulls/18114
2017-11-04T20:52:47Z
2017-11-07T19:22:07Z
2017-11-07T19:22:07Z
2017-11-07T19:22:08Z
explicitly set 'include' to numpy_incls
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index db2e8b43d1ead..c4104b66e009f 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -10,7 +10,7 @@ np.import_array() from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, - uint32_t, uint64_t, float16_t, float32_t, float64_t) + uint32_t, uint64_t, float32_t, float64_t) cdef double NaN = <double> np.NaN cdef double nan = NaN diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index 5484cbda5bdf9..bbdd5f0d8334c 100644 --- a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -1,5 +1,5 @@ from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t, - float64_t, float32_t, float16_t) + float64_t, float32_t) cimport numpy as np cimport cython diff --git a/setup.py b/setup.py index c3e0c037625da..80d4fb4a0a067 100755 --- a/setup.py +++ b/setup.py @@ -461,6 +461,13 @@ def pxd(name): return os.path.abspath(pjoin('pandas', name + '.pxd')) +if _have_setuptools: + # Note: this is a list, whereas `numpy_incl` in build_ext.build_extensions + # is a string + numpy_incls = [pkg_resources.resource_filename('numpy', 'core/include')] +else: + numpy_incls = [] + # args to ignore warnings if is_platform_windows(): extra_compile_args = [] @@ -503,7 +510,8 @@ def pxd(name): 'depends': _pxi_dep['index'], 'sources': np_datetime_sources}, '_libs.indexing': { - 'pyxfile': '_libs/indexing'}, + 'pyxfile': '_libs/indexing', + 'include': []}, '_libs.interval': { 'pyxfile': '_libs/interval', 'pxdfiles': ['_libs/hashtable'], @@ -536,10 +544,12 @@ def pxd(name): 'include': []}, '_libs.reshape': { 'pyxfile': '_libs/reshape', - 'depends': _pxi_dep['reshape']}, + 'depends': _pxi_dep['reshape'], + 'include': numpy_incls}, '_libs.sparse': { 'pyxfile': '_libs/sparse', - 'depends': _pxi_dep['sparse']}, + 'depends': _pxi_dep['sparse'], + 'include': numpy_incls}, '_libs.tslib': { 'pyxfile': '_libs/tslib', 'pxdfiles': ['_libs/src/util', @@ -579,8 +589,7 @@ def 
pxd(name): '_libs/tslibs/frequencies']}, '_libs.tslibs.parsing': { 'pyxfile': '_libs/tslibs/parsing', - 'pxdfiles': ['_libs/src/util', - '_libs/src/khash']}, + 'include': numpy_incls}, '_libs.tslibs.resolution': { 'pyxfile': '_libs/tslibs/resolution', 'pxdfiles': ['_libs/src/util', @@ -604,14 +613,16 @@ def pxd(name): 'pyxfile': '_libs/tslibs/timezones', 'pxdfiles': ['_libs/src/util']}, '_libs.testing': { - 'pyxfile': '_libs/testing'}, + 'pyxfile': '_libs/testing', + 'include': []}, '_libs.window': { 'pyxfile': '_libs/window', 'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'], 'depends': ['pandas/_libs/src/skiplist.pyx', 'pandas/_libs/src/skiplist.h']}, 'io.sas._sas': { - 'pyxfile': 'io/sas/sas'}} + 'pyxfile': 'io/sas/sas', + 'include': numpy_incls}} extensions = []
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18112
2017-11-04T19:49:03Z
2017-11-16T00:25:59Z
2017-11-16T00:25:59Z
2017-12-08T19:38:43Z
BUG: Override mi-columns in to_csv if requested
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 69379ac2fc58c..6dc329c4aa732 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -79,6 +79,7 @@ I/O - Bug in class:`~pandas.io.stata.StataReader` not converting date/time columns with display formatting addressed (:issue:`17990`). Previously columns with display formatting were normally left as ordinal numbers and not converted to datetime objects. - Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`) +- Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) Plotting ^^^^^^^^ diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index c5d4a0ecf44ab..ab98b9c4e4f49 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1695,7 +1695,7 @@ def _save_header(self): else: encoded_labels = [] - if not has_mi_columns: + if not has_mi_columns or has_aliases: encoded_labels += list(write_cols) writer.writerow(encoded_labels) else: diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 4162a586f8063..ca8a0d8bda3ab 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -1203,3 +1203,16 @@ def test_period_index_date_overflow(self): expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n' assert result == expected + + def test_multi_index_header(self): + # see gh-5539 + columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), + ("b", 1), ("b", 2)]) + df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + df.columns = columns + + header = ["a", "b", "c", "d"] + result = df.to_csv(header=header) + + expected = ",a,b,c,d\n0,1,2,3,4\n1,5,6,7,8\n" + assert result == expected
Previously, `MultiIndex` columns weren't being overwritten when `header` was passed in for `to_csv`. Closes #5539 (almost four years to the day, wow...)
https://api.github.com/repos/pandas-dev/pandas/pulls/18110
2017-11-04T19:02:19Z
2017-11-05T04:36:52Z
2017-11-05T04:36:52Z
2017-11-05T04:37:22Z
BLD: Make sure to copy ZIP files for parser tests
diff --git a/setup.py b/setup.py index 684f32d1e7898..3464169e8d8d1 100755 --- a/setup.py +++ b/setup.py @@ -758,6 +758,7 @@ def pxd(name): 'parser/data/*.bz2', 'parser/data/*.txt', 'parser/data/*.tar', + 'parser/data/*.zip', 'parser/data/*.tar.gz', 'sas/data/*.csv', 'sas/data/*.xpt',
The ZIP file wasn't listed in `setup.py` from #18091. Will merge on green.
https://api.github.com/repos/pandas-dev/pandas/pulls/18108
2017-11-04T18:32:21Z
2017-11-04T20:29:46Z
2017-11-04T20:29:46Z
2017-12-11T20:24:54Z
Follow-Up to #18104
diff --git a/setup.py b/setup.py index 684f32d1e7898..1da0b1eb82745 100755 --- a/setup.py +++ b/setup.py @@ -501,7 +501,11 @@ def pxd(name): 'depends': _pxi_dep['index'], 'sources': np_datetime_sources}, '_libs.indexing': { - 'pyxfile': '_libs/indexing'}, + 'pyxfile': '_libs/indexing', + 'pxdfiles': [], + 'depends': [], + 'sources': [], + 'include': []}, '_libs.interval': { 'pyxfile': '_libs/interval', 'pxdfiles': ['_libs/hashtable'], @@ -526,6 +530,9 @@ def pxd(name): 'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']}, '_libs.properties': { 'pyxfile': '_libs/properties', + 'pxdfiles': [], + 'depends': [], + 'sources': [], 'include': []}, '_libs.reshape': { 'pyxfile': '_libs/reshape', @@ -570,14 +577,21 @@ def pxd(name): '_libs.tslibs.timezones': { 'pyxfile': '_libs/tslibs/timezones'}, '_libs.testing': { - 'pyxfile': '_libs/testing'}, + 'pyxfile': '_libs/testing', + 'pxdfiles': [], + 'depends': [], + 'sources': [], + 'include': []}, '_libs.window': { 'pyxfile': '_libs/window', 'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'], 'depends': ['pandas/_libs/src/skiplist.pyx', 'pandas/_libs/src/skiplist.h']}, 'io.sas._sas': { - 'pyxfile': 'io/sas/sas'}} + 'pyxfile': 'io/sas/sas', + 'pxdfiles': [], + 'depends': [], + 'sources': []}} extensions = []
Adds empty lists where relevant. 1 more after this.
https://api.github.com/repos/pandas-dev/pandas/pulls/18106
2017-11-04T18:24:25Z
2017-11-04T19:17:52Z
null
2017-12-08T19:40:23Z
Revert "CI: temp disable scipy on windows 3.6 build (#18078)"
diff --git a/appveyor.yml b/appveyor.yml index a1f8886f6d068..44af73b498aa8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -22,7 +22,7 @@ environment: PYTHON_VERSION: "3.6" PYTHON_ARCH: "64" CONDA_PY: "36" - CONDA_NPY: "112" + CONDA_NPY: "113" - CONDA_ROOT: "C:\\Miniconda3_64" PYTHON_VERSION: "2.7" diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index 5d6c074ec1f85..db2d429a2a4ff 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -1,12 +1,12 @@ python-dateutil pytz -numpy=1.12* +numpy=1.13* bottleneck openpyxl xlsxwriter xlrd xlwt -# scipy +scipy feather-format numexpr pytables
This reverts commit cd6dc87466e119aabb76d8439df8289d082ea948. closes #18073
https://api.github.com/repos/pandas-dev/pandas/pulls/18105
2017-11-04T18:19:36Z
2017-11-04T21:14:11Z
2017-11-04T21:14:10Z
2017-11-04T21:22:41Z
CLN: Standardize indentation and alphabetize deps
diff --git a/setup.py b/setup.py index 783ded906eba2..684f32d1e7898 100755 --- a/setup.py +++ b/setup.py @@ -480,78 +480,104 @@ def pxd(name): libraries = ['m'] if not is_platform_windows() else [] ext_data = { - '_libs.lib': {'pyxfile': '_libs/lib', - 'depends': lib_depends + tseries_depends}, - '_libs.properties': {'pyxfile': '_libs/properties', 'include': []}, - '_libs.hashtable': {'pyxfile': '_libs/hashtable', - 'pxdfiles': ['_libs/hashtable'], - 'depends': (['pandas/_libs/src/klib/khash_python.h'] + - _pxi_dep['hashtable'])}, - '_libs.tslibs.strptime': {'pyxfile': '_libs/tslibs/strptime', - 'depends': tseries_depends, - 'sources': np_datetime_sources}, - '_libs.tslibs.offsets': {'pyxfile': '_libs/tslibs/offsets'}, - '_libs.tslib': {'pyxfile': '_libs/tslib', - 'pxdfiles': ['_libs/src/util'], - 'depends': tseries_depends, - 'sources': np_datetime_sources}, - '_libs.tslibs.conversion': {'pyxfile': '_libs/tslibs/conversion', - 'depends': tseries_depends, - 'sources': np_datetime_sources}, - '_libs.tslibs.np_datetime': {'pyxfile': '_libs/tslibs/np_datetime', - 'depends': np_datetime_headers, - 'sources': np_datetime_sources}, - '_libs.tslibs.timedeltas': {'pyxfile': '_libs/tslibs/timedeltas'}, - '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'}, - '_libs.tslibs.fields': {'pyxfile': '_libs/tslibs/fields', - 'depends': tseries_depends, - 'sources': np_datetime_sources}, - '_libs.period': {'pyxfile': '_libs/period', - 'depends': (tseries_depends + - ['pandas/_libs/src/period_helper.h']), - 'sources': np_datetime_sources + [ - 'pandas/_libs/src/period_helper.c']}, - '_libs.tslibs.parsing': {'pyxfile': '_libs/tslibs/parsing', - 'pxdfiles': ['_libs/src/util']}, - '_libs.tslibs.frequencies': {'pyxfile': '_libs/tslibs/frequencies', - 'pxdfiles': ['_libs/src/util']}, - '_libs.tslibs.nattype': {'pyxfile': '_libs/tslibs/nattype', - 'pxdfiles': ['_libs/src/util']}, - '_libs.index': {'pyxfile': '_libs/index', - 'sources': np_datetime_sources, - 'pxdfiles': 
['_libs/src/util', '_libs/hashtable'], - 'depends': _pxi_dep['index']}, - '_libs.algos': {'pyxfile': '_libs/algos', - 'pxdfiles': ['_libs/src/util', - '_libs/algos', '_libs/hashtable'], - 'depends': _pxi_dep['algos']}, - '_libs.groupby': {'pyxfile': '_libs/groupby', - 'pxdfiles': ['_libs/src/util', '_libs/algos'], - 'depends': _pxi_dep['groupby']}, - '_libs.join': {'pyxfile': '_libs/join', - 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], - 'depends': _pxi_dep['join']}, - '_libs.reshape': {'pyxfile': '_libs/reshape', - 'depends': _pxi_dep['reshape']}, - '_libs.indexing': {'pyxfile': '_libs/indexing'}, - '_libs.interval': {'pyxfile': '_libs/interval', - 'pxdfiles': ['_libs/hashtable'], - 'depends': _pxi_dep['interval']}, - '_libs.window': {'pyxfile': '_libs/window', - 'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'], - 'depends': ['pandas/_libs/src/skiplist.pyx', - 'pandas/_libs/src/skiplist.h']}, - '_libs.parsers': {'pyxfile': '_libs/parsers', - 'depends': ['pandas/_libs/src/parser/tokenizer.h', - 'pandas/_libs/src/parser/io.h', - 'pandas/_libs/src/numpy_helper.h'], - 'sources': ['pandas/_libs/src/parser/tokenizer.c', - 'pandas/_libs/src/parser/io.c']}, - '_libs.sparse': {'pyxfile': '_libs/sparse', - 'depends': _pxi_dep['sparse']}, - '_libs.testing': {'pyxfile': '_libs/testing'}, - '_libs.hashing': {'pyxfile': '_libs/hashing'}, - 'io.sas._sas': {'pyxfile': 'io/sas/sas'}} + '_libs.algos': { + 'pyxfile': '_libs/algos', + 'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'], + 'depends': _pxi_dep['algos']}, + '_libs.groupby': { + 'pyxfile': '_libs/groupby', + 'pxdfiles': ['_libs/src/util', '_libs/algos'], + 'depends': _pxi_dep['groupby']}, + '_libs.hashing': { + 'pyxfile': '_libs/hashing'}, + '_libs.hashtable': { + 'pyxfile': '_libs/hashtable', + 'pxdfiles': ['_libs/hashtable'], + 'depends': (['pandas/_libs/src/klib/khash_python.h'] + + _pxi_dep['hashtable'])}, + '_libs.index': { + 'pyxfile': '_libs/index', + 'pxdfiles': ['_libs/src/util', 
'_libs/hashtable'], + 'depends': _pxi_dep['index'], + 'sources': np_datetime_sources}, + '_libs.indexing': { + 'pyxfile': '_libs/indexing'}, + '_libs.interval': { + 'pyxfile': '_libs/interval', + 'pxdfiles': ['_libs/hashtable'], + 'depends': _pxi_dep['interval']}, + '_libs.join': { + 'pyxfile': '_libs/join', + 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], + 'depends': _pxi_dep['join']}, + '_libs.lib': { + 'pyxfile': '_libs/lib', + 'depends': lib_depends + tseries_depends}, + '_libs.parsers': { + 'pyxfile': '_libs/parsers', + 'depends': ['pandas/_libs/src/parser/tokenizer.h', + 'pandas/_libs/src/parser/io.h', + 'pandas/_libs/src/numpy_helper.h'], + 'sources': ['pandas/_libs/src/parser/tokenizer.c', + 'pandas/_libs/src/parser/io.c']}, + '_libs.period': { + 'pyxfile': '_libs/period', + 'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'], + 'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']}, + '_libs.properties': { + 'pyxfile': '_libs/properties', + 'include': []}, + '_libs.reshape': { + 'pyxfile': '_libs/reshape', + 'depends': _pxi_dep['reshape']}, + '_libs.sparse': { + 'pyxfile': '_libs/sparse', + 'depends': _pxi_dep['sparse']}, + '_libs.tslib': { + 'pyxfile': '_libs/tslib', + 'pxdfiles': ['_libs/src/util'], + 'depends': tseries_depends, + 'sources': np_datetime_sources}, + '_libs.tslibs.conversion': { + 'pyxfile': '_libs/tslibs/conversion', + 'depends': tseries_depends, + 'sources': np_datetime_sources}, + '_libs.tslibs.fields': { + 'pyxfile': '_libs/tslibs/fields', + 'depends': tseries_depends, + 'sources': np_datetime_sources}, + '_libs.tslibs.frequencies': { + 'pyxfile': '_libs/tslibs/frequencies', + 'pxdfiles': ['_libs/src/util']}, + '_libs.tslibs.nattype': { + 'pyxfile': '_libs/tslibs/nattype', + 'pxdfiles': ['_libs/src/util']}, + '_libs.tslibs.np_datetime': { + 'pyxfile': '_libs/tslibs/np_datetime', + 'depends': np_datetime_headers, + 'sources': np_datetime_sources}, + '_libs.tslibs.offsets': { + 'pyxfile': 
'_libs/tslibs/offsets'}, + '_libs.tslibs.parsing': { + 'pyxfile': '_libs/tslibs/parsing', + 'pxdfiles': ['_libs/src/util']}, + '_libs.tslibs.strptime': { + 'pyxfile': '_libs/tslibs/strptime', + 'depends': tseries_depends, + 'sources': np_datetime_sources}, + '_libs.tslibs.timedeltas': { + 'pyxfile': '_libs/tslibs/timedeltas'}, + '_libs.tslibs.timezones': { + 'pyxfile': '_libs/tslibs/timezones'}, + '_libs.testing': { + 'pyxfile': '_libs/testing'}, + '_libs.window': { + 'pyxfile': '_libs/window', + 'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'], + 'depends': ['pandas/_libs/src/skiplist.pyx', + 'pandas/_libs/src/skiplist.h']}, + 'io.sas._sas': { + 'pyxfile': 'io/sas/sas'}} extensions = []
I can't fix a problem that makes zero sense to me (#18089), but in the interests of being a team player, I can try to contribute around the periphery. This just takes the `ext_data` dict and a) standardizes the indentation, b) arranges the entries in alphabetical order. It is straightforward to check that the version of the dict here `==` the version in master. The main reason to do this is so that the diff in the next step is easy to review. The next step is to go through and identify pyx files that don't cimport _anything_ e.g. `_libs.testing` and fill out their `ext_data` entries with the appropriate `[]`s (note that the processing of `ext_data` inserts default values for missing keys, so adding `[]` entries _is_ part of the specification). There are a few modules that meet that description, then a handful of others that only have cimports from numpy (e.g. io.sas._sas). Those entries can be fully filled out independently of any other outstanding issues.
https://api.github.com/repos/pandas-dev/pandas/pulls/18104
2017-11-04T17:32:07Z
2017-11-04T18:10:46Z
2017-11-04T18:10:46Z
2017-11-04T18:26:38Z
BLD: Clean up dependencies for Cython files
diff --git a/setup.py b/setup.py index 783ded906eba2..1f35e26de2c68 100755 --- a/setup.py +++ b/setup.py @@ -466,6 +466,11 @@ def pxd(name): else: extra_compile_args = ['-Wno-unused-function'] +util_and_deps = ['pandas/_libs/src/util.pxd', + 'pandas/_libs/src/numpy_helper.h', + 'pandas/_libs/src/helper.h', + 'pandas/_libs/src/headers/stdint.h'] + lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h', 'pandas/_libs/src/parse_helper.h', 'pandas/_libs/src/compat_helper.h'] @@ -481,42 +486,79 @@ def pxd(name): ext_data = { '_libs.lib': {'pyxfile': '_libs/lib', - 'depends': lib_depends + tseries_depends}, + 'depends': (lib_depends + tseries_depends + + ['pandas/_libs/tslib.pyx', + 'pandas/_libs/tslib.pxd', + 'pandas/_libs/tslibs/timezones.pyx', + 'pandas/_libs/tslibs/timezones.pxd'])}, '_libs.properties': {'pyxfile': '_libs/properties', 'include': []}, '_libs.hashtable': {'pyxfile': '_libs/hashtable', 'pxdfiles': ['_libs/hashtable'], 'depends': (['pandas/_libs/src/klib/khash_python.h'] + _pxi_dep['hashtable'])}, - '_libs.tslibs.strptime': {'pyxfile': '_libs/tslibs/strptime', - 'depends': tseries_depends, - 'sources': np_datetime_sources}, - '_libs.tslibs.offsets': {'pyxfile': '_libs/tslibs/offsets'}, - '_libs.tslib': {'pyxfile': '_libs/tslib', - 'pxdfiles': ['_libs/src/util'], - 'depends': tseries_depends, - 'sources': np_datetime_sources}, - '_libs.tslibs.conversion': {'pyxfile': '_libs/tslibs/conversion', - 'depends': tseries_depends, - 'sources': np_datetime_sources}, + '_libs.tslibs.strptime': { + 'pyxfile': '_libs/tslibs/strptime', + 'depends': (tseries_depends + util_and_deps + + ['pandas/_libs/tslibs/nattype.pyx', + 'pandas/_libs/tslibs/nattype.pxd', + 'pandas/_libs/tslibs/np_datetime.pyx', + 'pandas/_libs/tslibs/np_datetime.pxd'])}, + '_libs.tslibs.offsets': { + 'pyxfile': '_libs/tslibs/offsets', + 'depends': (util_and_deps + + ['pandas/_libs/tslibs/conversion.pyx', + 'pandas/_libs/tslibs/conversion.pxd'])}, + '_libs.tslib': { + 'pyxfile': 
'_libs/tslib', + 'pxdfiles': ['_libs/src/util'], + 'depends': (tseries_depends + util_and_deps + + ['pandas/_libs/tslibs/conversion.pyx', + 'pandas/_libs/tslibs/conversion.pxd', + 'pandas/_libs/tslibs/nattype.pyx', + 'pandas/_libs/tslibs/nattype.pxd', + 'pandas/_libs/tslibs/np_datetime.pyx', + 'pandas/_libs/tslibs/np_datetime.pxd', + 'pandas/_libs/tslibs/timedeltas.pyx', + 'pandas/_libs/tslibs/timedeltas.pxd', + 'pandas/_libs/tslibs/timezones.pyx', + 'pandas/_libs/tslibs/timezones.pxd']), + 'sources': np_datetime_sources}, + '_libs.tslibs.conversion': { + 'pyxfile': '_libs/tslibs/conversion', + 'depends': (tseries_depends + util_and_deps + + ['pandas/_libs/tslibs/np_datetime.pyx', + 'pandas/_libs/tslibs/np_datetime.pxd', + 'pandas/_libs/tslibs/timedeltas.pyx', + 'pandas/_libs/tslibs/timedeltas.pxd', + 'pandas/_libs/tslibs/timezones.pyx', + 'pandas/_libs/tslibs/timezones.pxd']), + 'sources': np_datetime_sources}, '_libs.tslibs.np_datetime': {'pyxfile': '_libs/tslibs/np_datetime', 'depends': np_datetime_headers, 'sources': np_datetime_sources}, - '_libs.tslibs.timedeltas': {'pyxfile': '_libs/tslibs/timedeltas'}, - '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'}, - '_libs.tslibs.fields': {'pyxfile': '_libs/tslibs/fields', - 'depends': tseries_depends, - 'sources': np_datetime_sources}, + '_libs.tslibs.timedeltas': {'pyxfile': '_libs/tslibs/timedeltas', + 'depends': util_and_deps}, + '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones', + 'depends': util_and_deps}, + '_libs.tslibs.fields': { + 'pyxfile': '_libs/tslibs/fields', + 'depends': (tseries_depends + util_and_deps + + ['pandas/_libs/tslibs/np_datetime.pyx', + 'pandas/_libs/tslibs/np_datetime.pxd']), + 'sources': np_datetime_sources}, '_libs.period': {'pyxfile': '_libs/period', 'depends': (tseries_depends + ['pandas/_libs/src/period_helper.h']), 'sources': np_datetime_sources + [ 'pandas/_libs/src/period_helper.c']}, '_libs.tslibs.parsing': {'pyxfile': '_libs/tslibs/parsing', - 
'pxdfiles': ['_libs/src/util']}, + 'depends': []}, '_libs.tslibs.frequencies': {'pyxfile': '_libs/tslibs/frequencies', - 'pxdfiles': ['_libs/src/util']}, + 'pxdfiles': ['_libs/src/util'], + 'depends': util_and_deps}, '_libs.tslibs.nattype': {'pyxfile': '_libs/tslibs/nattype', - 'pxdfiles': ['_libs/src/util']}, + 'pxdfiles': ['_libs/src/util'], + 'depends': util_and_deps}, '_libs.index': {'pyxfile': '_libs/index', 'sources': np_datetime_sources, 'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18102
2017-11-04T02:34:16Z
2017-11-04T18:34:35Z
null
2023-05-11T01:16:40Z
Remove out-of-date numpy.pxd; remove unused float16_t
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index e9ef9c4ffe24b..bb7f69f04b32d 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -20,7 +20,7 @@ from numpy cimport (ndarray, NPY_FLOAT32, NPY_FLOAT64, NPY_OBJECT, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, - uint32_t, uint64_t, float16_t, float32_t, float64_t, + uint32_t, uint64_t, float32_t, float64_t, double_t) diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 2fbbc81c4b5a1..6df6e241a99c8 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -10,7 +10,7 @@ cnp.import_array() from numpy cimport (ndarray, double_t, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, - uint32_t, uint64_t, float16_t, float32_t, float64_t) + uint32_t, uint64_t, float32_t, float64_t) from libc.stdlib cimport malloc, free diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 78eb7b3ae483e..fe1c01054b596 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -1,7 +1,6 @@ # cython: profile=False -from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t, - NPY_DATETIME, NPY_TIMEDELTA) +from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t) cimport cython cimport numpy as cnp @@ -17,6 +16,7 @@ from tslib cimport _to_i8 from hashtable cimport HashTable +from tslibs.np_datetime cimport NPY_DATETIME, NPY_TIMEDELTA from pandas._libs import algos, period as periodlib, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta from datetime import datetime, timedelta diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index 8dbc70a0bdbe9..6befc5e60f5f6 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -10,7 +10,7 @@ np.import_array() from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, - uint32_t, uint64_t, float16_t, float32_t, float64_t) + uint32_t, uint64_t, float32_t, float64_t) cdef double NaN = <double> np.NaN cdef double nan = 
NaN diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index db2e8b43d1ead..c4104b66e009f 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -10,7 +10,7 @@ np.import_array() from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, - uint32_t, uint64_t, float16_t, float32_t, float64_t) + uint32_t, uint64_t, float32_t, float64_t) cdef double NaN = <double> np.NaN cdef double nan = NaN diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index 5484cbda5bdf9..bbdd5f0d8334c 100644 --- a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -1,5 +1,5 @@ from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t, - float64_t, float32_t, float16_t) + float64_t, float32_t) cimport numpy as np cimport cython diff --git a/pandas/_libs/src/numpy.pxd b/pandas/_libs/src/numpy.pxd deleted file mode 100644 index 9ab3b9b1b81ae..0000000000000 --- a/pandas/_libs/src/numpy.pxd +++ /dev/null @@ -1,984 +0,0 @@ -# NumPy static imports for Cython -# -# If any of the PyArray_* functions are called, import_array must be -# called first. -# -# This also defines backwards-compatability buffer acquisition -# code for use in Python 2.x (or Python <= 2.5 when NumPy starts -# implementing PEP-3118 directly). -# -# Because of laziness, the format string of the buffer is statically -# allocated. Increase the size if this is not enough, or submit a -# patch to do this properly. 
-# -# Author: Dag Sverre Seljebotn -# - -DEF _buffer_format_string_len = 255 - -cimport cpython.buffer as pybuf -from cpython.ref cimport Py_INCREF, Py_XDECREF -from cpython.object cimport PyObject -cimport libc.stdlib as stdlib -cimport libc.stdio as stdio - -cdef extern from "Python.h": - ctypedef int Py_intptr_t - -cdef extern from "numpy/arrayobject.h": - ctypedef Py_intptr_t npy_intp - ctypedef size_t npy_uintp - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_NTYPES - NPY_NOTYPE - - NPY_INT8 - NPY_INT16 - NPY_INT32 - NPY_INT64 - NPY_INT128 - NPY_INT256 - NPY_UINT8 - NPY_UINT16 - NPY_UINT32 - NPY_UINT64 - NPY_UINT128 - NPY_UINT256 - NPY_FLOAT16 - NPY_FLOAT32 - NPY_FLOAT64 - NPY_FLOAT80 - NPY_FLOAT96 - NPY_FLOAT128 - NPY_FLOAT256 - NPY_COMPLEX32 - NPY_COMPLEX64 - NPY_COMPLEX128 - NPY_COMPLEX160 - NPY_COMPLEX192 - NPY_COMPLEX256 - NPY_COMPLEX512 - - NPY_DATETIME - NPY_TIMEDELTA - - NPY_INTP - - ctypedef enum NPY_ORDER: - NPY_ANYORDER - NPY_CORDER - NPY_FORTRANORDER - - ctypedef enum NPY_CLIPMODE: - NPY_CLIP - NPY_WRAP - NPY_RAISE - - ctypedef enum NPY_SCALARKIND: - NPY_NOSCALAR, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR - - ctypedef enum NPY_SORTKIND: - NPY_QUICKSORT - NPY_HEAPSORT - NPY_MERGESORT - - ctypedef enum NPY_SEARCHSIDE: - NPY_SEARCHLEFT - NPY_SEARCHRIGHT - - enum: - NPY_C_CONTIGUOUS - NPY_F_CONTIGUOUS - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_UPDATEIFCOPY - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - 
NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - cdef enum: - NPY_MAXDIMS - - npy_intp NPY_MAX_ELSIZE - - ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) - - ctypedef class numpy.dtype [object PyArray_Descr]: - # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. Please - # ask on cython-dev if you need more. - cdef int type_num - cdef int itemsize "elsize" - cdef char byteorder - cdef object fields - cdef tuple names - - ctypedef extern class numpy.flatiter [object PyArrayIterObject]: - # Use through macros - pass - - ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]: - # Use through macros - pass - - ctypedef struct PyArrayObject: - # For use in situations where ndarray can't replace PyArrayObject*, - # like PyArrayObject**. - pass - - ctypedef class numpy.ndarray [object PyArrayObject]: - cdef __cythonbufferdefaults__ = {"mode": "strided"} - - cdef: - # Only taking a few of the most commonly used and stable fields. - # One should use PyArray_* macros instead to access the C fields. - char *data - int ndim "nd" - npy_intp *shape "dimensions" - npy_intp *strides - dtype descr - PyObject* base - - # Note: This syntax (function definition in pxd files) is an - # experimental exception made for __getbuffer__ and __releasebuffer__ - # -- the details of this may change. - def __getbuffer__(ndarray self, Py_buffer* info, int flags): - # This implementation of getbuffer is geared towards Cython - # requirements, and does not yet fullfill the PEP. 
- # In particular strided access is always provided regardless - # of flags - - if info == NULL: return - - cdef int copy_shape, i, ndim - cdef int endian_detector = 1 - cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) - - ndim = PyArray_NDIM(self) - - if sizeof(npy_intp) != sizeof(Py_ssize_t): - copy_shape = 1 - else: - copy_shape = 0 - - if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - raise ValueError(u"ndarray is not C contiguous") - - if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - raise ValueError(u"ndarray is not Fortran contiguous") - - info.buf = PyArray_DATA(self) - info.ndim = ndim - if copy_shape: - # Allocate new buffer for strides and shape info. - # This is allocated as one block, strides first. - info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) - info.shape = info.strides + ndim - for i in range(ndim): - info.strides[i] = PyArray_STRIDES(self)[i] - info.shape[i] = PyArray_DIMS(self)[i] - else: - info.strides = <Py_ssize_t*>PyArray_STRIDES(self) - info.shape = <Py_ssize_t*>PyArray_DIMS(self) - info.suboffsets = NULL - info.itemsize = PyArray_ITEMSIZE(self) - info.readonly = not PyArray_ISWRITEABLE(self) - - cdef int t - cdef char* f = NULL - cdef dtype descr = self.descr - cdef list stack - cdef int offset - - cdef bint hasfields = PyDataType_HASFIELDS(descr) - - if not hasfields and not copy_shape: - # do not call releasebuffer - info.obj = None - else: - # need to call releasebuffer - info.obj = self - - if not hasfields: - t = descr.type_num - if ((descr.byteorder == '>' and little_endian) or - (descr.byteorder == '<' and not little_endian)): - raise ValueError(u"Non-native byte order not supported") - if t == NPY_BYTE: f = "b" - elif t == NPY_UBYTE: f = "B" - elif t == NPY_SHORT: f = "h" - elif t == NPY_USHORT: f = "H" - elif t == NPY_INT: f = "i" - elif t 
== NPY_UINT: f = "I" - elif t == NPY_LONG: f = "l" - elif t == NPY_ULONG: f = "L" - elif t == NPY_LONGLONG: f = "q" - elif t == NPY_ULONGLONG: f = "Q" - elif t == NPY_FLOAT: f = "f" - elif t == NPY_DOUBLE: f = "d" - elif t == NPY_LONGDOUBLE: f = "g" - elif t == NPY_CFLOAT: f = "Zf" - elif t == NPY_CDOUBLE: f = "Zd" - elif t == NPY_CLONGDOUBLE: f = "Zg" - elif t == NPY_OBJECT: f = "O" - else: - raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - info.format = f - return - else: - info.format = <char*>stdlib.malloc(_buffer_format_string_len) - info.format[0] = '^' # Native data types, manual alignment - offset = 0 - f = _util_dtypestring(descr, info.format + 1, - info.format + _buffer_format_string_len, - &offset) - f[0] = 0 # Terminate format string - - def __releasebuffer__(ndarray self, Py_buffer* info): - if PyArray_HASFIELDS(self): - stdlib.free(info.format) - if sizeof(npy_intp) != sizeof(Py_ssize_t): - stdlib.free(info.strides) - # info.shape was stored after info.strides in the same block - - - ctypedef signed char npy_bool - - ctypedef signed char npy_byte - ctypedef signed short npy_short - ctypedef signed int npy_int - ctypedef signed long npy_long - ctypedef signed long long npy_longlong - - ctypedef unsigned char npy_ubyte - ctypedef unsigned short npy_ushort - ctypedef unsigned int npy_uint - ctypedef unsigned long npy_ulong - ctypedef unsigned long long npy_ulonglong - - ctypedef float npy_float - ctypedef double npy_double - ctypedef long double npy_longdouble - - ctypedef signed char npy_int8 - ctypedef signed short npy_int16 - ctypedef signed int npy_int32 - ctypedef signed long long npy_int64 - ctypedef signed long long npy_int96 - ctypedef signed long long npy_int128 - - ctypedef unsigned char npy_uint8 - ctypedef unsigned short npy_uint16 - ctypedef unsigned int npy_uint32 - ctypedef unsigned long long npy_uint64 - ctypedef unsigned long long npy_uint96 - ctypedef unsigned long long npy_uint128 - - ctypedef float npy_float16 - 
ctypedef float npy_float32 - ctypedef double npy_float64 - ctypedef long double npy_float80 - ctypedef long double npy_float96 - ctypedef long double npy_float128 - - ctypedef struct npy_cfloat: - double real - double imag - - ctypedef struct npy_cdouble: - double real - double imag - - ctypedef struct npy_clongdouble: - double real - double imag - - ctypedef struct npy_complex64: - double real - double imag - - ctypedef struct npy_complex128: - double real - double imag - - ctypedef struct npy_complex160: - double real - double imag - - ctypedef struct npy_complex192: - double real - double imag - - ctypedef struct npy_complex256: - double real - double imag - - ctypedef struct PyArray_Dims: - npy_intp *ptr - int len - - void import_array() - - # - # Macros from ndarrayobject.h - # - bint PyArray_CHKFLAGS(ndarray m, int flags) - bint PyArray_ISCONTIGUOUS(ndarray m) - bint PyArray_ISWRITEABLE(ndarray m) - bint PyArray_ISALIGNED(ndarray m) - - int PyArray_NDIM(ndarray) - bint PyArray_ISONESEGMENT(ndarray) - bint PyArray_ISFORTRAN(ndarray) - int PyArray_FORTRANIF(ndarray) - - void* PyArray_DATA(ndarray) - char* PyArray_BYTES(ndarray) - npy_intp* PyArray_DIMS(ndarray) - npy_intp* PyArray_STRIDES(ndarray) - npy_intp PyArray_DIM(ndarray, size_t) - npy_intp PyArray_STRIDE(ndarray, size_t) - - # object PyArray_BASE(ndarray) wrong refcount semantics - # dtype PyArray_DESCR(ndarray) wrong refcount semantics - int PyArray_FLAGS(ndarray) - npy_intp PyArray_ITEMSIZE(ndarray) - int PyArray_TYPE(ndarray arr) - - object PyArray_GETITEM(ndarray arr, void *itemptr) - int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) - - bint PyTypeNum_ISBOOL(int) - bint PyTypeNum_ISUNSIGNED(int) - bint PyTypeNum_ISSIGNED(int) - bint PyTypeNum_ISINTEGER(int) - bint PyTypeNum_ISFLOAT(int) - bint PyTypeNum_ISNUMBER(int) - bint PyTypeNum_ISSTRING(int) - bint PyTypeNum_ISCOMPLEX(int) - bint PyTypeNum_ISPYTHON(int) - bint PyTypeNum_ISFLEXIBLE(int) - bint PyTypeNum_ISUSERDEF(int) - bint 
PyTypeNum_ISEXTENDED(int) - bint PyTypeNum_ISOBJECT(int) - - bint PyDataType_ISBOOL(dtype) - bint PyDataType_ISUNSIGNED(dtype) - bint PyDataType_ISSIGNED(dtype) - bint PyDataType_ISINTEGER(dtype) - bint PyDataType_ISFLOAT(dtype) - bint PyDataType_ISNUMBER(dtype) - bint PyDataType_ISSTRING(dtype) - bint PyDataType_ISCOMPLEX(dtype) - bint PyDataType_ISPYTHON(dtype) - bint PyDataType_ISFLEXIBLE(dtype) - bint PyDataType_ISUSERDEF(dtype) - bint PyDataType_ISEXTENDED(dtype) - bint PyDataType_ISOBJECT(dtype) - bint PyDataType_HASFIELDS(dtype) - - bint PyArray_ISBOOL(ndarray) - bint PyArray_ISUNSIGNED(ndarray) - bint PyArray_ISSIGNED(ndarray) - bint PyArray_ISINTEGER(ndarray) - bint PyArray_ISFLOAT(ndarray) - bint PyArray_ISNUMBER(ndarray) - bint PyArray_ISSTRING(ndarray) - bint PyArray_ISCOMPLEX(ndarray) - bint PyArray_ISPYTHON(ndarray) - bint PyArray_ISFLEXIBLE(ndarray) - bint PyArray_ISUSERDEF(ndarray) - bint PyArray_ISEXTENDED(ndarray) - bint PyArray_ISOBJECT(ndarray) - bint PyArray_HASFIELDS(ndarray) - - bint PyArray_ISVARIABLE(ndarray) - - bint PyArray_SAFEALIGNEDCOPY(ndarray) - bint PyArray_ISNBO(ndarray) - bint PyArray_IsNativeByteOrder(ndarray) - bint PyArray_ISNOTSWAPPED(ndarray) - bint PyArray_ISBYTESWAPPED(ndarray) - - bint PyArray_FLAGSWAP(ndarray, int) - - bint PyArray_ISCARRAY(ndarray) - bint PyArray_ISCARRAY_RO(ndarray) - bint PyArray_ISFARRAY(ndarray) - bint PyArray_ISFARRAY_RO(ndarray) - bint PyArray_ISBEHAVED(ndarray) - bint PyArray_ISBEHAVED_RO(ndarray) - - - bint PyDataType_ISNOTSWAPPED(dtype) - bint PyDataType_ISBYTESWAPPED(dtype) - - bint PyArray_DescrCheck(object) - - bint PyArray_Check(object) - bint PyArray_CheckExact(object) - - # Cannot be supported due to out arg: - # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) - # bint PyArray_HasArrayInterface(op, out) - - - bint PyArray_IsZeroDim(object) - # Cannot be supported due to ## ## in macro: - # bint PyArray_IsScalar(object, verbatim work) - bint PyArray_CheckScalar(object) - 
bint PyArray_IsPythonNumber(object) - bint PyArray_IsPythonScalar(object) - bint PyArray_IsAnyScalar(object) - bint PyArray_CheckAnyScalar(object) - ndarray PyArray_GETCONTIGUOUS(ndarray) - bint PyArray_SAMESHAPE(ndarray, ndarray) - npy_intp PyArray_SIZE(ndarray) - npy_intp PyArray_NBYTES(ndarray) - - object PyArray_FROM_O(object) - object PyArray_FROM_OF(object m, int flags) - bint PyArray_FROM_OT(object m, int type) - bint PyArray_FROM_OTF(object m, int type, int flags) - object PyArray_FROMANY(object m, int type, int min, int max, int flags) - object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) - object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) - void PyArray_FILLWBYTE(object, int val) - npy_intp PyArray_REFCOUNT(object) - object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) - unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) - bint PyArray_EquivByteorders(int b1, int b2) - object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) - #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) - object PyArray_ToScalar(void* data, ndarray arr) - - void* PyArray_GETPTR1(ndarray m, npy_intp i) - void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) - void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) - void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) - - void PyArray_XDECREF_ERR(ndarray) - # Cannot be supported due to out arg - # void PyArray_DESCR_REPLACE(descr) - - - object PyArray_Copy(ndarray) - object PyArray_FromObject(object op, int type, int min_depth, int max_depth) - object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) - object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) - - object PyArray_Cast(ndarray mp, int type_num) - object PyArray_Take(ndarray ap, object items, int axis) - 
object PyArray_Put(ndarray ap, object items, object values) - - void PyArray_ITER_RESET(flatiter it) nogil - void PyArray_ITER_NEXT(flatiter it) nogil - void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil - void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil - void* PyArray_ITER_DATA(flatiter it) nogil - bint PyArray_ITER_NOTDONE(flatiter it) nogil - - void PyArray_MultiIter_RESET(broadcast multi) nogil - void PyArray_MultiIter_NEXT(broadcast multi) nogil - void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil - void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil - void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil - void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil - bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil - - # Functions from __multiarray_api.h - - # Functions taking dtype and returning object/ndarray are disabled - # for now as they steal dtype references. I'm conservative and disable - # more than is probably needed until it can be checked further. 
- int PyArray_SetNumericOps (object) - object PyArray_GetNumericOps () - int PyArray_INCREF (ndarray) - int PyArray_XDECREF (ndarray) - void PyArray_SetStringFunction (object, int) - dtype PyArray_DescrFromType (int) - object PyArray_TypeObjectFromType (int) - char * PyArray_Zero (ndarray) - char * PyArray_One (ndarray) - #object PyArray_CastToType (ndarray, dtype, int) - int PyArray_CastTo (ndarray, ndarray) - int PyArray_CastAnyTo (ndarray, ndarray) - int PyArray_CanCastSafely (int, int) - npy_bool PyArray_CanCastTo (dtype, dtype) - int PyArray_ObjectType (object, int) - dtype PyArray_DescrFromObject (object, dtype) - #ndarray* PyArray_ConvertToCommonType (object, int *) - dtype PyArray_DescrFromScalar (object) - dtype PyArray_DescrFromTypeObject (object) - npy_intp PyArray_Size (object) - #object PyArray_Scalar (void *, dtype, object) - #object PyArray_FromScalar (object, dtype) - void PyArray_ScalarAsCtype (object, void *) - #int PyArray_CastScalarToCtype (object, void *, dtype) - #int PyArray_CastScalarDirect (object, dtype, void *, int) - object PyArray_ScalarFromObject (object) - #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) - object PyArray_FromDims (int, int *, int) - #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) - #object PyArray_FromAny (object, dtype, int, int, int, object) - object PyArray_EnsureArray (object) - object PyArray_EnsureAnyArray (object) - #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) - #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) - #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) - #object PyArray_FromIter (object, dtype, npy_intp) - object PyArray_Return (ndarray) - #object PyArray_GetField (ndarray, dtype, int) - #int PyArray_SetField (ndarray, dtype, int, object) - object PyArray_Byteswap (ndarray, npy_bool) - object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) - int PyArray_MoveInto (ndarray, ndarray) - int PyArray_CopyInto 
(ndarray, ndarray) - int PyArray_CopyAnyInto (ndarray, ndarray) - int PyArray_CopyObject (ndarray, object) - object PyArray_NewCopy (ndarray, NPY_ORDER) - object PyArray_ToList (ndarray) - object PyArray_ToString (ndarray, NPY_ORDER) - int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) - int PyArray_Dump (object, object, int) - object PyArray_Dumps (object, int) - int PyArray_ValidType (int) - void PyArray_UpdateFlags (ndarray, int) - object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) - #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) - #dtype PyArray_DescrNew (dtype) - dtype PyArray_DescrNewFromType (int) - double PyArray_GetPriority (object, double) - object PyArray_IterNew (object) - object PyArray_MultiIterNew (int, ...) - - int PyArray_PyIntAsInt (object) - npy_intp PyArray_PyIntAsIntp (object) - int PyArray_Broadcast (broadcast) - void PyArray_FillObjectArray (ndarray, object) - int PyArray_FillWithScalar (ndarray, object) - npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) - dtype PyArray_DescrNewByteorder (dtype, char) - object PyArray_IterAllButAxis (object, int *) - #object PyArray_CheckFromAny (object, dtype, int, int, int, object) - #object PyArray_FromArray (ndarray, dtype, int) - object PyArray_FromInterface (object) - object PyArray_FromStructInterface (object) - #object PyArray_FromArrayAttr (object, dtype, object) - #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) - int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) - object PyArray_NewFlagsObject (object) - npy_bool PyArray_CanCastScalar (type, type) - #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) - int PyArray_RemoveSmallest (broadcast) - int PyArray_ElementStrides (object) - void PyArray_Item_INCREF (char *, dtype) - void PyArray_Item_XDECREF (char *, dtype) - object PyArray_FieldNames (object) - object PyArray_Transpose (ndarray, PyArray_Dims *) - 
object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) - object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) - object PyArray_PutMask (ndarray, object, object) - object PyArray_Repeat (ndarray, object, int) - object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) - int PyArray_Sort (ndarray, int, NPY_SORTKIND) - object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) - object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE) - object PyArray_ArgMax (ndarray, int, ndarray) - object PyArray_ArgMin (ndarray, int, ndarray) - object PyArray_Reshape (ndarray, object) - object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) - object PyArray_Squeeze (ndarray) - #object PyArray_View (ndarray, dtype, type) - object PyArray_SwapAxes (ndarray, int, int) - object PyArray_Max (ndarray, int, ndarray) - object PyArray_Min (ndarray, int, ndarray) - object PyArray_Ptp (ndarray, int, ndarray) - object PyArray_Mean (ndarray, int, int, ndarray) - object PyArray_Trace (ndarray, int, int, int, int, ndarray) - object PyArray_Diagonal (ndarray, int, int, int) - object PyArray_Clip (ndarray, object, object, ndarray) - object PyArray_Conjugate (ndarray, ndarray) - object PyArray_Nonzero (ndarray) - object PyArray_Std (ndarray, int, int, ndarray, int) - object PyArray_Sum (ndarray, int, int, ndarray) - object PyArray_CumSum (ndarray, int, int, ndarray) - object PyArray_Prod (ndarray, int, int, ndarray) - object PyArray_CumProd (ndarray, int, int, ndarray) - object PyArray_All (ndarray, int, ndarray) - object PyArray_Any (ndarray, int, ndarray) - object PyArray_Compress (ndarray, object, int, ndarray) - object PyArray_Flatten (ndarray, NPY_ORDER) - object PyArray_Ravel (ndarray, NPY_ORDER) - npy_intp PyArray_MultiplyList (npy_intp *, int) - int PyArray_MultiplyIntList (int *, int) - void * PyArray_GetPtr (ndarray, npy_intp*) - int PyArray_CompareLists (npy_intp *, npy_intp *, int) - #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) - #int 
PyArray_As1D (object*, char **, int *, int) - #int PyArray_As2D (object*, char ***, int *, int *, int) - int PyArray_Free (object, void *) - #int PyArray_Converter (object, object*) - int PyArray_IntpFromSequence (object, npy_intp *, int) - object PyArray_Concatenate (object, int) - object PyArray_InnerProduct (object, object) - object PyArray_MatrixProduct (object, object) - object PyArray_CopyAndTranspose (object) - object PyArray_Correlate (object, object, int) - int PyArray_TypestrConvert (int, int) - #int PyArray_DescrConverter (object, dtype*) - #int PyArray_DescrConverter2 (object, dtype*) - int PyArray_IntpConverter (object, PyArray_Dims *) - #int PyArray_BufferConverter (object, chunk) - int PyArray_AxisConverter (object, int *) - int PyArray_BoolConverter (object, npy_bool *) - int PyArray_ByteorderConverter (object, char *) - int PyArray_OrderConverter (object, NPY_ORDER *) - unsigned char PyArray_EquivTypes (dtype, dtype) - #object PyArray_Zeros (int, npy_intp *, dtype, int) - #object PyArray_Empty (int, npy_intp *, dtype, int) - object PyArray_Where (object, object, object) - object PyArray_Arange (double, double, double, int) - #object PyArray_ArangeObj (object, object, object, dtype) - int PyArray_SortkindConverter (object, NPY_SORTKIND *) - object PyArray_LexSort (object, int) - object PyArray_Round (ndarray, int, ndarray) - unsigned char PyArray_EquivTypenums (int, int) - int PyArray_RegisterDataType (dtype) - int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) - int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) - #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) - object PyArray_IntTupleFromIntp (int, npy_intp *) - int PyArray_TypeNumFromName (char *) - int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) - #int PyArray_OutputConverter (object, ndarray*) - object PyArray_BroadcastToShape (object, npy_intp *, int) - void _PyArray_SigintHandler (int) - void* _PyArray_GetSigintBuf () - #int PyArray_DescrAlignConverter (object, 
dtype*) - #int PyArray_DescrAlignConverter2 (object, dtype*) - int PyArray_SearchsideConverter (object, void *) - object PyArray_CheckAxis (ndarray, int *, int) - npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) - int PyArray_CompareString (char *, char *, size_t) - - -# Typedefs that matches the runtime dtype objects in -# the numpy module. - -# The ones that are commented out needs an IFDEF function -# in Cython to enable them only on the right systems. - -ctypedef npy_int8 int8_t -ctypedef npy_int16 int16_t -ctypedef npy_int32 int32_t -ctypedef npy_int64 int64_t -#ctypedef npy_int96 int96_t -#ctypedef npy_int128 int128_t - -ctypedef npy_uint8 uint8_t -ctypedef npy_uint16 uint16_t -ctypedef npy_uint32 uint32_t -ctypedef npy_uint64 uint64_t -#ctypedef npy_uint96 uint96_t -#ctypedef npy_uint128 uint128_t - -ctypedef npy_float16 float16_t -ctypedef npy_float32 float32_t -ctypedef npy_float64 float64_t -#ctypedef npy_float80 float80_t -#ctypedef npy_float128 float128_t - -ctypedef float complex complex64_t -ctypedef double complex complex128_t - -# The int types are mapped a bit surprising -- -# numpy.int corresponds to 'l' and numpy.long to 'q' -ctypedef npy_long int_t -ctypedef npy_longlong long_t -ctypedef npy_longlong longlong_t - -ctypedef npy_ulong uint_t -ctypedef npy_ulonglong ulong_t -ctypedef npy_ulonglong ulonglong_t - -ctypedef npy_intp intp_t -ctypedef npy_uintp uintp_t - -ctypedef npy_double float_t -ctypedef npy_double double_t -ctypedef npy_longdouble longdouble_t - -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t - -cdef inline object PyArray_MultiIterNew1(a): - return PyArray_MultiIterNew(1, <void*>a) - -cdef inline object PyArray_MultiIterNew2(a, b): - return PyArray_MultiIterNew(2, <void*>a, <void*>b) - -cdef inline object PyArray_MultiIterNew3(a, b, c): - return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) - -cdef inline object 
PyArray_MultiIterNew4(a, b, c, d): - return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) - -cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) - -cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: - # Recursive utility function used in __getbuffer__ to get format - # string. The new location in the format string is returned. - - cdef dtype child - cdef int delta_offset - cdef tuple i - cdef int endian_detector = 1 - cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) - cdef tuple fields - - for childname in descr.names: - fields = descr.fields[childname] - child, new_offset = fields - - if (end - f) - (new_offset - offset[0]) < 15: - raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") - - if ((child.byteorder == '>' and little_endian) or - (child.byteorder == '<' and not little_endian)): - raise ValueError(u"Non-native byte order not supported") - # One could encode it in the format string and have Cython - # complain instead, BUT: < and > in format strings also imply - # standardized sizes for datatypes, and we rely on native in - # order to avoid reencoding data types based on their size. - # - # A proper PEP 3118 exporter for other clients than Cython - # must deal properly with this! 
- - # Output padding bytes - while offset[0] < new_offset: - f[0] = 120 # "x"; pad byte - f += 1 - offset[0] += 1 - - offset[0] += child.itemsize - - if not PyDataType_HASFIELDS(child): - t = child.type_num - if end - f < 5: - raise RuntimeError(u"Format string allocated too short.") - - # Until ticket #99 is fixed, use integers to avoid warnings - if t == NPY_BYTE: f[0] = 98 #"b" - elif t == NPY_UBYTE: f[0] = 66 #"B" - elif t == NPY_SHORT: f[0] = 104 #"h" - elif t == NPY_USHORT: f[0] = 72 #"H" - elif t == NPY_INT: f[0] = 105 #"i" - elif t == NPY_UINT: f[0] = 73 #"I" - elif t == NPY_LONG: f[0] = 108 #"l" - elif t == NPY_ULONG: f[0] = 76 #"L" - elif t == NPY_LONGLONG: f[0] = 113 #"q" - elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - elif t == NPY_FLOAT: f[0] = 102 #"f" - elif t == NPY_DOUBLE: f[0] = 100 #"d" - elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - elif t == NPY_OBJECT: f[0] = 79 #"O" - else: - raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - f += 1 - else: - # Cython ignores struct boundary information ("T{...}"), - # so don't output it - f = _util_dtypestring(child, f, end, offset) - return f - - -# -# ufunc API -# - -cdef extern from "numpy/ufuncobject.h": - - ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) - - ctypedef extern class numpy.ufunc [object PyUFuncObject]: - cdef: - int nin, nout, nargs - int identity - PyUFuncGenericFunction *functions - void **data - int ntypes - int check_return - char *name - char *types - char *doc - void *ptr - PyObject *obj - PyObject *userloops - - cdef enum: - PyUFunc_Zero - PyUFunc_One - PyUFunc_None - UFUNC_ERR_IGNORE - UFUNC_ERR_WARN - UFUNC_ERR_RAISE - UFUNC_ERR_CALL - UFUNC_ERR_PRINT - UFUNC_ERR_LOG - UFUNC_MASK_DIVIDEBYZERO - UFUNC_MASK_OVERFLOW - UFUNC_MASK_UNDERFLOW - UFUNC_MASK_INVALID - 
UFUNC_SHIFT_DIVIDEBYZERO - UFUNC_SHIFT_OVERFLOW - UFUNC_SHIFT_UNDERFLOW - UFUNC_SHIFT_INVALID - UFUNC_FPE_DIVIDEBYZERO - UFUNC_FPE_OVERFLOW - UFUNC_FPE_UNDERFLOW - UFUNC_FPE_INVALID - UFUNC_ERR_DEFAULT - UFUNC_ERR_DEFAULT2 - - object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, - void **, char *, int, int, int, int, char *, char *, int) - int PyUFunc_RegisterLoopForType(ufunc, int, - PyUFuncGenericFunction, int *, void *) - int PyUFunc_GenericFunction \ - (ufunc, PyObject *, PyObject *, PyArrayObject **) - void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F_As_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *) - int PyUFunc_GetPyValues \ - (char *, int *, int *, PyObject 
**) - int PyUFunc_checkfperr \ - (int, PyObject *, int *) - void PyUFunc_clearfperr() - int PyUFunc_getfperr() - int PyUFunc_handlefperr \ - (int, PyObject *, int, int *) - int PyUFunc_ReplaceLoopBySignature \ - (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) - object PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, - int, char *, char *, int, char *) - - void import_ufunc() - - -cdef inline void set_array_base(ndarray arr, object base): - cdef PyObject* baseptr - if base is None: - baseptr = NULL - else: - Py_INCREF(base) # important to do this before decref below! - baseptr = <PyObject*>base - Py_XDECREF(arr.base) - arr.base = baseptr - -cdef inline object get_array_base(ndarray arr): - if arr.base is NULL: - return None - else: - return <object>arr.base diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 5a4af4550f589..0260cbc194555 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -6,7 +6,7 @@ cimport numpy as np from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray, - float64_t, NPY_DATETIME, NPY_TIMEDELTA) + float64_t) import numpy as np import sys @@ -59,7 +59,8 @@ from tslibs.np_datetime cimport (check_dts_bounds, cmp_scalar, pandas_datetimestruct, dt64_to_dtstruct, dtstruct_to_dt64, - pydatetime_to_dt64, pydate_to_dt64) + pydatetime_to_dt64, pydate_to_dt64, + NPY_TIMEDELTA, NPY_DATETIME) from tslibs.np_datetime import OutOfBoundsDatetime from khash cimport ( diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index ab77049a9ff5b..92ebe830fd897 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -5,6 +5,10 @@ from cpython.datetime cimport date, datetime from numpy cimport int64_t, int32_t +cdef extern from "numpy/ndarraytypes.h": + cdef enum NPY_TYPES: + NPY_DATETIME + NPY_TIMEDELTA cdef extern from "../src/datetime/np_datetime.h": ctypedef struct pandas_datetimestruct:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18101
2017-11-04T02:13:10Z
2017-11-06T21:55:36Z
null
2017-12-08T19:40:24Z
DOC: Improve replace docstring
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 96d28581cfdd9..201d8ba427c8a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3080,6 +3080,14 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, inplace=inplace, limit=limit, downcast=downcast, **kwargs) + @Appender(_shared_docs['replace'] % _shared_doc_kwargs) + def replace(self, to_replace=None, value=None, inplace=False, limit=None, + regex=False, method='pad', axis=None): + return super(DataFrame, self).replace(to_replace=to_replace, + value=value, inplace=inplace, + limit=limit, regex=regex, + method=method, axis=axis) + @Appender(_shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0): return super(DataFrame, self).shift(periods=periods, freq=freq, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d34a85b5b4388..0f038cd687dfd 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -69,6 +69,10 @@ def _single_replace(self, to_replace, method, inplace, limit): + """ + Replaces values in a Series using the fill method specified when no + replacement value is given in the replace method + """ if self.ndim != 1: raise TypeError('cannot replace {0} with method {1} on a {2}' .format(to_replace, method, type(self).__name__)) @@ -4787,94 +4791,111 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None): return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) - def replace(self, to_replace=None, value=None, inplace=False, limit=None, - regex=False, method='pad', axis=None): - """ + _shared_docs['replace'] = (""" Replace values given in 'to_replace' with 'value'. 
Parameters ---------- to_replace : str, regex, list, dict, Series, numeric, or None - * str or regex: + * numeric, str or regex: - - str: string exactly matching `to_replace` will be replaced - with `value` - - regex: regexs matching `to_replace` will be replaced with - `value` + - numeric: numeric values equal to ``to_replace`` will be + replaced with ``value`` + - str: string exactly matching ``to_replace`` will be replaced + with ``value`` + - regex: regexs matching ``to_replace`` will be replaced with + ``value`` * list of str, regex, or numeric: - - First, if `to_replace` and `value` are both lists, they + - First, if ``to_replace`` and ``value`` are both lists, they **must** be the same length. - Second, if ``regex=True`` then all of the strings in **both** lists will be interpreted as regexs otherwise they will match - directly. This doesn't matter much for `value` since there + directly. This doesn't matter much for ``value`` since there are only a few possible substitution regexes you can use. - - str and regex rules apply as above. + - str, regex and numeric rules apply as above. * dict: - - Nested dictionaries, e.g., {'a': {'b': nan}}, are read as - follows: look in column 'a' for the value 'b' and replace it - with nan. You can nest regular expressions as well. Note that + - Dicts can be used to specify different replacement values + for different existing values. For example, + {'a': 'b', 'y': 'z'} replaces the value 'a' with 'b' and + 'y' with 'z'. To use a dict in this way the ``value`` + parameter should be ``None``. + - For a DataFrame a dict can specify that different values + should be replaced in different columns. For example, + {'a': 1, 'b': 'z'} looks for the value 1 in column 'a' and + the value 'z' in column 'b' and replaces these values with + whatever is specified in ``value``. The ``value`` parameter + should not be ``None`` in this case. 
You can treat this as a + special case of passing two lists except that you are + specifying the column to search in. + - For a DataFrame nested dictionaries, e.g., + {'a': {'b': np.nan}}, are read as follows: look in column 'a' + for the value 'b' and replace it with NaN. The ``value`` + parameter should be ``None`` to use a nested dict in this + way. You can nest regular expressions as well. Note that column names (the top-level dictionary keys in a nested dictionary) **cannot** be regular expressions. - - Keys map to column names and values map to substitution - values. You can treat this as a special case of passing two - lists except that you are specifying the column to search in. * None: - This means that the ``regex`` argument must be a string, compiled regular expression, or list, dict, ndarray or Series - of such elements. If `value` is also ``None`` then this + of such elements. If ``value`` is also ``None`` then this **must** be a nested dictionary or ``Series``. See the examples section for examples of each of these. value : scalar, dict, list, str, regex, default None - Value to use to fill holes (e.g. 0), alternately a dict of values - specifying which value to use for each column (columns not in the - dict will not be filled). Regular expressions, strings and lists or - dicts of such objects are also allowed. + Value to replace any values matching ``to_replace`` with. + For a DataFrame a dict of values can be used to specify which + value to use for each column (columns not in the dict will not be + filled). Regular expressions, strings and lists or dicts of such + objects are also allowed. inplace : boolean, default False If True, in place. Note: this will modify any other views on this object (e.g. a column from a DataFrame). Returns the caller if this is True. 
limit : int, default None Maximum size gap to forward or backward fill - regex : bool or same types as `to_replace`, default False - Whether to interpret `to_replace` and/or `value` as regular - expressions. If this is ``True`` then `to_replace` *must* be a - string. Otherwise, `to_replace` must be ``None`` because this - parameter will be interpreted as a regular expression or a list, - dict, or array of regular expressions. + regex : bool or same types as ``to_replace``, default False + Whether to interpret ``to_replace`` and/or ``value`` as regular + expressions. If this is ``True`` then ``to_replace`` *must* be a + string. Alternatively, this could be a regular expression or a + list, dict, or array of regular expressions in which case + ``to_replace`` must be ``None``. method : string, optional, {'pad', 'ffill', 'bfill'} The method to use when for replacement, when ``to_replace`` is a ``list``. See Also -------- - NDFrame.reindex - NDFrame.asfreq - NDFrame.fillna + %(klass)s.fillna : Fill NA/NaN values + %(klass)s.where : Replace values based on boolean condition Returns ------- - filled : NDFrame + filled : %(klass)s Raises ------ AssertionError - * If `regex` is not a ``bool`` and `to_replace` is not ``None``. + * If ``regex`` is not a ``bool`` and ``to_replace`` is not + ``None``. TypeError - * If `to_replace` is a ``dict`` and `value` is not a ``list``, + * If ``to_replace`` is a ``dict`` and ``value`` is not a ``list``, ``dict``, ``ndarray``, or ``Series`` - * If `to_replace` is ``None`` and `regex` is not compilable into a - regular expression or is a list, dict, ndarray, or Series. + * If ``to_replace`` is ``None`` and ``regex`` is not compilable + into a regular expression or is a list, dict, ndarray, or + Series. 
+ * When replacing multiple ``bool`` or ``datetime64`` objects and + the arguments to ``to_replace`` does not match the type of the + value being replaced ValueError - * If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but - they are not the same length. + * If a ``list`` or an ``ndarray`` is passed to ``to_replace`` and + `value` but they are not the same length. Notes ----- @@ -4883,12 +4904,121 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, * Regular expressions will only substitute on strings, meaning you cannot provide, for example, a regular expression matching floating point numbers and expect the columns in your frame that have a - numeric dtype to be matched. However, if those floating point numbers - *are* strings, then you can do this. + numeric dtype to be matched. However, if those floating point + numbers *are* strings, then you can do this. * This method has *a lot* of options. You are encouraged to experiment and play with this method to gain intuition about how it works. - """ + Examples + -------- + + >>> s = pd.Series([0, 1, 2, 3, 4]) + >>> s.replace(0, 5) + 0 5 + 1 1 + 2 2 + 3 3 + 4 4 + dtype: int64 + >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4], + ... 'B': [5, 6, 7, 8, 9], + ... 
'C': ['a', 'b', 'c', 'd', 'e']}) + >>> df.replace(0, 5) + A B C + 0 5 5 a + 1 1 6 b + 2 2 7 c + 3 3 8 d + 4 4 9 e + + >>> df.replace([0, 1, 2, 3], 4) + A B C + 0 4 5 a + 1 4 6 b + 2 4 7 c + 3 4 8 d + 4 4 9 e + >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1]) + A B C + 0 4 5 a + 1 3 6 b + 2 2 7 c + 3 1 8 d + 4 4 9 e + >>> s.replace([1, 2], method='bfill') + 0 0 + 1 3 + 2 3 + 3 3 + 4 4 + dtype: int64 + + >>> df.replace({0: 10, 1: 100}) + A B C + 0 10 5 a + 1 100 6 b + 2 2 7 c + 3 3 8 d + 4 4 9 e + >>> df.replace({'A': 0, 'B': 5}, 100) + A B C + 0 100 100 a + 1 1 6 b + 2 2 7 c + 3 3 8 d + 4 4 9 e + >>> df.replace({'A': {0: 100, 4: 400}}) + A B C + 0 100 5 a + 1 1 6 b + 2 2 7 c + 3 3 8 d + 4 400 9 e + + >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'], + ... 'B': ['abc', 'bar', 'xyz']}) + >>> df.replace(to_replace=r'^ba.$', value='new', regex=True) + A B + 0 new abc + 1 foo new + 2 bait xyz + >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True) + A B + 0 new abc + 1 foo bar + 2 bait xyz + >>> df.replace(regex=r'^ba.$', value='new') + A B + 0 new abc + 1 foo new + 2 bait xyz + >>> df.replace(regex={r'^ba.$':'new', 'foo':'xyz'}) + A B + 0 new abc + 1 xyz new + 2 bait xyz + >>> df.replace(regex=[r'^ba.$', 'foo'], value='new') + A B + 0 new abc + 1 new new + 2 bait xyz + + Note that when replacing multiple ``bool`` or ``datetime64`` objects, + the data types in the ``to_replace`` parameter must match the data + type of the value being replaced: + + >>> df = pd.DataFrame({'A': [True, False, True], + ... 'B': [False, True, False]}) + >>> df.replace({'a string': 'new value', True: False}) # raises + TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' + + This raises a ``TypeError`` because one of the ``dict`` keys is not of + the correct type for replacement. 
+ """) + + @Appender(_shared_docs['replace'] % _shared_doc_kwargs) + def replace(self, to_replace=None, value=None, inplace=False, limit=None, + regex=False, method='pad', axis=None): inplace = validate_bool_kwarg(inplace, 'inplace') if not is_bool(regex) and to_replace is not None: raise AssertionError("'to_replace' must be 'None' if 'regex' is " diff --git a/pandas/core/series.py b/pandas/core/series.py index 78b4c3a70a519..e4b8979d6393a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2671,6 +2671,14 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, limit=limit, downcast=downcast, **kwargs) + @Appender(generic._shared_docs['replace'] % _shared_doc_kwargs) + def replace(self, to_replace=None, value=None, inplace=False, limit=None, + regex=False, method='pad', axis=None): + return super(Series, self).replace(to_replace=to_replace, value=value, + inplace=inplace, limit=limit, + regex=regex, method=method, + axis=axis) + @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0): return super(Series, self).shift(periods=periods, freq=freq, axis=axis)
xref #17673 closes #13852 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` I have tried to separate the docstrings for series and dataframe as suggested in the original issue but can try to combine them if that works better. As noted in the original issue, ``replace`` can do a lot things and I've tried to cover most of them with the examples but would welcome other suggestions.
https://api.github.com/repos/pandas-dev/pandas/pulls/18100
2017-11-04T01:25:32Z
2018-02-04T16:32:53Z
2018-02-04T16:32:52Z
2018-02-08T22:01:14Z
ENH: Implement DataFrame.astype('category')
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index efcc04d688334..3d4bb8ec57794 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -46,9 +46,14 @@ The categorical data type is useful in the following cases: See also the :ref:`API docs on categoricals<api.categorical>`. +.. _categorical.objectcreation: + Object Creation --------------- +Series Creation +~~~~~~~~~~~~~~~ + Categorical ``Series`` or columns in a ``DataFrame`` can be created in several ways: By specifying ``dtype="category"`` when constructing a ``Series``: @@ -77,7 +82,7 @@ discrete bins. See the :ref:`example on tiling <reshaping.tile.cut>` in the docs df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels) df.head(10) -By passing a :class:`pandas.Categorical` object to a `Series` or assigning it to a `DataFrame`. +By passing a :class:`pandas.Categorical` object to a ``Series`` or assigning it to a ``DataFrame``. .. ipython:: python @@ -89,6 +94,55 @@ By passing a :class:`pandas.Categorical` object to a `Series` or assigning it to df["B"] = raw_cat df +Categorical data has a specific ``category`` :ref:`dtype <basics.dtypes>`: + +.. ipython:: python + + df.dtypes + +DataFrame Creation +~~~~~~~~~~~~~~~~~~ + +Similar to the previous section where a single column was converted to categorical, all columns in a +``DataFrame`` can be batch converted to categorical either during or after construction. + +This can be done during construction by specifying ``dtype="category"`` in the ``DataFrame`` constructor: + +.. ipython:: python + + df = pd.DataFrame({'A': list('abca'), 'B': list('bccd')}, dtype="category") + df.dtypes + +Note that the categories present in each column differ; the conversion is done column by column, so +only labels present in a given column are categories: + +.. ipython:: python + + df['A'] + df['B'] + + +.. 
versionadded:: 0.23.0 + +Analogously, all columns in an existing ``DataFrame`` can be batch converted using :meth:`DataFrame.astype`: + +.. ipython:: python + + df = pd.DataFrame({'A': list('abca'), 'B': list('bccd')}) + df_cat = df.astype('category') + df_cat.dtypes + +This conversion is likewise done column by column: + +.. ipython:: python + + df_cat['A'] + df_cat['B'] + + +Controlling Behavior +~~~~~~~~~~~~~~~~~~~~ + In the examples above where we passed ``dtype='category'``, we used the default behavior: @@ -108,21 +162,36 @@ of :class:`~pandas.api.types.CategoricalDtype`. s_cat = s.astype(cat_type) s_cat -Categorical data has a specific ``category`` :ref:`dtype <basics.dtypes>`: +Similarly, a ``CategoricalDtype`` can be used with a ``DataFrame`` to ensure that categories +are consistent among all columns. .. ipython:: python - df.dtypes + df = pd.DataFrame({'A': list('abca'), 'B': list('bccd')}) + cat_type = CategoricalDtype(categories=list('abcd'), + ordered=True) + df_cat = df.astype(cat_type) + df_cat['A'] + df_cat['B'] .. note:: - In contrast to R's `factor` function, categorical data is not converting input values to - strings and categories will end up the same data type as the original values. + To perform table-wise conversion, where all labels in the entire ``DataFrame`` are used as + categories for each column, the ``categories`` parameter can be determined programatically by + ``categories = pd.unique(df.values.ravel())``. -.. note:: +If you already have ``codes`` and ``categories``, you can use the +:func:`~pandas.Categorical.from_codes` constructor to save the factorize step +during normal constructor mode: - In contrast to R's `factor` function, there is currently no way to assign/change labels at - creation time. Use `categories` to change the categories after creation time. +.. 
ipython:: python + + splitter = np.random.choice([0,1], 5, p=[0.5,0.5]) + s = pd.Series(pd.Categorical.from_codes(splitter, categories=["train", "test"])) + + +Regaining Original Data +~~~~~~~~~~~~~~~~~~~~~~~ To get back to the original ``Series`` or NumPy array, use ``Series.astype(original_dtype)`` or ``np.asarray(categorical)``: @@ -136,14 +205,15 @@ To get back to the original ``Series`` or NumPy array, use s2.astype(str) np.asarray(s2) -If you already have `codes` and `categories`, you can use the -:func:`~pandas.Categorical.from_codes` constructor to save the factorize step -during normal constructor mode: +.. note:: -.. ipython:: python + In contrast to R's `factor` function, categorical data is not converting input values to + strings; categories will end up the same data type as the original values. - splitter = np.random.choice([0,1], 5, p=[0.5,0.5]) - s = pd.Series(pd.Categorical.from_codes(splitter, categories=["train", "test"])) +.. note:: + + In contrast to R's `factor` function, there is currently no way to assign/change labels at + creation time. Use `categories` to change the categories after creation time. .. _categorical.categoricaldtype: diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6865428c352c1..d65d1f64e14ba 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -268,6 +268,37 @@ The :func:`DataFrame.assign` now accepts dependent keyword arguments for python df.assign(A=df.A+1, C= lambda df: df.A* -1) + +.. _whatsnew_0230.enhancements.astype_category: + +``DataFrame.astype`` performs column-wise conversion to ``Categorical`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`DataFrame.astype` can now perform column-wise conversion to ``Categorical`` by supplying the string ``'category'`` or +a :class:`~pandas.api.types.CategoricalDtype`. Previously, attempting this would raise a ``NotImplementedError``. 
See the +:ref:`categorical.objectcreation` section of the documentation for more details and examples. (:issue:`12860`, :issue:`18099`) + +Supplying the string ``'category'`` performs column-wise conversion, with only labels appearing in a given column set as categories: + +.. ipython:: python + + df = pd.DataFrame({'A': list('abca'), 'B': list('bccd')}) + df = df.astype('category') + df['A'].dtype + df['B'].dtype + + +Supplying a ``CategoricalDtype`` will make the categories in each column consistent with the supplied dtype: + +.. ipython:: python + + from pandas.api.types import CategoricalDtype + df = pd.DataFrame({'A': list('abca'), 'B': list('bccd')}) + cdt = CategoricalDtype(categories=list('abcd'), ordered=True) + df = df.astype(cdt) + df['A'].dtype + df['B'].dtype + .. _whatsnew_0230.enhancements.other: Other Enhancements diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e1ed6ae9c8a6c..c4eb7dd7e7a7e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -18,6 +18,7 @@ is_number, is_integer, is_bool, is_bool_dtype, + is_categorical_dtype, is_numeric_dtype, is_datetime64_dtype, is_timedelta64_dtype, @@ -4429,14 +4430,18 @@ def astype(self, dtype, copy=True, errors='raise', **kwargs): if col_name not in self: raise KeyError('Only a column name can be used for the ' 'key in a dtype mappings argument.') - from pandas import concat results = [] for col_name, col in self.iteritems(): if col_name in dtype: results.append(col.astype(dtype[col_name], copy=copy)) else: results.append(results.append(col.copy() if copy else col)) - return concat(results, axis=1, copy=False) + return pd.concat(results, axis=1, copy=False) + + elif is_categorical_dtype(dtype) and self.ndim > 1: + # GH 18099: columnwise conversion to categorical + results = (self[col].astype(dtype, copy=copy) for col in self) + return pd.concat(results, axis=1, copy=False) # else, only a single dtype is given new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, 
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index e9e5b2a447a4a..430d43019afc2 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -8,11 +8,11 @@ import numpy as np from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, - compat, concat, option_context) + Categorical, compat, concat, option_context) from pandas.compat import u from pandas import _np_version_under1p14 -from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import DatetimeTZDtype, CategoricalDtype from pandas.tests.frame.common import TestData from pandas.util.testing import (assert_series_equal, assert_frame_equal, @@ -619,12 +619,21 @@ def test_astype_duplicate_col(self): expected = concat([a1_str, b, a2_str], axis=1) assert_frame_equal(result, expected) - @pytest.mark.parametrize('columns', [['x'], ['x', 'y'], ['x', 'y', 'z']]) - def test_categorical_astype_ndim_raises(self, columns): - # GH 18004 - msg = '> 1 ndim Categorical are not supported at this time' - with tm.assert_raises_regex(NotImplementedError, msg): - DataFrame(columns=columns).astype('category') + @pytest.mark.parametrize('dtype', [ + 'category', + CategoricalDtype(), + CategoricalDtype(ordered=True), + CategoricalDtype(ordered=False), + CategoricalDtype(categories=list('abcdef')), + CategoricalDtype(categories=list('edba'), ordered=False), + CategoricalDtype(categories=list('edcb'), ordered=True)], ids=repr) + def test_astype_categorical(self, dtype): + # GH 18099 + d = {'A': list('abbc'), 'B': list('bccd'), 'C': list('cdde')} + df = DataFrame(d) + result = df.astype(dtype) + expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d}) + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("cls", [ pd.api.types.CategoricalDtype,
- [X] closes #12860 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18099
2017-11-03T23:13:10Z
2018-03-01T01:07:16Z
2018-03-01T01:07:16Z
2018-03-01T01:51:39Z
fix error messages on import with broken build
diff --git a/pandas/__init__.py b/pandas/__init__.py index 8d9b75ccd6c2c..5fc347f4a5aea 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -28,7 +28,11 @@ tslib as _tslib) except ImportError as e: # pragma: no cover # hack but overkill to use re - module = str(e).replace('cannot import name ', '') + module = str(e).replace('cannot import name ', '')\ + .replace('No module named ', '') + if 'does not export' in module: + # GH#18089 + raise raise ImportError("C extension: {0} not built. If you want to import " "pandas from the source directory, you may need to run " "'python setup.py build_ext --inplace --force' to build "
There is some disagreement over whether this should close #18089, the canonical answer being "no". This at least makes it slightly less bad.
https://api.github.com/repos/pandas-dev/pandas/pulls/18098
2017-11-03T20:10:30Z
2017-11-03T23:59:00Z
null
2017-12-08T19:40:27Z
TST: Add regression test for empty DataFrame groupby
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 6f022aeff577b..2f750a7621905 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2732,6 +2732,16 @@ def h(df, arg3): assert_series_equal(result, expected) + def test_empty_dataframe_groupby(self): + # GH8093 + df = DataFrame(columns=['A', 'B', 'C']) + + result = df.groupby('A').sum() + expected = DataFrame(columns=['B', 'C'], dtype=np.float64) + expected.index.name = 'A' + + assert_frame_equal(result, expected) + def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values)
- [x] closes #8093 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18097
2017-11-03T18:12:55Z
2017-11-04T14:43:07Z
2017-11-04T14:43:06Z
2017-11-04T14:43:09Z
Conda appveyor master
diff --git a/appveyor.yml b/appveyor.yml index a1f8886f6d068..8e6a4f03750e0 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -18,13 +18,13 @@ environment: matrix: - - CONDA_ROOT: "C:\\Miniconda3_64" + - CONDA_ROOT: "C:\\Miniconda36_64" PYTHON_VERSION: "3.6" PYTHON_ARCH: "64" CONDA_PY: "36" CONDA_NPY: "112" - - CONDA_ROOT: "C:\\Miniconda3_64" + - CONDA_ROOT: "C:\\Miniconda36_64" PYTHON_VERSION: "2.7" PYTHON_ARCH: "64" CONDA_PY: "27" @@ -74,18 +74,12 @@ install: # create our env - cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest>=3.1.0 pytest-xdist - cmd: activate pandas - - cmd: pip install moto - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.run - cmd: echo "installing requirements from %REQ%" - cmd: conda install -n pandas --file=%REQ% - cmd: conda list -n pandas - cmd: echo "installing requirements from %REQ% - done" - # add some pip only reqs to the env - - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.pip - - cmd: echo "installing requirements from %REQ%" - - cmd: pip install -Ur %REQ% - # build em using the local source checkout in the correct windows env - cmd: '%CMD_IN_ENV% python setup.py build_ext --inplace' diff --git a/ci/requirements-2.7_WIN.run b/ci/requirements-2.7_WIN.run index c4ca7fc736bb1..4f638c03b8817 100644 --- a/ci/requirements-2.7_WIN.run +++ b/ci/requirements-2.7_WIN.run @@ -16,3 +16,4 @@ bottleneck html5lib beautifulsoup4 jinja2=2.8 +moto \ No newline at end of file diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index 5d6c074ec1f85..263e20b85a1f6 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -6,7 +6,7 @@ openpyxl xlsxwriter xlrd xlwt -# scipy +scipy feather-format numexpr pytables @@ -14,3 +14,4 @@ matplotlib blosc fastparquet pyarrow +moto \ No newline at end of file
xref https://github.com/conda/conda/issues/6251 Closes https://github.com/pandas-dev/pandas/issues/18073
https://api.github.com/repos/pandas-dev/pandas/pulls/18096
2017-11-03T17:50:29Z
2017-11-04T23:27:35Z
null
2023-05-11T01:16:40Z
BUG: MultiIndex mangling during parsing (#18062)
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index c41da4d67afe5..df7a4cb46b0ec 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -89,6 +89,7 @@ Bug Fixes - Bug in ``pd.read_msgpack()`` with a non existent file is passed in Python 2 (:issue:`15296`) - Bug in ``DataFrame.groupby`` where key as tuple in a ``MultiIndex`` were interpreted as a list of keys (:issue:`17979`) +- Bug in :func:`pd.read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) Conversion ^^^^^^^^^^ diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 1b6414ea974fa..79422ddcaf609 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1106,6 +1106,24 @@ def _is_index_col(col): return col is not None and col is not False +def _is_potential_multi_index(columns): + """ + Check whether or not the `columns` parameter + could be converted into a MultiIndex. + + Parameters + ---------- + columns : array-like + Object which may or may not be convertible into a MultiIndex + + Returns + ------- + boolean : Whether or not columns could become a MultiIndex + """ + return (len(columns) and not isinstance(columns, MultiIndex) and + all([isinstance(c, tuple) for c in columns])) + + def _evaluate_usecols(usecols, names): """ Check whether or not the 'usecols' parameter @@ -1374,6 +1392,7 @@ def _maybe_dedup_names(self, names): if self.mangle_dupe_cols: names = list(names) # so we can index counts = defaultdict(int) + is_potential_mi = _is_potential_multi_index(names) for i, col in enumerate(names): cur_count = counts[col] @@ -1381,7 +1400,10 @@ def _maybe_dedup_names(self, names): while cur_count > 0: counts[col] = cur_count + 1 - col = '%s.%d' % (col, cur_count) + if is_potential_mi: + col = col[:-1] + ('%s.%d' % (col[-1], cur_count),) + else: + col = '%s.%d' % (col, cur_count) cur_count = counts[col] names[i] = col @@ -1391,9 +1413,7 @@ def _maybe_dedup_names(self, names): 
def _maybe_make_multi_index_columns(self, columns, col_names=None): # possibly create a column mi here - if (not self.tupleize_cols and len(columns) and - not isinstance(columns, MultiIndex) and - all([isinstance(c, tuple) for c in columns])): + if _is_potential_multi_index(columns): columns = MultiIndex.from_tuples(columns, names=col_names) return columns diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py index ff3beb70b774f..58dae112c59b7 100644 --- a/pandas/tests/io/parser/header.py +++ b/pandas/tests/io/parser/header.py @@ -290,3 +290,30 @@ def test_singleton_header(self): df = self.read_csv(StringIO(data), header=[0]) expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]}) tm.assert_frame_equal(df, expected) + + def test_mangles_multi_index(self): + # See GH 18062 + data = """A,A,A,B\none,one,one,two\n0,40,34,0.1""" + df = self.read_csv(StringIO(data), header=[0, 1]) + expected = DataFrame([[0, 40, 34, 0.1]], + columns=MultiIndex.from_tuples( + [('A', 'one'), ('A', 'one.1'), + ('A', 'one.2'), ('B', 'two')])) + tm.assert_frame_equal(df, expected) + + data = """A,A,A,B\none,one,one.1,two\n0,40,34,0.1""" + df = self.read_csv(StringIO(data), header=[0, 1]) + expected = DataFrame([[0, 40, 34, 0.1]], + columns=MultiIndex.from_tuples( + [('A', 'one'), ('A', 'one.1'), + ('A', 'one.1.1'), ('B', 'two')])) + tm.assert_frame_equal(df, expected) + + data = """A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1""" + df = self.read_csv(StringIO(data), header=[0, 1]) + expected = DataFrame([[0, 40, 34, 0.1, 0.1]], + columns=MultiIndex.from_tuples( + [('A', 'one'), ('A', 'one.1'), + ('A', 'one.1.1'), ('B', 'two'), + ('B', 'two.1')])) + tm.assert_frame_equal(df, expected)
- [X] closes #18062 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18094
2017-11-03T16:48:03Z
2017-11-06T17:41:31Z
2017-11-06T17:41:31Z
2017-11-06T22:09:01Z
BUG: Fix the error when reading the compressed UTF-16 file
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 4adafe7c06450..270d81dda6b45 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -76,7 +76,7 @@ I/O ^^^ - Bug in class:`~pandas.io.stata.StataReader` not converting date/time columns with display formatting addressed (:issue:`17990`). Previously columns with display formatting were normally left as ordinal numbers and not converted to datetime objects. - +- Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`) Plotting ^^^^^^^^ diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index a5ce6c560d844..85857c158f96e 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -374,6 +374,17 @@ cdef class TextReader: float_precision=None, skip_blank_lines=True): + # set encoding for native Python and C library + if encoding is not None: + if not isinstance(encoding, bytes): + encoding = encoding.encode('utf-8') + encoding = encoding.lower() + self.c_encoding = <char*> encoding + else: + self.c_encoding = NULL + + self.encoding = encoding + self.parser = parser_new() self.parser.chunksize = tokenize_chunksize @@ -495,17 +506,6 @@ cdef class TextReader: self.parser.double_converter_nogil = NULL self.parser.double_converter_withgil = round_trip - # encoding - if encoding is not None: - if not isinstance(encoding, bytes): - encoding = encoding.encode('utf-8') - encoding = encoding.lower() - self.c_encoding = <char*> encoding - else: - self.c_encoding = NULL - - self.encoding = encoding - if isinstance(dtype, dict): dtype = {k: pandas_dtype(dtype[k]) for k in dtype} @@ -684,6 +684,14 @@ cdef class TextReader: else: raise ValueError('Unrecognized compression type: %s' % self.compression) + + if b'utf-16' in (self.encoding or b''): + # we need to read utf-16 through UTF8Recoder. + # if source is utf-16, convert source to utf-8 by UTF8Recoder. 
+ source = com.UTF8Recoder(source, self.encoding.decode('utf-8')) + self.encoding = b'utf-8' + self.c_encoding = <char*> self.encoding + self.handle = source if isinstance(source, basestring): diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 1b6414ea974fa..7f3f5630e49f9 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1671,7 +1671,9 @@ def __init__(self, src, **kwds): ParserBase.__init__(self, kwds) - if 'utf-16' in (kwds.get('encoding') or ''): + if (kwds.get('compression') is None + and 'utf-16' in (kwds.get('encoding') or '')): + # if source is utf-16 plain text, convert source to utf-8 if isinstance(src, compat.string_types): src = open(src, 'rb') self.handles.append(src) diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 797c12139656d..84db9d14eee07 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -7,6 +7,7 @@ import pytest +import pandas as pd import pandas.util.testing as tm @@ -157,6 +158,19 @@ def test_read_csv_infer_compression(self): inputs[3].close() + def test_read_csv_compressed_utf16_example(self): + # GH18071 + path = tm.get_data_path('utf16_ex_small.zip') + + result = self.read_csv(path, encoding='utf-16', + compression='zip', sep='\t') + expected = pd.DataFrame({ + u'Country': [u'Venezuela', u'Venezuela'], + u'Twitter': [u'Hugo Chávez Frías', u'Henrique Capriles R.'] + }) + + tm.assert_frame_equal(result, expected) + def test_invalid_compression(self): msg = 'Unrecognized compression type: sfark' with tm.assert_raises_regex(ValueError, msg): diff --git a/pandas/tests/io/parser/data/utf16_ex_small.zip b/pandas/tests/io/parser/data/utf16_ex_small.zip new file mode 100644 index 0000000000000..b0560c1b1f6c4 Binary files /dev/null and b/pandas/tests/io/parser/data/utf16_ex_small.zip differ
- [X] closes #18071 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18091
2017-11-03T11:42:20Z
2017-11-04T15:19:59Z
2017-11-04T15:19:59Z
2017-12-11T20:25:01Z
COMPAT: compare platform return on 32-bit
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 6a5c0ae11abb7..240a7ad4b22f9 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1243,13 +1243,21 @@ def test_groupsort_indexer(): result = libalgos.groupsort_indexer(a, 1000)[0] # need to use a stable sort + # np.argsort returns int, groupsort_indexer + # always returns int64 expected = np.argsort(a, kind='mergesort') + expected = expected.astype(np.int64) + tm.assert_numpy_array_equal(result, expected) # compare with lexsort + # np.lexsort returns int, groupsort_indexer + # always returns int64 key = a * 1000 + b result = libalgos.groupsort_indexer(key, 1000000)[0] expected = np.lexsort((b, a)) + expected = expected.astype(np.int64) + tm.assert_numpy_array_equal(result, expected)
xref #18047
https://api.github.com/repos/pandas-dev/pandas/pulls/18090
2017-11-03T10:29:36Z
2017-11-03T12:48:23Z
2017-11-03T12:48:23Z
2017-12-11T20:25:47Z
TST: Remove even more uses np.array_equal in tests
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 986ba54314192..430f3e12ae32e 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -237,7 +237,7 @@ def test_modulo(self): s = p[0] res = s % p res2 = p % s - assert not np.array_equal(res.fillna(0), res2.fillna(0)) + assert not res.fillna(0).equals(res2.fillna(0)) def test_div(self): @@ -271,7 +271,7 @@ def test_div(self): s = p[0] res = s / p res2 = p / s - assert not np.array_equal(res.fillna(0), res2.fillna(0)) + assert not res.fillna(0).equals(res2.fillna(0)) def test_logical_operators(self): @@ -1030,7 +1030,7 @@ def test_boolean_comparison(self): assert_numpy_array_equal(result, expected.values) pytest.raises(ValueError, lambda: df == b_c) - assert not np.array_equal(df.values, b_c) + assert df.values.shape != b_c.shape # with alignment df = DataFrame(np.arange(6).reshape((3, 2)), diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 2625f4be840c4..ff788fb2347b8 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -694,7 +694,7 @@ def test_modulo(self): p = p.astype('float64') result = p['first'] % p['second'] result2 = p['second'] % p['first'] - assert not np.array_equal(result, result2) + assert not result.equals(result2) # GH 9144 s = Series([0, 1]) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index c8cc80b1cf4b1..0d064983fb546 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -122,7 +122,7 @@ def test_div(self): assert_series_equal(result, p['first'].astype('float64'), check_names=False) assert result.name is None - assert not np.array_equal(result, p['second'] / p['first']) + assert not result.equals(p['second'] / p['first']) # inf signing s = Series([np.nan, 1., -1.])
Implement `assert_not` as a way to check that assertions should fail (for methods more sophisticated than a simple bare `assert`). Also takes the opportunity to remove several more `np.array_equal` assertions. Follow-up to #18047
https://api.github.com/repos/pandas-dev/pandas/pulls/18087
2017-11-03T05:37:40Z
2017-11-16T00:23:52Z
2017-11-16T00:23:52Z
2017-11-16T08:12:40Z
Move normalization funcs up to conversion
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 2fbbc81c4b5a1..e1312a40971f0 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # cython: profile=False cimport numpy as cnp diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index bf22a3a528259..b5285d158b1ed 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -97,9 +97,8 @@ from tslibs.conversion cimport (tz_convert_single, _TSObject, convert_to_tsobject, convert_datetime_to_tsobject, get_datetime64_nanos) -from tslibs.conversion import ( - tz_localize_to_utc, tz_convert, - tz_convert_single) +from tslibs.conversion import (tz_localize_to_utc, + tz_convert_single, date_normalize) from tslibs.nattype import NaT, nat_strings from tslibs.nattype cimport _checknull_with_nat @@ -1849,26 +1848,6 @@ cdef inline _to_i8(object val): return val -cpdef pydt_to_i8(object pydt): - """ - Convert to int64 representation compatible with numpy datetime64; converts - to UTC - """ - cdef: - _TSObject ts - - ts = convert_to_tsobject(pydt, None, None, 0, 0) - - return ts.value - - -def i8_to_pydt(int64_t i8, object tzinfo=None): - """ - Inverse of pydt_to_i8 - """ - return Timestamp(i8) - - # ---------------------------------------------------------------------- # Accessors @@ -1892,130 +1871,6 @@ def get_time_micros(ndarray[int64_t] dtindex): return micros -cdef int64_t DAY_NS = 86400000000000LL - - -@cython.wraparound(False) -@cython.boundscheck(False) -def date_normalize(ndarray[int64_t] stamps, tz=None): - cdef: - Py_ssize_t i, n = len(stamps) - pandas_datetimestruct dts - ndarray[int64_t] result = np.empty(n, dtype=np.int64) - - if tz is not None: - tz = maybe_get_tz(tz) - result = _normalize_local(stamps, tz) - else: - with nogil: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i], &dts) - result[i] = _normalized_stamp(&dts) - - return result - - 
-@cython.wraparound(False) -@cython.boundscheck(False) -cdef _normalize_local(ndarray[int64_t] stamps, object tz): - cdef: - Py_ssize_t n = len(stamps) - ndarray[int64_t] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans, deltas, pos - pandas_datetimestruct dts - - if is_utc(tz): - with nogil: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i], &dts) - result[i] = _normalized_stamp(&dts) - elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i], &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz) - delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 - dt64_to_dtstruct(stamps[i] + delta, &dts) - result[i] = _normalized_stamp(&dts) - else: - # Adjust datetime64 timestamp, recompute datetimestruct - trans, deltas, typ = get_dst_info(tz) - - _pos = trans.searchsorted(stamps, side='right') - 1 - if _pos.dtype != np.int64: - _pos = _pos.astype(np.int64) - pos = _pos - - # statictzinfo - if typ not in ['pytz', 'dateutil']: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i] + deltas[0], &dts) - result[i] = _normalized_stamp(&dts) - else: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - result[i] = _normalized_stamp(&dts) - - return result - -cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil: - dts.hour = 0 - dts.min = 0 - dts.sec = 0 - dts.us = 0 - dts.ps = 0 - return dtstruct_to_dt64(dts) - - -def dates_normalized(ndarray[int64_t] stamps, tz=None): - cdef: - Py_ssize_t i, n = len(stamps) - ndarray[int64_t] trans, deltas - pandas_datetimestruct dts - - if tz is None or is_utc(tz): - for i in range(n): - dt64_to_dtstruct(stamps[i], &dts) - if (dts.hour + dts.min + dts.sec + dts.us) > 0: - return False - elif 
is_tzlocal(tz): - for i in range(n): - dt64_to_dtstruct(stamps[i], &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, - dts.sec, dts.us, tz) - dt = dt + tz.utcoffset(dt) - if (dt.hour + dt.minute + dt.second + dt.microsecond) > 0: - return False - else: - trans, deltas, typ = get_dst_info(tz) - - for i in range(n): - # Adjust datetime64 timestamp, recompute datetimestruct - pos = trans.searchsorted(stamps[i]) - 1 - inf = tz._transition_info[pos] - - dt64_to_dtstruct(stamps[i] + deltas[pos], &dts) - if (dts.hour + dts.min + dts.sec + dts.us) > 0: - return False - - return True - - # ---------------------------------------------------------------------- # Some general helper functions diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 843a688a2630c..ad817ce8852f2 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -26,3 +26,5 @@ cdef void _localize_tso(_TSObject obj, object tz) cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2) cdef int64_t get_datetime64_nanos(object val) except? -1 + +cpdef int64_t pydt_to_i8(object pydt) except? -1 diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 61efc865112a9..88372699911c4 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -53,7 +53,6 @@ UTC = pytz.UTC # ---------------------------------------------------------------------- # Misc Helpers - # TODO: How to declare np.datetime64 as the input type? cdef inline int64_t get_datetime64_nanos(object val) except? -1: """ @@ -90,6 +89,27 @@ cdef class _TSObject: return self.value +cpdef int64_t pydt_to_i8(object pydt) except? 
-1: + """ + Convert to int64 representation compatible with numpy datetime64; converts + to UTC + + Parameters + ---------- + pydt : object + + Returns + ------- + i8value : np.int64 + """ + cdef: + _TSObject ts + + ts = convert_to_tsobject(pydt, None, None, 0, 0) + + return ts.value + + cdef convert_to_tsobject(object ts, object tz, object unit, bint dayfirst, bint yearfirst): """ @@ -334,18 +354,18 @@ cdef inline void _localize_tso(_TSObject obj, object tz): Py_ssize_t delta, posn datetime dt + assert obj.tzinfo is None + if is_utc(tz): - obj.tzinfo = tz + pass + elif obj.value == NPY_NAT: + pass elif is_tzlocal(tz): dt64_to_dtstruct(obj.value, &obj.dts) dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, obj.dts.hour, obj.dts.min, obj.dts.sec, obj.dts.us, tz) delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 - if obj.value != NPY_NAT: - dt64_to_dtstruct(obj.value + delta, &obj.dts) - else: - dt64_to_dtstruct(obj.value, &obj.dts) - obj.tzinfo = tz + dt64_to_dtstruct(obj.value + delta, &obj.dts) else: # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) @@ -355,26 +375,17 @@ cdef inline void _localize_tso(_TSObject obj, object tz): # static/pytz/dateutil specific code if is_fixed_offset(tz): # statictzinfo - if len(deltas) > 0 and obj.value != NPY_NAT: - dt64_to_dtstruct(obj.value + deltas[0], &obj.dts) - else: - dt64_to_dtstruct(obj.value, &obj.dts) - obj.tzinfo = tz + assert len(deltas) == 1, len(deltas) + dt64_to_dtstruct(obj.value + deltas[0], &obj.dts) elif treat_tz_as_pytz(tz): - inf = tz._transition_info[pos] - if obj.value != NPY_NAT: - dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) - else: - dt64_to_dtstruct(obj.value, &obj.dts) - obj.tzinfo = tz._tzinfos[inf] + tz = tz._tzinfos[tz._transition_info[pos]] + dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) elif treat_tz_as_dateutil(tz): - if obj.value != NPY_NAT: - dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) - else: - 
dt64_to_dtstruct(obj.value, &obj.dts) - obj.tzinfo = tz + dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) else: - obj.tzinfo = tz + pass + + obj.tzinfo = tz cdef inline datetime _localize_pydatetime(datetime dt, tzinfo tz): @@ -782,3 +793,183 @@ cdef inline str _render_tstamp(int64_t val): """ Helper function to render exception messages""" from pandas._libs.tslib import Timestamp return str(Timestamp(val)) + + +# ---------------------------------------------------------------------- +# Normalization + +@cython.wraparound(False) +@cython.boundscheck(False) +def date_normalize(ndarray[int64_t] stamps, tz=None): + """ + Normalize each of the (nanosecond) timestamps in the given array by + rounding down to the beginning of the day (i.e. midnight). If `tz` + is not None, then this is midnight for this timezone. + + Parameters + ---------- + stamps : int64 ndarray + tz : tzinfo or None + + Returns + ------- + result : int64 ndarray of converted of normalized nanosecond timestamps + """ + cdef: + Py_ssize_t i, n = len(stamps) + pandas_datetimestruct dts + ndarray[int64_t] result = np.empty(n, dtype=np.int64) + + if tz is not None: + tz = maybe_get_tz(tz) + result = _normalize_local(stamps, tz) + else: + with nogil: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i], &dts) + result[i] = _normalized_stamp(&dts) + + return result + + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): + """ + Normalize each of the (nanosecond) timestamps in the given array by + rounding down to the beginning of the day (i.e. midnight) for the + given timezone `tz`. 
+ + Parameters + ---------- + stamps : int64 ndarray + tz : tzinfo or None + + Returns + ------- + result : int64 ndarray of converted of normalized nanosecond timestamps + """ + cdef: + Py_ssize_t n = len(stamps) + ndarray[int64_t] result = np.empty(n, dtype=np.int64) + ndarray[int64_t] trans, deltas, pos + pandas_datetimestruct dts + datetime dt + + if is_utc(tz): + with nogil: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i], &dts) + result[i] = _normalized_stamp(&dts) + elif is_tzlocal(tz): + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i], &dts) + dt = datetime(dts.year, dts.month, dts.day, dts.hour, + dts.min, dts.sec, dts.us, tz) + delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 + dt64_to_dtstruct(stamps[i] + delta, &dts) + result[i] = _normalized_stamp(&dts) + else: + # Adjust datetime64 timestamp, recompute datetimestruct + trans, deltas, typ = get_dst_info(tz) + + _pos = trans.searchsorted(stamps, side='right') - 1 + if _pos.dtype != np.int64: + _pos = _pos.astype(np.int64) + pos = _pos + + # statictzinfo + if typ not in ['pytz', 'dateutil']: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i] + deltas[0], &dts) + result[i] = _normalized_stamp(&dts) + else: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) + result[i] = _normalized_stamp(&dts) + + return result + + +cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil: + """ + Normalize the given datetimestruct to midnight, then convert to int64_t. 
+ + Parameters + ---------- + *dts : pointer to pandas_datetimestruct + + Returns + ------- + stamp : int64 + """ + dts.hour = 0 + dts.min = 0 + dts.sec = 0 + dts.us = 0 + dts.ps = 0 + return dtstruct_to_dt64(dts) + + +def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): + """ + Check if all of the given (nanosecond) timestamps are normalized to + midnight, i.e. hour == minute == second == 0. If the optional timezone + `tz` is not None, then this is midnight for this timezone. + + Parameters + ---------- + stamps : int64 ndarray + tz : tzinfo or None + + Returns + ------- + is_normalized : bool True if all stamps are normalized + """ + cdef: + Py_ssize_t i, n = len(stamps) + ndarray[int64_t] trans, deltas + pandas_datetimestruct dts + datetime dt + + if tz is None or is_utc(tz): + for i in range(n): + dt64_to_dtstruct(stamps[i], &dts) + if (dts.hour + dts.min + dts.sec + dts.us) > 0: + return False + elif is_tzlocal(tz): + for i in range(n): + dt64_to_dtstruct(stamps[i], &dts) + dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, + dts.sec, dts.us, tz) + dt = dt + tz.utcoffset(dt) + if (dt.hour + dt.minute + dt.second + dt.microsecond) > 0: + return False + else: + trans, deltas, typ = get_dst_info(tz) + + for i in range(n): + # Adjust datetime64 timestamp, recompute datetimestruct + pos = trans.searchsorted(stamps[i]) - 1 + inf = tz._transition_info[pos] + + dt64_to_dtstruct(stamps[i] + deltas[pos], &dts) + if (dts.hour + dts.min + dts.sec + dts.us) > 0: + return False + + return True diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index c64b6568a0495..2d8ce4c59fedc 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -15,10 +15,10 @@ np.import_array() from util cimport is_string_object, is_integer_object -from pandas._libs.tslib import pydt_to_i8, monthrange +from pandas._libs.tslib import monthrange +from conversion cimport tz_convert_single, pydt_to_i8 from frequencies cimport 
get_freq_code -from conversion cimport tz_convert_single # --------------------------------------------------------------------- # Constants diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index aa99e8920d9b5..2e022cb151008 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -55,8 +55,7 @@ from pandas._libs import (lib, index as libindex, tslib as libts, algos as libalgos, join as libjoin, Timestamp, period as libperiod) -from pandas._libs.tslibs import timezones - +from pandas._libs.tslibs import timezones, conversion # -------- some conversion wrapper functions @@ -384,8 +383,8 @@ def __new__(cls, data=None, getattr(data, 'tz', None) is None): # Convert tz-naive to UTC ints = subarr.view('i8') - subarr = libts.tz_localize_to_utc(ints, tz, - ambiguous=ambiguous) + subarr = conversion.tz_localize_to_utc(ints, tz, + ambiguous=ambiguous) subarr = subarr.view(_NS_DTYPE) subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz) @@ -531,8 +530,8 @@ def _generate(cls, start, end, periods, name, offset, index = _generate_regular_range(start, end, periods, offset) if tz is not None and getattr(index, 'tz', None) is None: - index = libts.tz_localize_to_utc(_ensure_int64(index), tz, - ambiguous=ambiguous) + index = conversion.tz_localize_to_utc(_ensure_int64(index), tz, + ambiguous=ambiguous) index = index.view(_NS_DTYPE) # index is localized datetime64 array -> have to convert @@ -561,11 +560,11 @@ def _convert_for_op(self, value): def _local_timestamps(self): if self.is_monotonic: - return libts.tz_convert(self.asi8, utc, self.tz) + return conversion.tz_convert(self.asi8, utc, self.tz) else: values = self.asi8 indexer = values.argsort() - result = libts.tz_convert(values.take(indexer), utc, self.tz) + result = conversion.tz_convert(values.take(indexer), utc, self.tz) n = len(indexer) reverse = np.empty(n, dtype=np.int_) @@ -1644,7 +1643,7 @@ def normalize(self): ------- normalized : DatetimeIndex """ 
- new_values = libts.date_normalize(self.asi8, self.tz) + new_values = conversion.date_normalize(self.asi8, self.tz) return DatetimeIndex(new_values, freq='infer', name=self.name, tz=self.tz) @@ -1683,7 +1682,7 @@ def is_normalized(self): """ Returns True if all of the dates are at midnight ("no time") """ - return libts.dates_normalized(self.asi8, self.tz) + return conversion.is_date_array_normalized(self.asi8, self.tz) @cache_readonly def _resolution(self): @@ -1724,7 +1723,7 @@ def insert(self, loc, item): new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)) if self.tz is not None: - new_dates = libts.tz_convert(new_dates, 'UTC', self.tz) + new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz) return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) @@ -1764,7 +1763,7 @@ def delete(self, loc): freq = self.freq if self.tz is not None: - new_dates = libts.tz_convert(new_dates, 'UTC', self.tz) + new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz) return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) def tz_convert(self, tz): @@ -1844,16 +1843,16 @@ def tz_localize(self, tz, ambiguous='raise', errors='raise'): """ if self.tz is not None: if tz is None: - new_dates = libts.tz_convert(self.asi8, 'UTC', self.tz) + new_dates = conversion.tz_convert(self.asi8, 'UTC', self.tz) else: raise TypeError("Already tz-aware, use tz_convert to convert.") else: tz = timezones.maybe_get_tz(tz) # Convert to UTC - new_dates = libts.tz_localize_to_utc(self.asi8, tz, - ambiguous=ambiguous, - errors=errors) + new_dates = conversion.tz_localize_to_utc(self.asi8, tz, + ambiguous=ambiguous, + errors=errors) new_dates = new_dates.view(_NS_DTYPE) return self._shallow_copy(new_dates, tz=tz) @@ -2194,7 +2193,7 @@ def _to_m8(key, tz=None): # this also converts strings key = Timestamp(key, tz=tz) - return np.int64(libts.pydt_to_i8(key)).view(_NS_DTYPE) + return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE) 
_CACHE_START = Timestamp(datetime(1950, 1, 1)) diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 1d1eeb9da2364..a79fb554f9454 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -16,8 +16,9 @@ import pandas.util.testing as tm from pandas.tseries import offsets, frequencies -from pandas._libs import tslib, period +from pandas._libs import period from pandas._libs.tslibs.timezones import get_timezone +from pandas._libs.tslibs import conversion from pandas.compat import lrange, long, PY3 from pandas.util.testing import assert_series_equal @@ -77,12 +78,12 @@ def test_constructor(self): for result in [Timestamp(date_str), Timestamp(date)]: # only with timestring assert result.value == expected - assert tslib.pydt_to_i8(result) == expected + assert conversion.pydt_to_i8(result) == expected # re-creation shouldn't affect to internal value result = Timestamp(result) assert result.value == expected - assert tslib.pydt_to_i8(result) == expected + assert conversion.pydt_to_i8(result) == expected # with timezone for tz, offset in timezones: @@ -90,18 +91,18 @@ def test_constructor(self): tz=tz)]: expected_tz = expected - offset * 3600 * 1000000000 assert result.value == expected_tz - assert tslib.pydt_to_i8(result) == expected_tz + assert conversion.pydt_to_i8(result) == expected_tz # should preserve tz result = Timestamp(result) assert result.value == expected_tz - assert tslib.pydt_to_i8(result) == expected_tz + assert conversion.pydt_to_i8(result) == expected_tz # should convert to UTC result = Timestamp(result, tz='UTC') expected_utc = expected - offset * 3600 * 1000000000 assert result.value == expected_utc - assert tslib.pydt_to_i8(result) == expected_utc + assert conversion.pydt_to_i8(result) == expected_utc def test_constructor_with_stringoffset(self): # GH 7833 @@ -129,30 +130,30 @@ def test_constructor_with_stringoffset(self): for result in [Timestamp(date_str)]: # only with 
timestring assert result.value == expected - assert tslib.pydt_to_i8(result) == expected + assert conversion.pydt_to_i8(result) == expected # re-creation shouldn't affect to internal value result = Timestamp(result) assert result.value == expected - assert tslib.pydt_to_i8(result) == expected + assert conversion.pydt_to_i8(result) == expected # with timezone for tz, offset in timezones: result = Timestamp(date_str, tz=tz) expected_tz = expected assert result.value == expected_tz - assert tslib.pydt_to_i8(result) == expected_tz + assert conversion.pydt_to_i8(result) == expected_tz # should preserve tz result = Timestamp(result) assert result.value == expected_tz - assert tslib.pydt_to_i8(result) == expected_tz + assert conversion.pydt_to_i8(result) == expected_tz # should convert to UTC result = Timestamp(result, tz='UTC') expected_utc = expected assert result.value == expected_utc - assert tslib.pydt_to_i8(result) == expected_utc + assert conversion.pydt_to_i8(result) == expected_utc # This should be 2013-11-01 05:00 in UTC # converted to Chicago tz diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 724628649796d..3dfad2d4af75e 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -17,7 +17,7 @@ from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import tslib -from pandas._libs.tslibs import timezones +from pandas._libs.tslibs import timezones, conversion from pandas import (Index, Series, DataFrame, isna, Timestamp, NaT, DatetimeIndex, to_datetime) from pandas.util.testing import (assert_frame_equal, assert_series_equal, @@ -1738,14 +1738,14 @@ class TestTslib(object): def test_tslib_tz_convert(self): def compare_utc_to_local(tz_didx, utc_didx): - f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz) - result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz) + f = lambda x: 
conversion.tz_convert_single(x, 'UTC', tz_didx.tz) + result = conversion.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz) result_single = np.vectorize(f)(tz_didx.asi8) tm.assert_numpy_array_equal(result, result_single) def compare_local_to_utc(tz_didx, utc_didx): - f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC') - result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC') + f = lambda x: conversion.tz_convert_single(x, tz_didx.tz, 'UTC') + result = conversion.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC') result_single = np.vectorize(f)(utc_didx.asi8) tm.assert_numpy_array_equal(result, result_single) @@ -1770,14 +1770,14 @@ def compare_local_to_utc(tz_didx, utc_didx): compare_local_to_utc(tz_didx, utc_didx) # Check empty array - result = tslib.tz_convert(np.array([], dtype=np.int64), - timezones.maybe_get_tz('US/Eastern'), - timezones.maybe_get_tz('Asia/Tokyo')) + result = conversion.tz_convert(np.array([], dtype=np.int64), + timezones.maybe_get_tz('US/Eastern'), + timezones.maybe_get_tz('Asia/Tokyo')) tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64)) # Check all-NaT array - result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64), - timezones.maybe_get_tz('US/Eastern'), - timezones.maybe_get_tz('Asia/Tokyo')) + result = conversion.tz_convert(np.array([tslib.iNaT], dtype=np.int64), + timezones.maybe_get_tz('US/Eastern'), + timezones.maybe_get_tz('Asia/Tokyo')) tm.assert_numpy_array_equal(result, np.array( [tslib.iNaT], dtype=np.int64)) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index be25a439f9075..128dd51a2abea 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -21,6 +21,7 @@ from pandas._libs import lib, tslib from pandas._libs.tslib import Timedelta +from pandas._libs.tslibs import conversion from pandas._libs.tslibs.frequencies import ( # noqa get_freq_code, _base_and_stride, _period_str_to_code, _INVALID_FREQ_ERROR, opattern, _lite_rule_alias, _dont_uppercase, @@ 
-583,7 +584,8 @@ def __init__(self, index, warn=True): # the timezone so they are in local time if hasattr(index, 'tz'): if index.tz is not None: - self.values = tslib.tz_convert(self.values, 'UTC', index.tz) + self.values = conversion.tz_convert(self.values, + 'UTC', index.tz) self.warn = warn
We're going to need `date_normalize` upstream of `tslib` before long, so this moves it to conversion. Simplifies repeated checking in `localize_tso`, closes #17944
https://api.github.com/repos/pandas-dev/pandas/pulls/18086
2017-11-03T03:57:15Z
2017-11-12T21:04:47Z
2017-11-12T21:04:47Z
2017-12-08T19:38:48Z
move implementation of Timedelta to tslibs.timedeltas
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index 0456033dbb731..40d970c7b20f2 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -30,6 +30,7 @@ from pandas._libs import tslib from pandas._libs.tslib import Timestamp, iNaT, NaT from tslibs.timezones cimport ( is_utc, is_tzlocal, get_utcoffset, get_dst_info, maybe_get_tz) +from tslibs.timedeltas cimport delta_to_nanoseconds from tslibs.parsing import parse_time_string, NAT_SENTINEL from tslibs.frequencies cimport get_freq_code @@ -716,8 +717,8 @@ cdef class _Period(object): if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)): offset = frequencies.to_offset(self.freq.rule_code) if isinstance(offset, offsets.Tick): - nanos = tslib._delta_to_nanoseconds(other) - offset_nanos = tslib._delta_to_nanoseconds(offset) + nanos = delta_to_nanoseconds(other) + offset_nanos = delta_to_nanoseconds(offset) if nanos % offset_nanos == 0: ordinal = self.ordinal + (nanos // offset_nanos) diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index ec060335c220e..f2edf48a6b829 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -3,7 +3,8 @@ from decimal import Decimal cimport util cimport cython from tslibs.nattype import NaT -from tslib cimport convert_to_tsobject, convert_to_timedelta64 +from tslib cimport convert_to_tsobject +from tslibs.timedeltas cimport convert_to_timedelta64 from tslibs.timezones cimport get_timezone from datetime import datetime, timedelta iNaT = util.get_nat() diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd index 443b3867eb2b5..1c2c679904868 100644 --- a/pandas/_libs/tslib.pxd +++ b/pandas/_libs/tslib.pxd @@ -2,7 +2,6 @@ from numpy cimport ndarray, int64_t from tslibs.conversion cimport convert_to_tsobject -cpdef convert_to_timedelta64(object, object) cdef bint _check_all_nulls(obj) cdef _to_i8(object val) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 
08a0ed713d936..6d793b6770113 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -71,8 +71,6 @@ from .tslibs.parsing import parse_datetime_string cimport cython -from pandas.compat import iteritems - import warnings import pytz @@ -85,7 +83,8 @@ import_array() cdef int64_t NPY_NAT = util.get_nat() iNaT = NPY_NAT -from tslibs.timedeltas cimport parse_timedelta_string, cast_from_unit +from tslibs.timedeltas cimport cast_from_unit, delta_to_nanoseconds +from tslibs.timedeltas import Timedelta from tslibs.timezones cimport ( is_utc, is_tzlocal, is_fixed_offset, treat_tz_as_dateutil, treat_tz_as_pytz, @@ -1069,7 +1068,7 @@ cdef class _Timestamp(datetime): return Timestamp((self.freq * other).apply(self), freq=self.freq) elif PyDelta_Check(other) or hasattr(other, 'delta'): - nanos = _delta_to_nanoseconds(other) + nanos = delta_to_nanoseconds(other) result = Timestamp(self.value + nanos, tz=self.tzinfo, freq=self.freq) if getattr(other, 'normalize', False): @@ -1789,366 +1788,6 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', return oresult -from tslibs.timedeltas cimport _Timedelta as __Timedelta - -# Similar to Timestamp/datetime, this is a construction requirement for -# timedeltas that we need to do object instantiation in python. This will -# serve as a C extension type that shadows the Python class, where we do any -# heavy lifting. 
-cdef class _Timedelta(__Timedelta): - - def __hash__(_Timedelta self): - if self._has_ns(): - return hash(self.value) - else: - return timedelta.__hash__(self) - - def __richcmp__(_Timedelta self, object other, int op): - cdef: - _Timedelta ots - int ndim - - if isinstance(other, _Timedelta): - ots = other - elif PyDelta_Check(other): - ots = Timedelta(other) - else: - ndim = getattr(other, _NDIM_STRING, -1) - - if ndim != -1: - if ndim == 0: - if is_timedelta64_object(other): - other = Timedelta(other) - else: - if op == Py_EQ: - return False - elif op == Py_NE: - return True - - # only allow ==, != ops - raise TypeError('Cannot compare type %r with type %r' % - (type(self).__name__, - type(other).__name__)) - if util.is_array(other): - return PyObject_RichCompare(np.array([self]), other, op) - return PyObject_RichCompare(other, self, reverse_ops[op]) - else: - if op == Py_EQ: - return False - elif op == Py_NE: - return True - raise TypeError('Cannot compare type %r with type %r' % - (type(self).__name__, type(other).__name__)) - - return cmp_scalar(self.value, ots.value, op) - - -def _binary_op_method_timedeltalike(op, name): - # define a binary operation that only works if the other argument is - # timedelta like or an array of timedeltalike - def f(self, other): - # an offset - if hasattr(other, 'delta') and not isinstance(other, Timedelta): - return op(self, other.delta) - - # a datetimelike - if (isinstance(other, (datetime, np.datetime64)) - and not (isinstance(other, Timestamp) or other is NaT)): - return op(self, Timestamp(other)) - - # nd-array like - if hasattr(other, 'dtype'): - if other.dtype.kind not in ['m', 'M']: - # raise rathering than letting numpy return wrong answer - return NotImplemented - return op(self.to_timedelta64(), other) - - if not _validate_ops_compat(other): - return NotImplemented - - if other is NaT: - return NaT - - try: - other = Timedelta(other) - except ValueError: - # failed to parse as timedelta - return NotImplemented - - 
return Timedelta(op(self.value, other.value), unit='ns') - - f.__name__ = name - return f - - -def _op_unary_method(func, name): - - def f(self): - return Timedelta(func(self.value), unit='ns') - f.__name__ = name - return f - - -cdef bint _validate_ops_compat(other): - # return True if we are compat with operating - if _checknull_with_nat(other): - return True - elif PyDelta_Check(other) or is_timedelta64_object(other): - return True - elif util.is_string_object(other): - return True - elif hasattr(other, 'delta'): - return True - return False - - -# Python front end to C extension type _Timedelta -# This serves as the box for timedelta64 - - -class Timedelta(_Timedelta): - """ - Represents a duration, the difference between two dates or times. - - Timedelta is the pandas equivalent of python's ``datetime.timedelta`` - and is interchangable with it in most cases. - - Parameters - ---------- - value : Timedelta, timedelta, np.timedelta64, string, or integer - unit : string, [D,h,m,s,ms,us,ns] - Denote the unit of the input, if input is an integer. Default 'ns'. - days, seconds, microseconds, - milliseconds, minutes, hours, weeks : numeric, optional - Values for construction in compat with datetime.timedelta. - np ints and floats will be coereced to python ints and floats. - - Notes - ----- - The ``.value`` attribute is always in ns. - - """ - - def __new__(cls, object value=_no_input, unit=None, **kwargs): - cdef _Timedelta td_base - - if value is _no_input: - if not len(kwargs): - raise ValueError("cannot construct a Timedelta without a " - "value/unit or descriptive keywords " - "(days,seconds....)") - - def _to_py_int_float(v): - if is_integer_object(v): - return int(v) - elif is_float_object(v): - return float(v) - raise TypeError("Invalid type {0}. 
Must be int or " - "float.".format(type(v))) - - kwargs = dict([(k, _to_py_int_float(v)) - for k, v in iteritems(kwargs)]) - - try: - nano = kwargs.pop('nanoseconds', 0) - value = convert_to_timedelta64( - timedelta(**kwargs), 'ns') + nano - except TypeError as e: - raise ValueError("cannot construct a Timedelta from the " - "passed arguments, allowed keywords are " - "[weeks, days, hours, minutes, seconds, " - "milliseconds, microseconds, nanoseconds]") - - if isinstance(value, Timedelta): - value = value.value - elif is_string_object(value): - value = np.timedelta64(parse_timedelta_string(value)) - elif PyDelta_Check(value): - value = convert_to_timedelta64(value, 'ns') - elif is_timedelta64_object(value): - if unit is not None: - value = value.astype('timedelta64[{0}]'.format(unit)) - value = value.astype('timedelta64[ns]') - elif hasattr(value, 'delta'): - value = np.timedelta64(_delta_to_nanoseconds(value.delta), 'ns') - elif is_integer_object(value) or is_float_object(value): - # unit=None is de-facto 'ns' - value = convert_to_timedelta64(value, unit) - elif _checknull_with_nat(value): - return NaT - else: - raise ValueError("Value must be Timedelta, string, integer, " - "float, timedelta or convertible") - - if is_timedelta64_object(value): - value = value.view('i8') - - # nat - if value == NPY_NAT: - return NaT - - # make timedelta happy - td_base = _Timedelta.__new__(cls, microseconds=int(value) / 1000) - td_base.value = value - td_base.is_populated = 0 - return td_base - - def _round(self, freq, rounder): - - cdef int64_t result, unit - - from pandas.tseries.frequencies import to_offset - unit = to_offset(freq).nanos - result = unit * rounder(self.value / float(unit)) - return Timedelta(result, unit='ns') - - def round(self, freq): - """ - Round the Timedelta to the specified resolution - - Returns - ------- - a new Timedelta rounded to the given resolution of `freq` - - Parameters - ---------- - freq : a freq string indicating the rounding resolution - - 
Raises - ------ - ValueError if the freq cannot be converted - """ - return self._round(freq, np.round) - - def floor(self, freq): - """ - return a new Timedelta floored to this resolution - - Parameters - ---------- - freq : a freq string indicating the flooring resolution - """ - return self._round(freq, np.floor) - - def ceil(self, freq): - """ - return a new Timedelta ceiled to this resolution - - Parameters - ---------- - freq : a freq string indicating the ceiling resolution - """ - return self._round(freq, np.ceil) - - def __setstate__(self, state): - (value) = state - self.value = value - - def __reduce__(self): - object_state = self.value, - return (Timedelta, object_state) - - __add__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__add__') - __radd__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__radd__') - __sub__ = _binary_op_method_timedeltalike(lambda x, y: x - y, '__sub__') - __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, '__rsub__') - - def __mul__(self, other): - - # nd-array like - if hasattr(other, 'dtype'): - return other * self.to_timedelta64() - - if other is NaT: - return NaT - - # only integers and floats allowed - if not (is_integer_object(other) or is_float_object(other)): - return NotImplemented - - return Timedelta(other * self.value, unit='ns') - - __rmul__ = __mul__ - - def __truediv__(self, other): - - if hasattr(other, 'dtype'): - return self.to_timedelta64() / other - - # integers or floats - if is_integer_object(other) or is_float_object(other): - return Timedelta(self.value /other, unit='ns') - - if not _validate_ops_compat(other): - return NotImplemented - - other = Timedelta(other) - if other is NaT: - return np.nan - return self.value /float(other.value) - - def __rtruediv__(self, other): - if hasattr(other, 'dtype'): - return other / self.to_timedelta64() - - if not _validate_ops_compat(other): - return NotImplemented - - other = Timedelta(other) - if other is NaT: - return NaT - return 
float(other.value) / self.value - - if not PY3: - __div__ = __truediv__ - __rdiv__ = __rtruediv__ - - def __floordiv__(self, other): - - if hasattr(other, 'dtype'): - - # work with i8 - other = other.astype('m8[ns]').astype('i8') - - return self.value // other - - # integers only - if is_integer_object(other): - return Timedelta(self.value // other, unit='ns') - - if not _validate_ops_compat(other): - return NotImplemented - - other = Timedelta(other) - if other is NaT: - return np.nan - return self.value // other.value - - def __rfloordiv__(self, other): - if hasattr(other, 'dtype'): - - # work with i8 - other = other.astype('m8[ns]').astype('i8') - return other // self.value - - if not _validate_ops_compat(other): - return NotImplemented - - other = Timedelta(other) - if other is NaT: - return NaT - return other.value // self.value - - __inv__ = _op_unary_method(lambda x: -x, '__inv__') - __neg__ = _op_unary_method(lambda x: -x, '__neg__') - __pos__ = _op_unary_method(lambda x: x, '__pos__') - __abs__ = _op_unary_method(lambda x: abs(x), '__abs__') - - -# resolution in ns -Timedelta.min = Timedelta(np.iinfo(np.int64).min +1) -Timedelta.max = Timedelta(np.iinfo(np.int64).max) - cdef PyTypeObject* td_type = <PyTypeObject*> Timedelta @@ -2156,122 +1795,9 @@ cdef inline bint is_timedelta(object o): return Py_TYPE(o) == td_type # isinstance(o, Timedelta) -cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'): - """ - Convert an ndarray to an array of timedeltas. If errors == 'coerce', - coerce non-convertible objects to NaT. Otherwise, raise. - """ - - cdef: - Py_ssize_t i, n - ndarray[int64_t] iresult - - if errors not in ('ignore', 'raise', 'coerce'): - raise ValueError("errors must be one of 'ignore', " - "'raise', or 'coerce'}") - - n = values.shape[0] - result = np.empty(n, dtype='m8[ns]') - iresult = result.view('i8') - - # Usually, we have all strings. If so, we hit the fast path. 
- # If this path fails, we try conversion a different way, and - # this is where all of the error handling will take place. - try: - for i in range(n): - result[i] = parse_timedelta_string(values[i]) - except: - for i in range(n): - try: - result[i] = convert_to_timedelta64(values[i], unit) - except ValueError: - if errors == 'coerce': - result[i] = NPY_NAT - else: - raise - - return iresult - - -cpdef convert_to_timedelta64(object ts, object unit): - """ - Convert an incoming object to a timedelta64 if possible - - Handle these types of objects: - - timedelta/Timedelta - - timedelta64 - - an offset - - np.int64 (with unit providing a possible modifier) - - None/NaT - - Return an ns based int64 - - # kludgy here until we have a timedelta scalar - # handle the numpy < 1.7 case - """ - if _checknull_with_nat(ts): - return np.timedelta64(NPY_NAT) - elif isinstance(ts, Timedelta): - # already in the proper format - ts = np.timedelta64(ts.value) - elif is_datetime64_object(ts): - # only accept a NaT here - if ts.astype('int64') == NPY_NAT: - return np.timedelta64(NPY_NAT) - elif is_timedelta64_object(ts): - ts = ts.astype("m8[{0}]".format(unit.lower())) - elif is_integer_object(ts): - if ts == NPY_NAT: - return np.timedelta64(NPY_NAT) - else: - if util.is_array(ts): - ts = ts.astype('int64').item() - if unit in ['Y', 'M', 'W']: - ts = np.timedelta64(ts, unit) - else: - ts = cast_from_unit(ts, unit) - ts = np.timedelta64(ts) - elif is_float_object(ts): - if util.is_array(ts): - ts = ts.astype('int64').item() - if unit in ['Y', 'M', 'W']: - ts = np.timedelta64(int(ts), unit) - else: - ts = cast_from_unit(ts, unit) - ts = np.timedelta64(ts) - elif is_string_object(ts): - ts = np.timedelta64(parse_timedelta_string(ts)) - elif hasattr(ts, 'delta'): - ts = np.timedelta64(_delta_to_nanoseconds(ts), 'ns') - - if PyDelta_Check(ts): - ts = np.timedelta64(_delta_to_nanoseconds(ts), 'ns') - elif not is_timedelta64_object(ts): - raise ValueError("Invalid type for timedelta " - 
"scalar: %s" % type(ts)) - return ts.astype('timedelta64[ns]') - - # ---------------------------------------------------------------------- # Conversion routines -cpdef int64_t _delta_to_nanoseconds(delta) except? -1: - if util.is_array(delta): - return delta.astype('m8[ns]').astype('int64') - if hasattr(delta, 'nanos'): - return delta.nanos - if hasattr(delta, 'delta'): - delta = delta.delta - if is_timedelta64_object(delta): - return delta.astype("timedelta64[ns]").item() - if is_integer_object(delta): - return delta - - return (delta.days * 24 * 60 * 60 * 1000000 + - delta.seconds * 1000000 + - delta.microseconds) * 1000 - - def cast_to_nanoseconds(ndarray arr): cdef: Py_ssize_t i, n = arr.size diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index 4dfd3f3e9eca5..3e7b88b208e89 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -3,19 +3,11 @@ from cpython.datetime cimport timedelta -from numpy cimport int64_t +from numpy cimport int64_t, ndarray # Exposed for tslib, not intended for outside use. cdef parse_timedelta_string(object ts) cpdef int64_t cast_from_unit(object ts, object unit) except? -1 - - -cdef class _Timedelta(timedelta): - cdef readonly: - int64_t value # nanoseconds - object freq # frequency reference - bint is_populated # are my components populated - int64_t _sign, _d, _h, _m, _s, _ms, _us, _ns - - cpdef timedelta to_pytimedelta(_Timedelta self) - cpdef bint _has_ns(self) +cpdef int64_t delta_to_nanoseconds(delta) except? 
-1 +cpdef convert_to_timedelta64(object ts, object unit) +cpdef array_to_timedelta64(ndarray[object] values, unit=*, errors=*) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 2f177868a6947..623babe5422a8 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -5,22 +5,31 @@ import collections import sys cdef bint PY3 = (sys.version_info[0] >= 3) -from cpython cimport PyUnicode_Check +from cython cimport Py_ssize_t + +from cpython cimport PyUnicode_Check, Py_NE, Py_EQ, PyObject_RichCompare import numpy as np cimport numpy as np -from numpy cimport int64_t +from numpy cimport int64_t, ndarray np.import_array() from cpython.datetime cimport (datetime, timedelta, - PyDelta_Check, PyDateTime_IMPORT) + PyDateTime_CheckExact, + PyDateTime_Check, PyDelta_Check, + PyDateTime_IMPORT) PyDateTime_IMPORT cimport util -from util cimport is_timedelta64_object +from util cimport (is_timedelta64_object, is_datetime64_object, + is_integer_object, is_float_object, + is_string_object) + +from np_datetime cimport cmp_scalar, reverse_ops -from nattype import nat_strings +from nattype import nat_strings, NaT +from nattype cimport _checknull_with_nat # ---------------------------------------------------------------------- # Constants @@ -66,8 +75,122 @@ cdef dict timedelta_abbrevs = { 'D': 'd', 'nanos': 'ns', 'nanosecond': 'ns'} +_no_input = object() + # ---------------------------------------------------------------------- +cpdef int64_t delta_to_nanoseconds(delta) except? 
-1: + if util.is_array(delta): + return delta.astype('m8[ns]').astype('int64') + if hasattr(delta, 'nanos'): + return delta.nanos + if hasattr(delta, 'delta'): + delta = delta.delta + if is_timedelta64_object(delta): + return delta.astype("timedelta64[ns]").item() + if is_integer_object(delta): + return delta + + return (delta.days * 24 * 60 * 60 * 1000000 + + delta.seconds * 1000000 + + delta.microseconds) * 1000 + + +cpdef convert_to_timedelta64(object ts, object unit): + """ + Convert an incoming object to a timedelta64 if possible + + Handle these types of objects: + - timedelta/Timedelta + - timedelta64 + - an offset + - np.int64 (with unit providing a possible modifier) + - None/NaT + + Return an ns based int64 + + # kludgy here until we have a timedelta scalar + # handle the numpy < 1.7 case + """ + if _checknull_with_nat(ts): + return np.timedelta64(NPY_NAT) + elif isinstance(ts, Timedelta): + # already in the proper format + ts = np.timedelta64(ts.value) + elif is_datetime64_object(ts): + # only accept a NaT here + if ts.astype('int64') == NPY_NAT: + return np.timedelta64(NPY_NAT) + elif is_timedelta64_object(ts): + ts = ts.astype("m8[{0}]".format(unit.lower())) + elif is_integer_object(ts): + if ts == NPY_NAT: + return np.timedelta64(NPY_NAT) + else: + if util.is_array(ts): + ts = ts.astype('int64').item() + if unit in ['Y', 'M', 'W']: + ts = np.timedelta64(ts, unit) + else: + ts = cast_from_unit(ts, unit) + ts = np.timedelta64(ts) + elif is_float_object(ts): + if util.is_array(ts): + ts = ts.astype('int64').item() + if unit in ['Y', 'M', 'W']: + ts = np.timedelta64(int(ts), unit) + else: + ts = cast_from_unit(ts, unit) + ts = np.timedelta64(ts) + elif is_string_object(ts): + ts = np.timedelta64(parse_timedelta_string(ts)) + elif hasattr(ts, 'delta'): + ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns') + + if PyDelta_Check(ts): + ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns') + elif not is_timedelta64_object(ts): + raise ValueError("Invalid type 
for timedelta " + "scalar: %s" % type(ts)) + return ts.astype('timedelta64[ns]') + + +cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'): + """ + Convert an ndarray to an array of timedeltas. If errors == 'coerce', + coerce non-convertible objects to NaT. Otherwise, raise. + """ + + cdef: + Py_ssize_t i, n + ndarray[int64_t] iresult + + if errors not in ('ignore', 'raise', 'coerce'): + raise ValueError("errors must be one of 'ignore', " + "'raise', or 'coerce'}") + + n = values.shape[0] + result = np.empty(n, dtype='m8[ns]') + iresult = result.view('i8') + + # Usually, we have all strings. If so, we hit the fast path. + # If this path fails, we try conversion a different way, and + # this is where all of the error handling will take place. + try: + for i in range(n): + result[i] = parse_timedelta_string(values[i]) + except: + for i in range(n): + try: + result[i] = convert_to_timedelta64(values[i], unit) + except ValueError: + if errors == 'coerce': + result[i] = NPY_NAT + else: + raise + + return iresult + cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1: """ return a casting of the unit represented to nanoseconds @@ -315,23 +438,145 @@ cdef inline timedelta_from_spec(object number, object frac, object unit): n = ''.join(number) + '.' 
+ ''.join(frac) return cast_from_unit(float(n), unit) + +# ---------------------------------------------------------------------- +# Timedelta ops utilities + +cdef bint _validate_ops_compat(other): + # return True if we are compat with operating + if _checknull_with_nat(other): + return True + elif PyDelta_Check(other) or is_timedelta64_object(other): + return True + elif is_string_object(other): + return True + elif hasattr(other, 'delta'): + return True + return False + + +def _op_unary_method(func, name): + def f(self): + return Timedelta(func(self.value), unit='ns') + f.__name__ = name + return f + + +def _binary_op_method_timedeltalike(op, name): + # define a binary operation that only works if the other argument is + # timedelta like or an array of timedeltalike + def f(self, other): + if hasattr(other, 'delta') and not PyDelta_Check(other): + # offsets.Tick + return op(self, other.delta) + + elif other is NaT: + return NaT + + elif is_datetime64_object(other) or PyDateTime_CheckExact(other): + # the PyDateTime_CheckExact case is for a datetime object that + # is specifically *not* a Timestamp, as the Timestamp case will be + # handled after `_validate_ops_compat` returns False below + from ..tslib import Timestamp + return op(self, Timestamp(other)) + # We are implicitly requiring the canonical behavior to be + # defined by Timestamp methods. 
+ + elif hasattr(other, 'dtype'): + # nd-array like + if other.dtype.kind not in ['m', 'M']: + # raise rathering than letting numpy return wrong answer + return NotImplemented + return op(self.to_timedelta64(), other) + + elif not _validate_ops_compat(other): + return NotImplemented + + try: + other = Timedelta(other) + except ValueError: + # failed to parse as timedelta + return NotImplemented + + return Timedelta(op(self.value, other.value), unit='ns') + + f.__name__ = name + return f + + # ---------------------------------------------------------------------- # Timedelta Construction +cdef _to_py_int_float(v): + # Note: This used to be defined inside Timedelta.__new__ + # but cython will not allow `cdef` functions to be defined dynamically. + if is_integer_object(v): + return int(v) + elif is_float_object(v): + return float(v) + raise TypeError("Invalid type {0}. Must be int or " + "float.".format(type(v))) + + # Similar to Timestamp/datetime, this is a construction requirement for # timedeltas that we need to do object instantiation in python. This will # serve as a C extension type that shadows the Python class, where we do any # heavy lifting. 
cdef class _Timedelta(timedelta): - # cdef readonly: - # int64_t value # nanoseconds - # object freq # frequency reference - # bint is_populated # are my components populated - # int64_t _sign, _d, _h, _m, _s, _ms, _us, _ns + cdef readonly: + int64_t value # nanoseconds + object freq # frequency reference + bint is_populated # are my components populated + int64_t _sign, _d, _h, _m, _s, _ms, _us, _ns # higher than np.ndarray and np.matrix __array_priority__ = 100 + def __hash__(_Timedelta self): + if self._has_ns(): + return hash(self.value) + else: + return timedelta.__hash__(self) + + def __richcmp__(_Timedelta self, object other, int op): + cdef: + _Timedelta ots + int ndim + + if isinstance(other, _Timedelta): + ots = other + elif PyDelta_Check(other): + ots = Timedelta(other) + else: + ndim = getattr(other, "ndim", -1) + + if ndim != -1: + if ndim == 0: + if is_timedelta64_object(other): + other = Timedelta(other) + else: + if op == Py_EQ: + return False + elif op == Py_NE: + return True + + # only allow ==, != ops + raise TypeError('Cannot compare type %r with type %r' % + (type(self).__name__, + type(other).__name__)) + if util.is_array(other): + return PyObject_RichCompare(np.array([self]), other, op) + return PyObject_RichCompare(other, self, reverse_ops[op]) + else: + if op == Py_EQ: + return False + elif op == Py_NE: + return True + raise TypeError('Cannot compare type %r with type %r' % + (type(self).__name__, type(other).__name__)) + + return cmp_scalar(self.value, ots.value, op) + cpdef bint _has_ns(self): return self.value % 1000 != 0 @@ -621,3 +866,239 @@ cdef class _Timedelta(timedelta): tpl = 'P{td.days}DT{td.hours}H{td.minutes}M{seconds}S'.format( td=components, seconds=seconds) return tpl + + +# Python front end to C extension type _Timedelta +# This serves as the box for timedelta64 + +class Timedelta(_Timedelta): + """ + Represents a duration, the difference between two dates or times. 
+ + Timedelta is the pandas equivalent of python's ``datetime.timedelta`` + and is interchangable with it in most cases. + + Parameters + ---------- + value : Timedelta, timedelta, np.timedelta64, string, or integer + unit : string, [D,h,m,s,ms,us,ns] + Denote the unit of the input, if input is an integer. Default 'ns'. + days, seconds, microseconds, + milliseconds, minutes, hours, weeks : numeric, optional + Values for construction in compat with datetime.timedelta. + np ints and floats will be coereced to python ints and floats. + + Notes + ----- + The ``.value`` attribute is always in ns. + + """ + def __new__(cls, object value=_no_input, unit=None, **kwargs): + cdef _Timedelta td_base + + if value is _no_input: + if not len(kwargs): + raise ValueError("cannot construct a Timedelta without a " + "value/unit or descriptive keywords " + "(days,seconds....)") + + kwargs = {key: _to_py_int_float(kwargs[key]) for key in kwargs} + + nano = kwargs.pop('nanoseconds', 0) + try: + value = nano + convert_to_timedelta64(timedelta(**kwargs), + 'ns') + except TypeError as e: + raise ValueError("cannot construct a Timedelta from the " + "passed arguments, allowed keywords are " + "[weeks, days, hours, minutes, seconds, " + "milliseconds, microseconds, nanoseconds]") + + if isinstance(value, Timedelta): + value = value.value + elif util.is_string_object(value): + value = np.timedelta64(parse_timedelta_string(value)) + elif PyDelta_Check(value): + value = convert_to_timedelta64(value, 'ns') + elif is_timedelta64_object(value): + if unit is not None: + value = value.astype('timedelta64[{0}]'.format(unit)) + value = value.astype('timedelta64[ns]') + elif hasattr(value, 'delta'): + value = np.timedelta64(delta_to_nanoseconds(value.delta), 'ns') + elif is_integer_object(value) or util.is_float_object(value): + # unit=None is de-facto 'ns' + value = convert_to_timedelta64(value, unit) + elif _checknull_with_nat(value): + return NaT + else: + raise ValueError( + "Value must be 
Timedelta, string, integer, " + "float, timedelta or convertible") + + if is_timedelta64_object(value): + value = value.view('i8') + + # nat + if value == NPY_NAT: + return NaT + + # make timedelta happy + td_base = _Timedelta.__new__(cls, microseconds=int(value) / 1000) + td_base.value = value + td_base.is_populated = 0 + return td_base + + def __setstate__(self, state): + (value) = state + self.value = value + + def __reduce__(self): + object_state = self.value, + return (Timedelta, object_state) + + def _round(self, freq, rounder): + cdef: + int64_t result, unit + + from pandas.tseries.frequencies import to_offset + unit = to_offset(freq).nanos + result = unit * rounder(self.value / float(unit)) + return Timedelta(result, unit='ns') + + def round(self, freq): + """ + Round the Timedelta to the specified resolution + + Returns + ------- + a new Timedelta rounded to the given resolution of `freq` + + Parameters + ---------- + freq : a freq string indicating the rounding resolution + + Raises + ------ + ValueError if the freq cannot be converted + """ + return self._round(freq, np.round) + + def floor(self, freq): + """ + return a new Timedelta floored to this resolution + + Parameters + ---------- + freq : a freq string indicating the flooring resolution + """ + return self._round(freq, np.floor) + + def ceil(self, freq): + """ + return a new Timedelta ceiled to this resolution + + Parameters + ---------- + freq : a freq string indicating the ceiling resolution + """ + return self._round(freq, np.ceil) + + # ---------------------------------------------------------------- + # Arithmetic Methods + # TODO: Can some of these be defined in the cython class? 
+ + __inv__ = _op_unary_method(lambda x: -x, '__inv__') + __neg__ = _op_unary_method(lambda x: -x, '__neg__') + __pos__ = _op_unary_method(lambda x: x, '__pos__') + __abs__ = _op_unary_method(lambda x: abs(x), '__abs__') + + __add__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__add__') + __radd__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__radd__') + __sub__ = _binary_op_method_timedeltalike(lambda x, y: x - y, '__sub__') + __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, '__rsub__') + + def __mul__(self, other): + if hasattr(other, 'dtype'): + # ndarray-like + return other * self.to_timedelta64() + + elif other is NaT: + return NaT + + elif not (is_integer_object(other) or is_float_object(other)): + # only integers and floats allowed + return NotImplemented + + return Timedelta(other * self.value, unit='ns') + + __rmul__ = __mul__ + + def __truediv__(self, other): + if hasattr(other, 'dtype'): + return self.to_timedelta64() / other + + elif is_integer_object(other) or is_float_object(other): + # integers or floats + return Timedelta(self.value / other, unit='ns') + + elif not _validate_ops_compat(other): + return NotImplemented + + other = Timedelta(other) + if other is NaT: + return np.nan + return self.value / float(other.value) + + def __rtruediv__(self, other): + if hasattr(other, 'dtype'): + return other / self.to_timedelta64() + + elif not _validate_ops_compat(other): + return NotImplemented + + other = Timedelta(other) + if other is NaT: + return NaT + return float(other.value) / self.value + + if not PY3: + __div__ = __truediv__ + __rdiv__ = __rtruediv__ + + def __floordiv__(self, other): + if hasattr(other, 'dtype'): + # work with i8 + other = other.astype('m8[ns]').astype('i8') + return self.value // other + + elif is_integer_object(other): + # integers only + return Timedelta(self.value // other, unit='ns') + + elif not _validate_ops_compat(other): + return NotImplemented + + other = Timedelta(other) + if other is 
NaT: + return np.nan + return self.value // other.value + + def __rfloordiv__(self, other): + if hasattr(other, 'dtype'): + # work with i8 + other = other.astype('m8[ns]').astype('i8') + return other // self.value + + elif not _validate_ops_compat(other): + return NotImplemented + + other = Timedelta(other) + if other is NaT: + return NaT + return other.value // self.value + + +# resolution in ns +Timedelta.min = Timedelta(np.iinfo(np.int64).min +1) +Timedelta.max = Timedelta(np.iinfo(np.int64).max) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index ebc0d50d8ba05..4934ccb49b844 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -24,8 +24,9 @@ from pandas.core.common import AbstractMethodError import pandas.io.formats.printing as printing -from pandas._libs import (tslib as libts, lib, iNaT, NaT) +from pandas._libs import lib, iNaT, NaT from pandas._libs.period import Period +from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly @@ -701,7 +702,7 @@ def _add_delta_td(self, other): # add a delta of a timedeltalike # return the i8 result view - inc = libts._delta_to_nanoseconds(other) + inc = delta_to_nanoseconds(other) new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view('i8') if self.hasnans: diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index c4938b556c8dd..bd069c1d22403 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -36,6 +36,7 @@ get_period_field_arr, _validate_end_alias, _quarter_to_myear) from pandas._libs.tslibs.fields import isleapyear_arr +from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas.core.base import _shared_docs from pandas.core.indexes.base import _index_shared_docs, _ensure_index @@ -652,10 +653,10 @@ def 
_maybe_convert_timedelta(self, other): offset = frequencies.to_offset(self.freq.rule_code) if isinstance(offset, offsets.Tick): if isinstance(other, np.ndarray): - nanos = np.vectorize(tslib._delta_to_nanoseconds)(other) + nanos = np.vectorize(delta_to_nanoseconds)(other) else: - nanos = tslib._delta_to_nanoseconds(other) - offset_nanos = tslib._delta_to_nanoseconds(offset) + nanos = delta_to_nanoseconds(other) + offset_nanos = delta_to_nanoseconds(offset) check = np.all(nanos % offset_nanos == 0) if check: return nanos // offset_nanos @@ -672,8 +673,8 @@ def _maybe_convert_timedelta(self, other): elif is_timedelta64_dtype(other): offset = frequencies.to_offset(self.freq) if isinstance(offset, offsets.Tick): - nanos = tslib._delta_to_nanoseconds(other) - offset_nanos = tslib._delta_to_nanoseconds(offset) + nanos = delta_to_nanoseconds(other) + offset_nanos = delta_to_nanoseconds(offset) if (nanos % offset_nanos).all() == 0: return nanos // offset_nanos elif is_integer(other): diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 729edc81bb642..c9701d0d8dae8 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -34,6 +34,7 @@ from pandas.tseries.offsets import Tick, DateOffset from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timedelta, NaT, iNaT) +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 def _td_index_cmp(opname, nat_result=False): @@ -286,7 +287,7 @@ def _box_func(self): def _simple_new(cls, values, name=None, freq=None, **kwargs): values = np.array(values, copy=False) if values.dtype == np.object_: - values = libts.array_to_timedelta64(values) + values = array_to_timedelta64(values) if values.dtype != _TD_DTYPE: values = _ensure_int64(values).view(_TD_DTYPE) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5a571f9077999..eeb6faf20ffce 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -13,7 +13,7 @@ 
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod from pandas.core.indexes.datetimes import DatetimeIndex, date_range from pandas.core.indexes.timedeltas import TimedeltaIndex -from pandas.tseries.offsets import DateOffset, Tick, Day, _delta_to_nanoseconds +from pandas.tseries.offsets import DateOffset, Tick, Day, delta_to_nanoseconds from pandas.core.indexes.period import PeriodIndex import pandas.core.common as com import pandas.core.algorithms as algos @@ -1186,7 +1186,7 @@ def _adjust_bin_edges(self, binner, ax_values): bin_edges = binner.asi8 if self.freq != 'D' and is_superperiod(self.freq, 'D'): - day_nanos = _delta_to_nanoseconds(timedelta(1)) + day_nanos = delta_to_nanoseconds(timedelta(1)) if self.closed == 'right': bin_edges = bin_edges + day_nanos - 1 @@ -1312,7 +1312,7 @@ def _get_range_edges(first, last, offset, closed='left', base=0): if isinstance(offset, Tick): is_day = isinstance(offset, Day) - day_nanos = _delta_to_nanoseconds(timedelta(1)) + day_nanos = delta_to_nanoseconds(timedelta(1)) # #1165 if (is_day and day_nanos % offset.nanos == 0) or not is_day: diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index f61d9f90d6ca2..94e2f2342bd51 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -5,6 +5,8 @@ import numpy as np import pandas as pd import pandas._libs.tslib as tslib +from pandas._libs.tslibs.timedeltas import (convert_to_timedelta64, + array_to_timedelta64) from pandas.core.dtypes.common import ( _ensure_object, @@ -140,7 +142,7 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): """Convert string 'r' to a timedelta object.""" try: - result = tslib.convert_to_timedelta64(r, unit) + result = convert_to_timedelta64(r, unit) except ValueError: if errors == 'raise': raise @@ -169,8 +171,8 @@ def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): 'timedelta64[ns]', copy=False) else: try: - value = 
tslib.array_to_timedelta64(_ensure_object(arg), - unit=unit, errors=errors) + value = array_to_timedelta64(_ensure_object(arg), + unit=unit, errors=errors) value = value.astype('timedelta64[ns]', copy=False) except ValueError: if errors == 'ignore': diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 862f289d81954..5843aaa23be57 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -14,7 +14,7 @@ from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta from pandas.util._decorators import cache_readonly -from pandas._libs.tslib import _delta_to_nanoseconds +from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas._libs.tslibs.offsets import ( ApplyTypeError, as_datetime, _is_normalized, @@ -2569,7 +2569,7 @@ def delta(self): @property def nanos(self): - return _delta_to_nanoseconds(self.delta) + return delta_to_nanoseconds(self.delta) def apply(self, other): # Timestamp can handle tz and nano sec, thus no need to use apply_wraps @@ -2612,7 +2612,7 @@ def _delta_to_tick(delta): else: return Second(seconds) else: - nanos = _delta_to_nanoseconds(delta) + nanos = delta_to_nanoseconds(delta) if nanos % 1000000 == 0: return Milli(nanos // 1000000) elif nanos % 1000 == 0: diff --git a/setup.py b/setup.py index f5c27eb3498c5..572c426f26ae3 100755 --- a/setup.py +++ b/setup.py @@ -525,6 +525,7 @@ def pxd(name): 'pyxfile': '_libs/period', 'pxdfiles': ['_libs/src/util', '_libs/lib', + '_libs/tslibs/timedeltas', '_libs/tslibs/timezones', '_libs/tslibs/nattype'], 'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'], @@ -587,7 +588,8 @@ def pxd(name): 'sources': np_datetime_sources}, '_libs.tslibs.timedeltas': { 'pyxfile': '_libs/tslibs/timedeltas', - 'pxdfiles': ['_libs/src/util'], + 'pxdfiles': ['_libs/src/util', + '_libs/tslibs/nattype'], 'depends': np_datetime_headers, 'sources': np_datetime_sources}, '_libs.tslibs.timezones': {
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18085
2017-11-03T02:38:27Z
2017-11-08T12:00:26Z
2017-11-08T12:00:26Z
2017-11-10T16:10:18Z
DOC: Fix various warnings
diff --git a/doc/source/api.rst b/doc/source/api.rst index e8b8b3624740d..b5cf593ac0d1f 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1822,7 +1822,7 @@ Interval Properties ~~~~~~~~~~ .. autosummary:: - :toctree generated/ + :toctree: generated/ Interval.closed Interval.closed_left @@ -1843,7 +1843,7 @@ Timedelta Properties ~~~~~~~~~~ .. autosummary:: - :toctree generated/ + :toctree: generated/ Timedelta.asm8 Timedelta.components @@ -1860,7 +1860,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree generated/ + :toctree: generated/ Timedelta.ceil Timedelta.floor diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 0325e54d18e36..2a358900e340d 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -670,6 +670,7 @@ columns of a ``DataFrame``: .. ipython:: python :okexcept: + :okwarning: r.agg({'A' : np.sum, 'B' : lambda x: np.std(x, ddof=1)}) diff --git a/doc/source/conf.py b/doc/source/conf.py index 6eb12324ee461..e006f1809da5a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -358,13 +358,12 @@ # latex_use_modindex = True -# Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { 'statsmodels': ('http://www.statsmodels.org/devel/', None), 'matplotlib': ('http://matplotlib.org/', None), - 'python': ('http://docs.python.org/3', None), - 'numpy': ('http://docs.scipy.org/doc/numpy', None), - 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None), + 'python': ('https://docs.python.org/3/', None), + 'numpy': ('https://docs.scipy.org/doc/numpy/', None), + 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), 'py': ('https://pylib.readthedocs.io/en/latest/', None) } import glob @@ -573,6 +572,15 @@ def remove_flags_docstring(app, what, name, obj, options, lines): if what == "attribute" and name.endswith(".flags"): del lines[:] + +suppress_warnings = [ + # We "overwrite" autosummary with our PandasAutosummary, but + # still want the regular autosummary setup to run. So we just + # suppress this warning. + 'app.add_directive' +] + + def setup(app): app.connect("autodoc-process-docstring", remove_flags_docstring) app.add_autodocumenter(AccessorDocumenter) diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 2a1aa3d0cf17a..40189f0e45518 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -60,7 +60,7 @@ Bug reports must: The issue will then show up to the *pandas* community and be open to comments/ideas from others. -.. _contributing.github +.. _contributing.github: Working with the code ===================== diff --git a/doc/source/io.rst b/doc/source/io.rst index 5d6b00a4db72e..36f216601b491 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -115,7 +115,7 @@ header : int or list of ints, default ``'infer'`` names : array-like, default ``None`` List of column names to use. If file contains no header row, then you should explicitly pass ``header=None``. Duplicates in this list will cause - a ``UserWarning`` to be issued. + a ``UserWarning`` to be issued. 
index_col : int or sequence or ``False``, default ``None`` Column to use as the row labels of the DataFrame. If a sequence is given, a MultiIndex is used. If you have a malformed file with delimiters at the end of diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt index feba3d6224e65..b908b60334f4c 100644 --- a/doc/source/whatsnew/v0.15.2.txt +++ b/doc/source/whatsnew/v0.15.2.txt @@ -163,7 +163,7 @@ Other enhancements: p.all() - Added support for ``utcfromtimestamp()``, ``fromtimestamp()``, and ``combine()`` on `Timestamp` class (:issue:`5351`). -- Added Google Analytics (`pandas.io.ga`) basic documentation (:issue:`8835`). See `here<http://pandas.pydata.org/pandas-docs/version/0.15.2/remote_data.html#remote-data-ga>`__. +- Added Google Analytics (`pandas.io.ga`) basic documentation (:issue:`8835`). See `here <http://pandas.pydata.org/pandas-docs/version/0.15.2/remote_data.html#remote-data-ga>`__. - ``Timedelta`` arithmetic returns ``NotImplemented`` in unknown cases, allowing extensions by custom classes (:issue:`8813`). - ``Timedelta`` now supports arithemtic with ``numpy.ndarray`` objects of the appropriate dtype (numpy 1.8 or newer only) (:issue:`8884`). - Added ``Timedelta.to_timedelta64()`` method to the public API (:issue:`8884`). diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt index bc5e278df743f..6093e53029cb6 100644 --- a/doc/source/whatsnew/v0.19.0.txt +++ b/doc/source/whatsnew/v0.19.0.txt @@ -216,6 +216,7 @@ contained the values ``[0, 3]``. **New behavior**: .. ipython:: python + :okwarning: pd.read_csv(StringIO(data), names=names) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 1a7b75266bfdf..fc869956c820e 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1375,6 +1375,7 @@ Convert to a MultiIndex DataFrame Convert to an xarray DataArray .. 
ipython:: python + :okwarning: p.to_xarray() diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index 40d970c7b20f2..72523a19b9595 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -967,7 +967,7 @@ cdef class _Period(object): def strftime(self, fmt): """ Returns the string representation of the :class:`Period`, depending - on the selected :keyword:`format`. :keyword:`format` must be a string + on the selected ``fmt``. ``fmt`` must be a string containing one or several directives. The method recognizes the same directives as the :func:`time.strftime` function of the standard Python distribution, as well as the specific additional directives ``%f``, diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 6d793b6770113..bf22a3a528259 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -237,15 +237,13 @@ _no_input = object() class Timestamp(_Timestamp): - """TimeStamp is the pandas equivalent of python's Datetime + """Pandas replacement for datetime.datetime + + TimeStamp is the pandas equivalent of python's Datetime and is interchangable with it in most cases. It's the type used for the entries that make up a DatetimeIndex, and other timeseries oriented data structures in pandas. - There are essentially three calling conventions for the constructor. The - primary form accepts four parameters. They can be passed by position or - keyword. - Parameters ---------- ts_input : datetime-like, str, int, float @@ -259,22 +257,32 @@ class Timestamp(_Timestamp): offset : str, DateOffset Deprecated, use freq + year, month, day : int + .. versionadded:: 0.19.0 + hour, minute, second, microsecond : int, optional, default 0 + .. versionadded:: 0.19.0 + tzinfo : datetime.tzinfo, optional, default None + .. versionadded:: 0.19.0 + + Notes + ----- + There are essentially three calling conventions for the constructor. The + primary form accepts four parameters. They can be passed by position or + keyword. 
+ The other two forms mimic the parameters from ``datetime.datetime``. They can be passed by either position or keyword, but not both mixed together. - :func:`datetime.datetime` Parameters - ------------------------------------ + Examples + -------- + >>> pd.Timestamp('2017-01-01T12') + Timestamp('2017-01-01 12:00:00') - .. versionadded:: 0.19.0 + >>> pd.Timestamp(2017, 1, 1, 12) + Timestamp('2017-01-01 12:00:00') - year : int - month : int - day : int - hour : int, optional, default is 0 - minute : int, optional, default is 0 - second : int, optional, default is 0 - microsecond : int, optional, default is 0 - tzinfo : datetime.tzinfo, optional, default is None + >>> pd.Timestamp(year=2017, month=1, day=1, hour=12) + Timestamp('2017-01-01 12:00:00') """ @classmethod @@ -592,11 +600,13 @@ class Timestamp(_Timestamp): tz : string, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will be converted to. None will remove timezone holding local time. + ambiguous : bool, 'NaT', default 'raise' - bool contains flags to determine if time is dst or not (note - that this flag is only applicable for ambiguous fall dst dates) + that this flag is only applicable for ambiguous fall dst dates) - 'NaT' will return NaT for an ambiguous time - 'raise' will raise an AmbiguousTimeError for an ambiguous time + errors : 'raise', 'coerce', default 'raise' - 'raise' will raise a NonExistentTimeError if a timestamp is not valid in the specified timezone (e.g. due to a transition from diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index dedc115501cd0..a5861f5865a39 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -478,11 +478,13 @@ class NaTType(_NaT): tz : string, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will be converted to. None will remove timezone holding local time. 
+ ambiguous : bool, 'NaT', default 'raise' - bool contains flags to determine if time is dst or not (note - that this flag is only applicable for ambiguous fall dst dates) + that this flag is only applicable for ambiguous fall dst dates) - 'NaT' will return NaT for an ambiguous time - 'raise' will raise an AmbiguousTimeError for an ambiguous time + errors : 'raise', 'coerce', default 'raise' - 'raise' will raise a NonExistentTimeError if a timestamp is not valid in the specified timezone (e.g. due to a transition from diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 623babe5422a8..869ff5ee77bda 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -820,7 +820,7 @@ cdef class _Timedelta(timedelta): def isoformat(self): """ Format Timedelta as ISO 8601 Duration like - `P[n]Y[n]M[n]DT[n]H[n]M[n]S`, where the `[n]`s are replaced by the + ``P[n]Y[n]M[n]DT[n]H[n]M[n]S``, where the ``[n]`` s are replaced by the values. See https://en.wikipedia.org/wiki/ISO_8601#Durations .. versionadded:: 0.20.0 @@ -881,7 +881,7 @@ class Timedelta(_Timedelta): Parameters ---------- value : Timedelta, timedelta, np.timedelta64, string, or integer - unit : string, [D,h,m,s,ms,us,ns] + unit : string, {'ns', 'us', 'ms', 's', 'm', 'h', 'D'}, optional Denote the unit of the input, if input is an integer. Default 'ns'. days, seconds, microseconds, milliseconds, minutes, hours, weeks : numeric, optional diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index a5df6aea055ab..196f4b2679576 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -202,7 +202,7 @@ def eval(expr, parser='pandas', engine=None, truediv=True, you can use to inject an additional collection of namespaces to use for variable lookup. 
For example, this is used in the :meth:`~pandas.DataFrame.query` method to inject the - :attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns` + ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 33531e80449d8..59578b96807e1 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -392,8 +392,9 @@ def table_schema_cb(key): cf.register_option('sim_interactive', False, tc_sim_interactive_doc) use_inf_as_null_doc = """ -use_inf_as_null had been deprecated and will be removed in a future version. -Use `use_inf_as_na` instead. +: boolean + use_inf_as_null had been deprecated and will be removed in a future + version. Use `use_inf_as_na` instead. """ use_inf_as_na_doc = """ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5f5f785111fb4..70f1ff0a5380d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1692,7 +1692,7 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True, classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table escape : boolean, default True - Convert the characters <, >, and & to HTML-safe sequences.= + Convert the characters <, >, and & to HTML-safe sequences. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. @@ -1703,6 +1703,7 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True, Character recognized as decimal separator, e.g. ',' in Europe .. versionadded:: 0.18.0 + border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 48e6f8d4d50d3..f1edfe276dfad 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6278,6 +6278,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True): * 0 or 'index': apply truncation to rows * 1 or 'columns': apply truncation to columns + Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels) copy : boolean, default is True, diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 1acc8c3ed0bbb..8db75accc84e5 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -742,8 +742,8 @@ def _cumcount_array(self, ascending=True): ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. - Note - ---- + Notes + ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general """ @@ -1257,7 +1257,6 @@ def expanding(self, *args, **kwargs): return ExpandingGroupby(self, *args, **kwargs) @Substitution(name='groupby') - @Appender(_doc_template) def pad(self, limit=None): """ Forward fill the values @@ -1269,6 +1268,8 @@ def pad(self, limit=None): See Also -------- + Series.pad + DataFrame.pad Series.fillna DataFrame.fillna """ @@ -1276,7 +1277,6 @@ def pad(self, limit=None): ffill = pad @Substitution(name='groupby') - @Appender(_doc_template) def backfill(self, limit=None): """ Backward fill the values @@ -1288,6 +1288,8 @@ def backfill(self, limit=None): See Also -------- + Series.backfill + DataFrame.backfill Series.fillna DataFrame.fillna """ @@ -1450,7 +1452,6 @@ def nth(self, n, dropna=None): return result @Substitution(name='groupby') - @Appender(_doc_template) def ngroup(self, ascending=True): """ Number each group from 0 to the number of groups - 1. @@ -1507,7 +1508,6 @@ def ngroup(self, ascending=True): See also -------- .cumcount : Number the rows in each group. 
- """ self._set_group_selection() @@ -1519,7 +1519,6 @@ def ngroup(self, ascending=True): return result @Substitution(name='groupby') - @Appender(_doc_template) def cumcount(self, ascending=True): """ Number each item in each group from 0 to the length of that group - 1. diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index bd069c1d22403..a6d5690767c10 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -624,9 +624,9 @@ def to_timestamp(self, freq=None, how='start'): Parameters ---------- - freq : string or DateOffset, default 'D' for week or longer, 'S' - otherwise - Target frequency + freq : string or DateOffset, optional + Target frequency. The default is 'D' for week or longer, + 'S' otherwise how : {'s', 'e', 'start', 'end'} Returns @@ -1039,8 +1039,8 @@ def tz_convert(self, tz): ------- normalized : DatetimeIndex - Note - ---- + Notes + ----- Not currently implemented for PeriodIndex """ raise NotImplementedError("Not yet implemented for PeriodIndex") @@ -1063,8 +1063,8 @@ def tz_localize(self, tz, infer_dst=False): ------- localized : DatetimeIndex - Note - ---- + Notes + ----- Not currently implemented for PeriodIndex """ raise NotImplementedError("Not yet implemented for PeriodIndex") diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ab98b9c4e4f49..35a87fbe7b15b 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -84,12 +84,23 @@ "match-parent", "initial", "unset") justify_docstring = """ - justify : {'left', 'right', 'center', 'justify', - 'justify-all', 'start', 'end', 'inherit', - 'match-parent', 'initial', 'unset'}, default None + justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out - of the box.""" + of the box. 
Valid values are + + * left + * right + * center + * justify + * justify-all + * start + * end + * inherit + * match-parent + * initial + * unset +""" return_docstring = """ diff --git a/setup.py b/setup.py index 572c426f26ae3..dd24c5c14ee69 100755 --- a/setup.py +++ b/setup.py @@ -225,8 +225,8 @@ def build_extensions(self): of the analysis into a form suitable for plotting or tabular display. pandas is the ideal tool for all of these tasks. -Note ----- +Notes +----- Windows binaries built against NumPy 1.8.1 """
I'm trying to get our doc build cleaned up. Just fixing warnings in this one (mostly formatting, some references). Later on I'll figure out why things are slow on sphinx 1.6 and get rid of our hacked numpydoc / ipython directive.
https://api.github.com/repos/pandas-dev/pandas/pulls/18083
2017-11-02T22:03:22Z
2017-11-09T11:58:38Z
2017-11-09T11:58:38Z
2017-12-08T18:41:28Z
Fix 18068: Updates merge_asof error, now outputs datatypes
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 4adafe7c06450..b7b8240a8d77e 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -102,7 +102,7 @@ Sparse Reshaping ^^^^^^^^^ -- +- Error message in ``pd.merge_asof()`` for key datatype mismatch now includes datatype of left and right key (:issue:`18068`) - - diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e409090e76944..0234a5563326c 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1253,10 +1253,12 @@ def _get_merge_keys(self): join_names) = super(_AsOfMerge, self)._get_merge_keys() # validate index types are the same - for lk, rk in zip(left_join_keys, right_join_keys): + for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): if not is_dtype_equal(lk.dtype, rk.dtype): - raise MergeError("incompatible merge keys, " - "must be the same type") + raise MergeError("incompatible merge keys [{i}] {lkdtype} and " + "{rkdtype}, must be the same type" + .format(i=i, lkdtype=lk.dtype, + rkdtype=rk.dtype)) # validate tolerance; must be a Timedelta if we have a DTI if self.tolerance is not None: diff --git a/pandas/tests/reshape/test_merge_asof.py b/pandas/tests/reshape/test_merge_asof.py index 78bfa2ff8597c..4b2680b9be592 100644 --- a/pandas/tests/reshape/test_merge_asof.py +++ b/pandas/tests/reshape/test_merge_asof.py @@ -973,3 +973,15 @@ def test_on_float_by_int(self): columns=['symbol', 'exch', 'price', 'mpv']) assert_frame_equal(result, expected) + + def test_merge_datatype_error(self): + """ Tests merge datatype mismatch error """ + msg = 'merge keys \[0\] object and int64, must be the same type' + + left = pd.DataFrame({'left_val': [1, 5, 10], + 'a': ['a', 'b', 'c']}) + right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7], + 'a': [1, 2, 3, 6, 7]}) + + with tm.assert_raises_regex(MergeError, msg): + merge_asof(left, right, on='a')
- [ ] closes #18068 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18082
2017-11-02T16:32:22Z
2017-11-03T23:03:51Z
2017-11-03T23:03:51Z
2017-11-03T23:03:58Z
follow-up to #18014
diff --git a/pandas/_libs/dtypes/__init__.py b/pandas/_libs/dtypes/__init__.py new file mode 100644 index 0000000000000..40a96afc6ff09 --- /dev/null +++ b/pandas/_libs/dtypes/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/pandas/_libs/dtypes/inference.pxd b/pandas/_libs/dtypes/inference.pxd new file mode 100644 index 0000000000000..8ddccc5560f6b --- /dev/null +++ b/pandas/_libs/dtypes/inference.pxd @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + +cdef bint checknull_with_nat(object val) +cdef bint check_all_nulls(object val) diff --git a/pandas/_libs/dtypes/inference.pyx b/pandas/_libs/dtypes/inference.pyx new file mode 100644 index 0000000000000..e60095f0298fa --- /dev/null +++ b/pandas/_libs/dtypes/inference.pyx @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# cython: profile=False + +from cpython cimport PyFloat_Check, PyComplex_Check + +cimport numpy as cnp +cnp.import_array() + +from util cimport is_datetime64_object, is_timedelta64_object + +from datetime cimport get_timedelta64_value, get_datetime64_value + +from pandas._libs.tslibs.nattype cimport NPY_NAT +from pandas._libs.tslibs.nattype import NaT + + +cdef inline bint checknull_with_nat(object val): + """ utility to check if a value is a nat or not """ + return val is None or ( + PyFloat_Check(val) and val != val) or val is NaT + + +cdef inline bint check_all_nulls(object val): + """ utility to check if a value is any type of null """ + cdef: + bint res + if PyFloat_Check(val) or PyComplex_Check(val): + res = val != val + elif val is NaT: + res = 1 + elif val is None: + res = 1 + elif is_datetime64_object(val): + res = get_datetime64_value(val) == NPY_NAT + elif is_timedelta64_object(val): + res = get_timedelta64_value(val) == NPY_NAT + else: + res = 0 + return res diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index f882c3d7a7621..530ef7e6f3124 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -52,7 +52,7 @@ PyDateTime_IMPORT from datetime cimport 
get_timedelta64_value, get_datetime64_value -from tslib cimport _check_all_nulls +from dtypes.inference cimport check_all_nulls import tslib from tslib import NaT, Timestamp, Timedelta import interval @@ -232,7 +232,7 @@ def isnaobj(ndarray arr): result = np.empty(n, dtype=np.uint8) for i from 0 <= i < n: val = arr[i] - result[i] = _check_all_nulls(val) + result[i] = check_all_nulls(val) return result.view(np.bool_) diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd index 443b3867eb2b5..a52b464c2ee74 100644 --- a/pandas/_libs/tslib.pxd +++ b/pandas/_libs/tslib.pxd @@ -3,6 +3,5 @@ from numpy cimport ndarray, int64_t from tslibs.conversion cimport convert_to_tsobject cpdef convert_to_timedelta64(object, object) -cdef bint _check_all_nulls(obj) cdef _to_i8(object val) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 5a4af4550f589..ae58e5ca971c1 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -82,10 +82,6 @@ UTC = pytz.utc # initialize numpy import_array() - -cdef int64_t NPY_NAT = util.get_nat() -iNaT = NPY_NAT - from tslibs.timedeltas cimport parse_timedelta_string, cast_from_unit from tslibs.timezones cimport ( is_utc, is_tzlocal, is_fixed_offset, @@ -103,8 +99,9 @@ from tslibs.conversion import ( tz_localize_to_utc, tz_convert, tz_convert_single) -from tslibs.nattype import NaT, nat_strings -from tslibs.nattype cimport _checknull_with_nat +from tslibs.nattype import NaT, nat_strings, iNaT +from tslibs.nattype cimport NPY_NAT +from dtypes.inference cimport checknull_with_nat cdef inline object create_timestamp_from_ts( @@ -814,24 +811,6 @@ class Timestamp(_Timestamp): # ---------------------------------------------------------------------- -cdef inline bint _check_all_nulls(object val): - """ utility to check if a value is any type of null """ - cdef bint res - if PyFloat_Check(val) or PyComplex_Check(val): - res = val != val - elif val is NaT: - res = 1 - elif val is None: - res = 1 - elif is_datetime64_object(val): - 
res = get_datetime64_value(val) == NPY_NAT - elif is_timedelta64_object(val): - res = get_timedelta64_value(val) == NPY_NAT - else: - res = 0 - return res - - cpdef object get_value_box(ndarray arr, object loc): cdef: Py_ssize_t i, sz @@ -1265,7 +1244,7 @@ def datetime_to_datetime64(ndarray[object] values): iresult = result.view('i8') for i in range(n): val = values[i] - if _checknull_with_nat(val): + if checknull_with_nat(val): iresult[i] = NPY_NAT elif PyDateTime_Check(val): if val.tzinfo is not None: @@ -1467,7 +1446,7 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'): for i in range(n): val = values[i] - if _checknull_with_nat(val): + if checknull_with_nat(val): iresult[i] = NPY_NAT elif is_integer_object(val) or is_float_object(val): @@ -1535,7 +1514,7 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'): for i in range(n): val = values[i] - if _checknull_with_nat(val): + if checknull_with_nat(val): oresult[i] = NaT elif is_integer_object(val) or is_float_object(val): @@ -1586,7 +1565,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', for i in range(n): val = values[i] - if _checknull_with_nat(val): + if checknull_with_nat(val): iresult[i] = NPY_NAT elif PyDateTime_Check(val): @@ -1746,7 +1725,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', val = values[i] # set as nan except if its a NaT - if _checknull_with_nat(val): + if checknull_with_nat(val): if PyFloat_Check(val): oresult[i] = np.nan else: @@ -1764,7 +1743,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', for i in range(n): val = values[i] - if _checknull_with_nat(val): + if checknull_with_nat(val): oresult[i] = val elif is_string_object(val): @@ -1892,7 +1871,7 @@ def _op_unary_method(func, name): cdef bint _validate_ops_compat(other): # return True if we are compat with operating - if _checknull_with_nat(other): + if checknull_with_nat(other): return True elif PyDelta_Check(other) or 
is_timedelta64_object(other): return True @@ -1975,7 +1954,7 @@ class Timedelta(_Timedelta): elif is_integer_object(value) or is_float_object(value): # unit=None is de-facto 'ns' value = convert_to_timedelta64(value, unit) - elif _checknull_with_nat(value): + elif checknull_with_nat(value): return NaT else: raise ValueError("Value must be Timedelta, string, integer, " @@ -2210,7 +2189,7 @@ cpdef convert_to_timedelta64(object ts, object unit): # kludgy here until we have a timedelta scalar # handle the numpy < 1.7 case """ - if _checknull_with_nat(ts): + if checknull_with_nat(ts): return np.timedelta64(NPY_NAT) elif isinstance(ts, Timedelta): # already in the proper format diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd index 7ded36bb1bdc0..27e8a13c3cac2 100644 --- a/pandas/_libs/tslibs/nattype.pxd +++ b/pandas/_libs/tslibs/nattype.pxd @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # cython: profile=False -cdef bint _nat_scalar_rules[6] +from numpy cimport int64_t +cdef int64_t NPY_NAT -cdef bint _checknull_with_nat(object val) +cdef bint _nat_scalar_rules[6] diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index dedc115501cd0..77b55fa6cb808 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -17,15 +17,15 @@ cimport numpy as np from numpy cimport int64_t np.import_array() -from util cimport (get_nat, - is_integer_object, is_float_object, +from util cimport (is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object) # ---------------------------------------------------------------------- # Constants nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN']) -cdef int64_t NPY_NAT = get_nat() +cdef int64_t NPY_NAT = np.datetime64('NaT').astype(np.int64) +iNaT = NPY_NAT cdef bint _nat_scalar_rules[6] _nat_scalar_rules[Py_EQ] = False @@ -536,11 +536,3 @@ class NaTType(_NaT): NaT = NaTType() - - -# 
---------------------------------------------------------------------- - -cdef inline bint _checknull_with_nat(object val): - """ utility to check if a value is a nat or not """ - return val is None or ( - PyFloat_Check(val) and val != val) or val is NaT diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 214d7c0f2b432..8468aae86cc5f 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -36,11 +36,10 @@ from cpython.datetime cimport datetime from np_datetime cimport (check_dts_bounds, dtstruct_to_dt64, pandas_datetimestruct) -from util cimport is_string_object, get_nat +from util cimport is_string_object -cdef int64_t NPY_NAT = get_nat() - -from nattype cimport _checknull_with_nat +from pandas._libs.dtypes.inference cimport checknull_with_nat +from nattype cimport NPY_NAT from nattype import nat_strings @@ -144,7 +143,7 @@ def array_strptime(ndarray[object] values, object fmt, iresult[i] = NPY_NAT continue else: - if _checknull_with_nat(val): + if checknull_with_nat(val): iresult[i] = NPY_NAT continue else: diff --git a/setup.py b/setup.py index 783ded906eba2..79a1f6277a3c2 100755 --- a/setup.py +++ b/setup.py @@ -351,6 +351,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/tslibs/offsets.pyx', 'pandas/_libs/tslibs/frequencies.pyx', 'pandas/_libs/tslibs/parsing.pyx', + 'pandas/_libs/dtypes/inference.pyx', 'pandas/io/sas/sas.pyx'] def initialize_options(self): @@ -549,6 +550,7 @@ def pxd(name): 'pandas/_libs/src/parser/io.c']}, '_libs.sparse': {'pyxfile': '_libs/sparse', 'depends': _pxi_dep['sparse']}, + '_libs.dtypes.inference': {'pyxfile': '_libs/dtypes/inference'}, '_libs.testing': {'pyxfile': '_libs/testing'}, '_libs.hashing': {'pyxfile': '_libs/hashing'}, 'io.sas._sas': {'pyxfile': 'io/sas/sas'}} @@ -681,6 +683,7 @@ def pxd(name): 'pandas.io.clipboard', 'pandas._libs', 'pandas._libs.tslibs', + 'pandas._libs.dtypes', 'pandas.plotting', 'pandas.stats', 'pandas.types',
- de-privatize names - move check_all_nulls to nattype - export NPY_NAT and iNaT from nattype - update imports
https://api.github.com/repos/pandas-dev/pandas/pulls/18081
2017-11-02T15:47:09Z
2017-11-04T00:12:56Z
null
2017-12-08T19:40:28Z
Move comparison utilities to np_datetime;
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index d2492064c900c..8a882a465f9f7 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -55,6 +55,8 @@ from datetime cimport ( from datetime import time as datetime_time from tslibs.np_datetime cimport (check_dts_bounds, + reverse_ops, + cmp_scalar, pandas_datetimestruct, dt64_to_dtstruct, dtstruct_to_dt64, pydatetime_to_dt64, pydate_to_dt64) @@ -893,31 +895,6 @@ def unique_deltas(ndarray[int64_t] arr): return result -cdef inline bint _cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1: - if op == Py_EQ: - return lhs == rhs - elif op == Py_NE: - return lhs != rhs - elif op == Py_LT: - return lhs < rhs - elif op == Py_LE: - return lhs <= rhs - elif op == Py_GT: - return lhs > rhs - elif op == Py_GE: - return lhs >= rhs - - -cdef int _reverse_ops[6] - -_reverse_ops[Py_LT] = Py_GT -_reverse_ops[Py_LE] = Py_GE -_reverse_ops[Py_EQ] = Py_EQ -_reverse_ops[Py_NE] = Py_NE -_reverse_ops[Py_GT] = Py_LT -_reverse_ops[Py_GE] = Py_LE - - cdef str _NDIM_STRING = "ndim" # This is PITA. 
Because we inherit from datetime, which has very specific @@ -970,7 +947,7 @@ cdef class _Timestamp(datetime): raise TypeError('Cannot compare type %r with type %r' % (type(self).__name__, type(other).__name__)) - return PyObject_RichCompare(other, self, _reverse_ops[op]) + return PyObject_RichCompare(other, self, reverse_ops[op]) else: if op == Py_EQ: return False @@ -980,7 +957,7 @@ cdef class _Timestamp(datetime): (type(self).__name__, type(other).__name__)) self._assert_tzawareness_compat(other) - return _cmp_scalar(self.value, ots.value, op) + return cmp_scalar(self.value, ots.value, op) def __reduce_ex__(self, protocol): # python 3.6 compat @@ -2066,7 +2043,7 @@ cdef class _Timedelta(timedelta): type(other).__name__)) if util.is_array(other): return PyObject_RichCompare(np.array([self]), other, op) - return PyObject_RichCompare(other, self, _reverse_ops[op]) + return PyObject_RichCompare(other, self, reverse_ops[op]) else: if op == Py_EQ: return False @@ -2075,7 +2052,7 @@ cdef class _Timedelta(timedelta): raise TypeError('Cannot compare type %r with type %r' % (type(self).__name__, type(other).__name__)) - return _cmp_scalar(self.value, ots.value, op) + return cmp_scalar(self.value, ots.value, op) def _ensure_components(_Timedelta self): """ diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 0e6eda0c88beb..ab77049a9ff5b 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -12,6 +12,10 @@ cdef extern from "../src/datetime/np_datetime.h": int32_t month, day, hour, min, sec, us, ps, as +cdef int reverse_ops[6] + +cdef bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1 + cdef check_dts_bounds(pandas_datetimestruct *dts) cdef int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 217cde2aad677..1c635e6cecc13 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ 
b/pandas/_libs/tslibs/np_datetime.pyx @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # cython: profile=False +from cpython cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE + from cpython.datetime cimport (datetime, date, PyDateTime_IMPORT, PyDateTime_GET_YEAR, PyDateTime_GET_MONTH, @@ -47,6 +49,35 @@ cdef extern from "../src/datetime/np_datetime.h": pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS # ---------------------------------------------------------------------- +# Comparison + +cdef int reverse_ops[6] + +reverse_ops[Py_LT] = Py_GT +reverse_ops[Py_LE] = Py_GE +reverse_ops[Py_EQ] = Py_EQ +reverse_ops[Py_NE] = Py_NE +reverse_ops[Py_GT] = Py_LT +reverse_ops[Py_GE] = Py_LE + + +cdef inline bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1: + """ + cmp_scalar is a more performant version of PyObject_RichCompare + typed for int64_t arguments. + """ + if op == Py_EQ: + return lhs == rhs + elif op == Py_NE: + return lhs != rhs + elif op == Py_LT: + return lhs < rhs + elif op == Py_LE: + return lhs <= rhs + elif op == Py_GT: + return lhs > rhs + elif op == Py_GE: + return lhs >= rhs class OutOfBoundsDatetime(ValueError):
we will need them available upstream of tslib De-privatizes the appropriate names. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18080
2017-11-02T15:22:09Z
2017-11-02T23:49:55Z
2017-11-02T23:49:55Z
2017-12-08T19:38:58Z
CI: temp disable scipy on windows 3.6 build
diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index af7a90b126f22..5d6c074ec1f85 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -6,7 +6,7 @@ openpyxl xlsxwriter xlrd xlwt -scipy +# scipy feather-format numexpr pytables
xref #18073
https://api.github.com/repos/pandas-dev/pandas/pulls/18078
2017-11-02T10:16:11Z
2017-11-02T10:48:36Z
2017-11-02T10:48:36Z
2017-11-02T11:07:51Z
DOC: Remove duplicate 'in' from contributing.rst (#18040)
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index e345f79dad5c2..1eb3a52e1b050 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -330,7 +330,7 @@ The utility script ``scripts/api_rst_coverage.py`` can be used to compare the list of methods documented in ``doc/source/api.rst`` (which is used to generate the `API Reference <http://pandas.pydata.org/pandas-docs/stable/api.html>`_ page) and the actual public methods. -This will identify methods documented in in ``doc/source/api.rst`` that are not actually +This will identify methods documented in ``doc/source/api.rst`` that are not actually class methods, and existing methods that are not documented in ``doc/source/api.rst``.
- [x] closes #18040 (already closed) - [ ] Tests added / passed (N/A) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry (N/A) Removes a duplicate "in" from the second sentence of the last paragraph of the "About the _pandas_ documentation" section of the "Contributing to Pandas" docs.
https://api.github.com/repos/pandas-dev/pandas/pulls/18076
2017-11-02T07:04:36Z
2017-11-02T11:25:19Z
2017-11-02T11:25:19Z
2017-12-08T18:32:29Z
Move scalar arithmetic tests to tests.scalars
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 2f3d567599fa6..bf0217e9bf22a 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -199,25 +199,6 @@ def test_ufunc_coercions(self): tm.assert_index_equal(result, exp) assert result.freq == 'D' - def test_overflow_offset(self): - # xref https://github.com/statsmodels/statsmodels/issues/3374 - # ends up multiplying really large numbers which overflow - - t = Timestamp('2017-01-13 00:00:00', freq='D') - offset = 20169940 * pd.offsets.Day(1) - - def f(): - t + offset - pytest.raises(OverflowError, f) - - def f(): - offset + t - pytest.raises(OverflowError, f) - - def f(): - t - offset - pytest.raises(OverflowError, f) - # GH 10699 @pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex], diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 9341cf2202f4c..bbc8dd6577b2c 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -51,44 +51,6 @@ def test_numeric_compat(self): pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3))) pytest.raises(ValueError, lambda: idx * np.array([1, 2])) - # FIXME: duplicate. This came from `test_timedelta`, whereas the - # version above came from `test_astype`. Make sure there aren't more - # duplicates. 
- def test_numeric_compat__(self): - - idx = self._holder(np.arange(5, dtype='int64')) - didx = self._holder(np.arange(5, dtype='int64') ** 2) - result = idx * 1 - tm.assert_index_equal(result, idx) - - result = 1 * idx - tm.assert_index_equal(result, idx) - - result = idx / 1 - tm.assert_index_equal(result, idx) - - result = idx // 1 - tm.assert_index_equal(result, idx) - - result = idx * np.array(5, dtype='int64') - tm.assert_index_equal(result, - self._holder(np.arange(5, dtype='int64') * 5)) - - result = idx * np.arange(5, dtype='int64') - tm.assert_index_equal(result, didx) - - result = idx * Series(np.arange(5, dtype='int64')) - tm.assert_index_equal(result, didx) - - result = idx * Series(np.arange(5, dtype='float64') + 0.1) - tm.assert_index_equal(result, self._holder(np.arange( - 5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1))) - - # invalid - pytest.raises(TypeError, lambda: idx * idx) - pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3))) - pytest.raises(ValueError, lambda: idx * np.array([1, 2])) - def test_ufunc_coercions(self): # normal ops are also tested in tseries/test_timedeltas.py idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'], @@ -406,47 +368,6 @@ def test_addition_ops(self): expected = Timestamp('20130102') assert result == expected - # TODO: Split by op, better name - def test_ops(self): - td = Timedelta(10, unit='d') - assert -td == Timedelta(-10, unit='d') - assert +td == Timedelta(10, unit='d') - assert td - td == Timedelta(0, unit='ns') - assert (td - pd.NaT) is pd.NaT - assert td + td == Timedelta(20, unit='d') - assert (td + pd.NaT) is pd.NaT - assert td * 2 == Timedelta(20, unit='d') - assert (td * pd.NaT) is pd.NaT - assert td / 2 == Timedelta(5, unit='d') - assert td // 2 == Timedelta(5, unit='d') - assert abs(td) == td - assert abs(-td) == td - assert td / td == 1 - assert (td / pd.NaT) is np.nan - assert (td // pd.NaT) is np.nan - - # invert - assert -td == Timedelta('-10d') - assert td * -1 == 
Timedelta('-10d') - assert -1 * td == Timedelta('-10d') - assert abs(-td) == Timedelta('10d') - - # invalid multiply with another timedelta - pytest.raises(TypeError, lambda: td * td) - - # can't operate with integers - pytest.raises(TypeError, lambda: td + 2) - pytest.raises(TypeError, lambda: td - 2) - - def test_ops_offsets(self): - td = Timedelta(10, unit='d') - assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1) - assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td - assert 240 == td / pd.offsets.Hour(1) - assert 1 / 240.0 == pd.offsets.Hour(1) / td - assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1) - assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td - def test_ops_ndarray(self): td = Timedelta('1 day') @@ -530,50 +451,6 @@ def test_ops_series_object(self): tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp) tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp) - def test_ops_notimplemented(self): - class Other: - pass - - other = Other() - - td = Timedelta('1 day') - assert td.__add__(other) is NotImplemented - assert td.__sub__(other) is NotImplemented - assert td.__truediv__(other) is NotImplemented - assert td.__mul__(other) is NotImplemented - assert td.__floordiv__(other) is NotImplemented - - def test_timedelta_ops_scalar(self): - # GH 6808 - base = pd.to_datetime('20130101 09:01:12.123456') - expected_add = pd.to_datetime('20130101 09:01:22.123456') - expected_sub = pd.to_datetime('20130101 09:01:02.123456') - - for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10), - np.timedelta64(10, 's'), - np.timedelta64(10000000000, 'ns'), - pd.offsets.Second(10)]: - result = base + offset - assert result == expected_add - - result = base - offset - assert result == expected_sub - - base = pd.to_datetime('20130102 09:01:12.123456') - expected_add = pd.to_datetime('20130103 09:01:22.123456') - expected_sub = pd.to_datetime('20130101 09:01:02.123456') - - for offset in [pd.to_timedelta('1 day, 00:00:10'), - 
pd.to_timedelta('1 days, 00:00:10'), - timedelta(days=1, seconds=10), - np.timedelta64(1, 'D') + np.timedelta64(10, 's'), - pd.offsets.Day() + pd.offsets.Second(10)]: - result = base + offset - assert result == expected_add - - result = base - offset - assert result == expected_sub - def test_timedelta_ops_with_missing_values(self): # setup s1 = pd.to_timedelta(Series(['00:00:01'])) diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index d4434b3af385b..17c818779c76d 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -40,6 +40,91 @@ def test_to_timedelta_on_nanoseconds(self): pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc')) + def test_ops_notimplemented(self): + class Other: + pass + + other = Other() + + td = Timedelta('1 day') + assert td.__add__(other) is NotImplemented + assert td.__sub__(other) is NotImplemented + assert td.__truediv__(other) is NotImplemented + assert td.__mul__(other) is NotImplemented + assert td.__floordiv__(other) is NotImplemented + + def test_timedelta_ops_scalar(self): + # GH 6808 + base = pd.to_datetime('20130101 09:01:12.123456') + expected_add = pd.to_datetime('20130101 09:01:22.123456') + expected_sub = pd.to_datetime('20130101 09:01:02.123456') + + for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10), + np.timedelta64(10, 's'), + np.timedelta64(10000000000, 'ns'), + pd.offsets.Second(10)]: + result = base + offset + assert result == expected_add + + result = base - offset + assert result == expected_sub + + base = pd.to_datetime('20130102 09:01:12.123456') + expected_add = pd.to_datetime('20130103 09:01:22.123456') + expected_sub = pd.to_datetime('20130101 09:01:02.123456') + + for offset in [pd.to_timedelta('1 day, 00:00:10'), + pd.to_timedelta('1 days, 00:00:10'), + timedelta(days=1, seconds=10), + np.timedelta64(1, 'D') + np.timedelta64(10, 's'), + pd.offsets.Day() + pd.offsets.Second(10)]: + result = base + 
offset + assert result == expected_add + + result = base - offset + assert result == expected_sub + + def test_ops_offsets(self): + td = Timedelta(10, unit='d') + assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1) + assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td + assert 240 == td / pd.offsets.Hour(1) + assert 1 / 240.0 == pd.offsets.Hour(1) / td + assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1) + assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td + + # TODO: Split by op, better name + def test_ops(self): + td = Timedelta(10, unit='d') + assert -td == Timedelta(-10, unit='d') + assert +td == Timedelta(10, unit='d') + assert td - td == Timedelta(0, unit='ns') + assert (td - pd.NaT) is pd.NaT + assert td + td == Timedelta(20, unit='d') + assert (td + pd.NaT) is pd.NaT + assert td * 2 == Timedelta(20, unit='d') + assert (td * pd.NaT) is pd.NaT + assert td / 2 == Timedelta(5, unit='d') + assert td // 2 == Timedelta(5, unit='d') + assert abs(td) == td + assert abs(-td) == td + assert td / td == 1 + assert (td / pd.NaT) is np.nan + assert (td // pd.NaT) is np.nan + + # invert + assert -td == Timedelta('-10d') + assert td * -1 == Timedelta('-10d') + assert -1 * td == Timedelta('-10d') + assert abs(-td) == Timedelta('10d') + + # invalid multiply with another timedelta + pytest.raises(TypeError, lambda: td * td) + + # can't operate with integers + pytest.raises(TypeError, lambda: td + 2) + pytest.raises(TypeError, lambda: td - 2) + class TestTimedeltas(object): _multiprocess_can_split_ = True diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index efee096797510..4cd9a2fadeb32 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -28,6 +28,24 @@ RESO_MS, RESO_SEC) +class TestTimestampArithmetic(object): + def test_overflow_offset(self): + # xref https://github.com/statsmodels/statsmodels/issues/3374 + # ends up multiplying really large numbers which 
overflow + + stamp = Timestamp('2017-01-13 00:00:00', freq='D') + offset = 20169940 * offsets.Day(1) + + with pytest.raises(OverflowError): + stamp + offset + + with pytest.raises(OverflowError): + offset + stamp + + with pytest.raises(OverflowError): + stamp - offset + + class TestTimestamp(object): def test_constructor(self):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18075
2017-11-02T04:58:11Z
2017-11-02T11:56:16Z
2017-11-02T11:56:16Z
2017-11-02T14:54:19Z
Index tests in the wrong places
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index 46be24b90faae..0197fc4c52617 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -117,6 +117,15 @@ def test_astype_datetime64(self): dtype='datetime64[ns]') tm.assert_index_equal(result, expected) + def test_astype_object(self): + rng = date_range('1/1/2000', periods=20) + + casted = rng.astype('O') + exp_values = list(rng) + + tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_)) + assert casted.tolist() == exp_values + def test_astype_raises(self): # GH 13149, GH 13209 idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) @@ -287,12 +296,18 @@ def test_to_period_tz_dateutil(self): assert result == expected tm.assert_index_equal(ts.to_period(), xp) - def test_astype_object(self): - # NumPy 1.6.1 weak ns support - rng = date_range('1/1/2000', periods=20) - - casted = rng.astype('O') - exp_values = list(rng) - - tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_)) - assert casted.tolist() == exp_values + def test_to_period_nofreq(self): + idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) + pytest.raises(ValueError, idx.to_period) + + idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], + freq='infer') + assert idx.freqstr == 'D' + expected = pd.PeriodIndex(['2000-01-01', '2000-01-02', + '2000-01-03'], freq='D') + tm.assert_index_equal(idx.to_period(), expected) + + # GH 7606 + idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) + assert idx.freqstr is None + tm.assert_index_equal(idx.to_period(), expected) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 88bf8a4024112..cc6eeb44c99c9 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -8,10 +8,10 @@ import pandas.util.testing as tm from 
pandas.compat import lrange from pandas.compat.numpy import np_datetime64_compat -from pandas import (DatetimeIndex, Index, date_range, Series, DataFrame, +from pandas import (DatetimeIndex, Index, date_range, DataFrame, Timestamp, datetime, offsets) -from pandas.util.testing import assert_series_equal, assert_almost_equal +from pandas.util.testing import assert_almost_equal randn = np.random.randn @@ -223,22 +223,6 @@ def test_append_join_nondatetimeindex(self): # it works rng.join(idx, how='outer') - def test_to_period_nofreq(self): - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) - pytest.raises(ValueError, idx.to_period) - - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], - freq='infer') - assert idx.freqstr == 'D' - expected = pd.PeriodIndex(['2000-01-01', '2000-01-02', - '2000-01-03'], freq='D') - tm.assert_index_equal(idx.to_period(), expected) - - # GH 7606 - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) - assert idx.freqstr is None - tm.assert_index_equal(idx.to_period(), expected) - def test_comparisons_coverage(self): rng = date_range('1/1/2000', periods=10) @@ -567,13 +551,6 @@ def test_does_not_convert_mixed_integer(self): assert cols.dtype == joined.dtype tm.assert_numpy_array_equal(cols.values, joined.values) - def test_slice_keeps_name(self): - # GH4226 - st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') - et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles') - dr = pd.date_range(st, et, freq='H', name='timebucket') - assert dr[1:].name == dr.name - def test_join_self(self): index = date_range('1/1/2000', periods=10) kinds = 'outer', 'inner', 'left', 'right' @@ -687,59 +664,3 @@ def test_factorize_dst(self): arr, res = obj.factorize() tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp)) tm.assert_index_equal(res, idx) - - def test_slice_with_negative_step(self): - ts = Series(np.arange(20), - date_range('2014-01-01', periods=20, freq='MS')) - SLC = 
pd.IndexSlice - - def assert_slices_equivalent(l_slc, i_slc): - assert_series_equal(ts[l_slc], ts.iloc[i_slc]) - assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) - assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) - - assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1]) - assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1]) - - assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1]) - assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1]) - - assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1], - SLC[13:8:-1]) - assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp( - '2014-10-01'):-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1], - SLC[13:8:-1]) - assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1], - SLC[13:8:-1]) - - assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0]) - - def test_slice_with_zero_step_raises(self): - ts = Series(np.arange(20), - date_range('2014-01-01', periods=20, freq='MS')) - tm.assert_raises_regex(ValueError, 'slice step cannot be zero', - lambda: ts[::0]) - tm.assert_raises_regex(ValueError, 'slice step cannot be zero', - lambda: ts.loc[::0]) - tm.assert_raises_regex(ValueError, 'slice step cannot be zero', - lambda: ts.loc[::0]) - - def test_slice_bounds_empty(self): - # GH 14354 - empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015') - - right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc') - exp = Timestamp('2015-01-02 23:59:59.999999999') - assert right == exp - - left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc') - exp = Timestamp('2015-01-02 00:00:00') - assert left == exp - - def test_slice_duplicate_monotonic(self): - # https://github.com/pandas-dev/pandas/issues/16515 - idx = pd.DatetimeIndex(['2017', '2017']) - result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc') - expected = Timestamp('2017-01-01') - assert result == expected diff --git 
a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 14217ae291a4c..6e66e4a36f905 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -383,49 +383,6 @@ def test_resolution(self): tz=tz) assert idx.resolution == expected - def test_union(self): - for tz in self.tz: - # union - rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) - expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz) - - rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) - expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) - - rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - other3 = pd.DatetimeIndex([], tz=tz) - expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - - for rng, other, expected in [(rng1, other1, expected1), - (rng2, other2, expected2), - (rng3, other3, expected3)]: - - result_union = rng.union(other) - tm.assert_index_equal(result_union, expected) - - def test_difference(self): - for tz in self.tz: - # diff - rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) - expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - - rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) - expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz) - - rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - other3 = pd.DatetimeIndex([], tz=tz) - expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - - for rng, other, expected in [(rng1, other1, expected1), - (rng2, other2, expected2), - (rng3, other3, expected3)]: - result_diff = rng.difference(other) - tm.assert_index_equal(result_diff, expected) - def 
test_comp_nat(self): left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]) diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index e7d03aa193cbd..50ee88bd82f40 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -12,6 +12,68 @@ class TestSlicing(object): + def test_slice_keeps_name(self): + # GH4226 + st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') + et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles') + dr = pd.date_range(st, et, freq='H', name='timebucket') + assert dr[1:].name == dr.name + + def test_slice_with_negative_step(self): + ts = Series(np.arange(20), + date_range('2014-01-01', periods=20, freq='MS')) + SLC = pd.IndexSlice + + def assert_slices_equivalent(l_slc, i_slc): + tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc]) + tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) + tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) + + assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1]) + assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1]) + + assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1]) + assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1]) + + assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1], + SLC[13:8:-1]) + assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp( + '2014-10-01'):-1], SLC[13:8:-1]) + assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1], + SLC[13:8:-1]) + assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1], + SLC[13:8:-1]) + + assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0]) + + def test_slice_with_zero_step_raises(self): + ts = Series(np.arange(20), + date_range('2014-01-01', periods=20, freq='MS')) + tm.assert_raises_regex(ValueError, 'slice step cannot be zero', + lambda: ts[::0]) + 
tm.assert_raises_regex(ValueError, 'slice step cannot be zero', + lambda: ts.loc[::0]) + tm.assert_raises_regex(ValueError, 'slice step cannot be zero', + lambda: ts.loc[::0]) + + def test_slice_bounds_empty(self): + # GH 14354 + empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015') + + right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc') + exp = Timestamp('2015-01-02 23:59:59.999999999') + assert right == exp + + left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc') + exp = Timestamp('2015-01-02 00:00:00') + assert left == exp + + def test_slice_duplicate_monotonic(self): + # https://github.com/pandas-dev/pandas/issues/16515 + idx = pd.DatetimeIndex(['2017', '2017']) + result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc') + expected = Timestamp('2017-01-01') + assert result == expected def test_slice_year(self): dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index ff436e0501849..5df75338d01d7 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -1,5 +1,6 @@ from datetime import datetime +import pytest import numpy as np import pandas as pd @@ -11,14 +12,30 @@ START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) -class TestDatetimeIndex(object): +class TestDatetimeIndexSetOps(object): + tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore', + 'dateutil/US/Pacific'] - def test_union(self): - i1 = Int64Index(np.arange(0, 20, 2)) - i2 = Int64Index(np.arange(10, 30, 2)) - result = i1.union(i2) - expected = Int64Index(np.arange(0, 30, 2)) - tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("tz", tz) + def test_union(self, tz): + rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) + expected1 = pd.date_range('1/1/2000', 
freq='D', periods=10, tz=tz) + + rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) + expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) + + rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other3 = pd.DatetimeIndex([], tz=tz) + expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + + for rng, other, expected in [(rng1, other1, expected1), + (rng2, other2, expected2), + (rng3, other3, expected3)]: + + result_union = rng.union(other) + tm.assert_index_equal(result_union, expected) def test_union_coverage(self): idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02']) @@ -83,62 +100,62 @@ def test_union_with_DatetimeIndex(self): i1.union(i2) # Works i2.union(i1) # Fails with "AttributeError: can't set attribute" - def test_intersection(self): + @pytest.mark.parametrize("tz", [None, 'Asia/Tokyo', 'US/Eastern', + 'dateutil/US/Pacific']) + def test_intersection(self, tz): # GH 4690 (with tz) - for tz in [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']: - base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') - - # if target has the same name, it is preserved - rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx') - expected2 = date_range('6/1/2000', '6/20/2000', freq='D', - name='idx') - - # if target name is different, it will be reset - rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other') - expected3 = date_range('6/1/2000', '6/20/2000', freq='D', - name=None) - - rng4 = date_range('7/1/2000', '7/31/2000', freq='D', name='idx') - expected4 = DatetimeIndex([], name='idx') - - for (rng, expected) in [(rng2, expected2), (rng3, expected3), - (rng4, expected4)]: - result = base.intersection(rng) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - assert result.tz == expected.tz - - # non-monotonic - base = DatetimeIndex(['2011-01-05', 
'2011-01-04', - '2011-01-02', '2011-01-03'], - tz=tz, name='idx') - - rng2 = DatetimeIndex(['2011-01-04', '2011-01-02', - '2011-02-02', '2011-02-03'], - tz=tz, name='idx') - expected2 = DatetimeIndex( - ['2011-01-04', '2011-01-02'], tz=tz, name='idx') - - rng3 = DatetimeIndex(['2011-01-04', '2011-01-02', - '2011-02-02', '2011-02-03'], - tz=tz, name='other') - expected3 = DatetimeIndex( - ['2011-01-04', '2011-01-02'], tz=tz, name=None) - - # GH 7880 - rng4 = date_range('7/1/2000', '7/31/2000', freq='D', tz=tz, - name='idx') - expected4 = DatetimeIndex([], tz=tz, name='idx') - - for (rng, expected) in [(rng2, expected2), (rng3, expected3), - (rng4, expected4)]: - result = base.intersection(rng) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq is None - assert result.tz == expected.tz - + base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') + + # if target has the same name, it is preserved + rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx') + expected2 = date_range('6/1/2000', '6/20/2000', freq='D', name='idx') + + # if target name is different, it will be reset + rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other') + expected3 = date_range('6/1/2000', '6/20/2000', freq='D', name=None) + + rng4 = date_range('7/1/2000', '7/31/2000', freq='D', name='idx') + expected4 = DatetimeIndex([], name='idx') + + for (rng, expected) in [(rng2, expected2), (rng3, expected3), + (rng4, expected4)]: + result = base.intersection(rng) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + # non-monotonic + base = DatetimeIndex(['2011-01-05', '2011-01-04', + '2011-01-02', '2011-01-03'], + tz=tz, name='idx') + + rng2 = DatetimeIndex(['2011-01-04', '2011-01-02', + '2011-02-02', '2011-02-03'], + tz=tz, name='idx') + expected2 = DatetimeIndex(['2011-01-04', '2011-01-02'], + tz=tz, name='idx') + + 
rng3 = DatetimeIndex(['2011-01-04', '2011-01-02', + '2011-02-02', '2011-02-03'], + tz=tz, name='other') + expected3 = DatetimeIndex(['2011-01-04', '2011-01-02'], + tz=tz, name=None) + + # GH 7880 + rng4 = date_range('7/1/2000', '7/31/2000', freq='D', tz=tz, + name='idx') + expected4 = DatetimeIndex([], tz=tz, name='idx') + + for (rng, expected) in [(rng2, expected2), (rng3, expected3), + (rng4, expected4)]: + result = base.intersection(rng) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq is None + assert result.tz == expected.tz + + def test_intersection_empty(self): # empty same freq GH2129 rng = date_range('6/1/2000', '6/15/2000', freq='T') result = rng[0:0].intersection(rng) @@ -155,6 +172,26 @@ def test_intersection_bug_1708(self): result = index_1 & index_2 assert len(result) == 0 + @pytest.mark.parametrize("tz", tz) + def test_difference(self, tz): + rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) + expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + + rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) + expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz) + + rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other3 = pd.DatetimeIndex([], tz=tz) + expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + + for rng, other, expected in [(rng1, other1, expected1), + (rng2, other2, expected2), + (rng3, other3, expected3)]: + result_diff = rng.difference(other) + tm.assert_index_equal(result_diff, expected) + def test_difference_freq(self): # GH14323: difference of DatetimeIndex should not preserve frequency diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py index 8e5eae2a7a3ef..7c5f82193da6d 100644 --- 
a/pandas/tests/indexes/timedeltas/test_partial_slicing.py +++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py @@ -9,6 +9,10 @@ class TestSlicing(object): + def test_slice_keeps_name(self): + # GH4226 + dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket') + assert dr[1:].name == dr.name def test_partial_slice(self): rng = timedelta_range('1 day 10:11:12', freq='h', periods=500) diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 2683110f2f02e..615c0d0ffa210 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -233,12 +233,6 @@ def test_join_self(self): joined = index.join(index, how=kind) tm.assert_index_equal(index, joined) - def test_slice_keeps_name(self): - - # GH4226 - dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket') - assert dr[1:].name == dr.name - def test_does_not_convert_mixed_integer(self): df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs: randn(),
https://api.github.com/repos/pandas-dev/pandas/pulls/18074
2017-11-02T04:48:38Z
2017-11-02T22:49:23Z
2017-11-02T22:49:23Z
2017-12-08T19:40:30Z