after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
if other is tslib.NaT:
result = self._nat_new(box=False)
else:
other = Timestamp(other)
i8 = self.asi8
result = i8 + other.value
result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
|
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
other = Timestamp(other)
i8 = self.asi8
result = i8 + other.value
result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
|
https://github.com/pandas-dev/pandas/issues/11718
|
In [1]: import pandas as pd
In [2]: pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
Traceback (most recent call last):
File "<ipython-input-2-5e0738cec5fa>", line 1, in <module>
pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
File "pandas\tslib.pyx", line 1099, in pandas.tslib._NaT.__sub__ (pandas\tslib.c:21618)
File "pandas\tslib.pyx", line 1026, in pandas.tslib._Timestamp.__sub__ (pandas\tslib.c:20036)
TypeError: Timestamp subtraction must have the same timezones or no timezones
|
TypeError
|
def _sub_datelike(self, other):
from pandas import DatetimeIndex
if other is tslib.NaT:
result = self._nat_new(box=False)
else:
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
return DatetimeIndex(result, name=self.name, copy=False)
|
def _sub_datelike(self, other):
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
|
https://github.com/pandas-dev/pandas/issues/11718
|
In [1]: import pandas as pd
In [2]: pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
Traceback (most recent call last):
File "<ipython-input-2-5e0738cec5fa>", line 1, in <module>
pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
File "pandas\tslib.pyx", line 1099, in pandas.tslib._NaT.__sub__ (pandas\tslib.c:21618)
File "pandas\tslib.pyx", line 1026, in pandas.tslib._Timestamp.__sub__ (pandas\tslib.c:20036)
TypeError: Timestamp subtraction must have the same timezones or no timezones
|
TypeError
|
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
"an instance of %r" % type(ax).__name__
)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = ax.min(), ax.max()
first, last = _get_range_edges(
first, last, self.freq, closed=self.closed, base=self.base
)
tz = ax.tz
# GH #12037
# use first/last directly instead of call replace() on them
# because replace() will swallow the nanosecond part
# thus last bin maybe slightly before the end if the end contains
# nanosecond part and lead to `Values falls after last bin` error
binner = labels = DatetimeIndex(
freq=self.freq, start=first, end=last, tz=tz, name=ax.name
)
# a little hack
trimmed = False
if len(binner) > 2 and binner[-2] == last and self.closed == "right":
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == "right":
labels = binner
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, tslib.NaT)
labels = labels.insert(0, tslib.NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[: len(bins)]
return binner, bins, labels
|
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
"an instance of %r" % type(ax).__name__
)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = ax.min(), ax.max()
first, last = _get_range_edges(
first, last, self.freq, closed=self.closed, base=self.base
)
tz = ax.tz
binner = labels = DatetimeIndex(
freq=self.freq,
start=first.replace(tzinfo=None),
end=last.replace(tzinfo=None),
tz=tz,
name=ax.name,
)
# a little hack
trimmed = False
if len(binner) > 2 and binner[-2] == last and self.closed == "right":
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == "right":
labels = binner
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, tslib.NaT)
labels = labels.insert(0, tslib.NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[: len(bins)]
return binner, bins, labels
|
https://github.com/pandas-dev/pandas/issues/12037
|
4.035752
4035751999
ValueError Traceback (most recent call last)
<ipython-input-14-92e377227823> in <module>()
5 period_nanos=int(period_seconds*(10**9))
6 print period_nanos
----> 7 res= dfi.value.resample(pd.tseries.offsets.Nano(period_nanos), how=[np.min, np.max,'mean'])
8
9 nullrows=pd.isnull(res).any(1).nonzero()[0]
C:\Users\USER1\Anaconda2\lib\site-packages\pandas\core\generic.pyc in resample(self, rule, how, axis, fill_method, closed, label, convention, kind, loffset, limit, base)
3641 fill_method=fill_method, convention=convention,
3642 limit=limit, base=base)
-> 3643 return sampler.resample(self).__finalize__(self)
3644
3645 def first(self, offset):
C:\Users\USER1\Anaconda2\lib\site-packages\pandas\tseries\resample.pyc in resample(self, obj)
80
81 if isinstance(ax, DatetimeIndex):
---> 82 rs = self._resample_timestamps()
83 elif isinstance(ax, PeriodIndex):
84 offset = to_offset(self.freq)
C:\Users\USER1\Anaconda2\lib\site-packages\pandas\tseries\resample.pyc in _resample_timestamps(self, kind)
274 axlabels = self.ax
275
--> 276 self._get_binner_for_resample(kind=kind)
277 grouper = self.grouper
278 binner = self.binner
C:\Users\USER1\Anaconda2\lib\site-packages\pandas\tseries\resample.pyc in _get_binner_for_resample(self, kind)
118 kind = self.kind
119 if kind is None or kind == 'timestamp':
--> 120 self.binner, bins, binlabels = self._get_time_bins(ax)
121 elif kind == 'timedelta':
122 self.binner, bins, binlabels = self._get_time_delta_bins(ax)
C:\Users\USER1\Anaconda2\lib\site-packages\pandas\tseries\resample.pyc in _get_time_bins(self, ax)
179
180 # general version, knowing nothing about relative frequencies
--> 181 bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
182
183 if self.closed == 'right':
pandas\lib.pyx in pandas.lib.generate_bins_dt64 (pandas\lib.c:20875)()
ValueError: Values falls after last bin
|
ValueError
|
def _wrap_result(self, result, use_codes=True, name=None):
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
# leave as it is to keep extract and get_dummies results
# can be merged to _wrap_result_expand in v0.17
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.index import Index
if not hasattr(result, "ndim"):
return result
if result.ndim == 1:
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
name = name or getattr(result, "name", None) or self._orig.name
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
return Index(result, name=name)
return Series(result, index=self._orig.index, name=name)
else:
assert result.ndim < 3
return DataFrame(result, index=self._orig.index)
|
def _wrap_result(self, result, use_codes=True, name=None):
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
# leave as it is to keep extract and get_dummies results
# can be merged to _wrap_result_expand in v0.17
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.index import Index
if not hasattr(result, "ndim"):
return result
name = name or getattr(result, "name", None) or self._orig.name
if result.ndim == 1:
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
return Index(result, name=name)
return Series(result, index=self._orig.index, name=name)
else:
assert result.ndim < 3
return DataFrame(result, index=self._orig.index)
|
https://github.com/pandas-dev/pandas/issues/12180
|
s = pd.Series(['name', 'email|Name|address', 'address|email'])
s.str.get_dummies(sep='|')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-1a39a6dcd56b> in <module>()
----> 1 s.str.get_dummies(sep='|')
/Users/dgrady/anaconda/envs/python3/lib/python3.5/site-packages/pandas/core/strings.py in get_dummies(self, sep)
1377 data = self._orig.astype(str) if self._is_categorical else self._data
1378 result = str_get_dummies(data, sep)
-> 1379 return self._wrap_result(result, use_codes=(not self._is_categorical))
1380
1381 @copy(str_translate)
/Users/dgrady/anaconda/envs/python3/lib/python3.5/site-packages/pandas/core/strings.py in _wrap_result(self, result, use_codes, name)
1100 if not hasattr(result, 'ndim'):
1101 return result
-> 1102 name = name or getattr(result, 'name', None) or self._orig.name
1103
1104 if result.ndim == 1:
/Users/dgrady/anaconda/envs/python3/lib/python3.5/site-packages/pandas/core/generic.py in __nonzero__(self)
729 raise ValueError("The truth value of a {0} is ambiguous. "
730 "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
--> 731 .format(self.__class__.__name__))
732
733 __bool__ = __nonzero__
ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
|
ValueError
|
def setup(self):
self.rng = date_range(start="1/1/2000", periods=20000, freq="H")
self.strings = [x.strftime("%Y-%m-%d %H:%M:%S") for x in self.rng]
self.strings_nosep = [x.strftime("%Y%m%d %H:%M:%S") for x in self.rng]
self.strings_tz_space = [
x.strftime("%Y-%m-%d %H:%M:%S") + " -0800" for x in self.rng
]
|
def setup(self):
self.N = 100000
self.rng = date_range(start="1/1/2000", periods=self.N, freq="T")
if hasattr(Series, "convert"):
Series.resample = Series.convert
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.rng = date_range(start="1/1/2000", periods=20000, freq="H")
self.strings = [x.strftime("%Y-%m-%d %H:%M:%S") for x in self.rng]
|
https://github.com/pandas-dev/pandas/issues/11871
|
import pandas
pandas.__version__
u'0.17.1'
pandas.to_datetime('2005-1-13', format='%Y-%m-%d')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/util/decorators.py", line 89, in wrapper
return func(*args, **kwargs)
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 276, in to_datetime
unit=unit, infer_datetime_format=infer_datetime_format)
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 397, in _to_datetime
return _convert_listlike(np.array([ arg ]), box, format)[0]
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 383, in _convert_listlike
raise e
ValueError: time data '2005-1-13' does match format specified
|
ValueError
|
def _to_datetime(
arg,
errors="raise",
dayfirst=False,
yearfirst=False,
utc=None,
box=True,
format=None,
exact=True,
unit="ns",
freq=None,
infer_datetime_format=False,
):
"""
Same as to_datetime, but accept freq for
DatetimeIndex internal construction
"""
from pandas.core.series import Series
from pandas.tseries.index import DatetimeIndex
def _convert_listlike(arg, box, format, name=None):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype="O")
# these are shortcutable
if com.is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
return DatetimeIndex(arg, tz="utc" if utc else None, name=name)
except ValueError:
pass
return arg
elif com.is_datetime64tz_dtype(arg):
if not isinstance(arg, DatetimeIndex):
return DatetimeIndex(arg, tz="utc" if utc else None)
if utc:
arg = arg.tz_convert(None)
return arg
elif format is None and com.is_integer_dtype(arg) and unit == "ns":
result = arg.astype("datetime64[ns]")
if box:
return DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
arg = com._ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because this path makes process slower in this
# special case
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
try:
result = None
if format is not None:
# shortcut formatting here
if format == "%Y%m%d":
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError(
"cannot convert the input to '%Y%m%d' date format"
)
# fallback
if result is None:
try:
result = tslib.array_strptime(
arg, format, exact=exact, errors=errors
)
except tslib.OutOfBoundsDatetime:
if errors == "raise":
raise
result = arg
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == "raise":
raise
result = arg
if result is None and (format is None or infer_datetime_format):
result = tslib.array_to_datetime(
arg,
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
freq=freq,
unit=unit,
require_iso8601=require_iso8601,
)
if com.is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
except ValueError as e:
try:
values, tz = tslib.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
if arg is None:
return arg
elif isinstance(arg, tslib.Timestamp):
return arg
elif isinstance(arg, Series):
values = _convert_listlike(arg._values, False, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, box, format, name=arg.name)
elif com.is_list_like(arg):
return _convert_listlike(arg, box, format)
return _convert_listlike(np.array([arg]), box, format)[0]
|
def _to_datetime(
arg,
errors="raise",
dayfirst=False,
yearfirst=False,
utc=None,
box=True,
format=None,
exact=True,
unit="ns",
freq=None,
infer_datetime_format=False,
):
"""
Same as to_datetime, but accept freq for
DatetimeIndex internal construction
"""
from pandas.core.series import Series
from pandas.tseries.index import DatetimeIndex
def _convert_listlike(arg, box, format, name=None):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype="O")
# these are shortcutable
if com.is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
return DatetimeIndex(arg, tz="utc" if utc else None, name=name)
except ValueError:
pass
return arg
elif com.is_datetime64tz_dtype(arg):
if not isinstance(arg, DatetimeIndex):
return DatetimeIndex(arg, tz="utc" if utc else None)
if utc:
arg = arg.tz_convert(None)
return arg
elif format is None and com.is_integer_dtype(arg) and unit == "ns":
result = arg.astype("datetime64[ns]")
if box:
return DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
arg = com._ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because this path makes process slower in this
# special case
format_is_iso8601 = (
"%Y-%m-%dT%H:%M:%S.%f".startswith(format)
or "%Y-%m-%d %H:%M:%S.%f".startswith(format)
) and format != "%Y"
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
try:
result = None
if format is not None:
# shortcut formatting here
if format == "%Y%m%d":
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError(
"cannot convert the input to '%Y%m%d' date format"
)
# fallback
if result is None:
try:
result = tslib.array_strptime(
arg, format, exact=exact, errors=errors
)
except tslib.OutOfBoundsDatetime:
if errors == "raise":
raise
result = arg
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == "raise":
raise
result = arg
if result is None and (format is None or infer_datetime_format):
result = tslib.array_to_datetime(
arg,
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
freq=freq,
unit=unit,
require_iso8601=require_iso8601,
)
if com.is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
except ValueError as e:
try:
values, tz = tslib.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
if arg is None:
return arg
elif isinstance(arg, tslib.Timestamp):
return arg
elif isinstance(arg, Series):
values = _convert_listlike(arg._values, False, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, box, format, name=arg.name)
elif com.is_list_like(arg):
return _convert_listlike(arg, box, format)
return _convert_listlike(np.array([arg]), box, format)[0]
|
https://github.com/pandas-dev/pandas/issues/11871
|
import pandas
pandas.__version__
u'0.17.1'
pandas.to_datetime('2005-1-13', format='%Y-%m-%d')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/util/decorators.py", line 89, in wrapper
return func(*args, **kwargs)
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 276, in to_datetime
unit=unit, infer_datetime_format=infer_datetime_format)
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 397, in _to_datetime
return _convert_listlike(np.array([ arg ]), box, format)[0]
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 383, in _convert_listlike
raise e
ValueError: time data '2005-1-13' does match format specified
|
ValueError
|
def _convert_listlike(arg, box, format, name=None):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype="O")
# these are shortcutable
if com.is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
return DatetimeIndex(arg, tz="utc" if utc else None, name=name)
except ValueError:
pass
return arg
elif com.is_datetime64tz_dtype(arg):
if not isinstance(arg, DatetimeIndex):
return DatetimeIndex(arg, tz="utc" if utc else None)
if utc:
arg = arg.tz_convert(None)
return arg
elif format is None and com.is_integer_dtype(arg) and unit == "ns":
result = arg.astype("datetime64[ns]")
if box:
return DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
arg = com._ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because this path makes process slower in this
# special case
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
try:
result = None
if format is not None:
# shortcut formatting here
if format == "%Y%m%d":
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
# fallback
if result is None:
try:
result = tslib.array_strptime(
arg, format, exact=exact, errors=errors
)
except tslib.OutOfBoundsDatetime:
if errors == "raise":
raise
result = arg
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == "raise":
raise
result = arg
if result is None and (format is None or infer_datetime_format):
result = tslib.array_to_datetime(
arg,
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
freq=freq,
unit=unit,
require_iso8601=require_iso8601,
)
if com.is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
except ValueError as e:
try:
values, tz = tslib.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
|
def _convert_listlike(arg, box, format, name=None):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype="O")
# these are shortcutable
if com.is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
return DatetimeIndex(arg, tz="utc" if utc else None, name=name)
except ValueError:
pass
return arg
elif com.is_datetime64tz_dtype(arg):
if not isinstance(arg, DatetimeIndex):
return DatetimeIndex(arg, tz="utc" if utc else None)
if utc:
arg = arg.tz_convert(None)
return arg
elif format is None and com.is_integer_dtype(arg) and unit == "ns":
result = arg.astype("datetime64[ns]")
if box:
return DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
arg = com._ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because this path makes process slower in this
# special case
format_is_iso8601 = (
"%Y-%m-%dT%H:%M:%S.%f".startswith(format)
or "%Y-%m-%d %H:%M:%S.%f".startswith(format)
) and format != "%Y"
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
try:
result = None
if format is not None:
# shortcut formatting here
if format == "%Y%m%d":
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
# fallback
if result is None:
try:
result = tslib.array_strptime(
arg, format, exact=exact, errors=errors
)
except tslib.OutOfBoundsDatetime:
if errors == "raise":
raise
result = arg
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == "raise":
raise
result = arg
if result is None and (format is None or infer_datetime_format):
result = tslib.array_to_datetime(
arg,
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
freq=freq,
unit=unit,
require_iso8601=require_iso8601,
)
if com.is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz="utc" if utc else None, name=name)
return result
except ValueError as e:
try:
values, tz = tslib.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
|
https://github.com/pandas-dev/pandas/issues/11871
|
import pandas
pandas.__version__
u'0.17.1'
pandas.to_datetime('2005-1-13', format='%Y-%m-%d')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/util/decorators.py", line 89, in wrapper
return func(*args, **kwargs)
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 276, in to_datetime
unit=unit, infer_datetime_format=infer_datetime_format)
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 397, in _to_datetime
return _convert_listlike(np.array([ arg ]), box, format)[0]
File "/Users/dpinte/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tseries/tools.py", line 383, in _convert_listlike
raise e
ValueError: time data '2005-1-13' does match format specified
|
ValueError
|
def replace(
self,
to_replace,
value,
inplace=False,
filter=None,
regex=False,
convert=True,
mgr=None,
):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
original_to_replace = to_replace
mask = isnull(self.values)
# try to replace, if we raise an error, convert to ObjectBlock and retry
try:
values, _, to_replace, _ = self._try_coerce_args(self.values, to_replace)
mask = com.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [
b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks
]
return blocks
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
return self.to_object_block(mgr=mgr).replace(
to_replace=original_to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
|
def replace(
self,
to_replace,
value,
inplace=False,
filter=None,
regex=False,
convert=True,
mgr=None,
):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
original_to_replace = to_replace
# try to replace, if we raise an error, convert to ObjectBlock and retry
try:
values, _, to_replace, _ = self._try_coerce_args(self.values, to_replace)
mask = com.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [
b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks
]
return blocks
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
return self.to_object_block(mgr=mgr).replace(
to_replace=original_to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
|
https://github.com/pandas-dev/pandas/issues/11698
|
pandas.DataFrame([('-', pandas.to_datetime('20150101')), ('a', pandas.to_datetime('20150102')), ('b', pandas.to_datetime('20150103'))], columns=['a', 'b']).replace('-', numpy.nan)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-11-bce7dc78fc44> in <module>()
----> 1 pandas.DataFrame([('-', pandas.to_datetime('20150101')), ('a', pandas.to_datetime('20150102')), ('b', pandas.to_datetime('20150103'))], columns=['a', 'b']).replace('-', numpy.nan)
/.../pandas/core/generic.pyc in replace(self, to_replace, value, inplace, limit, regex, method, axis)
3108 elif not com.is_list_like(value): # NA -> 0
3109 new_data = self._data.replace(to_replace=to_replace, value=value,
-> 3110 inplace=inplace, regex=regex)
3111 else:
3112 msg = ('Invalid "to_replace" type: '
/.../pandas/core/internals.pyc in replace(self, **kwargs)
2868
2869 def replace(self, **kwargs):
-> 2870 return self.apply('replace', **kwargs)
2871
2872 def replace_list(self, src_list, dest_list, inplace=False, regex=False, mgr=None):
/.../pandas/core/internals.pyc in apply(self, f, axes, filter, do_integrity_check, consolidate, **kwargs)
2821
2822 kwargs['mgr'] = self
-> 2823 applied = getattr(b, f)(**kwargs)
2824 result_blocks = _extend_blocks(applied, result_blocks)
2825
/.../pandas/core/internals.pyc in replace(self, to_replace, value, inplace, filter, regex, convert, mgr)
605
606 # we can't process the value, but nothing to do
--> 607 if not mask.any():
608 return self if inplace else self.copy()
609
UnboundLocalError: local variable 'mask' referenced before assignment
|
UnboundLocalError
|
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location most satisfy the equation
``abs(index[loc] - key) <= tolerance``.
.. versionadded:: 0.17.0
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
if method is None:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if using pad, "
"backfill or nearest lookups"
)
key = _values_from_object(key)
return self._engine.get_loc(key)
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("get_loc requires scalar valued input")
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
|
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location most satisfy the equation
``abs(index[loc] - key) <= tolerance``.
.. versionadded:: 0.17.0
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
if method is None:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if using pad, "
"backfill or nearest lookups"
)
return self._engine.get_loc(_values_from_object(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("get_loc requires scalar valued input")
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
|
https://github.com/pandas-dev/pandas/issues/11652
|
series length = 5
series length = 999999
series length = 1000000
Traceback (most recent call last):
File "<stdin>", line 9, in <module>
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/indexing.py", line 114, in __setitem__
indexer = self._get_setitem_indexer(key)
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/indexing.py", line 109, in _get_setitem_indexer
return self._convert_to_indexer(key, is_setter=True)
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/indexing.py", line 1042, in _convert_to_indexer
return labels.get_loc(obj)
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/index.py", line 1692, in get_loc
return self._engine.get_loc(_values_from_object(key))
File "pandas/index.pyx", line 137, in pandas.index.IndexEngine.get_loc (pandas/index.c:3979)
File "pandas/index.pyx", line 145, in pandas.index.IndexEngine.get_loc (pandas/index.c:3680)
File "pandas/index.pyx", line 464, in pandas.index._bin_search (pandas/index.c:9124)
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex):
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple) and not self.ndim < len(key):
return self._convert_tuple(key, is_setter=True)
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError:
raise IndexingError(key)
|
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex):
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple) and not self.ndim < len(key):
return self._convert_tuple(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError:
raise IndexingError(key)
|
https://github.com/pandas-dev/pandas/issues/11652
|
series length = 5
series length = 999999
series length = 1000000
Traceback (most recent call last):
File "<stdin>", line 9, in <module>
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/indexing.py", line 114, in __setitem__
indexer = self._get_setitem_indexer(key)
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/indexing.py", line 109, in _get_setitem_indexer
return self._convert_to_indexer(key, is_setter=True)
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/indexing.py", line 1042, in _convert_to_indexer
return labels.get_loc(obj)
File "/home/nekobon/.env_exp/lib/python3.4/site-packages/pandas/core/index.py", line 1692, in get_loc
return self._engine.get_loc(_values_from_object(key))
File "pandas/index.pyx", line 137, in pandas.index.IndexEngine.get_loc (pandas/index.c:3979)
File "pandas/index.pyx", line 145, in pandas.index.IndexEngine.get_loc (pandas/index.c:3680)
File "pandas/index.pyx", line 464, in pandas.index._bin_search (pandas/index.c:9124)
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(
axarr=all_axes,
nplots=len(all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
|
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(
axarr=all_axes,
nplots=len(all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
|
https://github.com/pandas-dev/pandas/issues/11556
|
fig, ax = plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
pd.Series(np.random.rand(100)).plot(ax=ax)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-80531efe48c4> in <module>()
8 fig, ax = plt.subplots()
9 inset = fig.add_axes([0.2, 0.2, 0.2, 0.2])
---> 10 data.plot(ax=ax)
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in __call__(self, kind, ax, figsize, use_index, title, grid, legend, style, logx, logy, loglog, xticks, yticks, xlim, ylim, rot, fontsize, colormap, table, yerr, xerr, label, secondary_y, **kwds)
3491 colormap=colormap, table=table, yerr=yerr,
3492 xerr=xerr, label=label, secondary_y=secondary_y,
-> 3493 **kwds)
3494 __call__.__doc__ = plot_series.__doc__
3495
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in plot_series(data, kind, ax, figsize, use_index, title, grid, legend, style, logx, logy, loglog, xticks, yticks, xlim, ylim, rot, fontsize, colormap, table, yerr, xerr, label, secondary_y, **kwds)
2581 yerr=yerr, xerr=xerr,
2582 label=label, secondary_y=secondary_y,
-> 2583 **kwds)
2584
2585
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in _plot(data, x, y, subplots, ax, kind, **kwds)
2378 plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
2379
-> 2380 plot_obj.generate()
2381 plot_obj.draw()
2382 return plot_obj.result
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in generate(self)
990 self._post_plot_logic_common(ax, self.data)
991 self._post_plot_logic(ax, self.data)
--> 992 self._adorn_subplots()
993
994 def _args_adjust(self):
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in _adorn_subplots(self)
1141 naxes=nrows * ncols, nrows=nrows,
1142 ncols=ncols, sharex=self.sharex,
-> 1143 sharey=self.sharey)
1144
1145 for ax in self.axes:
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
3378 layout = np.zeros((nrows+1,ncols+1), dtype=np.bool)
3379 for ax in axarr:
-> 3380 layout[ax.rowNum, ax.colNum] = ax.get_visible()
3381
3382 for ax in axarr:
AttributeError: 'Axes' object has no attribute 'rowNum'
|
AttributeError
|
def _get_axes_layout(self):
axes = self._get_subplots()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
|
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
|
https://github.com/pandas-dev/pandas/issues/11556
|
fig, ax = plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
pd.Series(np.random.rand(100)).plot(ax=ax)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-80531efe48c4> in <module>()
8 fig, ax = plt.subplots()
9 inset = fig.add_axes([0.2, 0.2, 0.2, 0.2])
---> 10 data.plot(ax=ax)
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in __call__(self, kind, ax, figsize, use_index, title, grid, legend, style, logx, logy, loglog, xticks, yticks, xlim, ylim, rot, fontsize, colormap, table, yerr, xerr, label, secondary_y, **kwds)
3491 colormap=colormap, table=table, yerr=yerr,
3492 xerr=xerr, label=label, secondary_y=secondary_y,
-> 3493 **kwds)
3494 __call__.__doc__ = plot_series.__doc__
3495
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in plot_series(data, kind, ax, figsize, use_index, title, grid, legend, style, logx, logy, loglog, xticks, yticks, xlim, ylim, rot, fontsize, colormap, table, yerr, xerr, label, secondary_y, **kwds)
2581 yerr=yerr, xerr=xerr,
2582 label=label, secondary_y=secondary_y,
-> 2583 **kwds)
2584
2585
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in _plot(data, x, y, subplots, ax, kind, **kwds)
2378 plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
2379
-> 2380 plot_obj.generate()
2381 plot_obj.draw()
2382 return plot_obj.result
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in generate(self)
990 self._post_plot_logic_common(ax, self.data)
991 self._post_plot_logic(ax, self.data)
--> 992 self._adorn_subplots()
993
994 def _args_adjust(self):
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in _adorn_subplots(self)
1141 naxes=nrows * ncols, nrows=nrows,
1142 ncols=ncols, sharex=self.sharex,
-> 1143 sharey=self.sharey)
1144
1145 for ax in self.axes:
/Users/jakevdp/anaconda/envs/python3.4/lib/python3.4/site-packages/pandas/tools/plotting.py in _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
3378 layout = np.zeros((nrows+1,ncols+1), dtype=np.bool)
3379 for ax in axarr:
-> 3380 layout[ax.rowNum, ax.colNum] = ax.get_visible()
3381
3382 for ax in axarr:
AttributeError: 'Axes' object has no attribute 'rowNum'
|
AttributeError
|
def _join_monotonic(self, other, how="left", return_indexers=False):
if self.equals(other):
ret_index = other if how == "right" else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self.values
ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == "left":
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == "right":
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == "inner":
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == "outer":
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == "left":
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == "right":
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == "inner":
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == "outer":
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, lidx, ridx
else:
return join_index
|
def _join_monotonic(self, other, how="left", return_indexers=False):
if self.equals(other):
ret_index = other if how == "right" else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self.values
ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == "left":
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == "right":
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == "inner":
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == "outer":
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == "left":
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == "right":
join_index, ridx, lidx = self._left_indexer(other, self)
elif how == "inner":
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == "outer":
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, lidx, ridx
else:
return join_index
|
https://github.com/pandas-dev/pandas/issues/11519
|
import pandas as pd
import numpy as np
df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B' : ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C' : np.random.randn(8),
'D' : np.random.randn(8)})
s = pd.Series(np.repeat(np.arange(8),2), index=np.repeat(np.arange(8),2), name='TEST')
In []: s.head()
Out[]:
0 0
0 0
1 1
1 1
2 2
dtype: int32
# The following all work as expected
df.join(s, how='inner')
df.join(s, how='outer')
df.join(s, how='left')
# Right Joins Type Error
df.join(s, how='right')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-80-26e8bf54fd8f> in <module>()
----> 1 df.join(s, how='right')
D:\Python27\lib\site-packages\pandas\core\frame.pyc in join(self, other, on, how, lsuffix, rsuffix, sort)
4218 # For SparseDataFrame's benefit
4219 return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
-> 4220 rsuffix=rsuffix, sort=sort)
4221
4222 def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
D:\Python27\lib\site-packages\pandas\core\frame.pyc in _join_compat(self, other, on, how, lsuffix, rsuffix, sort)
4232 return merge(self, other, left_on=on, how=how,
4233 left_index=on is None, right_index=True,
-> 4234 suffixes=(lsuffix, rsuffix), sort=sort)
4235 else:
4236 if on is not None:
D:\Python27\lib\site-packages\pandas\tools\merge.pyc in merge(left, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator)
33 right_index=right_index, sort=sort, suffixes=suffixes,
34 copy=copy, indicator=indicator)
---> 35 return op.get_result()
36 if __debug__:
37 merge.__doc__ = _merge_doc % '\nleft : DataFrame'
D:\Python27\lib\site-packages\pandas\tools\merge.pyc in get_result(self)
194 self.left, self.right = self._indicator_pre_merge(self.left, self.right)
195
--> 196 join_index, left_indexer, right_indexer = self._get_join_info()
197
198 ldata, rdata = self.left._data, self.right._data
D:\Python27\lib\site-packages\pandas\tools\merge.pyc in _get_join_info(self)
309 if self.left_index and self.right_index:
310 join_index, left_indexer, right_indexer = \
--> 311 left_ax.join(right_ax, how=self.how, return_indexers=True)
312 elif self.right_index and self.how == 'left':
313 join_index, left_indexer, right_indexer = \
D:\Python27\lib\site-packages\pandas\core\index.pyc in join(self, other, how, level, return_indexers)
2212 if self.is_monotonic and other.is_monotonic:
2213 return self._join_monotonic(other, how=how,
-> 2214 return_indexers=return_indexers)
2215 else:
2216 return self._join_non_unique(other, how=how,
D:\Python27\lib\site-packages\pandas\core\index.pyc in _join_monotonic(self, other, how, return_indexers)
2463 join_index, lidx, ridx = self._left_indexer(sv, ov)
2464 elif how == 'right':
-> 2465 join_index, ridx, lidx = self._left_indexer(other, self)
2466 elif how == 'inner':
2467 join_index, lidx, ridx = self._inner_indexer(sv, ov)
TypeError: Argument 'left' has incorrect type (expected numpy.ndarray, got Int64Index)
|
TypeError
|
def _add_margins(table, data, values, rows, cols, aggfunc):
grand_margin = _compute_grand_margin(data, values, aggfunc)
# categorical index or columns will fail below when 'All' is added
# here we'll convert all categorical indices to object
def convert_categorical(ind):
_convert = lambda ind: (
ind.astype("object") if ind.dtype.name == "category" else ind
)
if isinstance(ind, MultiIndex):
return ind.set_levels([_convert(lev) for lev in ind.levels])
else:
return _convert(ind)
table.index = convert_categorical(table.index)
if hasattr(table, "columns"):
table.columns = convert_categorical(table.columns)
if not values and isinstance(table, Series):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
row_key = ("All",) + ("",) * (len(rows) - 1) if len(rows) > 1 else "All"
return table.append(Series({row_key: grand_margin["All"]}))
if values:
marginal_result_set = _generate_marginal_results(
table, data, values, rows, cols, aggfunc, grand_margin
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
marginal_result_set = _generate_marginal_results_without_values(
table, data, rows, cols, aggfunc
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
key = ("All",) + ("",) * (len(rows) - 1) if len(rows) > 1 else "All"
row_margin = row_margin.reindex(result.columns)
# populate grand margin
for k in margin_keys:
if isinstance(k, compat.string_types):
row_margin[k] = grand_margin[k]
else:
row_margin[k] = grand_margin[k[0]]
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
result = result.append(margin_dummy)
result.index.names = row_names
return result
|
def _add_margins(table, data, values, rows, cols, aggfunc):
grand_margin = _compute_grand_margin(data, values, aggfunc)
if not values and isinstance(table, Series):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
row_key = ("All",) + ("",) * (len(rows) - 1) if len(rows) > 1 else "All"
return table.append(Series({row_key: grand_margin["All"]}))
if values:
marginal_result_set = _generate_marginal_results(
table, data, values, rows, cols, aggfunc, grand_margin
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
marginal_result_set = _generate_marginal_results_without_values(
table, data, rows, cols, aggfunc
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
key = ("All",) + ("",) * (len(rows) - 1) if len(rows) > 1 else "All"
row_margin = row_margin.reindex(result.columns)
# populate grand margin
for k in margin_keys:
if isinstance(k, compat.string_types):
row_margin[k] = grand_margin[k]
else:
row_margin[k] = grand_margin[k[0]]
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
result = result.append(margin_dummy)
result.index.names = row_names
return result
|
https://github.com/pandas-dev/pandas/issues/10989
|
In [27]: data.y = data.y.astype('category')
In [28]: data.z = data.z.astype('category')
In [29]: data.pivot_table('x', 'y', 'z')
Out[29]:
z 0 1 2
y
0 24.0 25.0 24.5
1 73.5 74.5 74.0
In [32]: data.pivot_table('x', 'y', 'z', margins=True)
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/internals.py in set(self, item, value, check)
2979 try:
-> 2980 loc = self.items.get_loc(item)
2981 except KeyError:
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/index.py in get_loc(self, key, method)
5072 key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
-> 5073 return self._engine.get_loc(key)
5074
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3824)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3704)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12280)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12231)()
KeyError: ('x', 'All')
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-32-7436e0e1c9bb> in <module>()
----> 1 data.pivot_table('x', 'y', 'z', margins=True)
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/tools/pivot.py in pivot_table(data, values, index, columns, aggfunc, fill_value, margins, dropna)
141 if margins:
142 table = _add_margins(table, data, values, rows=index,
--> 143 cols=columns, aggfunc=aggfunc)
144
145 # discard the top level
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/tools/pivot.py in _add_margins(table, data, values, rows, cols, aggfunc)
167
168 if values:
--> 169 marginal_result_set = _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin)
170 if not isinstance(marginal_result_set, tuple):
171 return marginal_result_set
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/tools/pivot.py in _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin)
236 # we are going to mutate this, so need to copy!
237 piece = piece.copy()
--> 238 piece[all_key] = margin[key]
239
240 table_pieces.append(piece)
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/frame.py in __setitem__(self, key, value)
2125 else:
2126 # set column
-> 2127 self._set_item(key, value)
2128
2129 def _setitem_slice(self, key, value):
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/frame.py in _set_item(self, key, value)
2203 self._ensure_valid_index(value)
2204 value = self._sanitize_column(key, value)
-> 2205 NDFrame._set_item(self, key, value)
2206
2207 # check if we are modifying a copy
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/generic.py in _set_item(self, key, value)
1194
1195 def _set_item(self, key, value):
-> 1196 self._data.set(key, value)
1197 self._clear_item_cache()
1198
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/internals.py in set(self, item, value, check)
2981 except KeyError:
2982 # This item wasn't present, just insert at end
-> 2983 self.insert(len(self.items), item, value)
2984 return
2985
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/internals.py in insert(self, loc, item, value, allow_duplicates)
3100 self._blknos = np.insert(self._blknos, loc, len(self.blocks))
3101
-> 3102 self.axes[0] = self.items.insert(loc, item)
3103
3104 self.blocks += (block,)
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/index.py in insert(self, loc, item)
5583 # other labels
5584 lev_loc = len(level)
-> 5585 level = level.insert(lev_loc, k)
5586 else:
5587 lev_loc = level.get_loc(k)
/Users/jakevdp/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/index.py in insert(self, loc, item)
3217 code = self.categories.get_indexer([item])
3218 if (code == -1):
-> 3219 raise TypeError("cannot insert an item into a CategoricalIndex that is not already an existing category")
3220
3221 codes = self.codes
TypeError: cannot insert an item into a CategoricalIndex that is not already an existing category
|
KeyError
|
def replace(self, to_replace, value, inplace=False, filter=None, regex=False):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
values, to_replace = self._try_coerce_args(self.values, to_replace)
mask = com.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
|
def replace(self, to_replace, value, inplace=False, filter=None, regex=False):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
|
https://github.com/pandas-dev/pandas/issues/11326
|
df = extended_df
Traceback (most recent call last):
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 3066, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-14-3f216cf6821e>", line 1, in <module>
df = extended_df
NameError: name 'extended_df' is not defined
df = expanded
df
Out[16]:
pricing_date exchange_id close
pricing_date_ts
1445025600 2015-10-13 20:00:00+00:00 NaN NaN
1444939200 2015-10-13 20:00:00+00:00 NaN NaN
1444852800 2015-10-13 20:00:00+00:00 0 NaN
1444766400 2015-10-13 20:00:00+00:00 6 0.545
1444680000 2015-10-12 20:00:00+00:00 6 0.570
1444420800 2015-10-09 20:00:00+00:00 6 0.580
1444334400 2015-10-08 20:00:00+00:00 6 0.560
1444248000 2015-10-07 20:00:00+00:00 6 0.580
1444161600 2015-10-06 20:00:00+00:00 6 0.620
1444075200 2015-10-05 20:00:00+00:00 6 0.480
1443816000 2015-10-13 20:00:00+00:00 NaN NaN
df.dtypes
Out[17]:
pricing_date datetime64[ns, UTC]
exchange_id float64
close float64
dtype: object
df.replace(0, np.NaN)
Traceback (most recent call last):
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 3066, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-18-cc26b9ec3ff7>", line 1, in <module>
df.replace(0, np.NaN)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/generic.py", line 2996, in replace
inplace=inplace, regex=regex)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/internals.py", line 2761, in replace
return self.apply('replace', **kwargs)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/internals.py", line 2710, in apply
applied = getattr(b, f)(**kwargs)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/internals.py", line 560, in replace
mask = com.mask_missing(self.values, to_replace)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/common.py", line 449, in mask_missing
mask = arr == x
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/tseries/index.py", line 84, in wrapper
other = _ensure_datetime64(other)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/tseries/index.py", line 111, in _ensure_datetime64
raise TypeError('%s type object %s' % (type(other), str(other)))
TypeError: <type 'int'> type object 0
|
NameError
|
def _try_coerce_args(self, values, other):
"""localize and return i8 for the values"""
values = values.tz_localize(None).asi8
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
elif isinstance(other, (np.datetime64, datetime)):
other = lib.Timestamp(other)
if not getattr(other, "tz", None):
raise ValueError("incompatible or non tz-aware value")
other = other.value
return values, other
|
def _try_coerce_args(self, values, other):
"""localize and return i8 for the values"""
values = values.tz_localize(None).asi8
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
else:
other = lib.Timestamp(other)
if not getattr(other, "tz", None):
raise ValueError("incompatible or non tz-aware value")
other = other.value
return values, other
|
https://github.com/pandas-dev/pandas/issues/11326
|
df = extended_df
Traceback (most recent call last):
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 3066, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-14-3f216cf6821e>", line 1, in <module>
df = extended_df
NameError: name 'extended_df' is not defined
df = expanded
df
Out[16]:
pricing_date exchange_id close
pricing_date_ts
1445025600 2015-10-13 20:00:00+00:00 NaN NaN
1444939200 2015-10-13 20:00:00+00:00 NaN NaN
1444852800 2015-10-13 20:00:00+00:00 0 NaN
1444766400 2015-10-13 20:00:00+00:00 6 0.545
1444680000 2015-10-12 20:00:00+00:00 6 0.570
1444420800 2015-10-09 20:00:00+00:00 6 0.580
1444334400 2015-10-08 20:00:00+00:00 6 0.560
1444248000 2015-10-07 20:00:00+00:00 6 0.580
1444161600 2015-10-06 20:00:00+00:00 6 0.620
1444075200 2015-10-05 20:00:00+00:00 6 0.480
1443816000 2015-10-13 20:00:00+00:00 NaN NaN
df.dtypes
Out[17]:
pricing_date datetime64[ns, UTC]
exchange_id float64
close float64
dtype: object
df.replace(0, np.NaN)
Traceback (most recent call last):
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 3066, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-18-cc26b9ec3ff7>", line 1, in <module>
df.replace(0, np.NaN)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/generic.py", line 2996, in replace
inplace=inplace, regex=regex)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/internals.py", line 2761, in replace
return self.apply('replace', **kwargs)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/internals.py", line 2710, in apply
applied = getattr(b, f)(**kwargs)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/internals.py", line 560, in replace
mask = com.mask_missing(self.values, to_replace)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/core/common.py", line 449, in mask_missing
mask = arr == x
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/tseries/index.py", line 84, in wrapper
other = _ensure_datetime64(other)
File "/Users/josh/anaconda/envs/Openfolio/lib/python2.7/site-packages/pandas/tseries/index.py", line 111, in _ensure_datetime64
raise TypeError('%s type object %s' % (type(other), str(other)))
TypeError: <type 'int'> type object 0
|
NameError
|
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
ids = com._ensure_platform_int(ids)
out = np.bincount(ids[ids != -1], minlength=ngroup)
return Series(out, index=self.result_index, dtype="int64")
|
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
out = np.bincount(ids[ids != -1], minlength=ngroup)
return Series(out, index=self.result_index)
|
https://github.com/pandas-dev/pandas/issues/11189
|
In [1]: df = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']})
In [2]: df.groupby("grade").size()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-d8e387418f9d> in <module>()
----> 1 df.groupby("grade").size()
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
818
819 """
--> 820 return self.grouper.size()
821
822 sum = _groupby_function('sum', 'add', np.sum)
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
1380 """
1381 ids, _, ngroup = self.group_info
-> 1382 out = np.bincount(ids[ids != -1], minlength=ngroup)
1383 return Series(out, index=self.result_index)
1384
TypeError: Cannot cast array data from dtype('int64') to dtype('int32') according to the rule 'safe'
In [4]: pd.__version__
Out[4]: '0.17.0rc1+108.g3fb802a'
|
TypeError
|
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = com._ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return (
comp_ids.astype("int64", copy=False),
obs_group_ids.astype("int64", copy=False),
ngroups,
)
|
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups, dtype="int64")
rep = np.diff(np.r_[0, self.bins])
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups, dtype="int64"), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups, dtype="int64")], rep)
return comp_ids, obs_group_ids, ngroups
|
https://github.com/pandas-dev/pandas/issues/11189
|
In [1]: df = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']})
In [2]: df.groupby("grade").size()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-d8e387418f9d> in <module>()
----> 1 df.groupby("grade").size()
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
818
819 """
--> 820 return self.grouper.size()
821
822 sum = _groupby_function('sum', 'add', np.sum)
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
1380 """
1381 ids, _, ngroup = self.group_info
-> 1382 out = np.bincount(ids[ids != -1], minlength=ngroup)
1383 return Series(out, index=self.result_index)
1384
TypeError: Cannot cast array data from dtype('int64') to dtype('int32') according to the rule 'safe'
In [4]: pd.__version__
Out[4]: '0.17.0rc1+108.g3fb802a'
|
TypeError
|
def nunique(self, dropna=True):
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, "val.dtype must be object, got %s" % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
isnull = lambda a: a == -1
else:
isnull = com.isnull
ids, val = ids[sorter], val[sorter]
# group boundries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
return Series(
out if ids[0] != -1 else out[1:],
index=self.grouper.result_index,
name=self.name,
)
|
def nunique(self, dropna=True):
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, "val.dtype must be object, got %s" % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
isnull = lambda a: a == -1
else:
isnull = com.isnull
ids, val = ids[sorter], val[sorter]
# group boundries are where group ids change
# unique observations are where sorted values change
idx = com._ensure_int64(np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]])
inc = com._ensure_int64(np.r_[1, val[1:] != val[:-1]])
# 1st item of each group is a new unique observation
mask = isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx)
return Series(
out if ids[0] != -1 else out[1:],
index=self.grouper.result_index,
name=self.name,
)
|
https://github.com/pandas-dev/pandas/issues/11189
|
In [1]: df = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']})
In [2]: df.groupby("grade").size()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-d8e387418f9d> in <module>()
----> 1 df.groupby("grade").size()
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
818
819 """
--> 820 return self.grouper.size()
821
822 sum = _groupby_function('sum', 'add', np.sum)
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
1380 """
1381 ids, _, ngroup = self.group_info
-> 1382 out = np.bincount(ids[ids != -1], minlength=ngroup)
1383 return Series(out, index=self.result_index)
1384
TypeError: Cannot cast array data from dtype('int64') to dtype('int32') according to the rule 'safe'
In [4]: pd.__version__
Out[4]: '0.17.0rc1+108.g3fb802a'
|
TypeError
|
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
from functools import partial
from pandas.tools.tile import cut
from pandas.tools.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algos.factorize(val, sort=True)
else:
cat, bins = cut(val, bins, retbins=True)
# bins[:-1] for backward compat;
# o.w. cat.categories could be better
lab, lev, dropna = cat.codes, bins[:-1], False
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
inc = np.r_[True, lab[1:] != lab[:-1]]
inc[idx] = True # group boundries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self.name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype("float")
acc = rep(np.diff(np.r_[idx, len(ids)]))
out /= acc[mask] if dropna else acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(
levels=levels, labels=labels, names=names, verify_integrity=False
)
if com.is_integer_dtype(out):
out = com._ensure_int64(out)
return Series(out, index=mi)
# for compat. with algos.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names, verify_integrity=False)
if com.is_integer_dtype(out):
out = com._ensure_int64(out)
return Series(out, index=mi)
|
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
from functools import partial
from pandas.tools.tile import cut
from pandas.tools.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algos.factorize(val, sort=True)
else:
cat, bins = cut(val, bins, retbins=True)
# bins[:-1] for backward compat;
# o.w. cat.categories could be better
lab, lev, dropna = cat.codes, bins[:-1], False
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
inc = np.r_[True, lab[1:] != lab[:-1]]
inc[idx] = True # group boundries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self.name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype("float")
acc = rep(np.diff(np.r_[idx, len(ids)]))
out /= acc[mask] if dropna else acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(
levels=levels, labels=labels, names=names, verify_integrity=False
)
return Series(out, index=mi)
# for compat. with algos.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names, verify_integrity=False)
return Series(out, index=mi)
|
https://github.com/pandas-dev/pandas/issues/11189
|
In [1]: df = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']})
In [2]: df.groupby("grade").size()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-d8e387418f9d> in <module>()
----> 1 df.groupby("grade").size()
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
818
819 """
--> 820 return self.grouper.size()
821
822 sum = _groupby_function('sum', 'add', np.sum)
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
1380 """
1381 ids, _, ngroup = self.group_info
-> 1382 out = np.bincount(ids[ids != -1], minlength=ngroup)
1383 return Series(out, index=self.result_index)
1384
TypeError: Cannot cast array data from dtype('int64') to dtype('int32') according to the rule 'safe'
In [4]: pd.__version__
Out[4]: '0.17.0rc1+108.g3fb802a'
|
TypeError
|
def count(self):
"""Compute count of group, excluding missing values"""
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isnull(val)
ids = com._ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups) if ngroups != 0 else []
return Series(out, index=self.grouper.result_index, name=self.name, dtype="int64")
|
def count(self):
"""Compute count of group, excluding missing values"""
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isnull(val)
out = np.bincount(ids[mask], minlength=ngroups) if ngroups != 0 else []
return Series(out, index=self.grouper.result_index, name=self.name)
|
https://github.com/pandas-dev/pandas/issues/11189
|
In [1]: df = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']})
In [2]: df.groupby("grade").size()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-d8e387418f9d> in <module>()
----> 1 df.groupby("grade").size()
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
818
819 """
--> 820 return self.grouper.size()
821
822 sum = _groupby_function('sum', 'add', np.sum)
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
1380 """
1381 ids, _, ngroup = self.group_info
-> 1382 out = np.bincount(ids[ids != -1], minlength=ngroup)
1383 return Series(out, index=self.result_index)
1384
TypeError: Cannot cast array data from dtype('int64') to dtype('int32') according to the rule 'safe'
In [4]: pd.__version__
Out[4]: '0.17.0rc1+108.g3fb802a'
|
TypeError
|
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series
Returns
-------
nobs : int or Series (if level specified)
"""
from pandas.core.index import _get_na_value
if level is None:
return notnull(_values_from_object(self)).sum()
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
lev = self.index.levels[level]
lab = np.array(self.index.labels[level], subok=False, copy=True)
mask = lab == -1
if mask.any():
lab[mask] = cnt = len(lev)
lev = lev.insert(cnt, _get_na_value(lev.dtype.type))
out = np.bincount(lab[notnull(self.values)], minlength=len(lev))
return self._constructor(out, index=lev, dtype="int64").__finalize__(self)
|
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series
Returns
-------
nobs : int or Series (if level specified)
"""
from pandas.core.index import _get_na_value
if level is None:
return notnull(_values_from_object(self)).sum()
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
lev = self.index.levels[level]
lab = np.array(self.index.labels[level], subok=False, copy=True)
mask = lab == -1
if mask.any():
lab[mask] = cnt = len(lev)
lev = lev.insert(cnt, _get_na_value(lev.dtype.type))
out = np.bincount(lab[notnull(self.values)], minlength=len(lev))
return self._constructor(out, index=lev).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/11189
|
In [1]: df = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']})
In [2]: df.groupby("grade").size()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-d8e387418f9d> in <module>()
----> 1 df.groupby("grade").size()
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
818
819 """
--> 820 return self.grouper.size()
821
822 sum = _groupby_function('sum', 'add', np.sum)
/home/joris/scipy/pandas/pandas/core/groupby.pyc in size(self)
1380 """
1381 ids, _, ngroup = self.group_info
-> 1382 out = np.bincount(ids[ids != -1], minlength=ngroup)
1383 return Series(out, index=self.result_index)
1384
TypeError: Cannot cast array data from dtype('int64') to dtype('int32') according to the rule 'safe'
In [4]: pd.__version__
Out[4]: '0.17.0rc1+108.g3fb802a'
|
TypeError
|
def convert_objects(
self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True
):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : boolean, default False
If True, convert to date where possible.
numeric : boolean, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : boolean, default False
If True, convert to timedelta where possible.
coerce : boolean, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT)
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
# Deprecation code to handle usage change
issue_warning = False
if datetime == "coerce":
datetime = coerce = True
numeric = timedelta = False
issue_warning = True
elif numeric == "coerce":
numeric = coerce = True
datetime = timedelta = False
issue_warning = True
elif timedelta == "coerce":
timedelta = coerce = True
datetime = numeric = False
issue_warning = True
if issue_warning:
warnings.warn(
"The use of 'coerce' as an input is deprecated. Instead set coerce=True.",
FutureWarning,
)
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
|
def convert_objects(
self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True
):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : boolean, default False
If True, convert to date where possible.
numeric : boolean, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : boolean, default False
If True, convert to timedelta where possible.
coerce : boolean, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT)
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/10601
|
In [1]: from datetime import datetime
In [2]: s = pd.Series([datetime(2001,1,1,0,0), 'foo', 1.0, 1,
...: pd.Timestamp('20010104'), '20010105'], dtype='O')
In [5]: s.convert_objects(convert_dates='coerce')
c:\users\vdbosscj\scipy\pandas-joris\pandas\util\decorators.py:81: FutureWarning
: the 'convert_dates' keyword is deprecated, use 'datetime' instead
warnings.warn(msg, FutureWarning)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-b962b638ac86> in <module>()
----> 1 s.convert_objects(convert_dates='coerce')
c:\users\vdbosscj\scipy\pandas-joris\pandas\util\decorators.pyc in wrapper(*args
, **kwargs)
86 else:
87 kwargs[new_arg_name] = new_arg_value
---> 88 return func(*args, **kwargs)
89 return wrapper
90 return _deprecate_kwarg
c:\users\vdbosscj\scipy\pandas-joris\pandas\util\decorators.pyc in wrapper(*args
, **kwargs)
86 else:
87 kwargs[new_arg_name] = new_arg_value
---> 88 return func(*args, **kwargs)
89 return wrapper
90 return _deprecate_kwarg
c:\users\vdbosscj\scipy\pandas-joris\pandas\util\decorators.pyc in wrapper(*args
, **kwargs)
86 else:
87 kwargs[new_arg_name] = new_arg_value
---> 88 return func(*args, **kwargs)
89 return wrapper
90 return _deprecate_kwarg
c:\users\vdbosscj\scipy\pandas-joris\pandas\core\generic.py in convert_objects(s
elf, datetime, numeric, timedelta, coerce, copy)
2468 timedelta=timedelta,
2469 coerce=coerce,
-> 2470 copy=copy)).__finalize__(self)
2471
2472 #-------------------------------------------------------------------
---
c:\users\vdbosscj\scipy\pandas-joris\pandas\core\internals.py in convert(self, *
*kwargs)
3459 """ convert the whole block as one """
3460 kwargs['by_item'] = False
-> 3461 return self.apply('convert', **kwargs)
3462
3463 @property
c:\users\vdbosscj\scipy\pandas-joris\pandas\core\internals.py in apply(self, f,
axes, filter, do_integrity_check, **kwargs)
2467 copy=align_copy)
2468
-> 2469 applied = getattr(b, f)(**kwargs)
2470
2471 if isinstance(applied, list):
c:\users\vdbosscj\scipy\pandas-joris\pandas\core\internals.py in convert(self, d
atetime, numeric, timedelta, coerce, copy, by_item)
1493 timedelta=timedelta,
1494 coerce=coerce,
-> 1495 copy=copy
1496 ).reshape(self.values.shape)
1497 blocks.append(make_block(values,
c:\users\vdbosscj\scipy\pandas-joris\pandas\core\common.py in _possibly_convert_
objects(values, datetime, numeric, timedelta, coerce, copy)
1897 """ if we have an object dtype, try to coerce dates and/or numbers "
""
1898
-> 1899 conversion_count = sum((datetime, numeric, timedelta))
1900 if conversion_count == 0:
1901 import warnings
TypeError: unsupported operand type(s) for +: 'int' and 'str'
|
TypeError
|
def _new_DatetimeIndex(cls, d):
"""This is called upon unpickling, rather than the default which doesn't have arguments
and breaks __new__"""
# data are already in UTC
# so need to localize
tz = d.pop("tz", None)
result = cls.__new__(cls, verify_integrity=False, **d)
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
|
def _new_DatetimeIndex(cls, d):
"""This is called upon unpickling, rather than the default which doesn't have arguments
and breaks __new__"""
# data are already in UTC
# so need to localize
tz = d.pop("tz", None)
result = cls.__new__(cls, **d)
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
|
https://github.com/pandas-dev/pandas/issues/11002
|
import pandas as pd
df4 = pd.DataFrame(index=P.date_range('1750-1-1', '2050-1-1', freq='7D')
pd.to_pickle(df4, '7d.test')
pd.read_pickle('7d.test')
In [84]: P.read_pickle('7d.test')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-84-0108eecfbea7> in <module>()
----> 1 P.read_pickle('7d.test')
C:\Users\LV\Miniconda\lib\site-packages\pandas\io\pickle.pyc in read_pickle(path)
58
59 try:
---> 60 return try_read(path)
61 except:
62 if PY3:
C:\Users\LV\Miniconda\lib\site-packages\pandas\io\pickle.pyc in try_read(path, encoding)
55 except:
56 with open(path, 'rb') as fh:
---> 57 return pc.load(fh, encoding=encoding, compat=True)
58
59 try:
C:\Users\LV\Miniconda\lib\site-packages\pandas\compat\pickle_compat.pyc in load(fh, encoding, compat, is_verbose)
114 up.is_verbose = is_verbose
115
--> 116 return up.load()
117 except:
118 raise
C:\Users\LV\Miniconda\lib\pickle.pyc in load(self)
856 while 1:
857 key = read(1)
--> 858 dispatch[key](self)
859 except _Stop, stopinst:
860 return stopinst.value
C:\Users\LV\Miniconda\lib\site-packages\pandas\compat\pickle_compat.pyc in load_reduce(self)
18
19 try:
---> 20 stack[-1] = func(*args)
21 return
22 except Exception as e:
C:\Users\LV\Miniconda\lib\site-packages\pandas\tseries\index.pyc in _new_DatetimeIndex(cls, d)
113 # data are already in UTC
114 tz = d.pop('tz',None)
--> 115 result = cls.__new__(cls, **d)
116 result.tz = tz
117 return result
C:\Users\LV\Miniconda\lib\site-packages\pandas\util\decorators.pyc in wrapper(*args, **kwargs)
86 else:
87 kwargs[new_arg_name] = new_arg_value
---> 88 return func(*args, **kwargs)
89 return wrapper
90 return _deprecate_kwarg
C:\Users\LV\Miniconda\lib\site-packages\pandas\tseries\index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, closed, ambiguous, **kwargs)
334 if not np.array_equal(subarr.asi8, on_freq.asi8):
335 raise ValueError('Inferred frequency {0} from passed dates does not'
--> 336 'conform to passed frequency {1}'.format(inferred, freq.freqstr))
337
338 if freq_infer:
ValueError: Inferred frequency W-THU from passed dates does notconform to passed frequency 7D
|
ValueError
|
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
):
"""
Write DataFrame to a excel sheet
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow :
upper left cell row to dump data frame
startcol :
upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding: string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
For compatibility with to_csv, to_excel serializes lists and dicts to
strings before writing.
"""
from pandas.io.excel import ExcelWriter
if self.columns.nlevels > 1:
raise NotImplementedError(
"Writing as Excel with a MultiIndex is not yet implemented."
)
need_save = False
if encoding == None:
encoding = "ascii"
if isinstance(excel_writer, compat.string_types):
excel_writer = ExcelWriter(excel_writer, engine=engine)
need_save = True
formatter = fmt.ExcelFormatter(
self,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(
formatted_cells, sheet_name, startrow=startrow, startcol=startcol
)
if need_save:
excel_writer.save()
|
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
):
"""
Write DataFrame to a excel sheet
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow :
upper left cell row to dump data frame
startcol :
upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding: string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
"""
from pandas.io.excel import ExcelWriter
if self.columns.nlevels > 1:
raise NotImplementedError(
"Writing as Excel with a MultiIndex is not yet implemented."
)
need_save = False
if encoding == None:
encoding = "ascii"
if isinstance(excel_writer, compat.string_types):
excel_writer = ExcelWriter(excel_writer, engine=engine)
need_save = True
formatter = fmt.ExcelFormatter(
self,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(
formatted_cells, sheet_name, startrow=startrow, startcol=startcol
)
if need_save:
excel_writer.save()
|
https://github.com/pandas-dev/pandas/issues/6403
|
Traceback (most recent call last):
File "/Users/myourshaw/lab/pypeline/python2/excel_example.py", line 10, in <module>
xl_file.parse('Sheet1')
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 208, in parse
**kwds)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 291, in _parse_excel
data[header] = _trim_excel_header(data[header])
IndexError: list index out of range
|
IndexError
|
def _parse_excel(
self,
sheetname=0,
header=0,
skiprows=None,
skip_footer=0,
index_col=None,
has_index_names=None,
parse_cols=None,
parse_dates=False,
date_parser=None,
na_values=None,
thousands=None,
chunksize=None,
convert_float=True,
verbose=False,
**kwds,
):
import xlrd
from xlrd import (
xldate,
XL_CELL_DATE,
XL_CELL_ERROR,
XL_CELL_BOOLEAN,
XL_CELL_NUMBER,
)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if (not epoch1904 and year == (1899, 12, 31)) or (
epoch1904 and year == (1904, 1, 1)
):
cell_contents = datetime.time(
cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond,
)
else:
# Use the xlrd <= 0.9.2 date handling.
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
if dt[0] < datetime.MINYEAR:
cell_contents = datetime.time(*dt[3:])
else:
cell_contents = datetime.datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less suprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
# handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(
zip(sheet.row_values(i), sheet.row_types(i))
):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value, typ))
data.append(row)
if sheet.nrows == 0:
return DataFrame()
if header is not None:
data[header] = _trim_excel_header(data[header])
parser = TextParser(
data,
header=header,
index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skip_footer=skip_footer,
chunksize=chunksize,
**kwds,
)
output[asheetname] = parser.read()
if ret_dict:
return output
else:
return output[asheetname]
|
def _parse_excel(
self,
sheetname=0,
header=0,
skiprows=None,
skip_footer=0,
index_col=None,
has_index_names=None,
parse_cols=None,
parse_dates=False,
date_parser=None,
na_values=None,
thousands=None,
chunksize=None,
convert_float=True,
verbose=False,
**kwds,
):
import xlrd
from xlrd import (
xldate,
XL_CELL_DATE,
XL_CELL_ERROR,
XL_CELL_BOOLEAN,
XL_CELL_NUMBER,
)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if (not epoch1904 and year == (1899, 12, 31)) or (
epoch1904 and year == (1904, 1, 1)
):
cell_contents = datetime.time(
cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond,
)
else:
# Use the xlrd <= 0.9.2 date handling.
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
if dt[0] < datetime.MINYEAR:
cell_contents = datetime.time(*dt[3:])
else:
cell_contents = datetime.datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less suprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
# handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(
zip(sheet.row_values(i), sheet.row_types(i))
):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value, typ))
data.append(row)
if header is not None:
data[header] = _trim_excel_header(data[header])
parser = TextParser(
data,
header=header,
index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skip_footer=skip_footer,
chunksize=chunksize,
**kwds,
)
output[asheetname] = parser.read()
if ret_dict:
return output
else:
return output[asheetname]
|
https://github.com/pandas-dev/pandas/issues/6403
|
Traceback (most recent call last):
File "/Users/myourshaw/lab/pypeline/python2/excel_example.py", line 10, in <module>
xl_file.parse('Sheet1')
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 208, in parse
**kwds)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 291, in _parse_excel
data[header] = _trim_excel_header(data[header])
IndexError: list index out of range
|
IndexError
|
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
if com.is_integer(val):
val = int(val)
elif com.is_float(val):
val = float(val)
elif com.is_bool(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
elif com.is_list_like(val):
val = str(val)
return val
|
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
if com.is_integer(val):
val = int(val)
elif com.is_float(val):
val = float(val)
elif com.is_bool(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
return val
|
https://github.com/pandas-dev/pandas/issues/6403
|
Traceback (most recent call last):
File "/Users/myourshaw/lab/pypeline/python2/excel_example.py", line 10, in <module>
xl_file.parse('Sheet1')
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 208, in parse
**kwds)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 291, in _parse_excel
data[header] = _trim_excel_header(data[header])
IndexError: list index out of range
|
IndexError
|
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if issubclass(cls, ExcelWriter):
if engine is None:
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1][1:]
else:
ext = "xlsx"
try:
engine = config.get_option("io.excel.%s.writer" % ext)
except KeyError:
error = ValueError("No engine for filetype: '%s'" % ext)
raise error
cls = get_writer(engine)
return object.__new__(cls)
|
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if cls == ExcelWriter:
if engine is None:
ext = os.path.splitext(path)[-1][1:]
try:
engine = config.get_option("io.excel.%s.writer" % ext)
except KeyError:
error = ValueError("No engine for filetype: '%s'" % ext)
raise error
cls = get_writer(engine)
return object.__new__(cls)
|
https://github.com/pandas-dev/pandas/issues/6403
|
Traceback (most recent call last):
File "/Users/myourshaw/lab/pypeline/python2/excel_example.py", line 10, in <module>
xl_file.parse('Sheet1')
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 208, in parse
**kwds)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 291, in _parse_excel
data[header] = _trim_excel_header(data[header])
IndexError: list index out of range
|
IndexError
|
def __init__(
self, path, engine=None, date_format=None, datetime_format=None, **engine_kwargs
):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
else:
ext = "xls" if engine == "xlwt" else "xlsx"
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = "YYYY-MM-DD"
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = "YYYY-MM-DD HH:MM:SS"
else:
self.datetime_format = datetime_format
|
def __init__(
self, path, engine=None, date_format=None, datetime_format=None, **engine_kwargs
):
# validate that this engine can handle the extension
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = "YYYY-MM-DD"
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = "YYYY-MM-DD HH:MM:SS"
else:
self.datetime_format = datetime_format
|
https://github.com/pandas-dev/pandas/issues/6403
|
Traceback (most recent call last):
File "/Users/myourshaw/lab/pypeline/python2/excel_example.py", line 10, in <module>
xl_file.parse('Sheet1')
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 208, in parse
**kwds)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 291, in _parse_excel
data[header] = _trim_excel_header(data[header])
IndexError: list index out of range
|
IndexError
|
def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs["engine"] = engine
super(_XlwtWriter, self).__init__(path, **engine_kwargs)
if encoding is None:
encoding = "ascii"
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
|
def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
super(_XlwtWriter, self).__init__(path, **engine_kwargs)
if encoding is None:
encoding = "ascii"
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
|
https://github.com/pandas-dev/pandas/issues/6403
|
Traceback (most recent call last):
File "/Users/myourshaw/lab/pypeline/python2/excel_example.py", line 10, in <module>
xl_file.parse('Sheet1')
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 208, in parse
**kwds)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 291, in _parse_excel
data[header] = _trim_excel_header(data[header])
IndexError: list index out of range
|
IndexError
|
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
val = _conv_value(cell.val)
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(
startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val,
style,
)
else:
wks.write(startrow + cell.row, startcol + cell.col, val, style)
|
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(
startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val,
style,
)
else:
wks.write(startrow + cell.row, startcol + cell.col, cell.val, style)
|
https://github.com/pandas-dev/pandas/issues/6403
|
Traceback (most recent call last):
File "/Users/myourshaw/lab/pypeline/python2/excel_example.py", line 10, in <module>
xl_file.parse('Sheet1')
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 208, in parse
**kwds)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/pandas/io/excel.py", line 291, in _parse_excel
data[header] = _trim_excel_header(data[header])
IndexError: list index out of range
|
IndexError
|
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
freq = None
if isinstance(item, (datetime, np.datetime64)):
zone = tslib.get_timezone(self.tz)
izone = tslib.get_timezone(getattr(item, "tzinfo", None))
if zone != izone:
raise ValueError("Passed item and index have different timezone")
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item, tz=self.tz)
try:
new_dates = np.concatenate(
(self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)
)
if self.tz is not None:
new_dates = tslib.tz_convert(new_dates, "UTC", self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError("cannot insert DatetimeIndex with incompatible label")
|
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
freq = None
if isinstance(item, (datetime, np.datetime64)):
zone = tslib.get_timezone(self.tz)
izone = tslib.get_timezone(getattr(item, "tzinfo", None))
if zone != izone:
raise ValueError("Passed item and index have different timezone")
# check freq can be preserved on edge cases
if self.freq is not None:
if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item, tz=self.tz)
try:
new_dates = np.concatenate(
(self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)
)
if self.tz is not None:
new_dates = tslib.tz_convert(new_dates, "UTC", self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError("cannot insert DatetimeIndex with incompatible label")
|
https://github.com/pandas-dev/pandas/issues/10193
|
INSTALLED VERSIONS
------------------
commit: None
python: 2.7.9.final.0
python-bits: 64
OS: Windows
OS-release: 7
machine: AMD64
processor: Intel64 Family 6 Model 26 Stepping 5, GenuineIntel
byteorder: little
LC_ALL: None
LANG: None
pandas: 0.16.1-46-g0aceb38
nose: 1.3.6
Cython: 0.22
numpy: 1.9.2
scipy: 0.14.0
statsmodels: 0.6.1
IPython: 3.1.0
sphinx: 1.3.1
patsy: 0.3.0
dateutil: 2.4.2
pytz: 2015.4
bottleneck: 0.8.0
tables: 3.1.1
numexpr: 2.3.1
matplotlib: 1.4.3
openpyxl: None
xlrd: 0.9.3
xlwt: None
xlsxwriter: 0.7.2
lxml: None
bs4: 4.3.2
html5lib: 0.999
httplib2: None
apiclient: None
sqlalchemy: 1.0.4
pymysql: None
psycopg2: None
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
c:\test_set_empty_series_with_freq.py in <module>()
11 ts2 = pd.TimeSeries(0, pd.date_range('2011-01-01', '2011-01-01'))[:0]
12
---> 13 ts2[pd.datetime(2012, 1, 1)] = 47
c:\python\envs\pandas-0.16.1\lib\site-packages\pandas\core\series.pyc in __setitem__(self, key, value)
687 # do the setitem
688 cacher_needs_updating = self._check_is_chained_assignment_possible()
--> 689 setitem(key, value)
690 if cacher_needs_updating:
691 self._maybe_update_cacher()
c:\python\envs\pandas-0.16.1\lib\site-packages\pandas\core\series.pyc in setitem(key, value)
660 pass
661 try:
--> 662 self.loc[key] = value
663 except:
664 print ""
c:\python\envs\pandas-0.16.1\lib\site-packages\pandas\core\indexing.pyc in __setitem__(self, key, value)
113 def __setitem__(self, key, value):
114 indexer = self._get_setitem_indexer(key)
--> 115 self._setitem_with_indexer(indexer, value)
116
117 def _has_valid_type(self, k, axis):
c:\python\envs\pandas-0.16.1\lib\site-packages\pandas\core\indexing.pyc in _setitem_with_indexer(self, indexer, value)
272 if self.ndim == 1:
273 index = self.obj.index
--> 274 new_index = index.insert(len(index),indexer)
275
276 # this preserves dtype of the value
c:\python\envs\pandas-0.16.1\lib\site-packages\pandas\tseries\index.pyc in insert(self, loc, item)
1523 # check freq can be preserved on edge cases
1524 if self.freq is not None:
-> 1525 if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
1526 freq = self.freq
1527 elif (loc == len(self)) and item - self.freq == self[-1]:
c:\python\envs\pandas-0.16.1\lib\site-packages\pandas\tseries\index.pyc in __getitem__(self, key)
1351 getitem = self._data.__getitem__
1352 if np.isscalar(key):
-> 1353 val = getitem(key)
1354 return Timestamp(val, offset=self.offset, tz=self.tz)
1355 else:
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.size and not com.is_bool_dtype(key.values):
raise TypeError("Must pass DataFrame with boolean values only")
self._check_inplace_setting(value)
self._check_setitem_copy()
self.where(-key, value, inplace=True)
|
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.dtype != np.bool_:
raise TypeError("Must pass DataFrame with boolean values only")
self._check_inplace_setting(value)
self._check_setitem_copy()
self.where(-key, value, inplace=True)
|
https://github.com/pandas-dev/pandas/issues/10126
|
import pandas as pd
df = pd.DataFrame()
df[df>0]
Traceback (most recent call last):
File "<ipython-input-3-efe84c9ebabc>", line 1, in <module>
df[df>0]
File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 1787, in __getitem__
return self._getitem_frame(key)
File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 1859, in _getitem_frame
raise ValueError('Must pass DataFrame with boolean values only')
ValueError: Must pass DataFrame with boolean values only
|
ValueError
|
def __init__(
self,
obj,
path_or_buf=None,
sep=",",
na_rep="",
float_format=None,
cols=None,
header=True,
index=True,
index_label=None,
mode="w",
nanRep=None,
encoding=None,
quoting=None,
line_terminator="\n",
chunksize=None,
engine=None,
tupleize_cols=False,
quotechar='"',
date_format=None,
doublequote=True,
escapechar=None,
):
self.engine = engine # remove for 0.13
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = path_or_buf
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
# GH3457
if not self.obj.columns.is_unique and engine == "python":
raise NotImplementedError(
"columns.is_unique == False not supported with engine='python'"
)
self.tupleize_cols = tupleize_cols
self.has_mi_columns = isinstance(obj.columns, MultiIndex) and not self.tupleize_cols
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(
na_rep=na_rep, float_format=float_format, date_format=date_format
)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
# and make sure sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(
na_rep=na_rep, float_format=float_format, date_format=date_format
)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if isinstance(obj.index, PeriodIndex):
self.data_index = obj.index.to_timestamp()
if isinstance(self.data_index, DatetimeIndex) and date_format is not None:
self.data_index = Index(
[x.strftime(date_format) if notnull(x) else "" for x in self.data_index]
)
self.nlevels = getattr(self.data_index, "nlevels", 1)
if not index:
self.nlevels = 0
|
def __init__(
self,
obj,
path_or_buf=None,
sep=",",
na_rep="",
float_format=None,
cols=None,
header=True,
index=True,
index_label=None,
mode="w",
nanRep=None,
encoding=None,
quoting=None,
line_terminator="\n",
chunksize=None,
engine=None,
tupleize_cols=False,
quotechar='"',
date_format=None,
doublequote=True,
escapechar=None,
):
self.engine = engine # remove for 0.13
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = path_or_buf
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
# GH3457
if not self.obj.columns.is_unique and engine == "python":
raise NotImplementedError(
"columns.is_unique == False not supported with engine='python'"
)
self.tupleize_cols = tupleize_cols
self.has_mi_columns = isinstance(obj.columns, MultiIndex) and not self.tupleize_cols
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(
na_rep=na_rep, float_format=float_format, date_format=date_format
)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
# and make sure sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(
na_rep=na_rep, float_format=float_format, date_format=date_format
)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 / (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if isinstance(obj.index, PeriodIndex):
self.data_index = obj.index.to_timestamp()
if isinstance(self.data_index, DatetimeIndex) and date_format is not None:
self.data_index = Index(
[x.strftime(date_format) if notnull(x) else "" for x in self.data_index]
)
self.nlevels = getattr(self.data_index, "nlevels", 1)
if not index:
self.nlevels = 0
|
https://github.com/pandas-dev/pandas/issues/8621
|
Python 3.4.2 (default, Oct 8 2014, 13:44:52)
[GCC 4.9.1 20140903 (prerelease)] on linux
Type "help", "copyright", "credits" or "license" for more information.
import pandas as pd
pd.show_versions()
INSTALLED VERSIONS
------------------
commit: None
python: 3.4.2.final.0
python-bits: 64
OS: Linux
OS-release: 3.14.22-1-lts
machine: x86_64
processor:
byteorder: little
LC_ALL: None
LANG: en_AU.UTF-8
pandas: 0.15.0-16-g7012d71
nose: 1.3.4
Cython: 0.21.1
numpy: 1.9.0
scipy: 0.14.0
statsmodels: None
IPython: 2.3.0
sphinx: 1.2.3
patsy: 0.3.0
dateutil: 2.2
pytz: 2014.7
bottleneck: 0.8.0
tables: 3.1.1
numexpr: 2.4
matplotlib: 1.4.2
openpyxl: 1.8.6
xlrd: 0.9.3
xlwt: None
xlsxwriter: 0.5.7
lxml: 3.4.0
bs4: None
html5lib: 0.999
httplib2: None
apiclient: None
rpy2: None
sqlalchemy: 0.9.8
pymysql: None
psycopg2: 2.5.4 (dt dec pq3 ext)
d=pd.read_msgpack('test.mpk')
d.shape
(3, 454731)
d.to_csv('test.csv')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3.4/site-packages/pandas/util/decorators.py", line 88, in wrapper
return func(*args, **kwargs)
File "/usr/lib/python3.4/site-packages/pandas/core/frame.py", line 1154, in to_csv
formatter.save()
File "/usr/lib/python3.4/site-packages/pandas/core/format.py", line 1400, in save
self._save()
File "/usr/lib/python3.4/site-packages/pandas/core/format.py", line 1492, in _save
chunks = int(nrows / chunksize) + 1
ZeroDivisionError: division by zero
d.T.to_csv('test.csv')
|
ZeroDivisionError
|
def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
from pandas.tseries.tools import normalize_date
# First and last offsets should be calculated from the start day to fix an
# error cause by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
# See https://github.com/pydata/pandas/issues/8683
start_day_nanos = Timestamp(normalize_date(first)).value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (Timestamp(fresult, tz=first.tz), Timestamp(lresult, tz=last.tz))
|
def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
from pandas.tseries.tools import normalize_date
start_day_nanos = Timestamp(normalize_date(first)).value
last_day_nanos = Timestamp(normalize_date(last)).value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
last_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - last_day_nanos) % offset.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (Timestamp(fresult, tz=first.tz), Timestamp(lresult, tz=last.tz))
|
https://github.com/pandas-dev/pandas/issues/8683
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-71895ab1ef27> in <module>()
----> 1 R_inst = loaded_sorted.tail(200).head(20).resample('2200L', how='sum', label='right')
/Users/josh/anaconda3/envs/py34/lib/python3.4/site-packages/pandas/core/generic.py in resample(self, rule, how, axis, fill_method, closed, label, convention, kind,
loffset, limit, base)
2978 fill_method=fill_method, convention=convention,
2979 limit=limit, base=base)
-> 2980 return sampler.resample(self).__finalize__(self)
2981
2982 def first(self, offset):
/Users/josh/anaconda3/envs/py34/lib/python3.4/site-packages/pandas/tseries/resample.py in resample(self, obj)
83
84 if isinstance(ax, DatetimeIndex):
---> 85 rs = self._resample_timestamps()
86 elif isinstance(ax, PeriodIndex):
87 offset = to_offset(self.freq)
/Users/josh/anaconda3/envs/py34/lib/python3.4/site-packages/pandas/tseries/resample.py in _resample_timestamps(self, kind)
273 axlabels = self.ax
274
--> 275 self._get_binner_for_resample(kind=kind)
276 grouper = self.grouper
277 binner = self.binner
/Users/josh/anaconda3/envs/py34/lib/python3.4/site-packages/pandas/tseries/resample.py in _get_binner_for_resample(self, kind)
121 kind = self.kind
122 if kind is None or kind == 'timestamp':
--> 123 self.binner, bins, binlabels = self._get_time_bins(ax)
124 elif kind == 'timedelta':
125 self.binner, bins, binlabels = self._get_time_delta_bins(ax)
/Users/josh/anaconda3/envs/py34/lib/python3.4/site-packages/pandas/tseries/resample.py in _get_time_bins(self, ax)
182
183 # general version, knowing nothing about relative frequencies
--> 184 bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
185
186 if self.closed == 'right':
/Users/josh/anaconda3/envs/py34/lib/python3.4/site-packages/pandas/lib.so in pandas.lib.generate_bins_dt64 (pandas/lib.c:17928)()
ValueError: Values falls after last bin
|
ValueError
|
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
|
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
"""
try:
return self[key]
except (KeyError, ValueError):
return default
|
https://github.com/pandas-dev/pandas/issues/7725
|
s = pd.Series([1,2,3], index=["a","b","c"])
s
a 1
b 2
c 3
dtype: int64
s.get("d", 0)
0
s.get(10, 0)
Traceback (most recent call last):
File "<ipython-input-18-26d73ac73179>", line 1, in <module>
s.get(10, 0)
File "/usr/local/lib/python2.7/dist-packages/pandas-0.14.0_421_g20dfc6b-py2.7-linux-x86_64.egg/pandas/core/generic.py", line 1040, in get
return self[key]
File "/usr/local/lib/python2.7/dist-packages/pandas-0.14.0_421_g20dfc6b-py2.7-linux-x86_64.egg/pandas/core/series.py", line 484, in __getitem__
result = self.index.get_value(self, key)
File "/usr/local/lib/python2.7/dist-packages/pandas-0.14.0_421_g20dfc6b-py2.7-linux-x86_64.egg/pandas/core/index.py", line 1202, in get_value
return tslib.get_value_box(s, key)
File "tslib.pyx", line 540, in pandas.tslib.get_value_box (pandas/tslib.c:11831)
File "tslib.pyx", line 555, in pandas.tslib.get_value_box (pandas/tslib.c:11678)
IndexError: index out of bounds
|
IndexError
|
def shift(self, periods, axis=0):
"""shift the block by periods, possibly upcast"""
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, periods, axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [
make_block(new_values, ndim=self.ndim, fastpath=True, placement=self.mgr_locs)
]
|
def shift(self, periods, axis=0):
"""shift the block by periods, possibly upcast"""
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
new_values = np.roll(new_values, periods, axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [
make_block(new_values, ndim=self.ndim, fastpath=True, placement=self.mgr_locs)
]
|
https://github.com/pandas-dev/pandas/issues/8019
|
In [1]: from pandas import *
In [2]: df = DataFrame(columns=['foo'])
In [3]: df.shift(-1)
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-3-6aa009807b04> in <module>()
----> 1 df.shift(-1)
/users/is/whughes/pyenvs/da497516f84bbd5b/lib/python2.7/site-packages/pandas/core/generic.pyc in shift(self, periods, freq, axis, **kwds)
3288 block_axis = self._get_block_manager_axis(axis)
3289 if freq is None and not len(kwds):
-> 3290 new_data = self._data.shift(periods=periods, axis=block_axis)
3291 else:
3292 return self.tshift(periods, freq, **kwds)
/users/is/whughes/pyenvs/da497516f84bbd5b/lib/python2.7/site-packages/pandas/core/internals.pyc in shift(self, **kwargs)
2226
2227 def shift(self, **kwargs):
-> 2228 return self.apply('shift', **kwargs)
2229
2230 def fillna(self, **kwargs):
/users/is/whughes/pyenvs/da497516f84bbd5b/lib/python2.7/site-packages/pandas/core/internals.pyc in apply(self, f, axes, filter, do_integrity_check, **kwargs)
2190 copy=align_copy)
2191
-> 2192 applied = getattr(b, f)(**kwargs)
2193
2194 if isinstance(applied, list):
/users/is/whughes/pyenvs/da497516f84bbd5b/lib/python2.7/site-packages/pandas/core/internals.pyc in shift(self, periods, axis)
789 new_values = new_values.T
790 axis = new_values.ndim - axis - 1
--> 791 new_values = np.roll(new_values, periods, axis=axis)
792 axis_indexer = [ slice(None) ] * self.ndim
793 if periods > 0:
/users/is/whughes/pyenvs/da497516f84bbd5b/lib/python2.7/site-packages/numpy/core/numeric.pyc in roll(a, shift, axis)
1145 n = a.shape[axis]
1146 reshape = False
-> 1147 shift %= n
1148 indexes = concatenate((arange(n-shift,n),arange(n-shift)))
1149 res = a.take(indexes, axis)
ZeroDivisionError: integer division or modulo by zero
|
ZeroDivisionError
|
def _write_data_nodates(self):
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
typlist = self.typlist
for row in data:
# row = row.squeeze().tolist() # needed for structured arrays
for i, var in enumerate(row):
typ = ord(typlist[i])
if typ <= 244: # we've got a string
if var is None or var == np.nan:
var = _pad_bytes("", typ)
if len(var) < typ:
var = _pad_bytes(var, typ)
if compat.PY3:
self._write(var)
else:
self._write(var.encode(self._encoding))
else:
try:
self._file.write(struct.pack(byteorder + TYPE_MAP[typ], var))
except struct.error:
# have to be strict about type pack won't do any
# kind of casting
self._file.write(
struct.pack(
byteorder + TYPE_MAP[typ], self.type_converters[typ](var)
)
)
|
def _write_data_nodates(self):
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
typlist = self.typlist
for row in data:
# row = row.squeeze().tolist() # needed for structured arrays
for i, var in enumerate(row):
typ = ord(typlist[i])
if typ <= 244: # we've got a string
if var is None or var == np.nan:
var = _pad_bytes("", typ)
if len(var) < typ:
var = _pad_bytes(var, typ)
self._write(var)
else:
try:
self._file.write(struct.pack(byteorder + TYPE_MAP[typ], var))
except struct.error:
# have to be strict about type pack won't do any
# kind of casting
self._file.write(
struct.pack(
byteorder + TYPE_MAP[typ], self.type_converters[typ](var)
)
)
|
https://github.com/pandas-dev/pandas/issues/7286
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 1242, in write_file
self._write_data_nodates()
File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 1326, in _write_data_nodates
self._write(var)
File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 1104, in _write
self._file.write(to_write)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 1:ordinal not in range(128)
|
UnicodeEncodeError
|
def _write_data_dates(self):
convert_dates = self._convert_dates
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
MISSING_VALUES = self.MISSING_VALUES
typlist = self.typlist
for row in data:
# row = row.squeeze().tolist() # needed for structured arrays
for i, var in enumerate(row):
typ = ord(typlist[i])
# NOTE: If anyone finds this terribly slow, there is
# a vectorized way to convert dates, see genfromdta for going
# from int to datetime and reverse it. will copy data though
if i in convert_dates:
var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
if typ <= 244: # we've got a string
if len(var) < typ:
var = _pad_bytes(var, typ)
if compat.PY3:
self._write(var)
else:
self._write(var.encode(self._encoding))
else:
self._file.write(struct.pack(byteorder + TYPE_MAP[typ], var))
|
def _write_data_dates(self):
convert_dates = self._convert_dates
data = self.datarows
byteorder = self._byteorder
TYPE_MAP = self.TYPE_MAP
MISSING_VALUES = self.MISSING_VALUES
typlist = self.typlist
for row in data:
# row = row.squeeze().tolist() # needed for structured arrays
for i, var in enumerate(row):
typ = ord(typlist[i])
# NOTE: If anyone finds this terribly slow, there is
# a vectorized way to convert dates, see genfromdta for going
# from int to datetime and reverse it. will copy data though
if i in convert_dates:
var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
if typ <= 244: # we've got a string
if len(var) < typ:
var = _pad_bytes(var, typ)
self._write(var)
else:
self._file.write(struct.pack(byteorder + TYPE_MAP[typ], var))
|
https://github.com/pandas-dev/pandas/issues/7286
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 1242, in write_file
self._write_data_nodates()
File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 1326, in _write_data_nodates
self._write(var)
File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 1104, in _write
self._file.write(to_write)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 1:ordinal not in range(128)
|
UnicodeEncodeError
|
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
# format 117 or higher (XML like)
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117]:
raise ValueError(
"Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)"
)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
self.path_or_buf.read(11) # </N><label>
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
self.data_label = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(19) # </label><timestamp>
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
self.time_stamp = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
seek_variable_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
)
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
# self.path_or_buf.read(8) # </stata_dta>
# self.path_or_buf.read(8) # EOF
self.path_or_buf.seek(seek_vartypes)
typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for i in range(self.nvar)
]
self.typlist = [None] * self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045:
self.typlist[i] = typ
elif typ == 32768:
raise ValueError("Long strings are not supported")
else:
self.typlist[i] = self.TYPE_MAP_XML[typ]
i += 1
except:
raise ValueError(
"cannot convert stata types [{0}]".format(",".join(typlist))
)
self.dtyplist = [None] * self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045:
self.dtyplist[i] = str(typ)
else:
self.dtyplist[i] = self.DTYPE_MAP_XML[typ]
i += 1
except:
raise ValueError(
"cannot convert stata dtypes [{0}]".format(",".join(typlist))
)
self.path_or_buf.seek(seek_varnames)
self.varlist = [
self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)
]
self.path_or_buf.seek(seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(seek_formats)
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(49)) for i in range(self.nvar)
]
self.path_or_buf.seek(seek_value_label_names)
self.lbllist = [
self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)
]
self.path_or_buf.seek(seek_variable_labels)
self.vlblist = [
self._null_terminate(self.path_or_buf.read(81)) for i in range(self.nvar)
]
else:
# header
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 113, 114, 115]:
raise ValueError(
"Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)"
)
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
if self.format_version > 105:
self.data_label = self._null_terminate(self.path_or_buf.read(81))
else:
self.data_label = self._null_terminate(self.path_or_buf.read(32))
if self.format_version > 104:
self.time_stamp = self._null_terminate(self.path_or_buf.read(18))
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)]
else:
typlist = [
self.OLD_TYPE_MAPPING[self._decode_bytes(self.path_or_buf.read(1))]
for i in range(self.nvar)
]
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError(
"cannot convert stata types [{0}]".format(",".join(typlist))
)
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError(
"cannot convert stata dtypes [{0}]".format(",".join(typlist))
)
if self.format_version > 108:
self.varlist = [
self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)
]
else:
self.varlist = [
self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
if self.format_version > 113:
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(49))
for i in range(self.nvar)
]
elif self.format_version > 104:
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(12))
for i in range(self.nvar)
]
else:
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(7)) for i in range(self.nvar)
]
if self.format_version > 108:
self.lbllist = [
self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)
]
else:
self.lbllist = [
self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)
]
if self.format_version > 105:
self.vlblist = [
self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)
]
else:
self.vlblist = [
self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)
]
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
"""Calculate size of a data record."""
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
|
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
# format 117 or higher (XML like)
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117]:
raise ValueError(
"Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)"
)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
self.path_or_buf.read(11) # </N><label>
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
self.data_label = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(19) # </label><timestamp>
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
self.time_stamp = self._null_terminate(self.path_or_buf.read(strlen))
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
seek_variable_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
)
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
# self.path_or_buf.read(8) # </stata_dta>
# self.path_or_buf.read(8) # EOF
self.path_or_buf.seek(seek_vartypes)
typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for i in range(self.nvar)
]
self.typlist = [None] * self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045 or typ == 32768:
self.typlist[i] = None
else:
self.typlist[i] = self.TYPE_MAP_XML[typ]
i += 1
except:
raise ValueError(
"cannot convert stata types [{0}]".format(",".join(typlist))
)
self.dtyplist = [None] * self.nvar
try:
i = 0
for typ in typlist:
if typ <= 2045:
self.dtyplist[i] = str(typ)
else:
self.dtyplist[i] = self.DTYPE_MAP_XML[typ]
i += 1
except:
raise ValueError(
"cannot convert stata dtypes [{0}]".format(",".join(typlist))
)
self.path_or_buf.seek(seek_varnames)
self.varlist = [
self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)
]
self.path_or_buf.seek(seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(seek_formats)
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(49)) for i in range(self.nvar)
]
self.path_or_buf.seek(seek_value_label_names)
self.lbllist = [
self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)
]
self.path_or_buf.seek(seek_variable_labels)
self.vlblist = [
self._null_terminate(self.path_or_buf.read(81)) for i in range(self.nvar)
]
else:
# header
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 113, 114, 115]:
raise ValueError(
"Version of given Stata file is not 104, "
"105, 108, 113 (Stata 8/9), 114 (Stata "
"10/11), 115 (Stata 12) or 117 (Stata 13)"
)
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
if self.format_version > 105:
self.data_label = self._null_terminate(self.path_or_buf.read(81))
else:
self.data_label = self._null_terminate(self.path_or_buf.read(32))
if self.format_version > 104:
self.time_stamp = self._null_terminate(self.path_or_buf.read(18))
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)]
else:
typlist = [
self.OLD_TYPE_MAPPING[self._decode_bytes(self.path_or_buf.read(1))]
for i in range(self.nvar)
]
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError(
"cannot convert stata types [{0}]".format(",".join(typlist))
)
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError(
"cannot convert stata dtypes [{0}]".format(",".join(typlist))
)
if self.format_version > 108:
self.varlist = [
self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)
]
else:
self.varlist = [
self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
if self.format_version > 113:
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(49))
for i in range(self.nvar)
]
elif self.format_version > 104:
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(12))
for i in range(self.nvar)
]
else:
self.fmtlist = [
self._null_terminate(self.path_or_buf.read(7)) for i in range(self.nvar)
]
if self.format_version > 108:
self.lbllist = [
self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)
]
else:
self.lbllist = [
self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)
]
if self.format_version > 105:
self.vlblist = [
self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)
]
else:
self.vlblist = [
self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)
]
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
"""Calculate size of a data record."""
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
|
https://github.com/pandas-dev/pandas/issues/7360
|
%run D:/Datos/RFERRER/Desktop/import_stata13.py
INSTALLED VERSIONS
------------------
commit: None
python: 2.7.6.final.0
python-bits: 64
OS: Windows
OS-release: 7
machine: AMD64
processor: Intel64 Family 6 Model 45 Stepping 7, GenuineIntel
byteorder: little
LC_ALL: None
LANG: None
pandas: 0.14.0
nose: 1.3.0
Cython: 0.19.2
numpy: 1.8.0
scipy: 0.14.0
statsmodels: 0.5.0
IPython: 1.2.1
sphinx: 1.2.2
patsy: 0.2.0
scikits.timeseries: 0.91.3
dateutil: 2.2
pytz: 2013.8
bottleneck: None
tables: 2.4.0
numexpr: 2.2.2
matplotlib: 1.3.1
openpyxl: 1.8.5
xlrd: 0.9.2
xlwt: 0.7.5
xlsxwriter: None
lxml: 3.2.3
bs4: None
html5lib: 0.95-dev
bq: None
apiclient: None
rpy2: None
sqlalchemy: 0.8.3
pymysql: None
psycopg2: None
C:\Users\rferrer\AppData\Local\Enthought\Canopy\User\lib\site-packages\openpyxl\__init__.py:31: UserWarning: The installed version of lxml is too old to be used with openpyxl
warnings.warn("The installed version of lxml is too old to be used with openpyxl")
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
C:\Users\rferrer\AppData\Local\Enthought\Canopy\App\appdata\canopy-1.4.0.1938.win-x86_64\lib\site-packages\IPython\utils\py3compat.pyc in execfile(fname, glob, loc)
195 else:
196 filename = fname
--> 197 exec compile(scripttext, filename, 'exec') in glob, loc
198 else:
199 def execfile(fname, *where):
D:\Datos\RFERRER\Desktop\import_stata13.py in <module>()
3 pandas.show_versions()
4
----> 5 dta = pandas.io.stata.read_stata('D:\\Datos\\rferrer\\Desktop\\myauto.dta')
C:\Users\rferrer\AppData\Local\Enthought\Canopy\User\lib\site-packages\pandas\io\stata.pyc in read_stata(filepath_or_buffer, convert_dates, convert_categoricals, encoding, index)
45 identifier of column that should be used as index of the DataFrame
46 """
---> 47 reader = StataReader(filepath_or_buffer, encoding)
48
49 return reader.data(convert_dates, convert_categoricals, index)
C:\Users\rferrer\AppData\Local\Enthought\Canopy\User\lib\site-packages\pandas\io\stata.pyc in __init__(self, path_or_buf, encoding)
455 self.path_or_buf = path_or_buf
456
--> 457 self._read_header()
458
459 def _read_header(self):
C:\Users\rferrer\AppData\Local\Enthought\Canopy\User\lib\site-packages\pandas\io\stata.pyc in _read_header(self)
657
658 """Calculate size of a data record."""
--> 659 self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
660
661 def _calcsize(self, fmt):
C:\Users\rferrer\AppData\Local\Enthought\Canopy\User\lib\site-packages\pandas\io\stata.pyc in <lambda>(x)
657
658 """Calculate size of a data record."""
--> 659 self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
660
661 def _calcsize(self, fmt):
C:\Users\rferrer\AppData\Local\Enthought\Canopy\User\lib\site-packages\pandas\io\stata.pyc in _calcsize(self, fmt)
661 def _calcsize(self, fmt):
662 return (type(fmt) is int and fmt
--> 663 or struct.calcsize(self.byteorder + fmt))
664
665 def _col_size(self, k=None):
TypeError: cannot concatenate 'str' and 'NoneType' objects
|
TypeError
|
def rolling_count(arg, window, freq=None, center=False, how=None):
"""
Rolling count of number of non-NaN observations inside provided window.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
how : string, default 'mean'
Method for down- or re-sampling
Returns
-------
rolling_count : type of caller
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
arg = _conv_timerule(arg, freq, how)
window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
result = rolling_sum(
converted, window, min_periods=1, center=center
) # already converted
# putmask here?
result[np.isnan(result)] = 0
return return_hook(result)
|
def rolling_count(arg, window, freq=None, center=False):
"""
Rolling count of number of non-NaN observations inside provided window.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
Returns
-------
rolling_count : type of caller
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
arg = _conv_timerule(arg, freq)
window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
result = rolling_sum(
converted, window, min_periods=1, center=center
) # already converted
# putmask here?
result[np.isnan(result)] = 0
return return_hook(result)
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def rolling_cov(
arg1,
arg2=None,
window=None,
min_periods=None,
freq=None,
center=False,
pairwise=None,
how=None,
):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
window = min(window, len(arg1), len(arg2))
def _get_cov(X, Y):
mean = lambda x: rolling_mean(x, window, min_periods, center=center)
count = rolling_count(X + Y, window, center=center)
bias_adj = count / (count - 1)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
rs = _flex_binary_moment(arg1, arg2, _get_cov, pairwise=bool(pairwise))
return rs
|
def rolling_cov(
arg1,
arg2=None,
window=None,
min_periods=None,
freq=None,
center=False,
pairwise=None,
):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq)
arg2 = _conv_timerule(arg2, freq)
window = min(window, len(arg1), len(arg2))
def _get_cov(X, Y):
mean = lambda x: rolling_mean(x, window, min_periods, center=center)
count = rolling_count(X + Y, window, center=center)
bias_adj = count / (count - 1)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
rs = _flex_binary_moment(arg1, arg2, _get_cov, pairwise=bool(pairwise))
return rs
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def rolling_corr(
arg1,
arg2=None,
window=None,
min_periods=None,
freq=None,
center=False,
pairwise=None,
how=None,
):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
window = min(window, len(arg1), len(arg2))
def _get_corr(a, b):
num = rolling_cov(a, b, window, min_periods, freq=freq, center=center)
den = rolling_std(
a, window, min_periods, freq=freq, center=center
) * rolling_std(b, window, min_periods, freq=freq, center=center)
return num / den
return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise))
|
def rolling_corr(
arg1,
arg2=None,
window=None,
min_periods=None,
freq=None,
center=False,
pairwise=None,
):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq)
arg2 = _conv_timerule(arg2, freq)
window = min(window, len(arg1), len(arg2))
def _get_corr(a, b):
num = rolling_cov(a, b, window, min_periods, freq=freq, center=center)
den = rolling_std(
a, window, min_periods, freq=freq, center=center
) * rolling_std(b, window, min_periods, freq=freq, center=center)
return num / den
return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise))
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def _rolling_moment(
arg,
window,
func,
minp,
axis=0,
freq=None,
center=False,
how=None,
args=(),
kwargs={},
**kwds,
):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
func : Cython function to compute rolling statistic on raw series
minp : int
Minimum number of observations required to have a value
axis : int, default 0
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
center : boolean, default False
Whether the label should correspond with center of window
how : string, default 'mean'
Method for down- or re-sampling
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input
"""
arg = _conv_timerule(arg, freq, how)
calc = lambda x: func(x, window, minp=minp, args=args, kwargs=kwargs, **kwds)
return_hook, values = _process_data_structure(arg)
# actually calculate the moment. Faster way to do this?
if values.ndim > 1:
result = np.apply_along_axis(calc, axis, values)
else:
result = calc(values)
rs = return_hook(result)
if center:
rs = _center_window(rs, window, axis)
return rs
|
def _rolling_moment(
arg, window, func, minp, axis=0, freq=None, center=False, args=(), kwargs={}, **kwds
):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
func : Cython function to compute rolling statistic on raw series
minp : int
Minimum number of observations required to have a value
axis : int, default 0
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
center : boolean, default False
Whether the label should correspond with center of window
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input
"""
arg = _conv_timerule(arg, freq)
calc = lambda x: func(x, window, minp=minp, args=args, kwargs=kwargs, **kwds)
return_hook, values = _process_data_structure(arg)
# actually calculate the moment. Faster way to do this?
if values.ndim > 1:
result = np.apply_along_axis(calc, axis, values)
else:
result = calc(values)
rs = return_hook(result)
if center:
rs = _center_window(rs, window, axis)
return rs
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def ewma(
arg,
com=None,
span=None,
halflife=None,
min_periods=0,
freq=None,
adjust=True,
how=None,
):
com = _get_center_of_mass(com, span, halflife)
arg = _conv_timerule(arg, freq, how)
def _ewma(v):
result = algos.ewma(v, com, int(adjust))
first_index = _first_valid_index(v)
result[first_index : first_index + min_periods] = NaN
return result
return_hook, values = _process_data_structure(arg)
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
|
def ewma(
arg, com=None, span=None, halflife=None, min_periods=0, freq=None, adjust=True
):
com = _get_center_of_mass(com, span, halflife)
arg = _conv_timerule(arg, freq)
def _ewma(v):
result = algos.ewma(v, com, int(adjust))
first_index = _first_valid_index(v)
result[first_index : first_index + min_periods] = NaN
return result
return_hook, values = _process_data_structure(arg)
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def ewmvar(
arg,
com=None,
span=None,
halflife=None,
min_periods=0,
bias=False,
freq=None,
how=None,
):
com = _get_center_of_mass(com, span, halflife)
arg = _conv_timerule(arg, freq, how)
moment2nd = ewma(arg * arg, com=com, min_periods=min_periods)
moment1st = ewma(arg, com=com, min_periods=min_periods)
result = moment2nd - moment1st**2
if not bias:
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
def ewmvar(
arg, com=None, span=None, halflife=None, min_periods=0, bias=False, freq=None
):
com = _get_center_of_mass(com, span, halflife)
arg = _conv_timerule(arg, freq)
moment2nd = ewma(arg * arg, com=com, min_periods=min_periods)
moment1st = ewma(arg, com=com, min_periods=min_periods)
result = moment2nd - moment1st**2
if not bias:
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def ewmcov(
arg1,
arg2=None,
com=None,
span=None,
halflife=None,
min_periods=0,
bias=False,
freq=None,
pairwise=None,
how=None,
):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
def _get_ewmcov(X, Y):
mean = lambda x: ewma(
x, com=com, span=span, halflife=halflife, min_periods=min_periods
)
return mean(X * Y) - mean(X) * mean(Y)
result = _flex_binary_moment(arg1, arg2, _get_ewmcov, pairwise=bool(pairwise))
if not bias:
com = _get_center_of_mass(com, span, halflife)
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
def ewmcov(
arg1,
arg2=None,
com=None,
span=None,
halflife=None,
min_periods=0,
bias=False,
freq=None,
pairwise=None,
):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq)
arg2 = _conv_timerule(arg2, freq)
def _get_ewmcov(X, Y):
mean = lambda x: ewma(
x, com=com, span=span, halflife=halflife, min_periods=min_periods
)
return mean(X * Y) - mean(X) * mean(Y)
result = _flex_binary_moment(arg1, arg2, _get_ewmcov, pairwise=bool(pairwise))
if not bias:
com = _get_center_of_mass(com, span, halflife)
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def ewmcorr(
arg1,
arg2=None,
com=None,
span=None,
halflife=None,
min_periods=0,
freq=None,
pairwise=None,
how=None,
):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
def _get_ewmcorr(X, Y):
mean = lambda x: ewma(
x, com=com, span=span, halflife=halflife, min_periods=min_periods
)
var = lambda x: ewmvar(
x, com=com, span=span, halflife=halflife, min_periods=min_periods, bias=True
)
return (mean(X * Y) - mean(X) * mean(Y)) / _zsqrt(var(X) * var(Y))
result = _flex_binary_moment(arg1, arg2, _get_ewmcorr, pairwise=bool(pairwise))
return result
|
def ewmcorr(
arg1,
arg2=None,
com=None,
span=None,
halflife=None,
min_periods=0,
freq=None,
pairwise=None,
):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq)
arg2 = _conv_timerule(arg2, freq)
def _get_ewmcorr(X, Y):
mean = lambda x: ewma(
x, com=com, span=span, halflife=halflife, min_periods=min_periods
)
var = lambda x: ewmvar(
x, com=com, span=span, halflife=halflife, min_periods=min_periods, bias=True
)
return (mean(X * Y) - mean(X) * mean(Y)) / _zsqrt(var(X) * var(Y))
result = _flex_binary_moment(arg1, arg2, _get_ewmcorr, pairwise=bool(pairwise))
return result
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def _conv_timerule(arg, freq, how):
types = (DataFrame, Series)
if freq is not None and isinstance(arg, types):
# Conform to whatever frequency needed.
arg = arg.resample(freq, how=how)
return arg
|
def _conv_timerule(arg, freq):
types = (DataFrame, Series)
if freq is not None and isinstance(arg, types):
# Conform to whatever frequency needed.
arg = arg.resample(freq)
return arg
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def _rolling_func(func, desc, check_minp=_use_window, how=None):
if how is None:
how_arg_str = "None"
else:
how_arg_str = "'%s" % how
@Substitution(
desc, _unary_arg, _roll_kw % how_arg_str, _type_of_input_retval, _roll_notes
)
@Appender(_doc_template)
@wraps(func)
def f(arg, window, min_periods=None, freq=None, center=False, how=how, **kwargs):
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(
arg,
window,
call_cython,
min_periods,
freq=freq,
center=center,
how=how,
**kwargs,
)
return f
|
def _rolling_func(func, desc, check_minp=_use_window):
@Substitution(desc, _unary_arg, _roll_kw, _type_of_input_retval, _roll_notes)
@Appender(_doc_template)
@wraps(func)
def f(arg, window, min_periods=None, freq=None, center=False, **kwargs):
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, center=center, **kwargs
)
return f
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def f(arg, window, min_periods=None, freq=None, center=False, how=how, **kwargs):
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(
arg,
window,
call_cython,
min_periods,
freq=freq,
center=center,
how=how,
**kwargs,
)
|
def f(arg, window, min_periods=None, freq=None, center=False, **kwargs):
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, center=center, **kwargs
)
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def rolling_window(
arg,
window=None,
win_type=None,
min_periods=None,
freq=None,
center=False,
mean=True,
axis=0,
how=None,
**kwargs,
):
"""
Applies a moving window of type ``window_type`` and size ``window``
on the data.
Parameters
----------
arg : Series, DataFrame
window : int or ndarray
Weighting window specification. If the window is an integer, then it is
treated as the window length and win_type is required
win_type : str, default None
Window type (see Notes)
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
mean : boolean, default True
If True computes weighted mean, else weighted sum
axis : {0, 1}, default 0
how : string, default 'mean'
Method for down- or re-sampling
Returns
-------
y : type of input argument
Notes
-----
The recognized window types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
if isinstance(window, (list, tuple, np.ndarray)):
if win_type is not None:
raise ValueError(("Do not specify window type if using custom weights"))
window = pdcom._asarray_tuplesafe(window).astype(float)
elif pdcom.is_integer(window): # window size
if win_type is None:
raise ValueError("Must specify window type")
try:
import scipy.signal as sig
except ImportError:
raise ImportError("Please install scipy to generate window weight")
win_type = _validate_win_type(win_type, kwargs) # may pop from kwargs
window = sig.get_window(win_type, window).astype(float)
else:
raise ValueError("Invalid window %s" % str(window))
minp = _use_window(min_periods, len(window))
arg = _conv_timerule(arg, freq, how)
return_hook, values = _process_data_structure(arg)
f = lambda x: algos.roll_window(x, window, minp, avg=mean)
result = np.apply_along_axis(f, axis, values)
rs = return_hook(result)
if center:
rs = _center_window(rs, len(window), axis)
return rs
|
def rolling_window(
arg,
window=None,
win_type=None,
min_periods=None,
freq=None,
center=False,
mean=True,
axis=0,
**kwargs,
):
"""
Applies a moving window of type ``window_type`` and size ``window``
on the data.
Parameters
----------
arg : Series, DataFrame
window : int or ndarray
Weighting window specification. If the window is an integer, then it is
treated as the window length and win_type is required
win_type : str, default None
Window type (see Notes)
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
mean : boolean, default True
If True computes weighted mean, else weighted sum
axis : {0, 1}, default 0
Returns
-------
y : type of input argument
Notes
-----
The recognized window types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
if isinstance(window, (list, tuple, np.ndarray)):
if win_type is not None:
raise ValueError(("Do not specify window type if using custom weights"))
window = pdcom._asarray_tuplesafe(window).astype(float)
elif pdcom.is_integer(window): # window size
if win_type is None:
raise ValueError("Must specify window type")
try:
import scipy.signal as sig
except ImportError:
raise ImportError("Please install scipy to generate window weight")
win_type = _validate_win_type(win_type, kwargs) # may pop from kwargs
window = sig.get_window(win_type, window).astype(float)
else:
raise ValueError("Invalid window %s" % str(window))
minp = _use_window(min_periods, len(window))
arg = _conv_timerule(arg, freq)
return_hook, values = _process_data_structure(arg)
f = lambda x: algos.roll_window(x, window, minp, avg=mean)
result = np.apply_along_axis(f, axis, values)
rs = return_hook(result)
if center:
rs = _center_window(rs, len(window), axis)
return rs
|
https://github.com/pandas-dev/pandas/issues/6297
|
In [118]: import pandas
In [119]: indices = [datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
In [120]: indices.append(datetime.datetime(1975, 1, 3, 6, 0)) # So that we can have 2 datapoints on one of the days
In [121]: series = pandas.Series(range(1, 7), index=indices)
In [122]: series = series.map(lambda x: float(x)) # Use floats instead of ints as values
In [123]: series = series.sort_index() # Sort chronologically
In [124]: expected_result = pandas.Series([1.0, 2.0, 6.0, 4.0, 5.0], index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
In [125]: actual_result = pandas.rolling_max(series, window=1, freq='D')
In [126]: assert((actual_result==expected_result).all())
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-126-cc436c4798a7> in <module>()
----> 1 assert((actual_result==expected_result).all())
AssertionError:
In [127]: expected_result
Out[127]:
1975-01-01 12:00:00 1
1975-01-02 12:00:00 2
1975-01-03 12:00:00 6
1975-01-04 12:00:00 4
1975-01-05 12:00:00 5
dtype: float64
In [128]: actual_result
Out[128]:
1975-01-01 1.0
1975-01-02 2.0
1975-01-03 4.5
1975-01-04 4.0
1975-01-05 5.0
Freq: D, dtype: float64
|
AssertionError
|
def _get_time_period_bins(self, axis):
if not isinstance(axis, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
"an instance of %r" % type(axis).__name__
)
if not len(axis):
binner = labels = PeriodIndex(data=[], freq=self.freq)
return binner, [], labels
labels = binner = PeriodIndex(start=axis[0], end=axis[-1], freq=self.freq)
end_stamps = (labels + 1).asfreq(self.freq, "s").to_timestamp()
if axis.tzinfo:
end_stamps = end_stamps.tz_localize(axis.tzinfo)
bins = axis.searchsorted(end_stamps, side="left")
return binner, bins, labels
|
def _get_time_period_bins(self, axis):
if not isinstance(axis, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
"an instance of %r" % type(axis).__name__
)
if not len(axis):
binner = labels = PeriodIndex(data=[], freq=self.freq)
return binner, [], labels
labels = binner = PeriodIndex(start=axis[0], end=axis[-1], freq=self.freq)
end_stamps = (labels + 1).asfreq("D", "s").to_timestamp()
bins = axis.searchsorted(end_stamps, side="left")
return binner, bins, labels
|
https://github.com/pandas-dev/pandas/issues/3609
|
In [20]: s.resample('T', kind='period')
-----------------
AssertionError
Traceback (most recent call last)
<ipython-input-79-c290c0578332> in <module>()
----> 1 s.resample('T', kind='period')
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/core/generic.py in resample(self, rule, how, axis, fill_method, closed, label, convention, kind, loffset, limit, base)
255 fill_method=fill_method, convention=convention,
256 limit=limit, base=base)
--> 257 return sampler.resample(self)
258
259 def first(self, offset):
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/tseries/resample.py in resample(self, obj)
81
82 if isinstance(axis, DatetimeIndex):
---> 83 rs = self._resample_timestamps(obj)
84 elif isinstance(axis, PeriodIndex):
85 offset = to_offset(self.freq)
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/tseries/resample.py in _resample_timestamps(self, obj)
224 # Irregular data, have to use groupby
225 grouped = obj.groupby(grouper, axis=self.axis)
--> 226 result = grouped.aggregate(self._agg_method)
227
228 if self.fill_method is not None:
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/core/groupby.py in aggregate(self, func_or_funcs, *args, **kwargs)
1410 if isinstance(func_or_funcs, basestring):
-> 1411 return getattr(self, func_or_funcs)(*args, **kwargs)
1412
1413 if hasattr(func_or_funcs, '__iter__'):
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/core/groupby.py in mean(self)
356 except Exception: # pragma: no cover
357 f = lambda x: x.mean(axis=self.axis)
--> 358 return self._python_agg_general(f)
359
360 def median(self):
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/core/groupby.py in _python_agg_general(self, func, *args, **kwargs)
498 output[name] = self._try_cast(values[mask],result)
499
--> 500 return self._wrap_aggregated_output(output)
501
502 def _wrap_applied_output(self, *args, **kwargs):
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/core/groupby.py in _wrap_aggregated_output(self, output, names)
1473 return DataFrame(output, index=index, columns=names)
1474 else:
-> 1475 return Series(output, index=index, name=self.name)
1476
1477 def _wrap_applied_output(self, keys, values, not_indexed_same=False):
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/core/series.py in __new__(cls, data, index, dtype, name, copy)
494 else:
495 subarr = subarr.view(Series)
--> 496 subarr.index = index
497 subarr.name = name
498
/home/dk3810/workspace/python/pda/scripts/src/pandas/pandas/lib.so in pandas.lib.SeriesIndex.__set__ (pandas/lib.c:29775)()
AssertionError: Index length did not match values
|
AssertionError
|
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indexers = [
self.obj.index.get_indexer(group.index) if true_and_notnull(group) else []
for _, group in self
]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
if len(indexers) == 0:
filtered = self.obj.take([]) # because np.concatenate would fail
else:
filtered = self.obj.take(np.concatenate(indexers))
if dropna:
return filtered
else:
return filtered.reindex(self.obj.index) # Fill with NaNs.
|
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
indexers = [
self.obj.index.get_indexer(group.index) if wrapper(group) else []
for _, group in self
]
if len(indexers) == 0:
filtered = self.obj.take([]) # because np.concatenate would fail
else:
filtered = self.obj.take(np.concatenate(indexers))
if dropna:
return filtered
else:
return filtered.reindex(self.obj.index) # Fill with NaNs.
|
https://github.com/pandas-dev/pandas/issues/4447
|
In [90]: dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})
In [91]: dff.groupby('B').filter(lambda x: len(x) > 2)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-91-89d79df28299> in <module>()
----> 1 dff.groupby('B').filter(lambda x: len(x) > 2)
C:\Anaconda\lib\site-packages\pandas\core\groupby.pyc in filter(self, func, dropna, *args, **kwargs)
2092 res = path(group)
2093
-> 2094 if res:
2095 indexers.append(self.obj.index.get_indexer(group.index))
2096
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
In [93]: pd.__version__
Out[93]: '0.12.0'
|
ValueError
|
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Note
----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
from pandas.tools.merge import concat
indexers = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, "name", name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
def add_indexer():
indexers.append(self.obj.index.get_indexer(group.index))
# interpret the result of the filter
if isinstance(res, (bool, np.bool_)):
if res:
add_indexer()
else:
if getattr(res, "ndim", None) == 1:
val = res.ravel()[0]
if val and notnull(val):
add_indexer()
else:
# in theory you could do .all() on the boolean result ?
raise TypeError("the filter must return a boolean result")
if len(indexers) == 0:
filtered = self.obj.take([]) # because np.concatenate would fail
else:
filtered = self.obj.take(np.concatenate(indexers))
if dropna:
return filtered
else:
return filtered.reindex(self.obj.index) # Fill with NaNs.
|
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Note
----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
from pandas.tools.merge import concat
indexers = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, "name", name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
def add_indexer():
indexers.append(self.obj.index.get_indexer(group.index))
# interpret the result of the filter
if isinstance(res, (bool, np.bool_)):
if res:
add_indexer()
else:
if getattr(res, "ndim", None) == 1:
if res.ravel()[0]:
add_indexer()
else:
# in theory you could do .all() on the boolean result ?
raise TypeError("the filter must return a boolean result")
if len(indexers) == 0:
filtered = self.obj.take([]) # because np.concatenate would fail
else:
filtered = self.obj.take(np.concatenate(indexers))
if dropna:
return filtered
else:
return filtered.reindex(self.obj.index) # Fill with NaNs.
|
https://github.com/pandas-dev/pandas/issues/4447
|
In [90]: dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})
In [91]: dff.groupby('B').filter(lambda x: len(x) > 2)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-91-89d79df28299> in <module>()
----> 1 dff.groupby('B').filter(lambda x: len(x) > 2)
C:\Anaconda\lib\site-packages\pandas\core\groupby.pyc in filter(self, func, dropna, *args, **kwargs)
2092 res = path(group)
2093
-> 2094 if res:
2095 indexers.append(self.obj.index.get_indexer(group.index))
2096
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
In [93]: pd.__version__
Out[93]: '0.12.0'
|
ValueError
|
def reshape(self, *args, **kwargs):
"""
See numpy.ndarray.reshape
"""
if len(args) == 1 and hasattr(args[0], "__iter__"):
shape = args[0]
else:
shape = args
if tuple(shape) == self.shape:
# XXX ignoring the "order" keyword.
return self
return self.values.reshape(shape, **kwargs)
|
def reshape(self, newshape, order="C"):
"""
See numpy.ndarray.reshape
"""
if order not in ["C", "F"]:
raise TypeError("must specify a tuple / singular length to reshape")
if isinstance(newshape, tuple) and len(newshape) > 1:
return self.values.reshape(newshape, order=order)
else:
return ndarray.reshape(self, newshape, order)
|
https://github.com/pandas-dev/pandas/issues/4554
|
import pandas as pd
x = pd.Series(range(5))
x.reshape(x.shape)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/pymodules/python2.7/pandas/core/series.py", line 981, in reshape
return ndarray.reshape(self, newshape, order)
TypeError: an integer is required
|
TypeError
|
def __getitem__(self, key):
try:
if type(key) == tuple and len(key) == 1:
key = key[0]
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
else:
raise
except Exception:
raise
if com.is_iterator(key):
key = list(key)
# boolean
# special handling of boolean data with NAs stored in object
# arrays. Since we can't represent NA with dtype=bool
if _is_bool_indexer(key):
key = self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return self._get_with(key)
|
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
else:
raise
except Exception:
raise
if com.is_iterator(key):
key = list(key)
# boolean
# special handling of boolean data with NAs stored in object
# arrays. Since we can't represent NA with dtype=bool
if _is_bool_indexer(key):
key = self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return self._get_with(key)
|
https://github.com/pandas-dev/pandas/issues/816
|
In [13]: s = Series(np.arange(10))
In [14]: np.diff(s)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/wesm/code/pandas/<ipython-input-14-b5f9fe77ab7c> in <module>()
----> 1 np.diff(s)
/usr/lib/epd-7.1/lib/python2.7/site-packages/numpy/lib/function_base.pyc in diff(a, n, axis)
975 return diff(a[slice1]-a[slice2], n-1, axis=axis)
976 else:
--> 977 return a[slice1]-a[slice2]
978
979 def interp(x, xp, fp, left=None, right=None):
/home/wesm/code/pandas/pandas/core/series.pyc in __getitem__(self, key)
392 key = np.asarray(key, dtype=bool)
393
--> 394 return self._get_with(key)
395
396 def _get_with(self, key):
/home/wesm/code/pandas/pandas/core/series.pyc in _get_with(self, key)
406 else:
407 if isinstance(key, tuple):
--> 408 return self._get_values_tuple(key)
409
410 if not isinstance(key, (list, np.ndarray)): # pragma: no cover
/home/wesm/code/pandas/pandas/core/series.pyc in _get_values_tuple(self, key)
437
438 if not isinstance(self.index, MultiIndex):
--> 439 raise ValueError('Can only tuple-index with a MultiIndex')
440
441 # If key is contained, would have returned by now
ValueError: Can only tuple-index with a MultiIndex
|
ValueError
|
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
else:
raise
except Exception:
raise
if com.is_iterator(key):
key = list(key)
# boolean
# special handling of boolean data with NAs stored in object
# arrays. Since we can't represent NA with dtype=bool
if _is_bool_indexer(key):
key = self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return self._get_with(key)
|
def __getitem__(self, key):
try:
if type(key) == tuple and len(key) == 1:
key = key[0]
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
else:
raise
except Exception:
raise
if com.is_iterator(key):
key = list(key)
# boolean
# special handling of boolean data with NAs stored in object
# arrays. Since we can't represent NA with dtype=bool
if _is_bool_indexer(key):
key = self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return self._get_with(key)
|
https://github.com/pandas-dev/pandas/issues/816
|
In [13]: s = Series(np.arange(10))
In [14]: np.diff(s)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/wesm/code/pandas/<ipython-input-14-b5f9fe77ab7c> in <module>()
----> 1 np.diff(s)
/usr/lib/epd-7.1/lib/python2.7/site-packages/numpy/lib/function_base.pyc in diff(a, n, axis)
975 return diff(a[slice1]-a[slice2], n-1, axis=axis)
976 else:
--> 977 return a[slice1]-a[slice2]
978
979 def interp(x, xp, fp, left=None, right=None):
/home/wesm/code/pandas/pandas/core/series.pyc in __getitem__(self, key)
392 key = np.asarray(key, dtype=bool)
393
--> 394 return self._get_with(key)
395
396 def _get_with(self, key):
/home/wesm/code/pandas/pandas/core/series.pyc in _get_with(self, key)
406 else:
407 if isinstance(key, tuple):
--> 408 return self._get_values_tuple(key)
409
410 if not isinstance(key, (list, np.ndarray)): # pragma: no cover
/home/wesm/code/pandas/pandas/core/series.pyc in _get_values_tuple(self, key)
437
438 if not isinstance(self.index, MultiIndex):
--> 439 raise ValueError('Can only tuple-index with a MultiIndex')
440
441 # If key is contained, would have returned by now
ValueError: Can only tuple-index with a MultiIndex
|
ValueError
|
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
from pandas.core.indexing import _is_index_slice
if self.index.inferred_type == "integer" or _is_index_slice(key):
indexer = key
else:
indexer = self.ix._convert_to_indexer(key, axis=0)
return self._get_values(indexer)
else:
if isinstance(key, tuple):
try:
return self._get_values_tuple(key)
except:
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
if not isinstance(key, (list, np.ndarray)): # pragma: no cover
key = list(key)
key_type = lib.infer_dtype(key)
if key_type == "integer":
if self.index.inferred_type == "integer":
return self.reindex(key)
else:
return self._get_values(key)
elif key_type == "boolean":
return self._get_values(key)
else:
try:
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
|
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
from pandas.core.indexing import _is_index_slice
if self.index.inferred_type == "integer" or _is_index_slice(key):
indexer = key
else:
indexer = self.ix._convert_to_indexer(key, axis=0)
return self._get_values(indexer)
else:
if isinstance(key, tuple):
return self._get_values_tuple(key)
if not isinstance(key, (list, np.ndarray)): # pragma: no cover
key = list(key)
key_type = lib.infer_dtype(key)
if key_type == "integer":
if self.index.inferred_type == "integer":
return self.reindex(key)
else:
return self._get_values(key)
elif key_type == "boolean":
return self._get_values(key)
else:
try:
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
|
https://github.com/pandas-dev/pandas/issues/816
|
In [13]: s = Series(np.arange(10))
In [14]: np.diff(s)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/wesm/code/pandas/<ipython-input-14-b5f9fe77ab7c> in <module>()
----> 1 np.diff(s)
/usr/lib/epd-7.1/lib/python2.7/site-packages/numpy/lib/function_base.pyc in diff(a, n, axis)
975 return diff(a[slice1]-a[slice2], n-1, axis=axis)
976 else:
--> 977 return a[slice1]-a[slice2]
978
979 def interp(x, xp, fp, left=None, right=None):
/home/wesm/code/pandas/pandas/core/series.pyc in __getitem__(self, key)
392 key = np.asarray(key, dtype=bool)
393
--> 394 return self._get_with(key)
395
396 def _get_with(self, key):
/home/wesm/code/pandas/pandas/core/series.pyc in _get_with(self, key)
406 else:
407 if isinstance(key, tuple):
--> 408 return self._get_values_tuple(key)
409
410 if not isinstance(key, (list, np.ndarray)): # pragma: no cover
/home/wesm/code/pandas/pandas/core/series.pyc in _get_values_tuple(self, key)
437
438 if not isinstance(self.index, MultiIndex):
--> 439 raise ValueError('Can only tuple-index with a MultiIndex')
440
441 # If key is contained, would have returned by now
ValueError: Can only tuple-index with a MultiIndex
|
ValueError
|
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
Returns
-------
match : ndarray of integers
"""
values = com._asarray_tuplesafe(values)
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype="O")
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
return _hashtable_algo(f, values.dtype)
|
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
Returns
-------
match : ndarray of integers
"""
values = np.asarray(values)
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype="O")
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
return _hashtable_algo(f, values.dtype)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _isnull_ndarraylike(obj):
from pandas import Series
values = np.asarray(obj)
if values.dtype.kind in ("O", "S", "U"):
# Working around NumPy ticket 1542
shape = values.shape
if values.dtype.kind in ("S", "U"):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[:] = vec.reshape(shape)
if isinstance(obj, Series):
result = Series(result, index=obj.index, copy=False)
elif values.dtype == np.dtype("M8[ns]"):
# this is the NaT pattern
result = values.view("i8") == lib.iNaT
else:
result = -np.isfinite(obj)
return result
|
def _isnull_ndarraylike(obj):
from pandas import Series
values = np.asarray(obj)
if values.dtype.kind in ("O", "S"):
# Working around NumPy ticket 1542
shape = values.shape
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[:] = vec.reshape(shape)
if isinstance(obj, Series):
result = Series(result, index=obj.index, copy=False)
elif values.dtype == np.dtype("M8[ns]"):
# this is the NaT pattern
result = values.view("i8") == lib.iNaT
else:
result = -np.isfinite(obj)
return result
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def pad_2d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.pad_2d_inplace_float64
elif is_datetime64_dtype(values):
_method = _pad_2d_datetime
elif values.dtype == np.object_:
_method = _algos.pad_2d_inplace_object
else: # pragma: no cover
raise ValueError("Invalid dtype for padding")
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
|
def pad_2d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.pad_2d_inplace_float64
elif is_datetime64_dtype(values):
_method = _pad_2d_datetime
elif values.dtype == np.object_:
_method = _algos.pad_2d_inplace_object
else: # pragma: no cover
raise ValueError("Invalid dtype for padding")
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def backfill_2d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.backfill_2d_inplace_float64
elif is_datetime64_dtype(values):
_method = _backfill_2d_datetime
elif values.dtype == np.object_:
_method = _algos.backfill_2d_inplace_object
else: # pragma: no cover
raise ValueError("Invalid dtype for padding")
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
|
def backfill_2d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.backfill_2d_inplace_float64
elif is_datetime64_dtype(values):
_method = _backfill_2d_datetime
elif values.dtype == np.object_:
_method = _algos.backfill_2d_inplace_object
else: # pragma: no cover
raise ValueError("Invalid dtype for padding")
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not isinstance(values, (list, tuple, np.ndarray)):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, basestring):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
result = np.empty(len(values), dtype=object)
result[:] = values
return result
|
def _asarray_tuplesafe(values, dtype=None):
if not isinstance(values, (list, tuple, np.ndarray)):
values = list(values)
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, basestring):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
result = np.empty(len(values), dtype=object)
result[:] = values
return result
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def console_encode(value):
if py3compat.PY3 or not isinstance(value, unicode):
return value
try:
import sys
return value.encode(sys.stdin.encoding or "utf-8", "replace")
except (AttributeError, TypeError):
return value.encode("ascii", "replace")
|
def console_encode(value):
if py3compat.PY3 or not isinstance(value, unicode):
return value
try:
import sys
return value.encode(sys.stdin.encoding, "replace")
except (AttributeError, TypeError):
return value.encode("ascii", "replace")
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = _ensure_index(new_axis)
cur_axis = self.axes[axis]
if new_axis.equals(cur_axis):
if copy:
result = self.copy(deep=True)
result.axes[axis] = new_axis
if axis == 0:
# patch ref_items, #1823
for blk in result.blocks:
blk.ref_items = new_axis
return result
else:
return self
if axis == 0:
assert method is None
return self.reindex_items(new_axis)
new_axis, indexer = cur_axis.reindex(new_axis, method)
return self.reindex_indexer(new_axis, indexer, axis=axis)
|
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = _ensure_index(new_axis)
cur_axis = self.axes[axis]
if new_axis.equals(cur_axis):
if copy:
result = self.copy(deep=True)
result.axes[axis] = new_axis
return result
else:
return self
if axis == 0:
assert method is None
return self.reindex_items(new_axis)
new_axis, indexer = cur_axis.reindex(new_axis, method)
return self.reindex_indexer(new_axis, indexer, axis=axis)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def form_blocks(data, axes):
# pre-filter out items if we passed it
items = axes[0]
if len(data) < len(items):
extra_items = items - Index(data.keys())
else:
extra_items = []
# put "leftover" items in float bucket, where else?
# generalize?
float_dict = {}
complex_dict = {}
int_dict = {}
bool_dict = {}
object_dict = {}
datetime_dict = {}
for k, v in data.iteritems():
if issubclass(v.dtype.type, np.floating):
float_dict[k] = v
elif issubclass(v.dtype.type, np.complexfloating):
complex_dict[k] = v
elif issubclass(v.dtype.type, np.datetime64):
datetime_dict[k] = v
elif issubclass(v.dtype.type, np.integer):
int_dict[k] = v
elif v.dtype == np.bool_:
bool_dict[k] = v
else:
object_dict[k] = v
blocks = []
if len(float_dict):
float_block = _simple_blockify(float_dict, items, np.float64)
blocks.append(float_block)
if len(complex_dict):
complex_block = _simple_blockify(complex_dict, items, np.complex128)
blocks.append(complex_block)
if len(int_dict):
int_block = _simple_blockify(int_dict, items, np.int64)
blocks.append(int_block)
for k, v in list(datetime_dict.items()):
# hackeroo
if hasattr(v, "tz") and v.tz is not None:
del datetime_dict[k]
object_dict[k] = v.asobject
if len(datetime_dict):
datetime_block = _simple_blockify(datetime_dict, items, np.dtype("M8[ns]"))
blocks.append(datetime_block)
if len(bool_dict):
bool_block = _simple_blockify(bool_dict, items, np.bool_)
blocks.append(bool_block)
if len(object_dict) > 0:
object_block = _simple_blockify(object_dict, items, np.object_)
blocks.append(object_block)
if len(extra_items):
shape = (len(extra_items),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(nan)
na_block = make_block(block_values, extra_items, items, do_integrity_check=True)
blocks.append(na_block)
blocks = _consolidate(blocks, items)
return blocks
|
def form_blocks(data, axes):
# pre-filter out items if we passed it
items = axes[0]
if len(data) < len(items):
extra_items = items - Index(data.keys())
else:
extra_items = []
# put "leftover" items in float bucket, where else?
# generalize?
float_dict = {}
complex_dict = {}
int_dict = {}
bool_dict = {}
object_dict = {}
datetime_dict = {}
for k, v in data.iteritems():
if issubclass(v.dtype.type, np.floating):
float_dict[k] = v
elif issubclass(v.dtype.type, np.complexfloating):
complex_dict[k] = v
elif issubclass(v.dtype.type, np.datetime64):
datetime_dict[k] = v
elif issubclass(v.dtype.type, np.integer):
int_dict[k] = v
elif v.dtype == np.bool_:
bool_dict[k] = v
else:
object_dict[k] = v
blocks = []
if len(float_dict):
float_block = _simple_blockify(float_dict, items, np.float64)
blocks.append(float_block)
if len(complex_dict):
complex_block = _simple_blockify(complex_dict, items, np.complex128)
blocks.append(complex_block)
if len(int_dict):
int_block = _simple_blockify(int_dict, items, np.int64)
blocks.append(int_block)
for k, v in list(datetime_dict.items()):
# hackeroo
if hasattr(v, "tz") and v.tz is not None:
del datetime_dict[k]
object_dict[k] = v.asobject
if len(datetime_dict):
datetime_block = _simple_blockify(datetime_dict, items, np.dtype("M8[ns]"))
blocks.append(datetime_block)
if len(bool_dict):
bool_block = _simple_blockify(bool_dict, items, np.bool_)
blocks.append(bool_block)
if len(object_dict) > 0:
object_block = _simple_blockify(object_dict, items, np.object_)
blocks.append(object_block)
if len(extra_items):
shape = (len(extra_items),) + tuple(len(x) for x in axes[1:])
block_values = np.empty(shape, dtype=float)
block_values.fill(nan)
na_block = make_block(block_values, extra_items, items, do_integrity_check=True)
blocks.append(na_block)
blocks = _consolidate(blocks, items)
return blocks
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def __setitem__(self, key, value):
_, N, K = self.shape
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis, columns=self.minor_axis)
mat = value.values
elif isinstance(value, np.ndarray):
assert value.shape == (N, K)
mat = np.asarray(value)
elif np.isscalar(value):
dtype = _infer_dtype(value)
mat = np.empty((N, K), dtype=dtype)
mat.fill(value)
else:
raise TypeError("Cannot set item of type: %s" % str(type(value)))
mat = mat.reshape((1, N, K))
NDFrame._set_item(self, key, mat)
|
def __setitem__(self, key, value):
_, N, K = self.shape
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis, columns=self.minor_axis)
mat = value.values
elif isinstance(value, np.ndarray):
assert value.shape == (N, K)
mat = np.asarray(value)
elif np.isscalar(value):
dtype = _infer_dtype(value)
mat = np.empty((N, K), dtype=dtype)
mat.fill(value)
mat = mat.reshape((1, N, K))
NDFrame._set_item(self, key, mat)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
new_values = np.empty((length, result_width), dtype=values.dtype)
new_mask = np.zeros((length, result_width), dtype=bool)
new_values = com._maybe_upcast(new_values)
new_values.fill(np.nan)
# is there a simpler / faster way of doing this?
for i in xrange(values.shape[1]):
chunk = new_values[:, i * width : (i + 1) * width]
mask_chunk = new_mask[:, i * width : (i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
new_values = new_values.take(self.unique_groups, axis=0)
return new_values, new_mask
|
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
new_values = np.empty((length, result_width), dtype=values.dtype)
new_mask = np.zeros((length, result_width), dtype=bool)
if issubclass(values.dtype.type, np.integer):
new_values = new_values.astype(float)
new_values.fill(np.nan)
# is there a simpler / faster way of doing this?
for i in xrange(values.shape[1]):
chunk = new_values[:, i * width : (i + 1) * width]
mask_chunk = new_mask[:, i * width : (i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
new_values = new_values.take(self.unique_groups, axis=0)
return new_values, new_mask
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def str_contains(arr, pat, case=True, flags=0, na=np.nan):
"""
Check whether given pattern is contained in each string in the array
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
na : bool, default NaN
Returns
-------
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
f = lambda x: bool(regex.search(x))
return _na_map(f, arr, na)
|
def str_contains(arr, pat, case=True, flags=0):
"""
Check whether given pattern is contained in each string in the array
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
f = lambda x: bool(regex.search(x))
return _na_map(f, arr)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def str_startswith(arr, pat, na=np.nan):
"""
Return boolean array indicating whether each string starts with passed
pattern
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
startswith : array (boolean)
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na)
|
def str_startswith(arr, pat):
"""
Return boolean array indicating whether each string starts with passed
pattern
Parameters
----------
pat : string
Character sequence
Returns
-------
startswith : array (boolean)
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def str_endswith(arr, pat, na=np.nan):
"""
Return boolean array indicating whether each string ends with passed
pattern
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
endswith : array (boolean)
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na)
|
def str_endswith(arr, pat):
"""
Return boolean array indicating whether each string ends with passed
pattern
Parameters
----------
pat : string
Character sequence
Returns
-------
endswith : array (boolean)
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def str_split(arr, pat=None, n=0):
"""
Split each string (a la re.split) in array by given pattern, propagating NA
values
Parameters
----------
pat : string, default None
String or regular expression to split on. If None, splits on whitespace
n : int, default 0 (all)
Returns
-------
split : array
"""
if pat is None:
f = lambda x: x.split()
else:
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
return _na_map(f, arr)
|
def str_split(arr, pat, n=0):
"""
Split each string (a la re.split) in array by given pattern, propagating NA
values
Parameters
----------
pat : string
String or regular expression to split on
n : int, default 0 (all)
Returns
-------
split : array
"""
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
return _na_map(f, arr)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _pat_wrapper(f, flags=False, na=False):
def wrapper1(self, pat):
result = f(self.series, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0):
result = f(self.series, pat, flags=flags)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self.series, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
|
def _pat_wrapper(f, flags=False):
def wrapper1(self, pat):
result = f(self.series, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0):
result = f(self.series, pat, flags=flags)
return self._wrap_result(result)
wrapper = wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def split(self, pat=None, n=0):
result = str_split(self.series, pat, n=n)
return self._wrap_result(result)
|
def split(self, pat, n=0):
result = str_split(self.series, pat, n=n)
return self._wrap_result(result)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def contains(self, pat, case=True, flags=0, na=np.nan):
result = str_contains(self.series, pat, case=case, flags=flags, na=np.nan)
return self._wrap_result(result)
|
def contains(self, pat, case=True, flags=0):
result = str_contains(self.series, pat, case=case, flags=flags)
return self._wrap_result(result)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array matrix representation
Columns are presented in sorted order unless a specific list
of columns is provided.
"""
if columns is None:
columns = self.columns
if len(columns) == 0:
return np.zeros((len(self.index), 0), dtype=float)
return np.array([self.icol(i).values for i in range(len(self.columns))]).T
|
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array matrix representation
Columns are presented in sorted order unless a specific list
of columns is provided.
"""
if columns is None:
columns = self.columns
if len(columns) == 0:
return np.zeros((len(self.index), 0), dtype=float)
return np.array([self[col].values for col in columns]).T
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _first_valid_index(arr):
# argmax scans from left
return notnull(arr).argmax() if len(arr) else 0
|
def _first_valid_index(arr):
# argmax scans from left
return notnull(arr).argmax()
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def ewmstd(arg, com=None, span=None, min_periods=0, bias=False, time_rule=None):
result = ewmvar(
arg, com=com, span=span, time_rule=time_rule, min_periods=min_periods, bias=bias
)
return _zsqrt(result)
|
def ewmstd(arg, com=None, span=None, min_periods=0, bias=False, time_rule=None):
result = ewmvar(
arg, com=com, span=span, time_rule=time_rule, min_periods=min_periods, bias=bias
)
return np.sqrt(result)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def ewmcorr(arg1, arg2, com=None, span=None, min_periods=0, freq=None, time_rule=None):
X, Y = _prep_binary(arg1, arg2)
X = _conv_timerule(X, freq, time_rule)
Y = _conv_timerule(Y, freq, time_rule)
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
var = lambda x: ewmvar(x, com=com, span=span, min_periods=min_periods, bias=True)
return (mean(X * Y) - mean(X) * mean(Y)) / _zsqrt(var(X) * var(Y))
|
def ewmcorr(arg1, arg2, com=None, span=None, min_periods=0, freq=None, time_rule=None):
X, Y = _prep_binary(arg1, arg2)
X = _conv_timerule(X, freq, time_rule)
Y = _conv_timerule(Y, freq, time_rule)
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
var = lambda x: ewmvar(x, com=com, span=span, min_periods=min_periods, bias=True)
return (mean(X * Y) - mean(X) * mean(Y)) / np.sqrt(var(X) * var(Y))
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def f(arg, min_periods=1, freq=None, time_rule=None, **kwargs):
window = len(arg)
def call_cython(arg, window, minp, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, time_rule=time_rule, **kwargs
)
|
def f(arg, window, min_periods=None, freq=None, time_rule=None, **kwargs):
def call_cython(arg, window, minp, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, time_rule=time_rule, **kwargs
)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def summary(self):
"""
This returns the formatted result of the OLS computation
"""
template = """
%(bannerTop)s
Formula: Y ~ %(formula)s
Number of Observations: %(nobs)d
Number of Degrees of Freedom: %(df)d
R-squared: %(r2)10.4f
Adj R-squared: %(r2_adj)10.4f
Rmse: %(rmse)10.4f
F-stat %(f_stat_shape)s: %(f_stat)10.4f, p-value: %(f_stat_p_value)10.4f
Degrees of Freedom: model %(df_model)d, resid %(df_resid)d
%(bannerCoef)s
%(coef_table)s
%(bannerEnd)s
"""
coef_table = self._coef_table
results = self._results
f_stat = results["f_stat"]
bracketed = ["<%s>" % str(c) for c in results["beta"].index]
formula = StringIO()
formula.write(bracketed[0])
tot = len(bracketed[0])
line = 1
for coef in bracketed[1:]:
tot = tot + len(coef) + 3
if tot // (68 * line):
formula.write("\n" + " " * 12)
line += 1
formula.write(" + " + coef)
params = {
"bannerTop": scom.banner("Summary of Regression Analysis"),
"bannerCoef": scom.banner("Summary of Estimated Coefficients"),
"bannerEnd": scom.banner("End of Summary"),
"formula": formula.getvalue(),
"r2": results["r2"],
"r2_adj": results["r2_adj"],
"nobs": results["nobs"],
"df": results["df"],
"df_model": results["df_model"],
"df_resid": results["df_resid"],
"coef_table": coef_table,
"rmse": results["rmse"],
"f_stat": f_stat["f-stat"],
"f_stat_shape": "(%d, %d)" % (f_stat["DF X"], f_stat["DF Resid"]),
"f_stat_p_value": f_stat["p-value"],
}
return template % params
|
def summary(self):
"""
This returns the formatted result of the OLS computation
"""
template = """
%(bannerTop)s
Formula: Y ~ %(formula)s
Number of Observations: %(nobs)d
Number of Degrees of Freedom: %(df)d
R-squared: %(r2)10.4f
Adj R-squared: %(r2_adj)10.4f
Rmse: %(rmse)10.4f
F-stat %(f_stat_shape)s: %(f_stat)10.4f, p-value: %(f_stat_p_value)10.4f
Degrees of Freedom: model %(df_model)d, resid %(df_resid)d
%(bannerCoef)s
%(coef_table)s
%(bannerEnd)s
"""
coef_table = self._coef_table
results = self._results
f_stat = results["f_stat"]
bracketed = ["<%s>" % c for c in results["beta"].index]
formula = StringIO()
formula.write(bracketed[0])
tot = len(bracketed[0])
line = 1
for coef in bracketed[1:]:
tot = tot + len(coef) + 3
if tot // (68 * line):
formula.write("\n" + " " * 12)
line += 1
formula.write(" + " + coef)
params = {
"bannerTop": scom.banner("Summary of Regression Analysis"),
"bannerCoef": scom.banner("Summary of Estimated Coefficients"),
"bannerEnd": scom.banner("End of Summary"),
"formula": formula.getvalue(),
"r2": results["r2"],
"r2_adj": results["r2_adj"],
"nobs": results["nobs"],
"df": results["df"],
"df_model": results["df_model"],
"df_resid": results["df_resid"],
"coef_table": coef_table,
"rmse": results["rmse"],
"f_stat": f_stat["f-stat"],
"f_stat_shape": "(%d, %d)" % (f_stat["DF X"], f_stat["DF Resid"]),
"f_stat_p_value": f_stat["p-value"],
}
return template % params
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _make_plot(self):
# this is slightly deceptive
if self.use_index and self._use_dynamic_x():
data = self._maybe_convert_index(self.data)
self._make_ts_plot(data, **self.kwds)
else:
lines = []
labels = []
x = self._get_xticks(convert_period=True)
has_colors, colors = self._get_colors()
def _maybe_add_color(kwargs, style, i):
if (
not has_colors
and (style is None or re.match("[a-z]+", style) is None)
and "color" not in kwargs
):
kwargs["color"] = colors[i % len(colors)]
plotf = self._get_plot_function()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
_maybe_add_color(kwds, style, i)
label = _stringify(label)
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
kwds["label"] = label
if style is None:
args = (ax, x, y)
else:
args = (ax, x, y, style)
newline = plotf(*args, **kwds)[0]
lines.append(newline)
leg_label = label
if self.mark_right and self.on_right(i):
leg_label += " (right)"
labels.append(leg_label)
ax.grid(self.grid)
self._make_legend(lines, labels)
|
def _make_plot(self):
# this is slightly deceptive
if self.use_index and self._use_dynamic_x():
data = self._maybe_convert_index(self.data)
self._make_ts_plot(data, **self.kwds)
else:
lines = []
labels = []
x = self._get_xticks(convert_period=True)
has_colors, colors = self._get_colors()
def _maybe_add_color(kwargs, style, i):
if not has_colors and (style is None or re.match("[a-z]+", style) is None):
kwargs["color"] = colors[i % len(colors)]
plotf = self._get_plot_function()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
_maybe_add_color(kwds, style, i)
label = _stringify(label)
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
kwds["label"] = label
if style is None:
args = (ax, x, y)
else:
args = (ax, x, y, style)
newline = plotf(*args, **kwds)[0]
lines.append(newline)
leg_label = label
if self.mark_right and self.on_right(i):
leg_label += " (right)"
labels.append(leg_label)
ax.grid(self.grid)
self._make_legend(lines, labels)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _maybe_add_color(kwargs, style, i):
if (
not has_colors
and (style is None or re.match("[a-z]+", style) is None)
and "color" not in kwargs
):
kwargs["color"] = colors[i % len(colors)]
|
def _maybe_add_color(kwargs, style, i):
if not has_colors and (style is None or re.match("[a-z]+", style) is None):
kwargs["color"] = colors[i % len(colors)]
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def plot_frame(
frame=None,
x=None,
y=None,
subplots=False,
sharex=True,
sharey=False,
use_index=True,
figsize=None,
grid=False,
legend=True,
rot=None,
ax=None,
style=None,
title=None,
xlim=None,
ylim=None,
logy=False,
xticks=None,
yticks=None,
kind="line",
sort_columns=False,
fontsize=None,
secondary_y=False,
**kwds,
):
"""
Make line or bar plot of DataFrame's series with the index on the x-axis
using matplotlib / pylab.
Parameters
----------
x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another
subplots : boolean, default False
Make separate subplots for each time series
sharex : boolean, default True
In case subplots=True, share x axis
sharey : boolean, default False
In case subplots=True, share y axis
use_index : boolean, default True
Use index as ticks for x axis
stacked : boolean, default False
If True, create stacked bar plot. Only valid for DataFrame input
sort_columns: boolean, default False
Sort column names to determine plot ordering
title : string
Title to use for the plot
grid : boolean, default True
Axis grid lines
legend : boolean, default True
Place legend on axis subplots
ax : matplotlib axis object, default None
style : list or dict
matplotlib line style per column
kind : {'line', 'bar', 'barh'}
bar : vertical bar plot
barh : horizontal bar plot
logy : boolean, default False
For line plots, use log scaling on y axis
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If dict then can select which columns to plot on secondary y-axis
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
ax_or_axes : matplotlib.AxesSubplot or list of them
"""
kind = _get_standard_kind(kind.lower().strip())
if kind == "line":
klass = LinePlot
elif kind in ("bar", "barh"):
klass = BarPlot
elif kind == "kde":
klass = KdePlot
else:
raise ValueError("Invalid chart type given %s" % kind)
if x is not None:
if com.is_integer(x) and not frame.columns.holds_integer():
x = frame.columns[x]
frame = frame.set_index(x)
if y is not None:
if com.is_integer(y) and not frame.columns.holds_integer():
y = frame.columns[y]
return plot_series(
frame[y],
label=y,
kind=kind,
use_index=True,
rot=rot,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
ax=ax,
style=style,
grid=grid,
logy=logy,
secondary_y=secondary_y,
**kwds,
)
plot_obj = klass(
frame,
kind=kind,
subplots=subplots,
rot=rot,
legend=legend,
ax=ax,
style=style,
fontsize=fontsize,
use_index=use_index,
sharex=sharex,
sharey=sharey,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
title=title,
grid=grid,
figsize=figsize,
logy=logy,
sort_columns=sort_columns,
secondary_y=secondary_y,
**kwds,
)
plot_obj.generate()
plot_obj.draw()
if subplots:
return plot_obj.axes
else:
return plot_obj.axes[0]
|
def plot_frame(
frame=None,
x=None,
y=None,
subplots=False,
sharex=True,
sharey=False,
use_index=True,
figsize=None,
grid=False,
legend=True,
rot=None,
ax=None,
style=None,
title=None,
xlim=None,
ylim=None,
logy=False,
xticks=None,
yticks=None,
kind="line",
sort_columns=False,
fontsize=None,
secondary_y=False,
**kwds,
):
"""
Make line or bar plot of DataFrame's series with the index on the x-axis
using matplotlib / pylab.
Parameters
----------
x : int or str, default None
y : int or str, default None
Allows plotting of one column versus another
subplots : boolean, default False
Make separate subplots for each time series
sharex : boolean, default True
In case subplots=True, share x axis
sharey : boolean, default False
In case subplots=True, share y axis
use_index : boolean, default True
Use index as ticks for x axis
stacked : boolean, default False
If True, create stacked bar plot. Only valid for DataFrame input
sort_columns: boolean, default False
Sort column names to determine plot ordering
title : string
Title to use for the plot
grid : boolean, default True
Axis grid lines
legend : boolean, default True
Place legend on axis subplots
ax : matplotlib axis object, default None
style : list or dict
matplotlib line style per column
kind : {'line', 'bar', 'barh'}
bar : vertical bar plot
barh : horizontal bar plot
logy : boolean, default False
For line plots, use log scaling on y axis
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If dict then can select which columns to plot on secondary y-axis
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
ax_or_axes : matplotlib.AxesSubplot or list of them
"""
kind = _get_standard_kind(kind.lower().strip())
if kind == "line":
klass = LinePlot
elif kind in ("bar", "barh"):
klass = BarPlot
elif kind == "kde":
klass = KdePlot
else:
raise ValueError("Invalid chart type given %s" % kind)
if isinstance(x, int):
x = frame.columns[x]
if isinstance(y, int):
y = frame.columns[y]
if x is not None:
frame = frame.set_index(x).sort_index()
if y is not None:
return plot_series(
frame[y],
label=y,
kind=kind,
use_index=True,
rot=rot,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
ax=ax,
style=style,
grid=grid,
logy=logy,
secondary_y=secondary_y,
**kwds,
)
plot_obj = klass(
frame,
kind=kind,
subplots=subplots,
rot=rot,
legend=legend,
ax=ax,
style=style,
fontsize=fontsize,
use_index=use_index,
sharex=sharex,
sharey=sharey,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
title=title,
grid=grid,
figsize=figsize,
logy=logy,
sort_columns=sort_columns,
secondary_y=secondary_y,
**kwds,
)
plot_obj.generate()
plot_obj.draw()
if subplots:
return plot_obj.axes
else:
return plot_obj.axes[0]
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def __new__(
cls,
data=None,
freq=None,
start=None,
end=None,
periods=None,
copy=False,
name=None,
tz=None,
verify_integrity=True,
normalize=False,
**kwds,
):
dayfirst = kwds.pop("dayfirst", None)
yearfirst = kwds.pop("yearfirst", None)
warn = False
if "offset" in kwds and kwds["offset"]:
freq = kwds["offset"]
warn = True
freq_infer = False
if not isinstance(freq, DateOffset):
if freq != "infer":
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if warn:
import warnings
warnings.warn(
"parameter 'offset' is deprecated, please use 'freq' instead", FutureWarning
)
offset = freq
if periods is not None:
if com.is_float(periods):
periods = int(periods)
elif not com.is_integer(periods):
raise ValueError("Periods must be a number, got %s" % str(periods))
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is supplied")
if data is None:
return cls._generate(
start, end, periods, name, offset, tz=tz, normalize=normalize
)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"DatetimeIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype="O")
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(
data, offset, dayfirst=dayfirst, yearfirst=yearfirst
)
else:
data = tools.to_datetime(data)
data.offset = offset
if isinstance(data, DatetimeIndex):
if name is not None:
data.name = name
return data
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset, dayfirst=dayfirst, yearfirst=yearfirst)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
subarr = data.values
if offset is None:
offset = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = lib.cast_to_nanoseconds(data)
else:
subarr = data
elif data.dtype == _INT64_DTYPE:
if isinstance(data, Int64Index):
raise TypeError("cannot convert Int64Index->DatetimeIndex")
if copy:
subarr = np.asarray(data, dtype=_NS_DTYPE)
else:
subarr = data.view(_NS_DTYPE)
else:
try:
subarr = tools.to_datetime(data)
except ValueError:
# tz aware
subarr = tools.to_datetime(data, utc=True)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError("Unable to convert %s to datetime dtype" % str(data))
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tools._maybe_get_tz(tz)
if not isinstance(data, DatetimeIndex) or getattr(data, "tz", None) is None:
# Convert tz-naive to UTC
ints = subarr.view("i8")
subarr = lib.tz_localize_to_utc(ints, tz)
subarr = subarr.view(_NS_DTYPE)
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError("Dates do not conform to passed frequency")
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
|
def __new__(
cls,
data=None,
freq=None,
start=None,
end=None,
periods=None,
copy=False,
name=None,
tz=None,
verify_integrity=True,
normalize=False,
**kwds,
):
dayfirst = kwds.pop("dayfirst", None)
yearfirst = kwds.pop("yearfirst", None)
warn = False
if "offset" in kwds and kwds["offset"]:
freq = kwds["offset"]
warn = True
freq_infer = False
if not isinstance(freq, DateOffset):
if freq != "infer":
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if warn:
import warnings
warnings.warn(
"parameter 'offset' is deprecated, please use 'freq' instead", FutureWarning
)
offset = freq
if periods is not None:
if com.is_float(periods):
periods = int(periods)
elif not com.is_integer(periods):
raise ValueError("Periods must be a number, got %s" % str(periods))
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is supplied")
if data is None:
return cls._generate(
start, end, periods, name, offset, tz=tz, normalize=normalize
)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"DatetimeIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype="O")
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(
data, offset, dayfirst=dayfirst, yearfirst=yearfirst
)
else:
data = tools.to_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset, dayfirst=dayfirst, yearfirst=yearfirst)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
subarr = data.values
if offset is None:
offset = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = lib.cast_to_nanoseconds(data)
else:
subarr = data
elif data.dtype == _INT64_DTYPE:
if copy:
subarr = np.asarray(data, dtype=_NS_DTYPE)
else:
subarr = data.view(_NS_DTYPE)
else:
try:
subarr = tools.to_datetime(data)
except ValueError:
# tz aware
subarr = tools.to_datetime(data, utc=True)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError("Unable to convert %s to datetime dtype" % str(data))
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tools._maybe_get_tz(tz)
# Convert local to UTC
ints = subarr.view("i8")
subarr = lib.tz_localize_to_utc(ints, tz)
subarr = subarr.view(_NS_DTYPE)
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError("Dates do not conform to passed frequency")
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _generate(cls, start, end, periods, name, offset, tz=None, normalize=False):
if com._count_not_none(start, end, periods) < 2:
raise ValueError("Must specify two of start, end, or periods")
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
inferred_tz = tools._infer_tzinfo(start, end)
if tz is not None and inferred_tz is not None:
assert inferred_tz == tz
elif inferred_tz is not None:
tz = inferred_tz
tz = tools._maybe_get_tz(tz)
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, "delta") and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz)
if end is not None and end.tz is None:
end = end.tz_localize(tz)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz)
if (
offset._should_cache()
and not (offset._normalize_cache and not _normalized)
and _naive_in_cache_range(start, end)
):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if (
offset._should_cache()
and not (offset._normalize_cache and not _normalized)
and _naive_in_cache_range(start, end)
):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, "tz", None) is None:
index = lib.tz_localize_to_utc(com._ensure_int64(index), tz)
index = index.view(_NS_DTYPE)
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
|
def _generate(cls, start, end, periods, name, offset, tz=None, normalize=False):
if com._count_not_none(start, end, periods) < 2:
raise ValueError("Must specify two of start, end, or periods")
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
inferred_tz = tools._infer_tzinfo(start, end)
if tz is not None and inferred_tz is not None:
assert inferred_tz == tz
elif inferred_tz is not None:
tz = inferred_tz
tz = tools._maybe_get_tz(tz)
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, "delta"):
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz)
if end is not None and end.tz is None:
end = end.tz_localize(tz)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz)
if (
offset._should_cache()
and not (offset._normalize_cache and not _normalized)
and _naive_in_cache_range(start, end)
):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if (
offset._should_cache()
and not (offset._normalize_cache and not _normalized)
and _naive_in_cache_range(start, end)
):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, "tz", None) is None:
index = lib.tz_localize_to_utc(com._ensure_int64(index), tz)
index = index.view(_NS_DTYPE)
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
to_concat = [x.values if isinstance(x, Index) else x for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
|
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
from pandas.core.index import _ensure_compat_concat
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = _ensure_compat_concat(to_concat)
to_concat = [x.values if isinstance(x, Index) else x for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not hasattr(other, "inferred_type") or other.inferred_type != "datetime64":
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
if self.tz is not None:
if other.tz is None:
return False
same_zone = lib.get_timezone(self.tz) == lib.get_timezone(other.tz)
else:
if other.tz is not None:
return False
same_zone = True
return same_zone and np.array_equal(self.asi8, other.asi8)
|
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not hasattr(other, "inferred_type") or other.inferred_type != "datetime64":
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
if self.tz is not None:
if other.tz is None:
return False
same_zone = self.tz.zone == other.tz.zone
else:
if other.tz is not None:
return False
same_zone = True
return same_zone and np.array_equal(self.asi8, other.asi8)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if type(dt) == date:
dt = datetime(dt.year, dt.month, dt.day)
if not self.onOffset(dt):
dt = dt - self.__class__(1, **self.kwds)
return dt
|
def rollback(self, someDate):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(someDate):
someDate = someDate - self.__class__(1, **self.kwds)
return someDate
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if type(dt) == date:
dt = datetime(dt.year, dt.month, dt.day)
if not self.onOffset(dt):
dt = dt + self.__class__(1, **self.kwds)
return dt
|
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
dt = dt + self.__class__(1, **self.kwds)
return dt
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def onOffset(self, dt):
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = (dt + self) - self
return a == b
|
def onOffset(self, dt):
if type(self) == DateOffset:
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = (dt + self) - self
return a == b
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def apply(self, other):
if type(other) == date:
other = datetime(other.year, other.month, other.day)
if isinstance(other, (datetime, timedelta)):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
else: # pragma: no cover
raise TypeError("Unhandled type: %s" % type(other))
|
def apply(self, other):
if isinstance(other, (datetime, timedelta)):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def to_timestamp(self, freq=None, how="S"):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
freq : string or DateOffset, default frequency of PeriodIndex
Target frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
if freq is None:
base, mult = _gfc(self.freq)
how = _validate_end_alias(how)
if how == "S":
base = _freq_mod.get_to_timestamp_base(base)
freq = _freq_mod._get_freq_str(base)
new_val = self.asfreq(freq, how)
else:
new_val = self
else:
base, mult = _gfc(freq)
new_val = self.asfreq(freq, how)
dt64 = plib.period_ordinal_to_dt64(new_val.ordinal, base)
return Timestamp(dt64)
|
def to_timestamp(self, freq=None, how="S"):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
freq : string or DateOffset, default frequency of PeriodIndex
Target frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
if freq is None:
base, mult = _gfc(self.freq)
new_val = self
else:
base, mult = _gfc(freq)
new_val = self.asfreq(freq, how)
dt64 = plib.period_ordinal_to_dt64(new_val.ordinal, base)
ts_freq = _period_rule_to_timestamp_rule(new_val.freq, how=how)
return Timestamp(dt64, offset=to_offset(ts_freq))
|
https://github.com/pandas-dev/pandas/issues/1943
|
In [25]: df = pandas.DataFrame(np.random.randn(4,4), columns=list('AABC'))
In [26]: df
Out[26]:
A A B C
0 -0.174905 0.332522 1.134984 -0.201270
1 1.730445 0.382556 -0.607761 1.221815
2 0.513049 0.196231 -1.746732 -0.252282
3 -0.297577 -1.000121 -0.090442 -2.129467
In [27]: df.ix[:,['A', 'B']]
Out[27]:
A A B
0 -0.174905 0.332522 1.134984
1 1.730445 0.382556 -0.607761
2 0.513049 0.196231 -1.746732
3 -0.297577 -1.000121 -0.090442
In [28]: df[['A', 'B']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
In [29]: df[['B', 'C']]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
...
Exception: Reindexing only valid with uniquely valued Index objects
|
Exception
|
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
rects = []
labels = []
ax = self._get_ax(0) # self.axes[0]
bar_f = self.bar_f
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data()):
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds["color"] = colors[i % len(colors)]
start = 0
if self.log:
start = 1
if any(y < 1):
# GH3254
start = 0 if mpl.__version__ == "1.2.1" else None
if self.subplots:
ax = self._get_ax(i) # self.axes[i]
rect = bar_f(ax, self.ax_pos, y, self.bar_width, start=start, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
rect = bar_f(
ax, self.ax_pos, y, self.bar_width, start=start, label=label, **kwds
)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
rect = bar_f(
ax,
self.ax_pos + i * 0.75 / K,
y,
0.75 / K,
start=start,
label=label,
**kwds,
)
rects.append(rect)
labels.append(label)
if self.legend and not self.subplots:
patches = [r[0] for r in rects]
self.axes[0].legend(patches, labels, loc="best", title=self.legend_title)
|
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
rects = []
labels = []
ax = self._get_ax(0) # self.axes[0]
bar_f = self.bar_f
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data()):
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds["color"] = colors[i % len(colors)]
# default, GH3254
# I tried, I really did.
start = 0 if mpl.__version__ == "1.2.1" else None
if self.subplots:
ax = self._get_ax(i) # self.axes[i]
rect = bar_f(ax, self.ax_pos, y, self.bar_width, start=start, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
rect = bar_f(
ax, self.ax_pos, y, self.bar_width, start=start, label=label, **kwds
)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
rect = bar_f(
ax,
self.ax_pos + i * 0.75 / K,
y,
0.75 / K,
start=start,
label=label,
**kwds,
)
rects.append(rect)
labels.append(label)
if self.legend and not self.subplots:
patches = [r[0] for r in rects]
self.axes[0].legend(patches, labels, loc="best", title=self.legend_title)
|
https://github.com/pandas-dev/pandas/issues/3309
|
23:57 ~/code/pandas (master)$ nosetests pandas/tests/test_graphics.py
........F...............................
======================================================================
FAIL: test_bar_log (pandas.tests.test_graphics.TestDataFramePlots)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/wesm/code/pandas/pandas/tests/test_graphics.py", line 414, in test_bar_log
self.assertEqual(ax.yaxis.get_ticklocs()[0],1.0)
AssertionError: 0.10000000000000001 != 1.0
----------------------------------------------------------------------
Ran 40 tests in 76.852s
FAILED (failures=1)
|
AssertionError
|
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
index = index.to_timestamp().order()
x = index._mpl_repr()
elif index.is_numeric() or is_datetype:
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index.order()._mpl_repr()
else:
self._need_to_set_index = True
x = range(len(index))
else:
x = range(len(index))
return x
|
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
index = index.to_timestamp()
x = index._mpl_repr()
elif index.is_numeric() or is_datetype:
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
else:
self._need_to_set_index = True
x = range(len(index))
else:
x = range(len(index))
return x
|
https://github.com/pandas-dev/pandas/issues/2609
|
Exception in Tkinter callback
Traceback (most recent call last):
File "/usr/lib/python2.7/lib-tk/Tkinter.py", line 1413, in __call__
return self.func(*args)
File "/usr/lib/python2.7/lib-tk/Tkinter.py", line 498, in callit
func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 254, in idle_draw
self.draw()
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 239, in draw
FigureCanvasAgg.draw(self)
File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_agg.py", line 421, in draw
self.figure.draw(self.renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/figure.py", line 898, in draw
func(*args)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 1997, in draw
a.draw(renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/artist.py", line 55, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib/pymodules/python2.7/matplotlib/axis.py", line 1041, in draw
ticks_to_draw = self._update_ticks(renderer)
File "/usr/lib/pymodules/python2.7/matplotlib/axis.py", line 931, in _update_ticks
tick_tups = [ t for t in self.iter_ticks()]
File "/usr/lib/pymodules/python2.7/matplotlib/axis.py", line 878, in iter_ticks
majorLocs = self.major.locator()
File "/usr/lib/pymodules/python2.7/matplotlib/dates.py", line 750, in __call__
return self._locator()
File "/usr/lib/pymodules/python2.7/pandas/tseries/converter.py", line 317, in __call__
(estimate, dmin, dmax, self.MAXTICKS * 2))
RuntimeError: MillisecondLocator estimated to generate 5270400 ticks from 2012-08-01 00:00:00+00:00 to 2012-10-01 00:00:00+00:00: exceeds Locator.MAXTICKS* 2 (2000)
|
RuntimeError
|
def _nanmin(values, axis=None, skipna=True):
mask = isnull(values)
dtype = values.dtype
if skipna and not issubclass(dtype.type, (np.integer, np.datetime64)):
values = values.copy()
np.putmask(values, mask, np.inf)
if issubclass(dtype.type, np.datetime64):
values = values.view(np.int64)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.min, apply_ax, values)
else:
result = __builtin__.min(values)
else:
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
result = com.ensure_float(values.sum(axis))
result.fill(np.nan)
else:
result = values.min(axis)
if issubclass(dtype.type, np.datetime64):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
return _maybe_null_out(result, axis, mask)
|
def _nanmin(values, axis=None, skipna=True):
mask = isnull(values)
dtype = values.dtype
if skipna and not issubclass(dtype.type, (np.integer, np.datetime64)):
values = values.copy()
np.putmask(values, mask, np.inf)
if issubclass(dtype.type, np.datetime64):
values = values.view(np.int64)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.min, apply_ax, values)
else:
result = __builtin__.min(values)
else:
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
result = values.sum(axis)
result.fill(np.nan)
else:
result = values.min(axis)
if issubclass(dtype.type, np.datetime64):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
return _maybe_null_out(result, axis, mask)
|
https://github.com/pandas-dev/pandas/issues/2610
|
In [47]: all
Out[47]:
<class 'pandas.core.frame.DataFrame'>
Int64Index: 974757 entries, 0 to 974756
Data columns:
eid 974757 non-null values
number 974757 non-null values
a 972510 non-null values
b 974757 non-null values
c 929268 non-null values
d 922700 non-null values
e 974757 non-null values
dtypes: int64(1), object(6)
In [48]: subset = all[all["eid"].isin(other["eid"])]
In [49]: subset
Out[49]:
Int64Index([], dtype=int64)
Empty DataFrame
In [50]: subset.describe()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-50-ff735ef04a17> in <module>()
----> 1 business_authors.describe()
C:\portabel\Python27\lib\site-packages\pandas\core\frame.pyc in describe(self, percentile_width)
4539 series = self[column]
4540 destat.append([series.count(), series.mean(), series.std(),
-> 4541 series.min(), series.quantile(lb), series.median(),
4542 series.quantile(ub), series.max()])
4543
C:\portabel\Python27\lib\site-packages\pandas\core\series.pyc in min(self, axis, out, skipna, level)
1320 if level is not None:
1321 return self._agg_by_level('min', level=level, skipna=skipna)
-> 1322 return nanops.nanmin(self.values, skipna=skipna)
1323
1324 @Substitution(name='maximum', shortname='max',
C:\portabel\Python27\lib\site-packages\pandas\core\nanops.pyc in f(values, axis, skipna, **kwds)
46 result = alt(values, axis=axis, skipna=skipna, **kwds)
47 except Exception:
---> 48 result = alt(values, axis=axis, skipna=skipna, **kwds)
49
50 return result
C:\portabel\Python27\lib\site-packages\pandas\core\nanops.pyc in _nanmin(values, axis, skipna)
179 or values.size == 0):
180 result = values.sum(axis)
--> 181 result.fill(np.nan)
182 else:
183 result = values.min(axis)
ValueError: cannot convert float NaN to integer
In[51]: all["eid"].isin(other["eid"])
Out[51]:
0 False
1 False
2 False
3 False
4 False
...
974752 False
974753 False
974754 False
974755 False
974756 False
Name: eid, Length: 974757
|
ValueError
|
def _nanmax(values, axis=None, skipna=True):
mask = isnull(values)
dtype = values.dtype
if skipna and not issubclass(dtype.type, (np.integer, np.datetime64)):
values = values.copy()
np.putmask(values, mask, -np.inf)
if issubclass(dtype.type, np.datetime64):
values = values.view(np.int64)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.max, apply_ax, values)
else:
result = __builtin__.max(values)
else:
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
result = com.ensure_float(values.sum(axis))
result.fill(np.nan)
else:
result = values.max(axis)
if issubclass(dtype.type, np.datetime64):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
return _maybe_null_out(result, axis, mask)
|
def _nanmax(values, axis=None, skipna=True):
mask = isnull(values)
dtype = values.dtype
if skipna and not issubclass(dtype.type, (np.integer, np.datetime64)):
values = values.copy()
np.putmask(values, mask, -np.inf)
if issubclass(dtype.type, np.datetime64):
values = values.view(np.int64)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.max, apply_ax, values)
else:
result = __builtin__.max(values)
else:
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
result = values.sum(axis)
result.fill(np.nan)
else:
result = values.max(axis)
if issubclass(dtype.type, np.datetime64):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
return _maybe_null_out(result, axis, mask)
|
https://github.com/pandas-dev/pandas/issues/2610
|
In [47]: all
Out[47]:
<class 'pandas.core.frame.DataFrame'>
Int64Index: 974757 entries, 0 to 974756
Data columns:
eid 974757 non-null values
number 974757 non-null values
a 972510 non-null values
b 974757 non-null values
c 929268 non-null values
d 922700 non-null values
e 974757 non-null values
dtypes: int64(1), object(6)
In [48]: subset = all[all["eid"].isin(other["eid"])]
In [49]: subset
Out[49]:
Int64Index([], dtype=int64)
Empty DataFrame
In [50]: subset.describe()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-50-ff735ef04a17> in <module>()
----> 1 business_authors.describe()
C:\portabel\Python27\lib\site-packages\pandas\core\frame.pyc in describe(self, percentile_width)
4539 series = self[column]
4540 destat.append([series.count(), series.mean(), series.std(),
-> 4541 series.min(), series.quantile(lb), series.median(),
4542 series.quantile(ub), series.max()])
4543
C:\portabel\Python27\lib\site-packages\pandas\core\series.pyc in min(self, axis, out, skipna, level)
1320 if level is not None:
1321 return self._agg_by_level('min', level=level, skipna=skipna)
-> 1322 return nanops.nanmin(self.values, skipna=skipna)
1323
1324 @Substitution(name='maximum', shortname='max',
C:\portabel\Python27\lib\site-packages\pandas\core\nanops.pyc in f(values, axis, skipna, **kwds)
46 result = alt(values, axis=axis, skipna=skipna, **kwds)
47 except Exception:
---> 48 result = alt(values, axis=axis, skipna=skipna, **kwds)
49
50 return result
C:\portabel\Python27\lib\site-packages\pandas\core\nanops.pyc in _nanmin(values, axis, skipna)
179 or values.size == 0):
180 result = values.sum(axis)
--> 181 result.fill(np.nan)
182 else:
183 result = values.min(axis)
ValueError: cannot convert float NaN to integer
In[51]: all["eid"].isin(other["eid"])
Out[51]:
0 False
1 False
2 False
3 False
4 False
...
974752 False
974753 False
974754 False
974755 False
974756 False
Name: eid, Length: 974757
|
ValueError
|
def format(self, name=False, formatter=None):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(str(self.name) if self.name is not None else "")
return header + ["%s" % Period(x, freq=self.freq) for x in self]
|
def format(self, name=False):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(str(self.name) if self.name is not None else "")
return header + ["%s" % Period(x, freq=self.freq) for x in self]
|
https://github.com/pandas-dev/pandas/issues/2549
|
In [1]: import numpy as np
In [2]: import pandas as pd
In [3]: index = pd.PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
In [4]: frame = pd.DataFrame(np.random.randn(3,4),index=index)
In [5]: frame.to_string()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-b6219037419a> in <module>()
----> 1 frame.to_string()
/mnt/home/jreback/pandas/pandas/core/frame.pyc in to_string(self, buf, columns, col_space, colSpace, header, index, na_rep, formatters, float_format, sparsify, nanRep, index_names, justify, force_unicode, line_width)
1501 header=header, index=index,
1502 line_width=line_width)
-> 1503 formatter.to_string()
1504
1505 if buf is None:
/mnt/home/jreback/pandas/pandas/core/format.pyc in to_string(self, force_unicode)
295 text = info_line
296 else:
--> 297 strcols = self._to_str_columns()
298 if self.line_width is None:
299 text = adjoin(1, *strcols)
/mnt/home/jreback/pandas/pandas/core/format.pyc in _to_str_columns(self)
240
241 # may include levels names also
--> 242 str_index = self._get_formatted_index()
243 str_columns = self._get_formatted_column_labels()
244
/mnt/home/jreback/pandas/pandas/core/format.pyc in _get_formatted_index(self)
444 formatter=fmt)
445 else:
--> 446 fmt_index = [index.format(name=show_index_names, formatter=fmt)]
447
448 adjoined = adjoin(1, *fmt_index).split('\n')
TypeError: format() got an unexpected keyword argument 'formatter'
|
TypeError
|
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array(
[(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]
)
if fig == None:
fig = plt.figure()
x = range(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
|
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib
import matplotlib.pyplot as plt
data = series.values
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array(
[(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]
)
if fig == None:
fig = plt.figure()
x = range(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
|
https://github.com/pandas-dev/pandas/issues/2331
|
======================================================================
FAIL: test_quoting (pandas.io.tests.test_parsers.TestParsers)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/jtaylor/tmp/pandas-0.9.1/build/lib.linux-x86_64-3.3/pandas/io/tests/test_parsers.py", line 528, in test_quoting
sep='\t')
AssertionError: Exception not raised by read_table
======================================================================
FAIL: test_cant_compare_tz_naive_w_aware (pandas.tseries.tests.test_timeseries.TestTimestamp)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/jtaylor/tmp/pandas-0.9.1/build/lib.linux-x86_64-3.3/pandas/tseries/tests/test_timeseries.py", line 2349, in test_cant_compare_tz_naive_w_aware
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
AssertionError: Exception not raised by __eq__
----------------------------------------------------------------------
======================================================================
FAIL: test_more_flexible_frame_multi_function (__main__.TestGroupBy)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/jtaylor/tmp/pandas-0.9.1/build/lib.linux-x86_64-3.3/pandas/tests/test_groupby.py", line 1909, in test_more_flexible_frame_multi_function
assert_frame_equal(result, expected)
File "/home/jtaylor/tmp/pandas-0.9.1/build/lib.linux-x86_64-3.3/pandas/util/testing.py", line 167, in assert_frame_equal
assert(left.columns.equals(right.columns))
AssertionError
----------------------------------------------------------------------
======================================================================
ERROR: test_yahoo (pandas.io.tests.test_yahoo.TestYahoo)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/tmp/pandas/build/lib.linux-x86_64-3.3/pandas/io/tests/test_yahoo.py", line 25, in test_yahoo
pd.DataReader("F", 'yahoo', start, end)['Close'][-1],
File "/tmp/pandas/build/lib.linux-x86_64-3.3/pandas/io/data.py", line 58, in DataReader
retry_count=retry_count, pause=pause)
File "/tmp/pandas/build/lib.linux-x86_64-3.3/pandas/io/data.py", line 149, in get_data_yahoo
parse_dates=True)[::-1]
File "/tmp/pandas/build/lib.linux-x86_64-3.3/pandas/io/parsers.py", line 364, in parser_f
return _read(filepath_or_buffer, kwds)
File "/tmp/pandas/build/lib.linux-x86_64-3.3/pandas/io/parsers.py", line 195, in _read
return parser.read()
File "/tmp/pandas/build/lib.linux-x86_64-3.3/pandas/io/parsers.py", line 592, in read
ret = self._engine.read(nrows)
File "/tmp/pandas/build/lib.linux-x86_64-3.3/pandas/io/parsers.py", line 844, in read
data = self._reader.read(nrows)
File "parser.pyx", line 597, in pandas._parser.TextReader.read (pandas/src/parser.c:5342)
File "parser.pyx", line 619, in pandas._parser.TextReader._read_low_memory (pandas/src/parser.c:5562)
File "parser.pyx", line 668, in pandas._parser.TextReader._read_rows (pandas/src/parser.c:6143)
File "parser.pyx", line 655, in pandas._parser.TextReader._tokenize_rows (pandas/src/parser.c:6027)
File "parser.pyx", line 1385, in pandas._parser.raise_parser_error (pandas/src/parser.c:14807)
pandas._parser.CParserError: Error tokenizing data. C error: Expected 7 fields in line 106, saw 3
|
AssertionError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.