after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __new__(
cls,
data=None,
freq=None,
tz=None,
normalize=False,
closed=None,
ambiguous="raise",
dayfirst=False,
yearfirst=False,
dtype=None,
copy=False,
name=None,
):
if is_scalar(data):
raise TypeError(
f"{cls.__name__}() must be called with a "
f"collection of some kind, {repr(data)} was passed"
)
# - Cases checked above all return/raise before reaching here - #
name = maybe_extract_name(name, data, cls)
dtarr = DatetimeArray._from_sequence(
data,
dtype=dtype,
copy=copy,
tz=tz,
freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
subarr = cls._simple_new(dtarr, name=name, freq=dtarr.freq, tz=dtarr.tz)
return subarr
|
def __new__(
cls,
data=None,
freq=None,
tz=None,
normalize=False,
closed=None,
ambiguous="raise",
dayfirst=False,
yearfirst=False,
dtype=None,
copy=False,
name=None,
):
if is_scalar(data):
raise TypeError(
f"{cls.__name__}() must be called with a "
f"collection of some kind, {repr(data)} was passed"
)
# - Cases checked above all return/raise before reaching here - #
if name is None and hasattr(data, "name"):
name = data.name
dtarr = DatetimeArray._from_sequence(
data,
dtype=dtype,
copy=copy,
tz=tz,
freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
subarr = cls._simple_new(dtarr, name=name, freq=dtarr.freq, tz=dtarr.tz)
return subarr
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def __new__(
cls,
data,
closed=None,
dtype=None,
copy: bool = False,
name=None,
verify_integrity: bool = True,
):
name = maybe_extract_name(name, data, cls)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(
data,
closed=closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._simple_new(array, name)
|
def __new__(
cls,
data,
closed=None,
dtype=None,
copy: bool = False,
name=None,
verify_integrity: bool = True,
):
if name is None and hasattr(data, "name"):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(
data,
closed=closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._simple_new(array, name)
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def __new__(cls, data=None, dtype=None, copy=False, name=None):
cls._validate_dtype(dtype)
# Coerce to ndarray if not already ndarray or Index
if not isinstance(data, (np.ndarray, Index)):
if is_scalar(data):
raise cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data, dtype=dtype)
if issubclass(data.dtype.type, str):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
name = maybe_extract_name(name, data, cls)
return cls._simple_new(subarr, name=name)
|
def __new__(cls, data=None, dtype=None, copy=False, name=None):
cls._validate_dtype(dtype)
# Coerce to ndarray if not already ndarray or Index
if not isinstance(data, (np.ndarray, Index)):
if is_scalar(data):
raise cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data, dtype=dtype)
if issubclass(data.dtype.type, str):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if name is None and hasattr(data, "name"):
name = data.name
return cls._simple_new(subarr, name=name)
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
tz=None,
dtype=None,
copy=False,
name=None,
**fields,
):
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
# PeriodArray._generate range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
|
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
tz=None,
dtype=None,
copy=False,
name=None,
**fields,
):
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
if name is None and hasattr(data, "name"):
name = data.name
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
# PeriodArray._generate range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype=None,
copy=False,
name=None,
):
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
start = start._range
return cls._simple_new(start, dtype=dtype, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, dtype=dtype, name=name)
|
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype=None,
copy=False,
name=None,
):
cls._validate_dtype(dtype)
# RangeIndex
if isinstance(start, RangeIndex):
name = start.name if name is None else name
start = start._range
return cls._simple_new(start, dtype=dtype, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, dtype=dtype, name=name)
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def __new__(
cls,
data=None,
unit=None,
freq=None,
closed=None,
dtype=_TD_DTYPE,
copy=False,
name=None,
):
name = maybe_extract_name(name, data, cls)
if is_scalar(data):
raise TypeError(
f"{cls.__name__}() must be called with a "
f"collection of some kind, {repr(data)} was passed"
)
if unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M' and 'Y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
if isinstance(data, TimedeltaArray):
if copy:
data = data.copy()
return cls._simple_new(data, name=name, freq=freq)
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
# - Cases checked above all return/raise before reaching here - #
tdarr = TimedeltaArray._from_sequence(
data, freq=freq, unit=unit, dtype=dtype, copy=copy
)
return cls._simple_new(tdarr._data, freq=tdarr.freq, name=name)
|
def __new__(
cls,
data=None,
unit=None,
freq=None,
closed=None,
dtype=_TD_DTYPE,
copy=False,
name=None,
):
if is_scalar(data):
raise TypeError(
f"{cls.__name__}() must be called with a "
f"collection of some kind, {repr(data)} was passed"
)
if unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M' and 'Y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
if isinstance(data, TimedeltaArray):
if copy:
data = data.copy()
return cls._simple_new(data, name=name, freq=freq)
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
# - Cases checked above all return/raise before reaching here - #
tdarr = TimedeltaArray._from_sequence(
data, freq=freq, unit=unit, dtype=dtype, copy=copy
)
return cls._simple_new(tdarr._data, freq=tdarr.freq, name=name)
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):
# `dtype` is passed by _shallow_copy in corner cases, should always
# be timedelta64[ns] if present
if not isinstance(values, TimedeltaArray):
values = TimedeltaArray._simple_new(values, dtype=dtype, freq=freq)
else:
if freq is None:
freq = values.freq
assert isinstance(values, TimedeltaArray), type(values)
assert dtype == _TD_DTYPE, dtype
assert values.dtype == "m8[ns]", values.dtype
tdarr = TimedeltaArray._simple_new(values._data, freq=freq)
result = object.__new__(cls)
result._data = tdarr
result._name = name
# For groupby perf. See note in indexes/base about _index_data
result._index_data = tdarr._data
result._reset_identity()
return result
|
def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):
# `dtype` is passed by _shallow_copy in corner cases, should always
# be timedelta64[ns] if present
if not isinstance(values, TimedeltaArray):
values = TimedeltaArray._simple_new(values, dtype=dtype, freq=freq)
else:
if freq is None:
freq = values.freq
assert isinstance(values, TimedeltaArray), type(values)
assert dtype == _TD_DTYPE, dtype
assert values.dtype == "m8[ns]", values.dtype
tdarr = TimedeltaArray._simple_new(values._data, freq=freq)
result = object.__new__(cls)
result._data = tdarr
result.name = name
# For groupby perf. See note in indexes/base about _index_data
result._index_data = tdarr._data
result._reset_identity()
return result
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def __init__(
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
name = ibase.maybe_extract_name(name, data, type(self))
if is_empty_data(data) and dtype is None:
# gh-17261
warnings.warn(
"The default dtype for empty Series will be 'object' instead "
"of 'float64' in a future version. Specify a dtype explicitly "
"to silence this warning.",
DeprecationWarning,
stacklevel=2,
)
# uncomment the line below when removing the DeprecationWarning
# dtype = np.dtype(object)
if index is not None:
index = ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError(
"initializing a Series from a MultiIndex is not supported"
)
elif isinstance(data, Index):
if dtype is not None:
# astype copies
data = data.astype(dtype)
else:
# need to copy to avoid aliasing issues
data = data._values.copy()
if isinstance(data, ABCDatetimeIndex) and data.tz is not None:
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
data = data._values.copy(deep=True)
copy = False
elif isinstance(data, np.ndarray):
if len(data.dtype):
# GH#13296 we are dealing with a compound dtype, which
# should be treated as 2D
raise ValueError(
"Cannot construct a Series from an ndarray with "
"compound dtype. Use DataFrame instead."
)
pass
elif isinstance(data, ABCSeries):
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
data, index = self._init_dict(data, index, dtype)
dtype = None
copy = False
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
elif not data.index.equals(index) or copy:
# GH#19275 SingleBlockManager input should only be called
# internally
raise AssertionError(
"Cannot pass both SingleBlockManager "
"`data` argument and a different "
"`index` argument. `copy` must be False."
)
elif is_extension_array_dtype(data):
pass
elif isinstance(data, (set, frozenset)):
raise TypeError(f"'{type(data).__name__}' type is unordered")
elif isinstance(data, ABCSparseArray):
# handle sparse passed here (and force conversion)
data = data.to_dense()
else:
data = com.maybe_iterable_to_list(data)
if index is None:
if not is_list_like(data):
data = [data]
index = ibase.default_index(len(data))
elif is_list_like(data):
# a scalar numpy array is list-like but doesn't
# have a proper length
try:
if len(index) != len(data):
raise ValueError(
f"Length of passed values is {len(data)}, "
f"index implies {len(index)}."
)
except TypeError:
pass
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, errors="ignore", copy=copy)
elif copy:
data = data.copy()
else:
data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
self.name = name
self._set_axis(0, index, fastpath=True)
|
def __init__(
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if is_empty_data(data) and dtype is None:
# gh-17261
warnings.warn(
"The default dtype for empty Series will be 'object' instead "
"of 'float64' in a future version. Specify a dtype explicitly "
"to silence this warning.",
DeprecationWarning,
stacklevel=2,
)
# uncomment the line below when removing the DeprecationWarning
# dtype = np.dtype(object)
if index is not None:
index = ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError(
"initializing a Series from a MultiIndex is not supported"
)
elif isinstance(data, Index):
if name is None:
name = data.name
if dtype is not None:
# astype copies
data = data.astype(dtype)
else:
# need to copy to avoid aliasing issues
data = data._values.copy()
if isinstance(data, ABCDatetimeIndex) and data.tz is not None:
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
data = data._values.copy(deep=True)
copy = False
elif isinstance(data, np.ndarray):
if len(data.dtype):
# GH#13296 we are dealing with a compound dtype, which
# should be treated as 2D
raise ValueError(
"Cannot construct a Series from an ndarray with "
"compound dtype. Use DataFrame instead."
)
pass
elif isinstance(data, ABCSeries):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
data, index = self._init_dict(data, index, dtype)
dtype = None
copy = False
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
elif not data.index.equals(index) or copy:
# GH#19275 SingleBlockManager input should only be called
# internally
raise AssertionError(
"Cannot pass both SingleBlockManager "
"`data` argument and a different "
"`index` argument. `copy` must be False."
)
elif is_extension_array_dtype(data):
pass
elif isinstance(data, (set, frozenset)):
raise TypeError(f"'{type(data).__name__}' type is unordered")
elif isinstance(data, ABCSparseArray):
# handle sparse passed here (and force conversion)
data = data.to_dense()
else:
data = com.maybe_iterable_to_list(data)
if index is None:
if not is_list_like(data):
data = [data]
index = ibase.default_index(len(data))
elif is_list_like(data):
# a scalar numpy array is list-like but doesn't
# have a proper length
try:
if len(index) != len(data):
raise ValueError(
f"Length of passed values is {len(data)}, "
f"index implies {len(index)}."
)
except TypeError:
pass
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, errors="ignore", copy=copy)
elif copy:
data = data.copy()
else:
data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
self.name = name
self._set_axis(0, index, fastpath=True)
|
https://github.com/pandas-dev/pandas/issues/29069
|
In [5]: pd.Series([], name=[])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2c73ddde103e> in <module>
----> 1 pd.Series([], name=[])
~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
326 generic.NDFrame.__init__(self, data, fastpath=True)
327
--> 328 self.name = name
329 self._set_axis(0, index, fastpath=True)
330
~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value)
5257 object.__setattr__(self, name, value)
5258 elif name in self._metadata:
-> 5259 object.__setattr__(self, name, value)
5260 else:
5261 try:
~/sandbox/pandas/pandas/core/series.py in name(self, value)
468 def name(self, value):
469 if value is not None and not is_hashable(value):
--> 470 raise TypeError("Series.name must be a hashable type")
471 object.__setattr__(self, "_name", value)
472
TypeError: Series.name must be a hashable type
|
TypeError
|
def _attempt_YYYYMMDD(arg, errors):
"""
try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is a passed in as an object dtype, but could really be ints/strings
with nan-like/or floats (e.g. with nan)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce'
"""
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day(
carg / 10000, carg / 100 % 100, carg % 100
)
return tslib.array_to_datetime(parsed, errors=errors)[0]
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype="M8[ns]")
iresult = result.view("i8")
iresult[~mask] = tslibs.iNaT
masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
result[mask] = masked_result.astype("M8[ns]")
return result
# try intlike / strings that are ints
try:
return calc(arg.astype(np.int64))
except (ValueError, OverflowError, TypeError):
pass
# a float with actual np.nan
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except (ValueError, OverflowError, TypeError):
pass
# string with NaN-like
try:
mask = ~algorithms.isin(arg, list(tslib.nat_strings))
return calc_with_mask(arg, mask)
except (ValueError, OverflowError, TypeError):
pass
return None
|
def _attempt_YYYYMMDD(arg, errors):
"""
try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is a passed in as an object dtype, but could really be ints/strings
with nan-like/or floats (e.g. with nan)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce'
"""
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day(
carg / 10000, carg / 100 % 100, carg % 100
)
return tslib.array_to_datetime(parsed, errors=errors)[0]
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype="M8[ns]")
iresult = result.view("i8")
iresult[~mask] = tslibs.iNaT
masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
result[mask] = masked_result.astype("M8[ns]")
return result
# try intlike / strings that are ints
try:
return calc(arg.astype(np.int64))
except (ValueError, OverflowError):
pass
# a float with actual np.nan
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except (ValueError, OverflowError):
pass
# string with NaN-like
try:
mask = ~algorithms.isin(arg, list(tslib.nat_strings))
return calc_with_mask(arg, mask)
except (ValueError, OverflowError):
pass
return None
|
https://github.com/pandas-dev/pandas/issues/30011
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/pandas/core/tools/datetimes.py", line 448, in _convert_listlike_datetimes
values, tz = conversion.datetime_to_datetime64(arg)
File "pandas/_libs/tslibs/conversion.pyx", line 200, in pandas._libs.tslibs.conversion.datetime_to_datetime64
TypeError: Unrecognized value type: <class 'str'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/example.py", line 6, in <module>
print(pd.to_datetime(["19850212", "19890611", None], format = "%Y%m%d"))
File "/usr/local/lib/python3.7/dist-packages/pandas/util/_decorators.py", line 208, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/pandas/core/tools/datetimes.py", line 794, in to_datetime
result = convert_listlike(arg, box, format)
File "/usr/local/lib/python3.7/dist-packages/pandas/core/tools/datetimes.py", line 451, in _convert_listlike_datetimes
raise e
File "/usr/local/lib/python3.7/dist-packages/pandas/core/tools/datetimes.py", line 409, in _convert_listlike_datetimes
"cannot convert the input to " "'%Y%m%d' date format"
ValueError: cannot convert the input to '%Y%m%d' date format
|
TypeError
|
def melt(
frame: DataFrame,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
) -> DataFrame:
# TODO: what about the existing index?
# If multiindex, gather names of columns on all level for checking presence
# of `id_vars` and `value_vars`
if isinstance(frame.columns, ABCMultiIndex):
cols = [x for c in frame.columns for x in c]
else:
cols = list(frame.columns)
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list):
raise ValueError(
"id_vars must be a list of tuples when columns are a MultiIndex"
)
else:
# Check that `id_vars` are in frame
id_vars = list(id_vars)
missing = Index(com.flatten(id_vars)).difference(cols)
if not missing.empty:
raise KeyError(
"The following 'id_vars' are not present"
" in the DataFrame: {missing}"
"".format(missing=list(missing))
)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(
value_vars, list
):
raise ValueError(
"value_vars must be a list of tuples when columns are a MultiIndex"
)
else:
value_vars = list(value_vars)
# Check that `value_vars` are in frame
missing = Index(com.flatten(value_vars)).difference(cols)
if not missing.empty:
raise KeyError(
"The following 'value_vars' are not present in"
" the DataFrame: {missing}"
"".format(missing=list(missing))
)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, ABCMultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = [
"variable_{i}".format(i=i) for i in range(len(frame.columns.names))
]
else:
var_name = [
frame.columns.name if frame.columns.name is not None else "variable"
]
if isinstance(var_name, str):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
id_data = frame.pop(col)
if is_extension_array_dtype(id_data):
id_data = concat([id_data] * K, ignore_index=True)
else:
id_data = np.tile(id_data.values, K)
mdata[col] = id_data
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel("F")
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N)
return frame._constructor(mdata, columns=mcolumns)
|
def melt(
frame: DataFrame,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
) -> DataFrame:
# TODO: what about the existing index?
# If multiindex, gather names of columns on all level for checking presence
# of `id_vars` and `value_vars`
if isinstance(frame.columns, ABCMultiIndex):
cols = [x for c in frame.columns for x in c]
else:
cols = list(frame.columns)
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list):
raise ValueError(
"id_vars must be a list of tuples when columns are a MultiIndex"
)
else:
# Check that `id_vars` are in frame
id_vars = list(id_vars)
missing = Index(np.ravel(id_vars)).difference(cols)
if not missing.empty:
raise KeyError(
"The following 'id_vars' are not present"
" in the DataFrame: {missing}"
"".format(missing=list(missing))
)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(
value_vars, list
):
raise ValueError(
"value_vars must be a list of tuples when columns are a MultiIndex"
)
else:
value_vars = list(value_vars)
# Check that `value_vars` are in frame
missing = Index(np.ravel(value_vars)).difference(cols)
if not missing.empty:
raise KeyError(
"The following 'value_vars' are not present in"
" the DataFrame: {missing}"
"".format(missing=list(missing))
)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, ABCMultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = [
"variable_{i}".format(i=i) for i in range(len(frame.columns.names))
]
else:
var_name = [
frame.columns.name if frame.columns.name is not None else "variable"
]
if isinstance(var_name, str):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
id_data = frame.pop(col)
if is_extension_array_dtype(id_data):
id_data = concat([id_data] * K, ignore_index=True)
else:
id_data = np.tile(id_data.values, K)
mdata[col] = id_data
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel("F")
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N)
return frame._constructor(mdata, columns=mcolumns)
|
https://github.com/pandas-dev/pandas/issues/29718
|
Traceback (most recent call last):
File "test.py", line 5, in <module>
pd.melt(df, id_vars=[1, "string"])
File "/home/nils/projects/tsfresh/venv/lib/python3.6/site-packages/pandas/core/reshape/melt.py", line 52, in melt
"".format(missing=list(missing))
KeyError: "The following 'id_vars' are not present in the DataFrame: ['1']"
|
KeyError
|
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, do not use the index labels.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(
other.values.reshape((1, len(other))),
index=index,
columns=combined_columns,
)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
|
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, do not use the index labels.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(
other.values.reshape((1, len(other))),
index=index,
columns=combined_columns,
)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
|
https://github.com/pandas-dev/pandas/issues/28769
|
import pandas
pandas.DataFrame().append([])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".env\lib\site-packages\pandas\core\frame.py", line 7108, in append
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
IndexError: list index out of range
pandas.__version__
'0.25.1'
|
IndexError
|
def recons_codes(self):
# get unique result indices, and prepend 0 as groupby starts from the first
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
|
def recons_codes(self):
comp_ids, obs_ids, _ = self.group_info
codes = (ping.codes for ping in self.groupings)
return decons_obs_group_ids(comp_ids, obs_ids, self.shape, codes, xnull=True)
|
https://github.com/pandas-dev/pandas/issues/28479
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-543-5efc1c882109> in <module>
14 [print(g) for g in dfg]
15 print('This will cause crash:')
---> 16 display(dfg['Food'].value_counts())
~/anaconda3/lib/python3.7/site-packages/pandas/core/groupby/generic.py in value_counts(self, normalize, sort, ascending, bins, dropna)
1137
1138 # multi-index components
-> 1139 labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
1140 levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
1141 names = self.grouper.names + [self._selection_name]
~/anaconda3/lib/python3.7/site-packages/numpy/core/fromnumeric.py in repeat(a, repeats, axis)
469
470 """
--> 471 return _wrapfunc(a, 'repeat', repeats, axis=axis)
472
473
~/anaconda3/lib/python3.7/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
54 def _wrapfunc(obj, method, *args, **kwds):
55 try:
---> 56 return getattr(obj, method)(*args, **kwds)
57
58 # An AttributeError occurs if the object does not have
ValueError: operands could not be broadcast together with shape (5,) (4,)
|
ValueError
|
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
from pandas import Panel, DataFrame, Series
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
(blk,) = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value, dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
# if we have any multi-indexes that have non-trivial slices (not null slices)
# then we must take the split path, xref GH 10360
if isinstance(ax, MultiIndex) and not (is_integer(i) or is_null_slice(i)):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i
]
if any([not l for l in len_non_info_axes]):
if not is_list_like_indexer(value):
raise ValueError(
"cannot set a frame with no defined index and a scalar"
)
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes
)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj.is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# this preserves dtype of the value
new_values = Series([value]).values
if len(self.obj.values):
new_values = np.concatenate([self.obj.values, new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name
)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError("cannot set a frame with no defined columns")
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns, copy=True)
value.name = indexer
# a list-list
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with mismatched columns")
value = Series(value, index=self.obj.columns, name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if len(labels) == 1 and isinstance(self.obj[labels[0]].axes[0], MultiIndex):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1 :]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if (
is_list_like_indexer(value)
and np.iterable(value)
and lplane_indexer != len(value)
):
if len(obj[idx]) != len(value):
raise ValueError(
"cannot set using a multi-index selection indexer "
"with a different length than the value"
)
# make sure we have an ndarray
value = getattr(value, "values", value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1 :]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if isinstance(pi, tuple) and all(
is_null_slice(idx) or is_full_slice(idx, len(self.obj)) for idx in pi
):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
"""return True if we have an equal len settable"""
if not len(labels) == 1 or not np.iterable(value):
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
for item in labels:
# align to
v = (
np.nan
if item not in value
else self._align_series(indexer[0], value[item])
)
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(labels) != value.shape[1]:
raise ValueError(
"Must have equal len keys and value "
"when setting with an ndarray"
)
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError(
"Must have equal len keys and value "
"when setting with an iterable"
)
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (
len(indexer) > info_axis
and is_integer(indexer[info_axis])
and all(
is_null_slice(idx)
for i, idx in enumerate(indexer)
if i != info_axis
)
):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
|
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
from pandas import Panel, DataFrame, Series
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
(blk,) = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value, dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i
]
if any([not l for l in len_non_info_axes]):
if not is_list_like_indexer(value):
raise ValueError(
"cannot set a frame with no defined index and a scalar"
)
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes
)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj.is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# this preserves dtype of the value
new_values = Series([value]).values
if len(self.obj.values):
new_values = np.concatenate([self.obj.values, new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name
)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError("cannot set a frame with no defined columns")
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns, copy=True)
value.name = indexer
# a list-list
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with mismatched columns")
value = Series(value, index=self.obj.columns, name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
info_axis = self.obj._info_axis_number
item_labels = self.obj._get_axis(info_axis)
# if we have a complicated setup, take the split path
if isinstance(indexer, tuple) and any(
[isinstance(ax, MultiIndex) for ax in self.obj.axes]
):
take_split_path = True
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if len(labels) == 1 and isinstance(self.obj[labels[0]].axes[0], MultiIndex):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1 :]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if (
is_list_like_indexer(value)
and np.iterable(value)
and lplane_indexer != len(value)
):
if len(obj[idx]) != len(value):
raise ValueError(
"cannot set using a multi-index selection indexer "
"with a different length than the value"
)
# make sure we have an ndarray
value = getattr(value, "values", value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1 :]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if isinstance(pi, tuple) and all(
is_null_slice(idx) or is_full_slice(idx, len(self.obj)) for idx in pi
):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
"""return True if we have an equal len settable"""
if not len(labels) == 1 or not np.iterable(value):
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
for item in labels:
# align to
v = (
np.nan
if item not in value
else self._align_series(indexer[0], value[item])
)
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(labels) != value.shape[1]:
raise ValueError(
"Must have equal len keys and value "
"when setting with an ndarray"
)
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError(
"Must have equal len keys and value "
"when setting with an iterable"
)
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (
len(indexer) > info_axis
and is_integer(indexer[info_axis])
and all(
is_null_slice(idx)
for i, idx in enumerate(indexer)
if i != info_axis
)
):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
|
https://github.com/pandas-dev/pandas/issues/10360
|
In [1]: import pandas as pd
In [29]: panel1 = pd.Panel([[[0., 0.]], [[0., 0.]]], items=['A', 'B'], minor_axis=['x', 'y'])
In [30]: panel2 = pd.Panel([[[0., 0.]], [['o', 'o']]], items=['A', 'B'], minor_axis=['x', 'y'])
In [33]: panel1['C'] = 0.
In [34]: panel2['C'] = 0.
In [35]: panel1.dtypes
Out[35]:
A float64
B float64
C float64
dtype: object
In [36]: panel2.dtypes
Out[36]:
A object
B object
C float64
dtype: object
In [37]: panel1.loc['C', 0, :] = [5, 6]
In [38]: panel1['C']
Out[38]:
x y
0 5 6
In [39]: panel2.loc['C', 0, :] = [5, 6]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-39-57fe49e884e4> in <module>()
----> 1 panel2.loc['C', 0, :] = [5, 6]
C:\Python34\lib\site-packages\pandas\core\indexing.py in __setitem__(self, key, value)
116 def __setitem__(self, key, value):
117 indexer = self._get_setitem_indexer(key)
--> 118 self._setitem_with_indexer(indexer, value)
119
120 def _has_valid_type(self, k, axis):
C:\Python34\lib\site-packages\pandas\core\indexing.py in _setitem_with_indexer(self, indexer, value)
460
461 if len(labels) != len(value):
--> 462 raise ValueError('Must have equal len keys and value '
463 'when setting with an iterable')
464
ValueError: Must have equal len keys and value when setting with an iterable
In [41]: pd.show_versions()
INSTALLED VERSIONS
------------------
commit: None
python: 3.4.3.final.0
python-bits: 64
OS: Windows
OS-release: 7
machine: AMD64
processor: Intel64 Family 6 Model 58 Stepping 9, GenuineIntel
byteorder: little
LC_ALL: None
LANG: None
pandas: 0.16.1
nose: 1.3.6
Cython: 0.22
numpy: 1.9.1
scipy: None
statsmodels: None
IPython: 3.1.0
sphinx: None
patsy: 0.3.0
dateutil: 2.4.2
pytz: 2015.4
bottleneck: 0.8.0
tables: 3.1.1
numexpr: 2.4
matplotlib: 1.4.3
openpyxl: None
xlrd: None
xlwt: None
xlsxwriter: None
lxml: None
bs4: None
html5lib: None
httplib2: None
apiclient: None
sqlalchemy: 1.0.5
pymysql: None
psycopg2: None
|
ValueError
|
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwds,
):
import matplotlib.pyplot as plt
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {'axes', 'dict', 'both'}")
if isinstance(data, ABCSeries):
data = data.to_frame("x")
column = "x"
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
# if colors not provided, use same defaults as DataFrame.plot.box
result = _get_standard_colors(num_colors=3)
result = np.take(result, [0, 0, 2])
result = np.append(result, "k")
colors = kwds.pop("color", None)
if colors:
if is_dict_like(colors):
# replace colors in result array with user-specified colors
# taken from the colors dict parameter
# "boxes" value placed in position 0, "whiskers" in 1, etc.
valid_keys = ["boxes", "whiskers", "medians", "caps"]
key_to_index = dict(zip(valid_keys, range(4)))
for key, value in colors.items():
if key in valid_keys:
result[key_to_index[key]] = value
else:
raise ValueError(
"color dict contains invalid "
"key '{0}' "
"The key must be either {1}".format(key, valid_keys)
)
else:
result.fill(colors)
return result
def maybe_color_bp(bp):
setp(bp["boxes"], color=colors[0], alpha=1)
setp(bp["whiskers"], color=colors[1], alpha=1)
setp(bp["medians"], color=colors[2], alpha=1)
setp(bp["caps"], color=colors[3], alpha=1)
def plot_group(keys, values, ax):
keys = [pprint_thing(x) for x in keys]
values = [np.asarray(remove_na_arraylike(v)) for v in values]
bp = ax.boxplot(values, **kwds)
if fontsize is not None:
ax.tick_params(axis="both", labelsize=fontsize)
if kwds.get("vert", 1):
ax.set_xticklabels(keys, rotation=rot)
else:
ax.set_yticklabels(keys, rotation=rot)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == "dict":
return bp
elif return_type == "both":
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
# https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(
plot_group,
data,
columns=columns,
by=by,
grid=grid,
figsize=figsize,
ax=ax,
layout=layout,
return_type=return_type,
)
else:
if return_type is None:
return_type = "axes"
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when 'by' is None")
if ax is None:
rc = {"figure.figsize": figsize} if figsize is not None else {}
with plt.rc_context(rc):
ax = plt.gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
|
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwds,
):
import matplotlib.pyplot as plt
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {'axes', 'dict', 'both'}")
if isinstance(data, ABCSeries):
data = data.to_frame("x")
column = "x"
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
return _get_standard_colors(color=kwds.get("color"), num_colors=3)
def maybe_color_bp(bp):
if "color" not in kwds:
setp(bp["boxes"], color=colors[0], alpha=1)
setp(bp["whiskers"], color=colors[0], alpha=1)
setp(bp["medians"], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [pprint_thing(x) for x in keys]
values = [np.asarray(remove_na_arraylike(v)) for v in values]
bp = ax.boxplot(values, **kwds)
if fontsize is not None:
ax.tick_params(axis="both", labelsize=fontsize)
if kwds.get("vert", 1):
ax.set_xticklabels(keys, rotation=rot)
else:
ax.set_yticklabels(keys, rotation=rot)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == "dict":
return bp
elif return_type == "both":
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
# https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(
plot_group,
data,
columns=columns,
by=by,
grid=grid,
figsize=figsize,
ax=ax,
layout=layout,
return_type=return_type,
)
else:
if return_type is None:
return_type = "axes"
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when 'by' is None")
if ax is None:
rc = {"figure.figsize": figsize} if figsize is not None else {}
with plt.rc_context(rc):
ax = plt.gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
|
https://github.com/pandas-dev/pandas/issues/26214
|
Traceback (most recent call last):
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 33, in <module>
comparative_results()
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 26, in comparative_results
ax = draw_plot(ax, df1, 'k')
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 22, in draw_plot
ax = data.boxplot(column=['x'], by=['z'], showfliers=False, ax=ax, color=colors)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2254, in boxplot_frame
return_type=return_type, **kwds)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2223, in boxplot
return_type=return_type)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2683, in _grouped_plot_by_column
re_plotf = plotf(keys, values, ax, **kwargs)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2191, in plot_group
bp = ax.boxplot(values, **kwds)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/matplotlib/__init__.py", line 1810, in inner
return func(ax, *args, **kwargs)
TypeError: boxplot() got an unexpected keyword argument 'color'
Process finished with exit code 1
|
TypeError
|
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
# if colors not provided, use same defaults as DataFrame.plot.box
result = _get_standard_colors(num_colors=3)
result = np.take(result, [0, 0, 2])
result = np.append(result, "k")
colors = kwds.pop("color", None)
if colors:
if is_dict_like(colors):
# replace colors in result array with user-specified colors
# taken from the colors dict parameter
# "boxes" value placed in position 0, "whiskers" in 1, etc.
valid_keys = ["boxes", "whiskers", "medians", "caps"]
key_to_index = dict(zip(valid_keys, range(4)))
for key, value in colors.items():
if key in valid_keys:
result[key_to_index[key]] = value
else:
raise ValueError(
"color dict contains invalid "
"key '{0}' "
"The key must be either {1}".format(key, valid_keys)
)
else:
result.fill(colors)
return result
|
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
return _get_standard_colors(color=kwds.get("color"), num_colors=3)
|
https://github.com/pandas-dev/pandas/issues/26214
|
Traceback (most recent call last):
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 33, in <module>
comparative_results()
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 26, in comparative_results
ax = draw_plot(ax, df1, 'k')
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 22, in draw_plot
ax = data.boxplot(column=['x'], by=['z'], showfliers=False, ax=ax, color=colors)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2254, in boxplot_frame
return_type=return_type, **kwds)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2223, in boxplot
return_type=return_type)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2683, in _grouped_plot_by_column
re_plotf = plotf(keys, values, ax, **kwargs)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2191, in plot_group
bp = ax.boxplot(values, **kwds)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/matplotlib/__init__.py", line 1810, in inner
return func(ax, *args, **kwargs)
TypeError: boxplot() got an unexpected keyword argument 'color'
Process finished with exit code 1
|
TypeError
|
def maybe_color_bp(bp):
setp(bp["boxes"], color=colors[0], alpha=1)
setp(bp["whiskers"], color=colors[1], alpha=1)
setp(bp["medians"], color=colors[2], alpha=1)
setp(bp["caps"], color=colors[3], alpha=1)
|
def maybe_color_bp(bp):
if "color" not in kwds:
setp(bp["boxes"], color=colors[0], alpha=1)
setp(bp["whiskers"], color=colors[0], alpha=1)
setp(bp["medians"], color=colors[2], alpha=1)
|
https://github.com/pandas-dev/pandas/issues/26214
|
Traceback (most recent call last):
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 33, in <module>
comparative_results()
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 26, in comparative_results
ax = draw_plot(ax, df1, 'k')
File "/Users/BNL28/Code/DataPerformance/bug_report.py", line 22, in draw_plot
ax = data.boxplot(column=['x'], by=['z'], showfliers=False, ax=ax, color=colors)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2254, in boxplot_frame
return_type=return_type, **kwds)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2223, in boxplot
return_type=return_type)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2683, in _grouped_plot_by_column
re_plotf = plotf(keys, values, ax, **kwargs)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/pandas/plotting/_core.py", line 2191, in plot_group
bp = ax.boxplot(values, **kwds)
File "/Users/BNL28/anaconda3/lib/python3.6/site-packages/matplotlib/__init__.py", line 1810, in inner
return func(ax, *args, **kwargs)
TypeError: boxplot() got an unexpected keyword argument 'color'
Process finished with exit code 1
|
TypeError
|
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
if not lib.is_scalar(key) and is_list_like(key):
key = np.asarray(key)
if not lib.is_scalar(value):
value = np.asarray(value)
value = np.asarray(value, dtype=self._ndarray.dtype)
self._ndarray[key] = value
|
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
if not lib.is_scalar(key) and is_list_like(key):
key = np.asarray(key)
if not lib.is_scalar(value):
value = np.asarray(value)
values = self._ndarray
t = np.result_type(value, values)
if t != self._ndarray.dtype:
values = values.astype(t, casting="safe")
values[key] = value
self._dtype = PandasDtype(t)
self._ndarray = values
else:
self._ndarray[key] = value
|
https://github.com/pandas-dev/pandas/issues/28118
|
In [3]: t = pd.array(['a', 'b', 'c'])
In [4]: t[0] = 't'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-7d1c8d6d2e6a> in <module>
----> 1 t[0] = 't'
~/sandbox/pandas/pandas/core/arrays/numpy_.py in __setitem__(self, key, value)
237
238 values = self._ndarray
--> 239 t = np.result_type(value, values)
240 if t != self._ndarray.dtype:
241 values = values.astype(t, casting="safe")
<__array_function__ internals> in result_type(*args, **kwargs)
TypeError: data type "t" not understood
|
TypeError
|
def aggregate(self, func, *args, **kwargs):
_level = kwargs.pop("_level", None)
relabeling = func is None and _is_multi_agg_with_relabel(**kwargs)
if relabeling:
func, columns, order = _normalize_keyword_aggregation(kwargs)
kwargs = {}
elif func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
func = _maybe_mangle_lambdas(func)
result, how = self._aggregate(func, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[func], _level=_level, _axis=self.axis
)
result.columns = Index(
result.columns.levels[0], name=self._selected_obj.columns.name
)
if isinstance(self.obj, SparseDataFrame):
# Backwards compat for groupby.agg() with sparse
# values. concat no longer converts DataFrame[Sparse]
# to SparseDataFrame, so we do it here.
result = SparseDataFrame(result._data)
except Exception:
result = self._aggregate_generic(func, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
if relabeling:
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
return result._convert(datetime=True)
|
def aggregate(self, func, *args, **kwargs):
_level = kwargs.pop("_level", None)
relabeling = func is None and _is_multi_agg_with_relabel(**kwargs)
if relabeling:
func, columns, order = _normalize_keyword_aggregation(kwargs)
kwargs = {}
elif func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
func = _maybe_mangle_lambdas(func)
result, how = self._aggregate(func, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[func], _level=_level, _axis=self.axis
)
result.columns = Index(
result.columns.levels[0], name=self._selected_obj.columns.name
)
if isinstance(self.obj, SparseDataFrame):
# Backwards compat for groupby.agg() with sparse
# values. concat no longer converts DataFrame[Sparse]
# to SparseDataFrame, so we do it here.
result = SparseDataFrame(result._data)
except Exception:
result = self._aggregate_generic(func, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
if relabeling:
result = result[order]
result.columns = columns
return result._convert(datetime=True)
|
https://github.com/pandas-dev/pandas/issues/27519
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-58-5b7e2c8bacf8> in <module>
3 df = pd.DataFrame({"A": [1, 2]})
4
----> 5 df.groupby([1, 1]).agg(foo=('A', lambda x: x.max()), bar=("A", lambda x: x.min()))
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\groupby\generic.py in aggregate(self, arg, *args, **kwargs)
1453 @Appender(_shared_docs["aggregate"])
1454 def aggregate(self, arg=None, *args, **kwargs):
-> 1455 return super().aggregate(arg, *args, **kwargs)
1456
1457 agg = aggregate
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\groupby\generic.py in aggregate(self, func, *args, **kwargs)
262
263 if relabeling:
--> 264 result = result[order]
265 result.columns = columns
266
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\frame.py in __getitem__(self, key)
2979 if is_iterator(key):
2980 key = list(key)
-> 2981 indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True)
2982
2983 # take() does not accept boolean indexers
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\indexing.py in _convert_to_indexer(self, obj, axis, is_setter, raise_missing)
1269 # When setting, missing keys are not allowed, even with .loc:
1270 kwargs = {"raise_missing": True if is_setter else raise_missing}
-> 1271 return self._get_listlike_indexer(obj, axis, **kwargs)[1]
1272 else:
1273 try:
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\indexing.py in _get_listlike_indexer(self, key, axis, raise_missing)
1076
1077 self._validate_read_indexer(
-> 1078 keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing
1079 )
1080 return keyarr, indexer
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\indexing.py in _validate_read_indexer(self, key, indexer, axis, raise_missing)
1161 raise KeyError(
1162 "None of [{key}] are in the [{axis}]".format(
-> 1163 key=key, axis=self.obj._get_axis_name(axis)
1164 )
1165 )
KeyError: "None of [MultiIndex([('A', '<lambda>'),\n ('A', '<lambda>')],\n )] are in the [columns]"
|
KeyError
|
def _normalize_keyword_aggregation(kwargs):
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
to the old OrderedDict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
col_idx_order : List[int]
List of columns indices.
Examples
--------
>>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
(OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
"""
if not PY36:
kwargs = OrderedDict(sorted(kwargs.items()))
# Normalize the aggregation functions as Dict[column, List[func]],
# process normally, then fixup the names.
# TODO(Py35): When we drop python 3.5, change this to
# defaultdict(list)
# TODO: aggspec type: typing.OrderedDict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec = OrderedDict()
order = []
columns, pairs = list(zip(*kwargs.items()))
for name, (column, aggfunc) in zip(columns, pairs):
if column in aggspec:
aggspec[column].append(aggfunc)
else:
aggspec[column] = [aggfunc]
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
# uniquify aggfunc name if duplicated in order list
uniquified_order = _make_unique(order)
# GH 25719, due to aggspec will change the order of assigned columns in aggregation
# uniquified_aggspec will store uniquified order list and will compare it with order
# based on index
aggspec_order = [
(column, com.get_callable_name(aggfunc) or aggfunc)
for column, aggfuncs in aggspec.items()
for aggfunc in aggfuncs
]
uniquified_aggspec = _make_unique(aggspec_order)
# get the new indice of columns by comparison
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
return aggspec, columns, col_idx_order
|
def _normalize_keyword_aggregation(kwargs):
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
to the old OrderedDict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
order : List[Tuple[str, str]]
Pairs of the input and output column names.
Examples
--------
>>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
(OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
"""
if not PY36:
kwargs = OrderedDict(sorted(kwargs.items()))
# Normalize the aggregation functions as Dict[column, List[func]],
# process normally, then fixup the names.
# TODO(Py35): When we drop python 3.5, change this to
# defaultdict(list)
# TODO: aggspec type: typing.OrderedDict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec = OrderedDict()
order = []
columns, pairs = list(zip(*kwargs.items()))
for name, (column, aggfunc) in zip(columns, pairs):
if column in aggspec:
aggspec[column].append(aggfunc)
else:
aggspec[column] = [aggfunc]
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
return aggspec, columns, order
|
https://github.com/pandas-dev/pandas/issues/27519
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-58-5b7e2c8bacf8> in <module>
3 df = pd.DataFrame({"A": [1, 2]})
4
----> 5 df.groupby([1, 1]).agg(foo=('A', lambda x: x.max()), bar=("A", lambda x: x.min()))
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\groupby\generic.py in aggregate(self, arg, *args, **kwargs)
1453 @Appender(_shared_docs["aggregate"])
1454 def aggregate(self, arg=None, *args, **kwargs):
-> 1455 return super().aggregate(arg, *args, **kwargs)
1456
1457 agg = aggregate
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\groupby\generic.py in aggregate(self, func, *args, **kwargs)
262
263 if relabeling:
--> 264 result = result[order]
265 result.columns = columns
266
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\frame.py in __getitem__(self, key)
2979 if is_iterator(key):
2980 key = list(key)
-> 2981 indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True)
2982
2983 # take() does not accept boolean indexers
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\indexing.py in _convert_to_indexer(self, obj, axis, is_setter, raise_missing)
1269 # When setting, missing keys are not allowed, even with .loc:
1270 kwargs = {"raise_missing": True if is_setter else raise_missing}
-> 1271 return self._get_listlike_indexer(obj, axis, **kwargs)[1]
1272 else:
1273 try:
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\indexing.py in _get_listlike_indexer(self, key, axis, raise_missing)
1076
1077 self._validate_read_indexer(
-> 1078 keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing
1079 )
1080 return keyarr, indexer
~\AppData\Local\Continuum\anaconda3\envs\insight\lib\site-packages\pandas\core\indexing.py in _validate_read_indexer(self, key, indexer, axis, raise_missing)
1161 raise KeyError(
1162 "None of [{key}] are in the [{axis}]".format(
-> 1163 key=key, axis=self.obj._get_axis_name(axis)
1164 )
1165 )
KeyError: "None of [MultiIndex([('A', '<lambda>'),\n ('A', '<lambda>')],\n )] are in the [columns]"
|
KeyError
|
def length_of_indexer(indexer, target=None) -> int:
"""
return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
|
def length_of_indexer(indexer, target=None) -> int:
"""
return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
|
https://github.com/pandas-dev/pandas/issues/26939
|
import pandas as pd
s = pd.Series(index=range(2010, 2020))
s.loc[2015:2010:-1] = [6, 5, 4, 3, 2, 1]
Traceback (most recent call last):
[...]
ValueError: cannot set using a slice indexer with a different length than the value
|
ValueError
|
def rename(self, index=None, **kwargs):
"""
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
dict-like or functions are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
copy : bool, default True
Whether to copy underlying data.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
Series
Series with index labels or name altered.
See Also
--------
Series.rename_axis : Set the name of the axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace")
if callable(index) or is_dict_like(index):
return super().rename(index=index, **kwargs)
else:
return self._set_name(index, inplace=kwargs.get("inplace"))
|
def rename(self, index=None, **kwargs):
"""
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
dict-like or functions are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
copy : bool, default True
Whether to copy underlying data.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
Series
Series with index labels or name altered.
See Also
--------
Series.rename_axis : Set the name of the axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace")
non_mapping = is_scalar(index) or (is_list_like(index) and not is_dict_like(index))
if non_mapping:
return self._set_name(index, inplace=kwargs.get("inplace"))
return super().rename(index=index, **kwargs)
|
https://github.com/pandas-dev/pandas/issues/27813
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
s.rename(i1) # raises error
File "/usr/local/lib/python3.6/dist-packages/pandas/core/series.py", line 3736, in rename
return super(Series, self).rename(index=index, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/generic.py", line 1091, in rename
level=level)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/internals/managers.py", line 171, in rename_axis
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
File "/usr/local/lib/python3.6/dist-packages/pandas/core/internals/managers.py", line 2004, in _transform_index
items = [func(x) for x in index]
File "/usr/local/lib/python3.6/dist-packages/pandas/core/internals/managers.py", line 2004, in <listcomp>
items = [func(x) for x in index]
TypeError: 'MyIndexer' object is not callable
|
TypeError
|
def _isna_new(obj):
if is_scalar(obj):
return libmissing.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, type):
return False
elif isinstance(
obj,
(
ABCSeries,
np.ndarray,
ABCIndexClass,
ABCExtensionArray,
ABCDatetimeArray,
ABCTimedeltaArray,
),
):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=isna))
elif isinstance(obj, list):
return _isna_ndarraylike(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
return _isna_ndarraylike(np.asarray(obj))
else:
return obj is None
|
def _isna_new(obj):
if is_scalar(obj):
return libmissing.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(
obj,
(
ABCSeries,
np.ndarray,
ABCIndexClass,
ABCExtensionArray,
ABCDatetimeArray,
ABCTimedeltaArray,
),
):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=isna))
elif isinstance(obj, list):
return _isna_ndarraylike(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
return _isna_ndarraylike(np.asarray(obj))
else:
return obj is None
|
https://github.com/pandas-dev/pandas/issues/27482
|
import pandas as pd
x = pd.Series([1,2,3,4])
xt = type(x)
pd.isnull(xt)
## Traceback (most recent call last):
## File "<input>", line 1, in <module>
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 122, in isna
## return _isna(obj)
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 145, in _isna_new
## return _isna_ndarraylike(obj)
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 225, in _isna_ndarraylike
## dtype = values.dtype
## AttributeError: 'property' object has no attribute 'dtype'
y = pd.DataFrame({"col": [1,2,3,4]})
yt = type(y)
pd.isnull(yt)
## Traceback (most recent call last):
## File "<input>", line 1, in <module>
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 122, in isna
## return _isna(obj)
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 147, in _isna_new
## return obj._constructor(obj._data.isna(func=isna))
## AttributeError: 'NoneType' object has no attribute 'isna'
|
AttributeError
|
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, type):
return False
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
|
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
|
https://github.com/pandas-dev/pandas/issues/27482
|
import pandas as pd
x = pd.Series([1,2,3,4])
xt = type(x)
pd.isnull(xt)
## Traceback (most recent call last):
## File "<input>", line 1, in <module>
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 122, in isna
## return _isna(obj)
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 145, in _isna_new
## return _isna_ndarraylike(obj)
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 225, in _isna_ndarraylike
## dtype = values.dtype
## AttributeError: 'property' object has no attribute 'dtype'
y = pd.DataFrame({"col": [1,2,3,4]})
yt = type(y)
pd.isnull(yt)
## Traceback (most recent call last):
## File "<input>", line 1, in <module>
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 122, in isna
## return _isna(obj)
## File "/home/bbassett/venv37/lib/python3.7/site-packages/pandas/core/dtypes/missing.py", line 147, in _isna_new
## return obj._constructor(obj._data.isna(func=isna))
## AttributeError: 'NoneType' object has no attribute 'isna'
|
AttributeError
|
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if len(inputs) == 1:
# No alignment necessary.
sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)
if isinstance(sp_values, tuple):
# multiple outputs. e.g. modf
arrays = tuple(
self._simple_new(
sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)
)
for sp_value, fv in zip(sp_values, fill_value)
)
return arrays
elif is_scalar(sp_values):
# e.g. reductions
return sp_values
return self._simple_new(
sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
)
result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs], **kwargs)
if out:
if len(out) == 1:
out = out[0]
return out
if type(result) is tuple:
return tuple(type(self)(x) for x in result)
elif method == "at":
# no return value
return None
else:
return type(self)(result)
|
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if len(inputs) == 1:
# No alignment necessary.
sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)
if isinstance(sp_values, tuple):
# multiple outputs. e.g. modf
arrays = tuple(
self._simple_new(
sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)
)
for sp_value, fv in zip(sp_values, fill_value)
)
return arrays
return self._simple_new(
sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
)
result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs], **kwargs)
if out:
if len(out) == 1:
out = out[0]
return out
if type(result) is tuple:
return tuple(type(self)(x) for x in result)
elif method == "at":
# no return value
return None
else:
return type(self)(result)
|
https://github.com/pandas-dev/pandas/issues/27080
|
In [2]: a = pd.SparseArray([0, 10, 1])
In [3]: np.maximum.reduce(a)
Out[3]: ---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/Envs/pandas-dev/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
~/Envs/pandas-dev/lib/python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
400 if cls is not object \
401 and callable(cls.__dict__.get('__repr__')):
--> 402 return _repr_pprint(obj, self, cycle)
403
404 return _default_pprint(obj, self, cycle)
~/Envs/pandas-dev/lib/python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
695 """A pprint that just redirects to the normal repr function."""
696 # Find newlines and replace them with p.break_()
--> 697 output = repr(obj)
698 for idx,output_line in enumerate(output.splitlines()):
699 if idx:
~/sandbox/pandas/pandas/core/arrays/sparse.py in __repr__(self)
1815 def __repr__(self):
1816 return '{self}\nFill: {fill}\n{index}'.format(
-> 1817 self=printing.pprint_thing(self),
1818 fill=printing.pprint_thing(self.fill_value),
1819 index=printing.pprint_thing(self.sp_index))
~/sandbox/pandas/pandas/io/formats/printing.py in pprint_thing(thing, _nest_lvl, escape_chars, default_escapes, quote_strings, max_seq_items)
215 result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
216 quote_strings=quote_strings,
--> 217 max_seq_items=max_seq_items)
218 elif isinstance(thing, str) and quote_strings:
219 result = "'{thing}'".format(thing=as_escaped_unicode(thing))
~/sandbox/pandas/pandas/io/formats/printing.py in _pprint_seq(seq, _nest_lvl, max_seq_items, **kwds)
111 r = [pprint_thing(next(s),
112 _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
--> 113 for i in range(min(nitems, len(seq)))]
114 body = ", ".join(r)
115
~/sandbox/pandas/pandas/io/formats/printing.py in <listcomp>(.0)
111 r = [pprint_thing(next(s),
112 _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
--> 113 for i in range(min(nitems, len(seq)))]
114 body = ", ".join(r)
115
~/sandbox/pandas/pandas/core/arrays/base.py in __iter__(self)
283 # calls to ``__getitem__``, which may be slower than necessary.
284 for i in range(len(self)):
--> 285 yield self[i]
286
287 # ------------------------------------------------------------------------
~/sandbox/pandas/pandas/core/arrays/sparse.py in __getitem__(self, key)
1092
1093 if is_integer(key):
-> 1094 return self._get_val_at(key)
1095 elif isinstance(key, tuple):
1096 data_slice = self.to_dense()[key]
~/sandbox/pandas/pandas/core/arrays/sparse.py in _get_val_at(self, loc)
1135 return self.fill_value
1136 else:
-> 1137 return libindex.get_value_at(self.sp_values, sp_loc)
1138
1139 def take(self, indices, allow_fill=False, fill_value=None):
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got numpy.int64)
In [4]: result = np.maximum.reduce(a)
In [5]: type(result)
Out[5]: pandas.core.arrays.sparse.SparseArray
|
TypeError
|
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'} (default 0)
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q)
result = data._data.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
|
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'} (default 0)
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
|
https://github.com/pandas-dev/pandas/issues/23925
|
In [18]: pd.DataFrame(pd.date_range('1/1/18', periods=5)).quantile()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-18-68ffc067f6f0> in <module>
----> 1 pd.DataFrame(pd.date_range('1/1/18', periods=5)).quantile()
~/clones/pandas/pandas/core/frame.py in quantile(self, q, axis, numeric_only, interpolation)
7569 axis=1,
7570 interpolation=interpolation,
-> 7571 transposed=is_transposed)
7572
7573 if result.ndim == 2:
~/clones/pandas/pandas/core/internals/managers.py in quantile(self, **kwargs)
500
501 def quantile(self, **kwargs):
--> 502 return self.reduction('quantile', **kwargs)
503
504 def setitem(self, **kwargs):
~/clones/pandas/pandas/core/internals/managers.py in reduction(self, f, axis, consolidate, transposed, **kwargs)
473
474 # single block
--> 475 values = _concat._concat_compat([b.values for b in blocks])
476
477 # compute the orderings of our original data
~/clones/pandas/pandas/core/dtypes/concat.py in _concat_compat(to_concat, axis)
172 to_concat = [x.astype('object') for x in to_concat]
173
--> 174 return np.concatenate(to_concat, axis=axis)
175
176
ValueError: need at least one array to concatenate
|
ValueError
|
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if is_scalar(key) and not is_integer(key) and key not in self.index:
# GH#12862 adding an new key to the Series
# Note: have to exclude integers because that is ambiguously
# position-based
self.loc[key] = value
return
if is_scalar(key):
key = [key]
elif not isinstance(key, (list, Series, np.ndarray)):
try:
key = list(key)
except Exception:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key, skipna=False)
if key_type == "integer":
if self.index.inferred_type == "integer":
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == "boolean":
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
|
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if is_scalar(key):
key = [key]
elif not isinstance(key, (list, Series, np.ndarray)):
try:
key = list(key)
except Exception:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key, skipna=False)
if key_type == "integer":
if self.index.inferred_type == "integer":
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == "boolean":
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
|
https://github.com/pandas-dev/pandas/issues/12862
|
ValueError Traceback (most recent call last)
<ipython-input-24-cc1ab78086e5> in <module>()
1 x = pd.Series()
2 x['foo'] = pd.to_datetime(42).tz_localize('UTC')
----> 3 x['bar'] = pd.to_datetime(666).tz_localize('UTC')
/usr/lib/python3.5/site-packages/pandas/core/series.py in __setitem__(self, key, value)
726 # do the setitem
727 cacher_needs_updating = self._check_is_chained_assignment_possible()
--> 728 setitem(key, value)
729 if cacher_needs_updating:
730 self._maybe_update_cacher()
/usr/lib/python3.5/site-packages/pandas/core/series.py in setitem(key, value)
722 pass
723
--> 724 self._set_with(key, value)
725
726 # do the setitem
/usr/lib/python3.5/site-packages/pandas/core/series.py in _set_with(self, key, value)
770 self._set_values(key.astype(np.bool_), value)
771 else:
--> 772 self._set_labels(key, value)
773
774 def _set_labels(self, key, value):
/usr/lib/python3.5/site-packages/pandas/core/series.py in _set_labels(self, key, value)
780 mask = indexer == -1
781 if mask.any():
--> 782 raise ValueError('%s not contained in the index' % str(key[mask]))
783 self._set_values(indexer, value)
784
ValueError: ['b' 'a' 'r'] not contained in the index
|
ValueError
|
def _get_join_indexers(self):
"""return the join indexers"""
def flip(xs):
"""unlike np.transpose, this returns an array of tuples"""
xs = [x if not is_extension_array_dtype(x) else x._ndarray_values for x in xs]
labels = list(string.ascii_lowercase[: len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(list(zip(*xs)), labeled_dtypes)
# values to compare
left_values = self.left.index.values if self.left_index else self.left_join_keys[-1]
right_values = (
self.right.index.values if self.right_index else self.right_join_keys[-1]
)
tolerance = self.tolerance
# we require sortedness and non-null values in the join keys
msg_sorted = "{side} keys must be sorted"
msg_missings = "Merge keys contain null values on {side} side"
if not Index(left_values).is_monotonic:
if isnull(left_values).any():
raise ValueError(msg_missings.format(side="left"))
else:
raise ValueError(msg_sorted.format(side="left"))
if not Index(right_values).is_monotonic:
if isnull(right_values).any():
raise ValueError(msg_missings.format(side="right"))
else:
raise ValueError(msg_sorted.format(side="right"))
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view("i8")
right_values = right_values.view("i8")
if tolerance is not None:
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
if self.left_index and self.right_index:
left_by_values = self.left_join_keys
right_by_values = self.right_join_keys
else:
left_by_values = self.left_join_keys[0:-1]
right_by_values = self.right_join_keys[0:-1]
# get tuple representation of values if more than one
if len(left_by_values) == 1:
left_by_values = left_by_values[0]
right_by_values = right_by_values[0]
else:
left_by_values = flip(left_by_values)
right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
func = _asof_by_function(self.direction)
return func(
left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance,
)
else:
# choose appropriate function by type
func = _asof_function(self.direction)
return func(left_values, right_values, self.allow_exact_matches, tolerance)
|
def _get_join_indexers(self):
"""return the join indexers"""
def flip(xs):
"""unlike np.transpose, this returns an array of tuples"""
labels = list(string.ascii_lowercase[: len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(list(zip(*xs)), labeled_dtypes)
# values to compare
left_values = self.left.index.values if self.left_index else self.left_join_keys[-1]
right_values = (
self.right.index.values if self.right_index else self.right_join_keys[-1]
)
tolerance = self.tolerance
# we require sortedness and non-null values in the join keys
msg_sorted = "{side} keys must be sorted"
msg_missings = "Merge keys contain null values on {side} side"
if not Index(left_values).is_monotonic:
if isnull(left_values).any():
raise ValueError(msg_missings.format(side="left"))
else:
raise ValueError(msg_sorted.format(side="left"))
if not Index(right_values).is_monotonic:
if isnull(right_values).any():
raise ValueError(msg_missings.format(side="right"))
else:
raise ValueError(msg_sorted.format(side="right"))
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view("i8")
right_values = right_values.view("i8")
if tolerance is not None:
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
if self.left_index and self.right_index:
left_by_values = self.left_join_keys
right_by_values = self.right_join_keys
else:
left_by_values = self.left_join_keys[0:-1]
right_by_values = self.right_join_keys[0:-1]
# get tuple representation of values if more than one
if len(left_by_values) == 1:
left_by_values = left_by_values[0]
right_by_values = right_by_values[0]
else:
left_by_values = flip(left_by_values)
right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
func = _asof_by_function(self.direction)
return func(
left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance,
)
else:
# choose appropriate function by type
func = _asof_function(self.direction)
return func(left_values, right_values, self.allow_exact_matches, tolerance)
|
https://github.com/pandas-dev/pandas/issues/26649
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
pd.merge_asof(left, right, by=['by_col1', 'by_col2'], on='on_col')
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 462, in merge_asof
return op.get_result()
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 1256, in get_result
join_index, left_indexer, right_indexer = self._get_join_info()
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 756, in _get_join_info
right_indexer) = self._get_join_indexers()
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 1504, in _get_join_indexers
left_by_values = flip(left_by_values)
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 1457, in flip
return np.array(lzip(*xs), labeled_dtypes)
File "myenv/lib/python3.6/site-packages/pandas/core/dtypes/dtypes.py", line 150, in __repr__
return str(self)
File "myenv/lib/python3.6/site-packages/pandas/core/dtypes/dtypes.py", line 129, in __str__
return self.__unicode__()
File "myenv/lib/python3.6/site-packages/pandas/core/dtypes/dtypes.py", line 704, in __unicode__
return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz)
SystemError: PyEval_EvalFrameEx returned a result with an error set
|
SystemError
|
def flip(xs):
"""unlike np.transpose, this returns an array of tuples"""
xs = [x if not is_extension_array_dtype(x) else x._ndarray_values for x in xs]
labels = list(string.ascii_lowercase[: len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(list(zip(*xs)), labeled_dtypes)
|
def flip(xs):
"""unlike np.transpose, this returns an array of tuples"""
labels = list(string.ascii_lowercase[: len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(list(zip(*xs)), labeled_dtypes)
|
https://github.com/pandas-dev/pandas/issues/26649
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
pd.merge_asof(left, right, by=['by_col1', 'by_col2'], on='on_col')
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 462, in merge_asof
return op.get_result()
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 1256, in get_result
join_index, left_indexer, right_indexer = self._get_join_info()
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 756, in _get_join_info
right_indexer) = self._get_join_indexers()
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 1504, in _get_join_indexers
left_by_values = flip(left_by_values)
File "myenv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 1457, in flip
return np.array(lzip(*xs), labeled_dtypes)
File "myenv/lib/python3.6/site-packages/pandas/core/dtypes/dtypes.py", line 150, in __repr__
return str(self)
File "myenv/lib/python3.6/site-packages/pandas/core/dtypes/dtypes.py", line 129, in __str__
return self.__unicode__()
File "myenv/lib/python3.6/site-packages/pandas/core/dtypes/dtypes.py", line 704, in __unicode__
return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz)
SystemError: PyEval_EvalFrameEx returned a result with an error set
|
SystemError
|
def _format_value(self, val):
if is_scalar(val) and missing.isna(val):
val = self.na_rep
elif is_float(val):
if missing.isposinf_scalar(val):
val = self.inf_rep
elif missing.isneginf_scalar(val):
val = "-{inf}".format(inf=self.inf_rep)
elif self.float_format is not None:
val = float(self.float_format % val)
if getattr(val, "tzinfo", None) is not None:
raise ValueError(
"Excel does not support datetimes with "
"timezones. Please ensure that datetimes "
"are timezone unaware before writing to Excel."
)
return val
|
def _format_value(self, val):
if is_scalar(val) and missing.isna(val):
val = self.na_rep
elif is_float(val):
if missing.isposinf_scalar(val):
val = self.inf_rep
elif missing.isneginf_scalar(val):
val = "-{inf}".format(inf=self.inf_rep)
elif self.float_format is not None:
val = float(self.float_format % val)
return val
|
https://github.com/pandas-dev/pandas/issues/7056
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-4e18a4be2a71> in <module>()
----> 1 df.to_excel('test.xlsx')
/home/silvio/prod34/lib/python3.4/site-packages/pandas/core/frame.py in to_excel(self, excel_writer, sheet_name, na_rep, float_format, cols, header, index, index_label, startrow, startcol, engine, merge_cells)
1202 formatted_cells = formatter.get_formatted_cells()
1203 excel_writer.write_cells(formatted_cells, sheet_name,
-> 1204 startrow=startrow, startcol=startcol)
1205 if need_save:
1206 excel_writer.save()
/home/silvio/prod34/lib/python3.4/site-packages/pandas/io/excel.py in write_cells(self, cells, sheet_name, startrow, startcol)
771 wks.write(startrow + cell.row,
772 startcol + cell.col,
--> 773 cell.val, style)
774
775 def _convert_to_style(self, style_dict, num_format_str=None):
/home/silvio/prod34/lib/python3.4/site-packages/xlsxwriter/worksheet.py in cell_wrapper(self, *args, **kwargs)
55 if len(args):
56 int(args[0])
---> 57 return method(self, *args, **kwargs)
58 except ValueError:
59 # First arg isn't an int, convert to A1 notation.
/home/silvio/prod34/lib/python3.4/site-packages/xlsxwriter/worksheet.py in write(self, row, col, *args)
374 # Write datetime objects.
375 if isinstance(token, date_types):
--> 376 return self.write_datetime(row, col, *args)
377
378 # Write number types.
/home/silvio/prod34/lib/python3.4/site-packages/xlsxwriter/worksheet.py in cell_wrapper(self, *args, **kwargs)
55 if len(args):
56 int(args[0])
---> 57 return method(self, *args, **kwargs)
58 except ValueError:
59 # First arg isn't an int, convert to A1 notation.
/home/silvio/prod34/lib/python3.4/site-packages/xlsxwriter/worksheet.py in write_datetime(self, row, col, date, cell_format)
666
667 # Convert datetime to an Excel date.
--> 668 number = self._convert_date_time(date)
669
670 # Add the default date format.
/home/silvio/prod34/lib/python3.4/site-packages/xlsxwriter/worksheet.py in _convert_date_time(self, dt_obj)
3265 def _convert_date_time(self, dt_obj):
3266 # Convert a datetime object to an Excel serial date and time.
-> 3267 return datetime_to_excel_datetime(dt_obj, self.date_1904)
3268
3269 def _options_changed(self):
/home/silvio/prod34/lib/python3.4/site-packages/xlsxwriter/utility.py in datetime_to_excel_datetime(dt_obj, date_1904)
577
578 # Convert a Python datetime.datetime value to an Excel date number.
--> 579 delta = dt_obj - epoch
580 excel_time = (delta.days
581 + (float(delta.seconds)
/home/silvio/prod34/lib/python3.4/site-packages/pandas/tslib.cpython-34m.so in pandas.tslib._Timestamp.__sub__ (pandas/tslib.c:11918)()
TypeError: can't subtract offset-naive and offset-aware datetimes
|
TypeError
|
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_not_none(values):
try:
return next(com._not_none(*values))
except StopIteration:
return None
v = first_not_none(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = self.grouper.result_index
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_not_none(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([x.index for x in values])
singular_series = len(values) == 1 and applied_index.nlevels == 1
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys,
values,
not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = {v.name for v in values}
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if (
isinstance(v.index, MultiIndex)
or key_index is None
or isinstance(key_index, MultiIndex)
):
stacked_values = np.vstack([np.asarray(v) for v in values])
result = DataFrame(
stacked_values, index=key_index, columns=index
)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.core.reshape.concat import concat
result = concat(
values,
keys=key_index,
names=key_index.names,
axis=self.axis,
).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(v) for v in values])
result = DataFrame(
stacked_values.T, index=v.index, columns=key_index
)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else caluse
return Series(values, index=key_index, name=self._selection_name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if so.ndim == 2 and so.dtypes.apply(is_datetimelike).any():
result = _recast_datetimelike_result(result)
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
# values are not series or array-like but scalars
else:
# only coerce dates if we find at least 1 datetime
coerce = any(isinstance(x, Timestamp) for x in values)
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
return Series(values, index=key_index)._convert(
datetime=True, coerce=coerce
)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
|
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
from pandas.core.tools.numeric import to_numeric
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_not_none(values):
try:
return next(com._not_none(*values))
except StopIteration:
return None
v = first_not_none(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = self.grouper.result_index
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_not_none(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([x.index for x in values])
singular_series = len(values) == 1 and applied_index.nlevels == 1
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys,
values,
not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = {v.name for v in values}
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if (
isinstance(v.index, MultiIndex)
or key_index is None
or isinstance(key_index, MultiIndex)
):
stacked_values = np.vstack([np.asarray(v) for v in values])
result = DataFrame(
stacked_values, index=key_index, columns=index
)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.core.reshape.concat import concat
result = concat(
values,
keys=key_index,
names=key_index.names,
axis=self.axis,
).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(v) for v in values])
result = DataFrame(
stacked_values.T, index=v.index, columns=key_index
)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else caluse
return Series(values, index=key_index, name=self._selection_name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if so.ndim == 2 and so.dtypes.apply(is_datetimelike).any():
result = result.apply(lambda x: to_numeric(x, errors="ignore"))
date_cols = self._selected_obj.select_dtypes(
include=["datetime", "timedelta"]
).columns
date_cols = date_cols.intersection(result.columns)
result[date_cols] = result[date_cols]._convert(
datetime=True, coerce=True
)
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
# values are not series or array-like but scalars
else:
# only coerce dates if we find at least 1 datetime
coerce = any(isinstance(x, Timestamp) for x in values)
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
return Series(values, index=key_index)._convert(
datetime=True, coerce=coerce
)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
|
https://github.com/pandas-dev/pandas/issues/13287
|
Traceback (most recent call last):
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2885, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-46-ae20c6b6248f>", line 1, in <module>
pd.DataFrame(ts_array)
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/frame.py", line 255, in __init__
copy=copy)
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/frame.py", line 432, in _init_ndarray
return create_block_manager_from_blocks([values], [columns, index])
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/internals.py", line 3986, in create_block_manager_from_blocks
mgr = BlockManager(blocks, axes)
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/internals.py", line 2591, in __init__
(block.ndim, self.ndim))
AssertionError: Number of Block dimensions (1) must equal number of axes (2)
|
AssertionError
|
def init_ndarray(values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype(
dtype
):
if not hasattr(values, "dtype"):
values = prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
elif is_extension_array_dtype(values):
# GH#19157
if columns is None:
columns = [0]
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError(
"failed to cast to '{dtype}' (Exception was: {orig})".format(
dtype=dtype, orig=orig
)
)
raise_with_traceback(e)
index, columns = _get_axes(*values.shape, index=index, columns=columns)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
if values.ndim == 2 and values.shape[0] != 1:
# transpose and separate blocks
dvals_list = [maybe_infer_to_datetimelike(row) for row in values]
for n in range(len(dvals_list)):
if isinstance(dvals_list[n], np.ndarray):
dvals_list[n] = dvals_list[n].reshape(1, -1)
from pandas.core.internals.blocks import make_block
# TODO: What about re-joining object columns?
block_values = [
make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list))
]
else:
datelike_vals = maybe_infer_to_datetimelike(values)
block_values = [datelike_vals]
else:
block_values = [values]
return create_block_manager_from_blocks(block_values, [columns, index])
|
def init_ndarray(values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype(
dtype
):
if not hasattr(values, "dtype"):
values = prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
elif is_extension_array_dtype(values):
# GH#19157
if columns is None:
columns = [0]
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError(
"failed to cast to '{dtype}' (Exception was: {orig})".format(
dtype=dtype, orig=orig
)
)
raise_with_traceback(e)
index, columns = _get_axes(*values.shape, index=index, columns=columns)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
|
https://github.com/pandas-dev/pandas/issues/13287
|
Traceback (most recent call last):
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2885, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-46-ae20c6b6248f>", line 1, in <module>
pd.DataFrame(ts_array)
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/frame.py", line 255, in __init__
copy=copy)
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/frame.py", line 432, in _init_ndarray
return create_block_manager_from_blocks([values], [columns, index])
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/internals.py", line 3986, in create_block_manager_from_blocks
mgr = BlockManager(blocks, axes)
File "/Users/jkelleher/anaconda/lib/python2.7/site-packages/pandas/core/internals.py", line 2591, in __init__
(block.ndim, self.ndim))
AssertionError: Number of Block dimensions (1) must equal number of axes (2)
|
AssertionError
|
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
except (TypeError, ValueError):
pass
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
|
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
|
https://github.com/pandas-dev/pandas/issues/20629
|
Python 3.6.3 (default, Oct 3 2017, 21:45:48)
[GCC 7.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import pandas as pd
x = pd.Series([1, 2, 3], index=pd.CategoricalIndex(["A", "B", "C"]))
x.loc["A"]
1
x.at["A"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/pandas/core/indexing.py", line 1869, in __getitem__
return self.obj._get_value(*key, takeable=self._takeable)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/series.py", line 929, in _get_value
return self.index.get_value(self._values, label)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/indexes/category.py", line 423, in get_value
return series.iloc[indexer]
AttributeError: 'numpy.ndarray' object has no attribute 'iloc'
x = pd.DataFrame([[1, 2], [3, 4], [5, 6]], index=pd.CategoricalIndex(["A", "B", "C"]))
x.loc["B", 1]
4
x.at["B", 1]
Traceback (most recent call last):
File "pandas/_libs/index.pyx", line 139, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 811, in pandas._libs.hashtable.Int64HashTable.get_item
TypeError: an integer is required
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/pandas/core/indexing.py", line 1869, in __getitem__
return self.obj._get_value(*key, takeable=self._takeable)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/frame.py", line 1985, in _get_value
return engine.get_value(series._values, index)
File "pandas/_libs/index.pyx", line 83, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 91, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 141, in pandas._libs.index.IndexEngine.get_loc
KeyError: 'B'
|
AttributeError
|
def get_value(self, series: AnyArrayLike, key: Any):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
Parameters
----------
series : Series, ExtensionArray, Index, or ndarray
1-dimensional array to take values from
key: : scalar
The value of this index at the position of the desired value,
otherwise the positional index of the desired value
Returns
-------
Any
The element of the series at the position indicated by the key
"""
try:
k = com.values_from_object(key)
k = self._convert_scalar_indexer(k, kind="getitem")
indexer = self.get_loc(k)
return series.take([indexer])[0]
except (KeyError, TypeError):
pass
# we might be a positional inexer
return super().get_value(series, key)
|
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = com.values_from_object(key)
k = self._convert_scalar_indexer(k, kind="getitem")
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
# we might be a positional inexer
return super().get_value(series, key)
|
https://github.com/pandas-dev/pandas/issues/20629
|
Python 3.6.3 (default, Oct 3 2017, 21:45:48)
[GCC 7.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import pandas as pd
x = pd.Series([1, 2, 3], index=pd.CategoricalIndex(["A", "B", "C"]))
x.loc["A"]
1
x.at["A"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/pandas/core/indexing.py", line 1869, in __getitem__
return self.obj._get_value(*key, takeable=self._takeable)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/series.py", line 929, in _get_value
return self.index.get_value(self._values, label)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/indexes/category.py", line 423, in get_value
return series.iloc[indexer]
AttributeError: 'numpy.ndarray' object has no attribute 'iloc'
x = pd.DataFrame([[1, 2], [3, 4], [5, 6]], index=pd.CategoricalIndex(["A", "B", "C"]))
x.loc["B", 1]
4
x.at["B", 1]
Traceback (most recent call last):
File "pandas/_libs/index.pyx", line 139, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 811, in pandas._libs.hashtable.Int64HashTable.get_item
TypeError: an integer is required
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/pandas/core/indexing.py", line 1869, in __getitem__
return self.obj._get_value(*key, takeable=self._takeable)
File "/usr/local/lib/python3.6/dist-packages/pandas/core/frame.py", line 1985, in _get_value
return engine.get_value(series._values, index)
File "pandas/_libs/index.pyx", line 83, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 91, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 141, in pandas._libs.index.IndexEngine.get_loc
KeyError: 'B'
|
AttributeError
|
def coerce_to_array(values, dtype, mask=None, copy=False):
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : boolean 1D array, optional
copy : boolean, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is integer numpy array, preserve it's dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = _dtypes[str(np.dtype(dtype))]
except KeyError:
raise ValueError("invalid dtype specified {}".format(dtype))
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"mixed-integer-float",
]:
raise TypeError(
"{} cannot be converted to an IntegerDtype".format(values.dtype)
)
elif is_bool_dtype(values) and is_integer_dtype(dtype):
values = np.array(values, dtype=int, copy=copy)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(
"{} cannot be converted to an IntegerDtype".format(values.dtype)
)
if mask is None:
mask = isna(values)
else:
assert len(mask) == len(values)
if not values.ndim == 1:
raise TypeError("values must be a 1D list-like")
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("int64")
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
if mask.any():
values = values.copy()
values[mask] = 1
values = safe_cast(values, dtype, copy=False)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
|
def coerce_to_array(values, dtype, mask=None, copy=False):
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : boolean 1D array, optional
copy : boolean, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is integer numpy array, preserve it's dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = _dtypes[str(np.dtype(dtype))]
except KeyError:
raise ValueError("invalid dtype specified {}".format(dtype))
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"mixed-integer-float",
]:
raise TypeError(
"{} cannot be converted to an IntegerDtype".format(values.dtype)
)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(
"{} cannot be converted to an IntegerDtype".format(values.dtype)
)
if mask is None:
mask = isna(values)
else:
assert len(mask) == len(values)
if not values.ndim == 1:
raise TypeError("values must be a 1D list-like")
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("int64")
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
if mask.any():
values = values.copy()
values[mask] = 1
values = safe_cast(values, dtype, copy=False)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
|
https://github.com/pandas-dev/pandas/issues/25211
|
In [1]: import pandas as pd, numpy as np
# expected behaviour with ordinary dtype
In [2]: pd.Series([True, False], dtype=int)
Out[2]:
0 1
1 0
dtype: int64
# broken
In [3]: pd.Series([True, False], dtype=pd.Int64Dtype())
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure)
694 if is_integer_dtype(dtype):
--> 695 subarr = maybe_cast_to_integer_array(arr, dtype)
696
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/dtypes/cast.py in maybe_cast_to_integer_array(arr, dtype, copy)
1304 if not hasattr(arr, "astype"):
-> 1305 casted = np.array(arr, dtype=dtype, copy=copy)
1306 else:
TypeError: data type not understood
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-3-b747cfcdf17f> in <module>
----> 1 pd.Series([True, False], dtype=pd.Int64Dtype())
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
260 else:
261 data = sanitize_array(data, index, dtype, copy,
--> 262 raise_cast_failure=True)
263
264 data = SingleBlockManager(data, index, fastpath=True)
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
605 try:
606 subarr = _try_cast(data, False, dtype, copy,
--> 607 raise_cast_failure)
608 except Exception:
609 if raise_cast_failure: # pragma: no cover
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure)
714 # create an extension array from its dtype
715 array_type = dtype.construct_array_type()._from_sequence
--> 716 subarr = array_type(arr, dtype=dtype, copy=copy)
717 elif dtype is not None and raise_cast_failure:
718 raise
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/arrays/integer.py in _from_sequence(cls, scalars, dtype, copy)
301 @classmethod
302 def _from_sequence(cls, scalars, dtype=None, copy=False):
--> 303 return integer_array(scalars, dtype=dtype, copy=copy)
304
305 @classmethod
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/arrays/integer.py in integer_array(values, dtype, copy)
109 TypeError if incompatible types
110 """
--> 111 values, mask = coerce_to_array(values, dtype=dtype, copy=copy)
112 return IntegerArray(values, mask)
113
/usr/local/anaconda3/lib/python3.7/site-packages/pandas/core/arrays/integer.py in coerce_to_array(values, dtype, mask, copy)
190 elif not (is_integer_dtype(values) or is_float_dtype(values)):
191 raise TypeError("{} cannot be converted to an IntegerDtype".format(
--> 192 values.dtype))
193
194 if mask is None:
TypeError: bool cannot be converted to an IntegerDtype
|
TypeError
|
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplentedError for offsets without a
vectorized implementation.
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if type(self) is not DateOffset:
raise NotImplementedError(
"DateOffset subclass {name} "
"does not have a vectorized "
"implementation".format(name=self.__class__.__name__)
)
kwds = self.kwds
relativedelta_fast = {
"years",
"months",
"weeks",
"days",
"hours",
"minutes",
"seconds",
"microseconds",
}
# relativedelta/_offset path only valid for base DateOffset
if self._use_relativedelta and set(kwds).issubset(relativedelta_fast):
months = (kwds.get("years", 0) * 12 + kwds.get("months", 0)) * self.n
if months:
shifted = liboffsets.shift_months(i.asi8, months)
i = type(i)(shifted, dtype=i.dtype)
weeks = (kwds.get("weeks", 0)) * self.n
if weeks:
# integer addition on PeriodIndex is deprecated,
# so we directly use _time_shift instead
asper = i.to_period("W")
if not isinstance(asper._data, np.ndarray):
# unwrap PeriodIndex --> PeriodArray
asper = asper._data
shifted = asper._time_shift(weeks)
i = shifted.to_timestamp() + i.to_perioddelta("W")
timedelta_kwds = {
k: v
for k, v in kwds.items()
if k in ["days", "hours", "minutes", "seconds", "microseconds"]
}
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, "_offset"):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
kwd = set(kwds) - relativedelta_fast
raise NotImplementedError(
"DateOffset with relativedelta "
"keyword(s) {kwd} not able to be "
"applied vectorized".format(kwd=kwd)
)
|
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplentedError for offsets without a
vectorized implementation.
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if type(self) is not DateOffset:
raise NotImplementedError(
"DateOffset subclass {name} "
"does not have a vectorized "
"implementation".format(name=self.__class__.__name__)
)
kwds = self.kwds
relativedelta_fast = {
"years",
"months",
"weeks",
"days",
"hours",
"minutes",
"seconds",
"microseconds",
}
# relativedelta/_offset path only valid for base DateOffset
if self._use_relativedelta and set(kwds).issubset(relativedelta_fast):
months = (kwds.get("years", 0) * 12 + kwds.get("months", 0)) * self.n
if months:
shifted = liboffsets.shift_months(i.asi8, months)
i = type(i)(shifted, freq=i.freq, dtype=i.dtype)
weeks = (kwds.get("weeks", 0)) * self.n
if weeks:
# integer addition on PeriodIndex is deprecated,
# so we directly use _time_shift instead
asper = i.to_period("W")
if not isinstance(asper._data, np.ndarray):
# unwrap PeriodIndex --> PeriodArray
asper = asper._data
shifted = asper._time_shift(weeks)
i = shifted.to_timestamp() + i.to_perioddelta("W")
timedelta_kwds = {
k: v
for k, v in kwds.items()
if k in ["days", "hours", "minutes", "seconds", "microseconds"]
}
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, "_offset"):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
kwd = set(kwds) - relativedelta_fast
raise NotImplementedError(
"DateOffset with relativedelta "
"keyword(s) {kwd} not able to be "
"applied vectorized".format(kwd=kwd)
)
|
https://github.com/pandas-dev/pandas/issues/26258
|
Traceback (most recent call last):
File "C:\mma\local\Anaconda3\envs\pd24\lib\site-packages\pandas\core\arrays\datetimelike.py", line 884, in _validate_frequency
raise ValueError
ValueError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\mma\local\Anaconda3\envs\pd24\lib\site-packages\pandas\core\indexes\datetimelike.py", line 489, in __add__
result = self._data.__add__(maybe_unwrap_index(other))
File "C:\mma\local\Anaconda3\envs\pd24\lib\site-packages\pandas\core\arrays\datetimelike.py", line 1190, in __add__
result = self._add_offset(other)
File "C:\mma\local\Anaconda3\envs\pd24\lib\site-packages\pandas\core\arrays\datetimes.py", line 737, in _add_offset
result = offset.apply_index(values)
File "pandas/_libs/tslibs/offsets.pyx", line 116, in pandas._libs.tslibs.offsets.apply_index_wraps.wrapper
File "C:\mma\local\Anaconda3\envs\pd24\lib\site-packages\pandas\tseries\offsets.py", line 278, in apply_index
i = type(i)(shifted, freq=i.freq, dtype=i.dtype)
File "C:\mma\local\Anaconda3\envs\pd24\lib\site-packages\pandas\core\arrays\datetimes.py", line 351, in __init__
type(self)._validate_frequency(self, freq)
File "C:\mma\local\Anaconda3\envs\pd24\lib\site-packages\pandas\core\arrays\datetimelike.py", line 897, in _validate_frequency
.format(infer=inferred, passed=freq.freqstr))
ValueError: Inferred frequency AS-APR from passed values does not conform to passed frequency AS-JAN
|
ValueError
|
def _construct_result(left, result, index, name, dtype=None):
"""
If the raw op result has a non-None name (e.g. it is an Index object) and
the name argument is None, then passing name to the constructor will
not be enough; we still need to override the name attribute.
"""
out = left._constructor(result, index=index, dtype=dtype)
out = out.__finalize__(left)
out.name = name
return out
|
def _construct_result(left, result, index, name, dtype=None):
"""
If the raw op result has a non-None name (e.g. it is an Index object) and
the name argument is None, then passing name to the constructor will
not be enough; we still need to override the name attribute.
"""
out = left._constructor(result, index=index, dtype=dtype)
out.name = name
return out
|
https://github.com/pandas-dev/pandas/issues/25557
|
a.divmod(b)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/danlaw/Projects/pandas/pandas/core/ops.py", line 1892, in flex_wrapper
return self._binop(other, op, level=level, fill_value=fill_value)
File "/Users/danlaw/Projects/pandas/pandas/core/series.py", line 2522, in _binop
result = self._constructor(result, index=new_index, name=name)
File "/Users/danlaw/Projects/pandas/pandas/core/series.py", line 250, in __init__
.format(val=len(data), ind=len(index)))
ValueError: Length of passed values is 2, index implies 4
|
ValueError
|
def _construct_divmod_result(left, result, index, name, dtype=None):
"""divmod returns a tuple of like indexed series instead of a single series."""
return (
_construct_result(left, result[0], index=index, name=name, dtype=dtype),
_construct_result(left, result[1], index=index, name=name, dtype=dtype),
)
|
def _construct_divmod_result(left, result, index, name, dtype=None):
"""divmod returns a tuple of like indexed series instead of a single series."""
constructor = left._constructor
return (
constructor(result[0], index=index, name=name, dtype=dtype),
constructor(result[1], index=index, name=name, dtype=dtype),
)
|
https://github.com/pandas-dev/pandas/issues/25557
|
a.divmod(b)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/danlaw/Projects/pandas/pandas/core/ops.py", line 1892, in flex_wrapper
return self._binop(other, op, level=level, fill_value=fill_value)
File "/Users/danlaw/Projects/pandas/pandas/core/series.py", line 2522, in _binop
result = self._constructor(result, index=new_index, name=name)
File "/Users/danlaw/Projects/pandas/pandas/core/series.py", line 250, in __init__
.format(val=len(data), ind=len(index)))
ValueError: Length of passed values is 2, index implies 4
|
ValueError
|
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value.
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
Series
"""
if not isinstance(other, Series):
raise AssertionError("Other operand must be Series")
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join="outer", copy=False)
new_index = this.index
this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value)
with np.errstate(all="ignore"):
result = func(this_vals, other_vals)
name = ops.get_op_result_name(self, other)
if func.__name__ in ["divmod", "rdivmod"]:
ret = ops._construct_divmod_result(self, result, new_index, name)
else:
ret = ops._construct_result(self, result, new_index, name)
return ret
|
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value.
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
Series
"""
if not isinstance(other, Series):
raise AssertionError("Other operand must be Series")
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join="outer", copy=False)
new_index = this.index
this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value)
with np.errstate(all="ignore"):
result = func(this_vals, other_vals)
name = ops.get_op_result_name(self, other)
result = self._constructor(result, index=new_index, name=name)
result = result.__finalize__(self)
if name is None:
# When name is None, __finalize__ overwrites current name
result.name = None
return result
|
https://github.com/pandas-dev/pandas/issues/25557
|
a.divmod(b)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/danlaw/Projects/pandas/pandas/core/ops.py", line 1892, in flex_wrapper
return self._binop(other, op, level=level, fill_value=fill_value)
File "/Users/danlaw/Projects/pandas/pandas/core/series.py", line 2522, in _binop
result = self._constructor(result, index=new_index, name=name)
File "/Users/danlaw/Projects/pandas/pandas/core/series.py", line 250, in __init__
.format(val=len(data), ind=len(index)))
ValueError: Length of passed values is 2, index implies 4
|
ValueError
|
def _try_cast(self, result, obj, numeric_only=False):
"""
Try to cast the result to our obj original type,
we may have roundtripped through object in the mean-time.
If numeric_only is True, then only try to cast numerics
and not datetimelikes.
"""
if obj.ndim > 1:
dtype = obj._values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if is_datetime64tz_dtype(dtype):
# GH 23683
# Prior results _may_ have been generated in UTC.
# Ensure we localize to UTC first before converting
# to the target timezone
try:
result = obj._values._from_sequence(result, dtype="datetime64[ns, UTC]")
result = result.astype(dtype)
except TypeError:
# _try_cast was called at a point where the result
# was already tz-aware
pass
elif is_extension_array_dtype(dtype):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
result = obj._values._from_sequence(result, dtype=dtype)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
|
def _try_cast(self, result, obj, numeric_only=False):
"""
Try to cast the result to our obj original type,
we may have roundtripped through object in the mean-time.
If numeric_only is True, then only try to cast numerics
and not datetimelikes.
"""
if obj.ndim > 1:
dtype = obj._values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if is_extension_array_dtype(dtype):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
result = obj._values._from_sequence(result, dtype=dtype)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
|
https://github.com/pandas-dev/pandas/issues/23683
|
Traceback (most recent call last):
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2670, in agg_series
return self._aggregate_series_fast(obj, func)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2689, in _aggregate_series_fast
dummy)
File "pandas/_libs/reduction.pyx", line 334, in pandas._libs.reduction.SeriesGrouper.__init__
File "pandas/_libs/reduction.pyx", line 347, in pandas._libs.reduction.SeriesGrouper._check_dummy
ValueError: Dummy array must be same dtype
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 3495, in aggregate
return self._python_agg_general(func_or_funcs, *args, **kwargs)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 1068, in _python_agg_general
result, counts = self.grouper.agg_series(obj, f)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2672, in agg_series
return self._aggregate_series_pure_python(obj, func)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2706, in _aggregate_series_pure_python
raise ValueError('Function does not reduce')
ValueError: Function does not reduce
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 4656, in aggregate
return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 4087, in aggregate
result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/base.py", line 490, in _aggregate
result = _agg(arg, _agg_1dim)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/base.py", line 441, in _agg
result[fname] = func(fname, agg_how)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/base.py", line 424, in _agg_1dim
return colg.aggregate(how, _level=(_level or 0) + 1)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 3497, in aggregate
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
File "~/.pyenv/versions/anaconda3-5.3.0/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 3627, in _aggregate_named
raise Exception('Must produce aggregated value')
Exception: Must produce aggregated value
|
ValueError
|
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for
each column.
.. versionadded:: 0.20.0
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
# check for rows with the same id but conflicting values
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
obj = self._selected_obj
def groupby_series(obj, col=None):
return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique(
dropna=dropna
)
if isinstance(obj, Series):
results = groupby_series(obj)
else:
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
results.columns.names = obj.columns.names
if not self.as_index:
results.index = ibase.default_index(len(results))
return results
|
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for
each column.
.. versionadded:: 0.20.0
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
# check for rows with the same id but conflicting values
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
obj = self._selected_obj
def groupby_series(obj, col=None):
return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique(
dropna=dropna
)
if isinstance(obj, Series):
results = groupby_series(obj)
else:
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
if not self.as_index:
results.index = ibase.default_index(len(results))
return results
|
https://github.com/pandas-dev/pandas/issues/23222
|
import pandas as pd
pd.show_versions()
Backend TkAgg is interactive backend. Turning interactive mode on.
Matplotlib support failed
Traceback (most recent call last):
File "C:\Users\Admin\AppData\Local\JetBrains\Toolbox\apps\PyCharm-P\ch-0\183.3647.8\helpers\pydev\_pydev_bundle\pydev_import_hook.py", line 23, in do_import
succeeded = activate_func()
File "C:\Users\Admin\AppData\Local\JetBrains\Toolbox\apps\PyCharm-P\ch-0\183.3647.8\helpers\pydev\pydev_ipython\matplotlibtools.py", line 141, in activate_pylab
pylab = sys.modules['pylab']
KeyError: 'pylab'
INSTALLED VERSIONS
------------------
commit: None
python: 3.6.6.final.0
python-bits: 64
OS: Windows
OS-release: 10
machine: AMD64
processor: Intel64 Family 6 Model 142 Stepping 9, GenuineIntel
byteorder: little
LC_ALL: None
LANG: None
LOCALE: None.None
pandas: 0.23.4
pytest: 3.8.2
pip: 18.1
setuptools: 40.4.3
Cython: 0.28.5
numpy: 1.15.1
scipy: 1.1.0
pyarrow: None
xarray: None
IPython: 7.0.1
sphinx: 1.8.1
patsy: 0.5.0
dateutil: 2.7.3
pytz: 2018.5
blosc: None
bottleneck: 1.2.1
tables: 3.4.4
numexpr: 2.6.8
feather: None
matplotlib: 3.0.0
openpyxl: 2.5.8
xlrd: 1.1.0
xlwt: 1.3.0
xlsxwriter: 1.1.1
lxml: 4.2.5
bs4: 4.6.3
html5lib: 1.0.1
sqlalchemy: 1.2.12
pymysql: None
psycopg2: None
jinja2: 2.10
s3fs: None
fastparquet: None
pandas_gbq: None
pandas_datareader: None
|
KeyError
|
def read_sas(
filepath_or_buffer,
format=None,
index=None,
encoding=None,
chunksize=None,
iterator=False,
):
"""
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : string or file-like object
Path to the SAS file.
format : string {'xport', 'sas7bdat'} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : string, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
Returns
-------
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader
"""
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
"than a string name, you must specify "
"a format string"
)
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, compat.string_types):
raise ValueError(buffer_error_msg)
fname = filepath_or_buffer.lower()
if fname.endswith(".xpt"):
format = "xport"
elif fname.endswith(".sas7bdat"):
format = "sas7bdat"
else:
raise ValueError("unable to infer format of SAS file")
if format.lower() == "xport":
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(
filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize
)
elif format.lower() == "sas7bdat":
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(
filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize
)
else:
raise ValueError("unknown SAS format")
if iterator or chunksize:
return reader
data = reader.read()
reader.close()
return data
|
def read_sas(
filepath_or_buffer,
format=None,
index=None,
encoding=None,
chunksize=None,
iterator=False,
):
"""
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : string or file-like object
Path to the SAS file.
format : string {'xport', 'sas7bdat'} or None
If None, file format is inferred. If 'xport' or 'sas7bdat',
uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : string, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
Returns
-------
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader
"""
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
"than a string name, you must specify "
"a format string"
)
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, compat.string_types):
raise ValueError(buffer_error_msg)
try:
fname = filepath_or_buffer.lower()
if fname.endswith(".xpt"):
format = "xport"
elif fname.endswith(".sas7bdat"):
format = "sas7bdat"
else:
raise ValueError("unable to infer format of SAS file")
except ValueError:
pass
if format.lower() == "xport":
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(
filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize
)
elif format.lower() == "sas7bdat":
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(
filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize
)
else:
raise ValueError("unknown SAS format")
if iterator or chunksize:
return reader
data = reader.read()
reader.close()
return data
|
https://github.com/pandas-dev/pandas/issues/24548
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-10-de3f5c15fb71> in <module>
1 import pandas as pd
----> 2 pd.read_sas('/tmp/foo')
~/.virtualenvs/pandas/lib/python3.7/site-packages/pandas/io/sas/sasreader.py in read_sas(filepath_or_buffer, format, index, encoding, chunksize, iterator)
50 pass
51
---> 52 if format.lower() == 'xport':
53 from pandas.io.sas.sas_xport import XportReader
54 reader = XportReader(filepath_or_buffer, index=index,
AttributeError: 'NoneType' object has no attribute 'lower'
|
AttributeError
|
def _write_col_header(self, indent):
truncate_h = self.fmt.truncate_h
if isinstance(self.columns, ABCMultiIndex):
template = 'colspan="{span:d}" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
else:
sentinel = False
levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (
values[:ins_col] + (u("..."),) + values[ins_col:]
)
else:
# sparse col headers do not receive a ...
values = (
values[:ins_col]
+ (values[ins_col - 1],)
+ values[ins_col:]
)
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + (u("..."),) + values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + [u("...")] + values[ins_col:]
# see gh-22579
# Column Offset Bug with to_html(index=False) with
# MultiIndex Columns and Index.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code
# block below for standard columns index.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class and create a
# _get_formatted_column_labels function for code
# parity with DataFrameFormatter class.
if self.fmt.show_index_names:
name = self.columns.names[lnum]
row.append(pprint_thing(name or ""))
else:
row.append("")
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
else:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code block
# above for columns MultiIndex.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class.
if self.fmt.show_index_names:
row.append(self.columns.name or "")
else:
row.append("")
row.extend(self.columns)
align = self.fmt.justify
if truncate_h:
ins_col = self.row_levels + self.fmt.tr_col_num
row.insert(ins_col, "...")
self.write_tr(row, indent, self.indent_delta, header=True, align=align)
|
def _write_col_header(self, indent):
truncate_h = self.fmt.truncate_h
if isinstance(self.columns, ABCMultiIndex):
template = 'colspan="{span:d}" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (
values[:ins_col] + (u("..."),) + values[ins_col:]
)
else:
# sparse col headers do not receive a ...
values = (
values[:ins_col]
+ (values[ins_col - 1],)
+ values[ins_col:]
)
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + (u("..."),) + values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + [u("...")] + values[ins_col:]
# see gh-22579
# Column Offset Bug with to_html(index=False) with
# MultiIndex Columns and Index.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code
# block below for standard columns index.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class and create a
# _get_formatted_column_labels function for code
# parity with DataFrameFormatter class.
if self.fmt.show_index_names:
name = self.columns.names[lnum]
row.append(pprint_thing(name or ""))
else:
row.append("")
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
else:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code block
# above for columns MultiIndex.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class.
if self.fmt.show_index_names:
row.append(self.columns.name or "")
else:
row.append("")
row.extend(self.columns)
align = self.fmt.justify
if truncate_h:
ins_col = self.row_levels + self.fmt.tr_col_num
row.insert(ins_col, "...")
self.write_tr(row, indent, self.indent_delta, header=True, align=align)
|
https://github.com/pandas-dev/pandas/issues/22887
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-24-59e289592884> in <module>()
3 ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
4 df = pd.DataFrame(index=arrays, columns=arrays)
----> 5 df.to_html(max_cols=7, sparsify=False)
~\Anaconda3\lib\site-packages\pandas\core\frame.py in to_html(self, buf, columns, col_space, header, index, na_rep, formatters, float_format, sparsify, index_names, justify, bold_rows, classes, escape, max_rows, max_cols, show_dimensions, notebook, decimal, border, table_id)
2032 decimal=decimal, table_id=table_id)
2033 # TODO: a generic formatter wld b in DataFrameFormatter
-> 2034 formatter.to_html(classes=classes, notebook=notebook, border=border)
2035
2036 if buf is None:
~\Anaconda3\lib\site-packages\pandas\io\formats\format.py in to_html(self, classes, notebook, border)
749 table_id=self.table_id)
750 if hasattr(self.buf, 'write'):
--> 751 html_renderer.write_result(self.buf)
752 elif isinstance(self.buf, compat.string_types):
753 with open(self.buf, 'w') as f:
~\Anaconda3\lib\site-packages\pandas\io\formats\html.py in write_result(self, buf)
177
178 indent += self.indent_delta
--> 179 indent = self._write_header(indent)
180 indent = self._write_body(indent)
181
~\Anaconda3\lib\site-packages\pandas\io\formats\html.py in _write_header(self, indent)
279 recs_new[ins_col] = 1
280 records = recs_new
--> 281 values = (values[:ins_col] + [u('...')] +
282 values[ins_col:])
283
TypeError: can only concatenate tuple (not "list") to tuple
|
TypeError
|
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="{span}" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
nrows = len(frame)
idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u("...")
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u("...")
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols
# receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(
ins_row, tuple([u("...")] * len(level_lengths))
)
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u("...")
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, "...")
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(self.ncols))
if truncate_h:
row.insert(self.row_levels - sparse_offset + self.fmt.tr_col_num, "...")
self.write_tr(
row,
indent,
self.indent_delta,
tags=tags,
nindex_levels=len(levels) - sparse_offset,
)
else:
row = []
for i in range(len(frame)):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ["..."] * len(row)
self.write_tr(
str_sep_row,
indent,
self.indent_delta,
tags=None,
nindex_levels=self.row_levels,
)
idx_values = list(
zip(*frame.index.format(sparsify=False, adjoin=False, names=False))
)
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(self.ncols))
if truncate_h:
row.insert(self.row_levels + self.fmt.tr_col_num, "...")
self.write_tr(
row,
indent,
self.indent_delta,
tags=None,
nindex_levels=frame.index.nlevels,
)
|
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="{span}" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
nrows = len(frame)
# TODO: after gh-22887 fixed, refactor to use class property
# in place of row_levels
row_levels = self.frame.index.nlevels
idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u("...")
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u("...")
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols
# receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(
ins_row, tuple([u("...")] * len(level_lengths))
)
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u("...")
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, "...")
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(self.ncols))
if truncate_h:
row.insert(row_levels - sparse_offset + self.fmt.tr_col_num, "...")
self.write_tr(
row,
indent,
self.indent_delta,
tags=tags,
nindex_levels=len(levels) - sparse_offset,
)
else:
for i in range(len(frame)):
idx_values = list(
zip(*frame.index.format(sparsify=False, adjoin=False, names=False))
)
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(self.ncols))
if truncate_h:
row.insert(row_levels + self.fmt.tr_col_num, "...")
self.write_tr(
row,
indent,
self.indent_delta,
tags=None,
nindex_levels=frame.index.nlevels,
)
|
https://github.com/pandas-dev/pandas/issues/22887
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-24-59e289592884> in <module>()
3 ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
4 df = pd.DataFrame(index=arrays, columns=arrays)
----> 5 df.to_html(max_cols=7, sparsify=False)
~\Anaconda3\lib\site-packages\pandas\core\frame.py in to_html(self, buf, columns, col_space, header, index, na_rep, formatters, float_format, sparsify, index_names, justify, bold_rows, classes, escape, max_rows, max_cols, show_dimensions, notebook, decimal, border, table_id)
2032 decimal=decimal, table_id=table_id)
2033 # TODO: a generic formatter wld b in DataFrameFormatter
-> 2034 formatter.to_html(classes=classes, notebook=notebook, border=border)
2035
2036 if buf is None:
~\Anaconda3\lib\site-packages\pandas\io\formats\format.py in to_html(self, classes, notebook, border)
749 table_id=self.table_id)
750 if hasattr(self.buf, 'write'):
--> 751 html_renderer.write_result(self.buf)
752 elif isinstance(self.buf, compat.string_types):
753 with open(self.buf, 'w') as f:
~\Anaconda3\lib\site-packages\pandas\io\formats\html.py in write_result(self, buf)
177
178 indent += self.indent_delta
--> 179 indent = self._write_header(indent)
180 indent = self._write_body(indent)
181
~\Anaconda3\lib\site-packages\pandas\io\formats\html.py in _write_header(self, indent)
279 recs_new[ins_col] = 1
280 records = recs_new
--> 281 values = (values[:ins_col] + [u('...')] +
282 values[ins_col:])
283
TypeError: can only concatenate tuple (not "list") to tuple
|
TypeError
|
def _non_reducing_slice(slice_):
"""
Ensurse that a slice doesn't reduce to a Series or Scalar.
Any user-paseed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index, list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
return (isinstance(part, slice) or is_list_like(part)) and not isinstance(
part, tuple
)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
|
def _non_reducing_slice(slice_):
"""
Ensurse that a slice doesn't reduce to a Series or Scalar.
Any user-paseed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index, list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or is_list_like(part)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
|
https://github.com/pandas-dev/pandas/issues/19861
|
Traceback (most recent call last):
File "<stdin>", line 5, in <module>
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\io\formats\style.py", line 434, in render
self._compute()
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\io\formats\style.py", line 502, in _compute
r = func(self)(*args, **kwargs)
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\io\formats\style.py", line 591, in _applymap
result = self.data.loc[subset].applymap(func)
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\core\generic.py", line 3081, in __getattr__
return object.__getattribute__(self, name)
AttributeError: 'Series' object has no attribute 'applymap'
|
AttributeError
|
def pred(part):
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
return (isinstance(part, slice) or is_list_like(part)) and not isinstance(
part, tuple
)
|
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or is_list_like(part)
|
https://github.com/pandas-dev/pandas/issues/19861
|
Traceback (most recent call last):
File "<stdin>", line 5, in <module>
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\io\formats\style.py", line 434, in render
self._compute()
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\io\formats\style.py", line 502, in _compute
r = func(self)(*args, **kwargs)
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\io\formats\style.py", line 591, in _applymap
result = self.data.loc[subset].applymap(func)
File "c:\Users\user\AppData\Local\Continuum\Miniconda3\envs\lg\lib\site-packages\pandas\core\generic.py", line 3081, in __getattr__
return object.__getattribute__(self, name)
AttributeError: 'Series' object has no attribute 'applymap'
|
AttributeError
|
def _has_bool_dtype(x):
try:
if isinstance(x, ABCDataFrame):
return "bool" in x.dtypes
else:
return x.dtype == bool
except AttributeError:
return isinstance(x, (bool, np.bool_))
|
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return "bool" in x.dtypes
except AttributeError:
return isinstance(x, (bool, np.bool_))
|
https://github.com/pandas-dev/pandas/issues/22383
|
In [8]: df.loc[:, ['a','dtype']].ne(df.loc[:, ['a', 'dtype']])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-a0710f18822f> in <module>()
----> 1 df.loc[:, ['a','dtype']].ne(df.loc[:, ['a', 'dtype']])
/home/blistein/env/aan/local/lib/python2.7/site-packages/pandas/core/ops.pyc in f(self, other, axis, level)
1588 self, other = self.align(other, 'outer',
1589 level=level, copy=False)
-> 1590 return self._compare_frame(other, na_op, str_rep)
1591
1592 elif isinstance(other, ABCSeries):
/home/blistein/env/aan/local/lib/python2.7/site-packages/pandas/core/frame.pyc in _compare_frame(self, other, func, str_rep)
4790 return {col: func(a[col], b[col]) for col in a.columns}
4791
-> 4792 new_data = expressions.evaluate(_compare, str_rep, self, other)
4793 return self._constructor(data=new_data, index=self.index,
4794 columns=self.columns, copy=False)
/home/blistein/env/aan/local/lib/python2.7/site-packages/pandas/core/computation/expressions.pyc in evaluate(op, op_str, a, b, use_numexpr, **eval_kwargs)
201 """
202
--> 203 use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
204 if use_numexpr:
205 return _evaluate(op, op_str, a, b, **eval_kwargs)
/home/blistein/env/aan/local/lib/python2.7/site-packages/pandas/core/computation/expressions.pyc in _bool_arith_check(op_str, a, b, not_allowed, unsupported)
173 unsupported = {'+': '|', '*': '&', '-': '^'}
174
--> 175 if _has_bool_dtype(a) and _has_bool_dtype(b):
176 if op_str in unsupported:
177 warnings.warn("evaluating in Python space because the {op!r} "
/home/blistein/env/aan/local/lib/python2.7/site-packages/pandas/core/generic.pyc in __nonzero__(self)
1574 raise ValueError("The truth value of a {0} is ambiguous. "
1575 "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
-> 1576 .format(self.__class__.__name__))
1577
1578 __bool__ = __nonzero__
ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
|
ValueError
|
def _ensure_datetimelike_to_i8(other, to_utc=False):
"""
Helper for coercing an input scalar or array to i8.
Parameters
----------
other : 1d array
to_utc : bool, default False
If True, convert the values to UTC before extracting the i8 values
If False, extract the i8 values directly.
Returns
-------
i8 1d array
"""
from pandas import Index
from pandas.core.arrays import PeriodArray
if lib.is_scalar(other) and isna(other):
return iNaT
elif isinstance(other, (PeriodArray, ABCIndexClass, DatetimeLikeArrayMixin)):
# convert tz if needed
if getattr(other, "tz", None) is not None:
if to_utc:
other = other.tz_convert("UTC")
else:
other = other.tz_localize(None)
else:
try:
return np.array(other, copy=False).view("i8")
except TypeError:
# period array cannot be coerced to int
other = Index(other)
return other.asi8
|
def _ensure_datetimelike_to_i8(other, to_utc=False):
"""
Helper for coercing an input scalar or array to i8.
Parameters
----------
other : 1d array
to_utc : bool, default False
If True, convert the values to UTC before extracting the i8 values
If False, extract the i8 values directly.
Returns
-------
i8 1d array
"""
from pandas import Index
from pandas.core.arrays import PeriodArray
if lib.is_scalar(other) and isna(other):
return iNaT
elif isinstance(other, (PeriodArray, ABCIndexClass)):
# convert tz if needed
if getattr(other, "tz", None) is not None:
if to_utc:
other = other.tz_convert("UTC")
else:
other = other.tz_localize(None)
else:
try:
return np.array(other, copy=False).view("i8")
except TypeError:
# period array cannot be coerced to int
other = Index(other)
return other.asi8
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _dt_array_cmp(cls, op):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
opname = "__{name}__".format(name=op.__name__)
nat_result = True if opname == "__ne__" else False
def wrapper(self, other):
meth = getattr(dtl.DatetimeLikeArrayMixin, opname)
other = lib.item_from_zerodim(other)
if isinstance(other, (datetime, np.datetime64, compat.string_types)):
if isinstance(other, (datetime, np.datetime64)):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
try:
other = _to_m8(other, tz=self.tz)
except ValueError:
# string that cannot be parsed to Timestamp
return ops.invalid_comparison(self, other, op)
result = op(self.asi8, other.view("i8"))
if isna(other):
result.fill(nat_result)
elif lib.is_scalar(other) or np.ndim(other) == 0:
return ops.invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
try:
other = type(self)._from_sequence(other)
except ValueError:
other = np.array(other, dtype=np.object_)
elif not isinstance(
other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArrayMixin)
):
# Following Timestamp convention, __eq__ is all-False
# and __ne__ is all True, others raise TypeError.
return ops.invalid_comparison(self, other, op)
if is_object_dtype(other):
result = op(self.astype("O"), np.array(other))
o_mask = isna(other)
elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)):
# e.g. is_timedelta64_dtype(other)
return ops.invalid_comparison(self, other, op)
else:
self._assert_tzawareness_compat(other)
if not hasattr(other, "asi8"):
# ndarray, Series
other = type(self)(other)
result = meth(self, other)
o_mask = other._isnan
result = com.values_from_object(result)
# Make sure to pass an array to result[...]; indexing with
# Series breaks with older version of numpy
o_mask = np.array(o_mask)
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
def _dt_array_cmp(cls, op):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
opname = "__{name}__".format(name=op.__name__)
nat_result = True if opname == "__ne__" else False
def wrapper(self, other):
meth = getattr(dtl.DatetimeLikeArrayMixin, opname)
if isinstance(other, (datetime, np.datetime64, compat.string_types)):
if isinstance(other, (datetime, np.datetime64)):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
try:
other = _to_m8(other, tz=self.tz)
except ValueError:
# string that cannot be parsed to Timestamp
return ops.invalid_comparison(self, other, op)
result = op(self.asi8, other.view("i8"))
if isna(other):
result.fill(nat_result)
elif lib.is_scalar(other):
return ops.invalid_comparison(self, other, op)
else:
if isinstance(other, list):
try:
other = type(self)._from_sequence(other)
except ValueError:
other = np.array(other, dtype=np.object_)
elif not isinstance(
other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArrayMixin)
):
# Following Timestamp convention, __eq__ is all-False
# and __ne__ is all True, others raise TypeError.
return ops.invalid_comparison(self, other, op)
if is_object_dtype(other):
result = op(self.astype("O"), np.array(other))
o_mask = isna(other)
elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)):
# e.g. is_timedelta64_dtype(other)
return ops.invalid_comparison(self, other, op)
else:
self._assert_tzawareness_compat(other)
if not hasattr(other, "asi8"):
# ndarray, Series
other = type(self)(other)
result = meth(self, other)
o_mask = other._isnan
result = com.values_from_object(result)
# Make sure to pass an array to result[...]; indexing with
# Series breaks with older version of numpy
o_mask = np.array(o_mask)
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def wrapper(self, other):
meth = getattr(dtl.DatetimeLikeArrayMixin, opname)
other = lib.item_from_zerodim(other)
if isinstance(other, (datetime, np.datetime64, compat.string_types)):
if isinstance(other, (datetime, np.datetime64)):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
try:
other = _to_m8(other, tz=self.tz)
except ValueError:
# string that cannot be parsed to Timestamp
return ops.invalid_comparison(self, other, op)
result = op(self.asi8, other.view("i8"))
if isna(other):
result.fill(nat_result)
elif lib.is_scalar(other) or np.ndim(other) == 0:
return ops.invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
try:
other = type(self)._from_sequence(other)
except ValueError:
other = np.array(other, dtype=np.object_)
elif not isinstance(
other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArrayMixin)
):
# Following Timestamp convention, __eq__ is all-False
# and __ne__ is all True, others raise TypeError.
return ops.invalid_comparison(self, other, op)
if is_object_dtype(other):
result = op(self.astype("O"), np.array(other))
o_mask = isna(other)
elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)):
# e.g. is_timedelta64_dtype(other)
return ops.invalid_comparison(self, other, op)
else:
self._assert_tzawareness_compat(other)
if not hasattr(other, "asi8"):
# ndarray, Series
other = type(self)(other)
result = meth(self, other)
o_mask = other._isnan
result = com.values_from_object(result)
# Make sure to pass an array to result[...]; indexing with
# Series breaks with older version of numpy
o_mask = np.array(o_mask)
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
|
def wrapper(self, other):
meth = getattr(dtl.DatetimeLikeArrayMixin, opname)
if isinstance(other, (datetime, np.datetime64, compat.string_types)):
if isinstance(other, (datetime, np.datetime64)):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
try:
other = _to_m8(other, tz=self.tz)
except ValueError:
# string that cannot be parsed to Timestamp
return ops.invalid_comparison(self, other, op)
result = op(self.asi8, other.view("i8"))
if isna(other):
result.fill(nat_result)
elif lib.is_scalar(other):
return ops.invalid_comparison(self, other, op)
else:
if isinstance(other, list):
try:
other = type(self)._from_sequence(other)
except ValueError:
other = np.array(other, dtype=np.object_)
elif not isinstance(
other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArrayMixin)
):
# Following Timestamp convention, __eq__ is all-False
# and __ne__ is all True, others raise TypeError.
return ops.invalid_comparison(self, other, op)
if is_object_dtype(other):
result = op(self.astype("O"), np.array(other))
o_mask = isna(other)
elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)):
# e.g. is_timedelta64_dtype(other)
return ops.invalid_comparison(self, other, op)
else:
self._assert_tzawareness_compat(other)
if not hasattr(other, "asi8"):
# ndarray, Series
other = type(self)(other)
result = meth(self, other)
o_mask = other._isnan
result = com.values_from_object(result)
# Make sure to pass an array to result[...]; indexing with
# Series breaks with older version of numpy
o_mask = np.array(o_mask)
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _period_array_cmp(cls, op):
"""
Wrap comparison operations to convert Period-like to PeriodDtype
"""
opname = "__{name}__".format(name=op.__name__)
nat_result = True if opname == "__ne__" else False
def wrapper(self, other):
op = getattr(self.asi8, opname)
# We want to eventually defer to the Series or PeriodIndex (which will
# return here with an unboxed PeriodArray). But before we do that,
# we do a bit of validation on type (Period) and freq, so that our
# error messages are sensible
if is_list_like(other) and len(other) != len(self):
raise ValueError("Lengths must match")
not_implemented = isinstance(other, (ABCSeries, ABCIndexClass))
if not_implemented:
other = other._values
if isinstance(other, Period):
self._check_compatible_with(other)
result = op(other.ordinal)
elif isinstance(other, cls):
self._check_compatible_with(other)
if not_implemented:
return NotImplemented
result = op(other.asi8)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is NaT:
result = np.empty(len(self.asi8), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
def _period_array_cmp(cls, op):
"""
Wrap comparison operations to convert Period-like to PeriodDtype
"""
opname = "__{name}__".format(name=op.__name__)
nat_result = True if opname == "__ne__" else False
def wrapper(self, other):
op = getattr(self.asi8, opname)
# We want to eventually defer to the Series or PeriodIndex (which will
# return here with an unboxed PeriodArray). But before we do that,
# we do a bit of validation on type (Period) and freq, so that our
# error messages are sensible
not_implemented = isinstance(other, (ABCSeries, ABCIndexClass))
if not_implemented:
other = other._values
if isinstance(other, Period):
self._check_compatible_with(other)
result = op(other.ordinal)
elif isinstance(other, cls):
self._check_compatible_with(other)
if not_implemented:
return NotImplemented
result = op(other.asi8)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is NaT:
result = np.empty(len(self.asi8), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def wrapper(self, other):
op = getattr(self.asi8, opname)
# We want to eventually defer to the Series or PeriodIndex (which will
# return here with an unboxed PeriodArray). But before we do that,
# we do a bit of validation on type (Period) and freq, so that our
# error messages are sensible
if is_list_like(other) and len(other) != len(self):
raise ValueError("Lengths must match")
not_implemented = isinstance(other, (ABCSeries, ABCIndexClass))
if not_implemented:
other = other._values
if isinstance(other, Period):
self._check_compatible_with(other)
result = op(other.ordinal)
elif isinstance(other, cls):
self._check_compatible_with(other)
if not_implemented:
return NotImplemented
result = op(other.asi8)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is NaT:
result = np.empty(len(self.asi8), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self._hasnans:
result[self._isnan] = nat_result
return result
|
def wrapper(self, other):
op = getattr(self.asi8, opname)
# We want to eventually defer to the Series or PeriodIndex (which will
# return here with an unboxed PeriodArray). But before we do that,
# we do a bit of validation on type (Period) and freq, so that our
# error messages are sensible
not_implemented = isinstance(other, (ABCSeries, ABCIndexClass))
if not_implemented:
other = other._values
if isinstance(other, Period):
self._check_compatible_with(other)
result = op(other.ordinal)
elif isinstance(other, cls):
self._check_compatible_with(other)
if not_implemented:
return NotImplemented
result = op(other.asi8)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is NaT:
result = np.empty(len(self.asi8), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self._hasnans:
result[self._isnan] = nat_result
return result
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _td_array_cmp(cls, op):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
opname = "__{name}__".format(name=op.__name__)
nat_result = True if opname == "__ne__" else False
meth = getattr(dtl.DatetimeLikeArrayMixin, opname)
def wrapper(self, other):
if _is_convertible_to_td(other) or other is NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return ops.invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
try:
other = type(self)._from_sequence(other)._data
except (ValueError, TypeError):
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
result = com.values_from_object(result)
o_mask = np.array(isna(other))
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
def _td_array_cmp(cls, op):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
opname = "__{name}__".format(name=op.__name__)
nat_result = True if opname == "__ne__" else False
meth = getattr(dtl.DatetimeLikeArrayMixin, opname)
def wrapper(self, other):
if _is_convertible_to_td(other) or other is NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return ops.invalid_comparison(self, other, op)
else:
try:
other = type(self)._from_sequence(other)._data
except (ValueError, TypeError):
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
result = com.values_from_object(result)
o_mask = np.array(isna(other))
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def wrapper(self, other):
if _is_convertible_to_td(other) or other is NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return ops.invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
try:
other = type(self)._from_sequence(other)._data
except (ValueError, TypeError):
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
result = com.values_from_object(result)
o_mask = np.array(isna(other))
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
|
def wrapper(self, other):
if _is_convertible_to_td(other) or other is NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return ops.invalid_comparison(self, other, op)
else:
try:
other = type(self)._from_sequence(other)._data
except (ValueError, TypeError):
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
result = com.values_from_object(result)
o_mask = np.array(isna(other))
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _add_numeric_methods_unary(cls):
"""
Add in numeric unary methods.
"""
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
_evaluate_numeric_unary.__name__ = opstr
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(operator.neg, "__neg__")
cls.__pos__ = _make_evaluate_unary(operator.pos, "__pos__")
cls.__abs__ = _make_evaluate_unary(np.abs, "__abs__")
cls.__inv__ = _make_evaluate_unary(lambda x: -x, "__inv__")
|
def _add_numeric_methods_unary(cls):
"""
Add in numeric unary methods.
"""
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(operator.neg, "__neg__")
cls.__pos__ = _make_evaluate_unary(operator.pos, "__pos__")
cls.__abs__ = _make_evaluate_unary(np.abs, "__abs__")
cls.__inv__ = _make_evaluate_unary(lambda x: -x, "__inv__")
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
_evaluate_numeric_unary.__name__ = opstr
return _evaluate_numeric_unary
|
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def add_special_arithmetic_methods(cls):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
cls : class
special methods will be defined and pinned to this class
"""
_, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls)
new_methods = _create_methods(
cls, arith_method, comp_method, bool_method, special=True
)
# inplace operators (I feel like these should get passed an `inplace=True`
# or just be removed
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(
result.reindex_like(self, copy=False)._data, verify_is_copy=False
)
return self
f.__name__ = "__i{name}__".format(name=method.__name__.strip("__"))
return f
new_methods.update(
dict(
__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
__imod__=_wrap_inplace_method(new_methods["__mod__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]),
)
)
if not compat.PY3:
new_methods["__idiv__"] = _wrap_inplace_method(new_methods["__div__"])
new_methods.update(
dict(
__iand__=_wrap_inplace_method(new_methods["__and__"]),
__ior__=_wrap_inplace_method(new_methods["__or__"]),
__ixor__=_wrap_inplace_method(new_methods["__xor__"]),
)
)
add_methods(cls, new_methods=new_methods)
|
def add_special_arithmetic_methods(cls):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
cls : class
special methods will be defined and pinned to this class
"""
_, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls)
new_methods = _create_methods(
cls, arith_method, comp_method, bool_method, special=True
)
# inplace operators (I feel like these should get passed an `inplace=True`
# or just be removed
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(
result.reindex_like(self, copy=False)._data, verify_is_copy=False
)
return self
return f
new_methods.update(
dict(
__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
__imod__=_wrap_inplace_method(new_methods["__mod__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]),
)
)
if not compat.PY3:
new_methods["__idiv__"] = _wrap_inplace_method(new_methods["__div__"])
new_methods.update(
dict(
__iand__=_wrap_inplace_method(new_methods["__and__"]),
__ior__=_wrap_inplace_method(new_methods["__or__"]),
__ixor__=_wrap_inplace_method(new_methods["__xor__"]),
)
)
add_methods(cls, new_methods=new_methods)
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(
result.reindex_like(self, copy=False)._data, verify_is_copy=False
)
return self
f.__name__ = "__i{name}__".format(name=method.__name__.strip("__"))
return f
|
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(
result.reindex_like(self, copy=False)._data, verify_is_copy=False
)
return self
return f
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
str_rep = _get_opstr(op, cls)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
fill_zeros = _gen_fill_zeros(op_name)
construct_result = (
_construct_divmod_result if op in [divmod, rdivmod] else _construct_result
)
def na_op(x, y):
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
result = masked_arith_op(x, y, op)
result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
return result
def safe_na_op(lvalues, rvalues):
"""
return the result of evaluating na_op on the passed in values
try coercion to object type if the native types are not compatible
Parameters
----------
lvalues : array-like
rvalues : array-like
Raises
------
TypeError: invalid operation
"""
try:
with np.errstate(all="ignore"):
return na_op(lvalues, rvalues)
except Exception:
if is_object_dtype(lvalues):
return libalgos.arrmap_object(lvalues, lambda x: op(x, rvalues))
raise
def wrapper(left, right):
if isinstance(right, ABCDataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
right = maybe_upcast_for_op(right)
if is_categorical_dtype(left):
raise TypeError(
"{typ} cannot perform the operation {op}".format(
typ=type(left).__name__, op=str_rep
)
)
elif is_extension_array_dtype(left) or (
is_extension_array_dtype(right) and not is_scalar(right)
):
# GH#22378 disallow scalar to exclude e.g. "category", "Int64"
return dispatch_to_extension_op(op, left, right)
elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
return construct_result(
left, result, index=left.index, name=res_name, dtype=result.dtype
)
elif is_timedelta64_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
return construct_result(left, result, index=left.index, name=res_name)
elif is_timedelta64_dtype(right):
# We should only get here with non-scalar or timedelta64('NaT')
# values for right
# Note: we cannot use dispatch_to_index_op because
# that may incorrectly raise TypeError when we
# should get NullFrequencyError
result = op(pd.Index(left), right)
return construct_result(
left, result, index=left.index, name=res_name, dtype=result.dtype
)
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
rvalues = rvalues.values
result = safe_na_op(lvalues, rvalues)
return construct_result(
left, result, index=left.index, name=res_name, dtype=None
)
wrapper.__name__ = op_name
return wrapper
|
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
str_rep = _get_opstr(op, cls)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
fill_zeros = _gen_fill_zeros(op_name)
construct_result = (
_construct_divmod_result if op in [divmod, rdivmod] else _construct_result
)
def na_op(x, y):
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
result = masked_arith_op(x, y, op)
result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
return result
def safe_na_op(lvalues, rvalues):
"""
return the result of evaluating na_op on the passed in values
try coercion to object type if the native types are not compatible
Parameters
----------
lvalues : array-like
rvalues : array-like
Raises
------
TypeError: invalid operation
"""
try:
with np.errstate(all="ignore"):
return na_op(lvalues, rvalues)
except Exception:
if is_object_dtype(lvalues):
return libalgos.arrmap_object(lvalues, lambda x: op(x, rvalues))
raise
def wrapper(left, right):
if isinstance(right, ABCDataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
right = maybe_upcast_for_op(right)
if is_categorical_dtype(left):
raise TypeError(
"{typ} cannot perform the operation {op}".format(
typ=type(left).__name__, op=str_rep
)
)
elif is_extension_array_dtype(left) or (
is_extension_array_dtype(right) and not is_scalar(right)
):
# GH#22378 disallow scalar to exclude e.g. "category", "Int64"
return dispatch_to_extension_op(op, left, right)
elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
return construct_result(
left, result, index=left.index, name=res_name, dtype=result.dtype
)
elif is_timedelta64_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
return construct_result(left, result, index=left.index, name=res_name)
elif is_timedelta64_dtype(right):
# We should only get here with non-scalar or timedelta64('NaT')
# values for right
# Note: we cannot use dispatch_to_index_op because
# that may incorrectly raise TypeError when we
# should get NullFrequencyError
result = op(pd.Index(left), right)
return construct_result(
left, result, index=left.index, name=res_name, dtype=result.dtype
)
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
rvalues = rvalues.values
result = safe_na_op(lvalues, rvalues)
return construct_result(
left, result, index=left.index, name=res_name, dtype=None
)
return wrapper
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _comp_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
masker = _gen_eval_kwargs(op_name).get("masker", False)
def na_op(x, y):
# TODO:
# should have guarantess on what x, y can be type-wise
# Extension Dtypes are not called here
# Checking that cases that were once handled here are no longer
# reachable.
assert not (is_categorical_dtype(y) and not is_scalar(y))
if is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
elif is_datetimelike_v_numeric(x, y):
return invalid_comparison(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
# we have a datetime/timedelta and may need to convert
assert not needs_i8_conversion(x)
mask = None
if not is_scalar(y) and needs_i8_conversion(y):
mask = isna(x) | isna(y)
y = y.view("i8")
x = x.view("i8")
method = getattr(x, op_name, None)
if method is not None:
with np.errstate(all="ignore"):
result = method(y)
if result is NotImplemented:
return invalid_comparison(x, y, op)
else:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
res_name = get_op_result_name(self, other)
if isinstance(other, list):
# TODO: same for tuples?
other = np.asarray(other)
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, ABCSeries) and not self._indexed_same(other):
raise ValueError("Can only compare identically-labeled Series objects")
elif is_categorical_dtype(self):
# Dispatch to Categorical implementation; pd.CategoricalIndex
# behavior is non-canonical GH#19513
res_values = dispatch_to_index_op(op, self, other, pd.Categorical)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
# Dispatch to DatetimeIndex to ensure identical
# Series/Index behavior
if isinstance(other, datetime.date) and not isinstance(
other, datetime.datetime
):
# https://github.com/pandas-dev/pandas/issues/21152
# Compatibility for difference between Series comparison w/
# datetime and date
msg = (
"Comparing Series of datetimes with 'datetime.date'. "
"Currently, the 'datetime.date' is coerced to a "
"datetime. In the future pandas will not coerce, "
"and {future}. "
"To retain the current behavior, "
"convert the 'datetime.date' to a datetime with "
"'pd.Timestamp'."
)
if op in {operator.lt, operator.le, operator.gt, operator.ge}:
future = "a TypeError will be raised"
else:
future = "'the values will not compare equal to the 'datetime.date'"
msg = "\n".join(textwrap.wrap(msg.format(future=future)))
warnings.warn(msg, FutureWarning, stacklevel=2)
other = pd.Timestamp(other)
res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_timedelta64_dtype(self):
res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_extension_array_dtype(self) or (
is_extension_array_dtype(other) and not is_scalar(other)
):
# Note: the `not is_scalar(other)` condition rules out
# e.g. other == "category"
return dispatch_to_extension_op(op, self, other)
elif isinstance(other, ABCSeries):
# By this point we have checked that self._indexed_same(other)
res_values = na_op(self.values, other.values)
# rename is needed in case res_name is None and res_values.name
# is not.
return self._constructor(
res_values, index=self.index, name=res_name
).rename(res_name)
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if other.ndim != 0 and len(self) != len(other):
raise ValueError("Lengths must match to compare")
res_values = na_op(self.values, np.asarray(other))
result = self._constructor(res_values, index=self.index)
# rename is needed in case res_name is None and self.name
# is not.
return result.__finalize__(self).rename(res_name)
elif is_scalar(other) and isna(other):
# numpy does not like comparisons vs None
if op is operator.ne:
res_values = np.ones(len(self), dtype=bool)
else:
res_values = np.zeros(len(self), dtype=bool)
return self._constructor(
res_values, index=self.index, name=res_name, dtype="bool"
)
else:
values = self.get_values()
with np.errstate(all="ignore"):
res = na_op(values, other)
if is_scalar(res):
raise TypeError(
"Could not compare {typ} type with Series".format(typ=type(other))
)
# always return a full value series here
res_values = com.values_from_object(res)
return self._constructor(
res_values, index=self.index, name=res_name, dtype="bool"
)
wrapper.__name__ = op_name
return wrapper
|
def _comp_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
masker = _gen_eval_kwargs(op_name).get("masker", False)
def na_op(x, y):
# TODO:
# should have guarantess on what x, y can be type-wise
# Extension Dtypes are not called here
# Checking that cases that were once handled here are no longer
# reachable.
assert not (is_categorical_dtype(y) and not is_scalar(y))
if is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
elif is_datetimelike_v_numeric(x, y):
return invalid_comparison(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
# we have a datetime/timedelta and may need to convert
assert not needs_i8_conversion(x)
mask = None
if not is_scalar(y) and needs_i8_conversion(y):
mask = isna(x) | isna(y)
y = y.view("i8")
x = x.view("i8")
method = getattr(x, op_name, None)
if method is not None:
with np.errstate(all="ignore"):
result = method(y)
if result is NotImplemented:
return invalid_comparison(x, y, op)
else:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
res_name = get_op_result_name(self, other)
if isinstance(other, list):
# TODO: same for tuples?
other = np.asarray(other)
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, ABCSeries) and not self._indexed_same(other):
raise ValueError("Can only compare identically-labeled Series objects")
elif is_categorical_dtype(self):
# Dispatch to Categorical implementation; pd.CategoricalIndex
# behavior is non-canonical GH#19513
res_values = dispatch_to_index_op(op, self, other, pd.Categorical)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
# Dispatch to DatetimeIndex to ensure identical
# Series/Index behavior
if isinstance(other, datetime.date) and not isinstance(
other, datetime.datetime
):
# https://github.com/pandas-dev/pandas/issues/21152
# Compatibility for difference between Series comparison w/
# datetime and date
msg = (
"Comparing Series of datetimes with 'datetime.date'. "
"Currently, the 'datetime.date' is coerced to a "
"datetime. In the future pandas will not coerce, "
"and {future}. "
"To retain the current behavior, "
"convert the 'datetime.date' to a datetime with "
"'pd.Timestamp'."
)
if op in {operator.lt, operator.le, operator.gt, operator.ge}:
future = "a TypeError will be raised"
else:
future = "'the values will not compare equal to the 'datetime.date'"
msg = "\n".join(textwrap.wrap(msg.format(future=future)))
warnings.warn(msg, FutureWarning, stacklevel=2)
other = pd.Timestamp(other)
res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_timedelta64_dtype(self):
res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_extension_array_dtype(self) or (
is_extension_array_dtype(other) and not is_scalar(other)
):
# Note: the `not is_scalar(other)` condition rules out
# e.g. other == "category"
return dispatch_to_extension_op(op, self, other)
elif isinstance(other, ABCSeries):
# By this point we have checked that self._indexed_same(other)
res_values = na_op(self.values, other.values)
# rename is needed in case res_name is None and res_values.name
# is not.
return self._constructor(
res_values, index=self.index, name=res_name
).rename(res_name)
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if other.ndim != 0 and len(self) != len(other):
raise ValueError("Lengths must match to compare")
res_values = na_op(self.values, np.asarray(other))
result = self._constructor(res_values, index=self.index)
# rename is needed in case res_name is None and self.name
# is not.
return result.__finalize__(self).rename(res_name)
elif is_scalar(other) and isna(other):
# numpy does not like comparisons vs None
if op is operator.ne:
res_values = np.ones(len(self), dtype=bool)
else:
res_values = np.zeros(len(self), dtype=bool)
return self._constructor(
res_values, index=self.index, name=res_name, dtype="bool"
)
else:
values = self.get_values()
with np.errstate(all="ignore"):
res = na_op(values, other)
if is_scalar(res):
raise TypeError(
"Could not compare {typ} type with Series".format(typ=type(other))
)
# always return a full value series here
res_values = com.values_from_object(res)
return self._constructor(
res_values, index=self.index, name=res_name, dtype="bool"
)
return wrapper
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _coerce_method(converter):
"""
Install the scalar coercion methods.
"""
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError("cannot convert the series to {0}".format(str(converter)))
wrapper.__name__ = "__{name}__".format(name=converter.__name__)
return wrapper
|
def _coerce_method(converter):
"""
Install the scalar coercion methods.
"""
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError("cannot convert the series to {0}".format(str(converter)))
return wrapper
|
https://github.com/pandas-dev/pandas/issues/23078
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-8a2a887e6efd> in <module>
----> 1 idx <= idx[[0]]
~/sandbox/pandas/pandas/core/indexes/datetimes.py in wrapper(self, other)
90
91 def wrapper(self, other):
---> 92 result = getattr(DatetimeArrayMixin, opname)(self, other)
93 if is_bool_dtype(result):
94 return result
~/sandbox/pandas/pandas/core/arrays/datetimes.py in wrapper(self, other)
133 else:
134 self._assert_tzawareness_compat(other)
--> 135 result = meth(self, np.asarray(other))
136
137 result = com.values_from_object(result)
~/sandbox/pandas/pandas/core/arrays/datetimelike.py in cmp_method(self, other)
51 if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)):
52 if other.ndim > 0 and len(self) != len(other):
---> 53 raise ValueError('Lengths must match to compare')
54
55 if needs_i8_conversion(self) and needs_i8_conversion(other):
ValueError: Lengths must match to compare
|
ValueError
|
def _factorize_keys(lk, rk, sort=True):
# Some pre-processing for non-ndarray lk / rk
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
elif (
is_categorical_dtype(lk) and is_categorical_dtype(rk) and lk.is_dtype_equal(rk)
):
if lk.categories.equals(rk.categories):
# if we exactly match in categories, allow us to factorize on codes
rk = rk.codes
else:
# Same categories in different orders -> recode
rk = _recode_for_categories(rk.codes, rk.categories, lk.categories)
lk = ensure_int64(lk.codes)
rk = ensure_int64(rk)
elif (
is_extension_array_dtype(lk.dtype)
and is_extension_array_dtype(rk.dtype)
and lk.dtype == rk.dtype
):
lk, _ = lk._values_for_factorize()
rk, _ = rk._values_for_factorize()
if is_integer_dtype(lk) and is_integer_dtype(rk):
# GH#23917 TODO: needs tests for case where lk is integer-dtype
# and rk is datetime-dtype
klass = libhashtable.Int64Factorizer
lk = ensure_int64(com.values_from_object(lk))
rk = ensure_int64(com.values_from_object(rk))
elif issubclass(lk.dtype.type, (np.timedelta64, np.datetime64)) and issubclass(
rk.dtype.type, (np.timedelta64, np.datetime64)
):
# GH#23917 TODO: Needs tests for non-matching dtypes
klass = libhashtable.Int64Factorizer
lk = ensure_int64(com.values_from_object(lk))
rk = ensure_int64(com.values_from_object(rk))
else:
klass = libhashtable.Factorizer
lk = ensure_object(lk)
rk = ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
|
def _factorize_keys(lk, rk, sort=True):
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
# if we exactly match in categories, allow us to factorize on codes
if is_categorical_dtype(lk) and is_categorical_dtype(rk) and lk.is_dtype_equal(rk):
klass = libhashtable.Int64Factorizer
if lk.categories.equals(rk.categories):
rk = rk.codes
else:
# Same categories in different orders -> recode
rk = _recode_for_categories(rk.codes, rk.categories, lk.categories)
lk = ensure_int64(lk.codes)
rk = ensure_int64(rk)
elif is_integer_dtype(lk) and is_integer_dtype(rk):
# GH#23917 TODO: needs tests for case where lk is integer-dtype
# and rk is datetime-dtype
klass = libhashtable.Int64Factorizer
lk = ensure_int64(com.values_from_object(lk))
rk = ensure_int64(com.values_from_object(rk))
elif issubclass(lk.dtype.type, (np.timedelta64, np.datetime64)) and issubclass(
rk.dtype.type, (np.timedelta64, np.datetime64)
):
# GH#23917 TODO: Needs tests for non-matching dtypes
klass = libhashtable.Int64Factorizer
lk = ensure_int64(com.values_from_object(lk))
rk = ensure_int64(com.values_from_object(rk))
else:
klass = libhashtable.Factorizer
lk = ensure_object(lk)
rk = ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
|
https://github.com/pandas-dev/pandas/issues/23020
|
In [4]: pd.merge(df, df, on='A').dtypes
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-e77608ca3973> in <module>()
----> 1 pd.merge(df, df, on='A').dtypes
~/pandas/pandas/core/reshape/merge.py in merge(left, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, validate)
61 copy=copy, indicator=indicator,
62 validate=validate)
---> 63 return op.get_result()
64
65
~/pandas/pandas/core/reshape/merge.py in get_result(self)
562 self.left, self.right)
563
--> 564 join_index, left_indexer, right_indexer = self._get_join_info()
565
566 ldata, rdata = self.left._data, self.right._data
~/pandas/pandas/core/reshape/merge.py in _get_join_info(self)
771 else:
772 (left_indexer,
--> 773 right_indexer) = self._get_join_indexers()
774
775 if self.right_index:
~/pandas/pandas/core/reshape/merge.py in _get_join_indexers(self)
750 self.right_join_keys,
751 sort=self.sort,
--> 752 how=self.how)
753
754 def _get_join_info(self):
~/pandas/pandas/core/reshape/merge.py in _get_join_indexers(left_keys, right_keys, sort, how, **kwargs)
1120
1121 # get left & right join labels and num. of levels at each location
-> 1122 llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
1123
1124 # get flat i8 keys from label lists
~/pandas/pandas/core/reshape/merge.py in _factorize_keys(lk, rk, sort)
1554 elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
1555 klass = libhashtable.Int64Factorizer
-> 1556 lk = ensure_int64(com.values_from_object(lk))
1557 rk = ensure_int64(com.values_from_object(rk))
1558 else: 1557 rk = ensure_int64(com.values_from_object(rk))
1558 else:
~/pandas/pandas/_libs/algos_common_helper.pxi in pandas._libs.algos.ensure_int64()
ValueError: cannot convert float NaN to integer
|
ValueError
|
def _simple_new(cls, values, name=None, freq=None, **kwargs):
"""
Create a new PeriodIndex.
Parameters
----------
values : PeriodArray, PeriodIndex, Index[int64], ndarray[int64]
Values that can be converted to a PeriodArray without inference
or coercion.
"""
# TODO: raising on floats is tested, but maybe not useful.
# Should the callers know not to pass floats?
# At the very least, I think we can ensure that lists aren't passed.
if isinstance(values, list):
values = np.asarray(values)
if is_float_dtype(values):
raise TypeError("PeriodIndex._simple_new does not accept floats.")
if freq:
freq = Period._maybe_convert_freq(freq)
values = PeriodArray(values, freq=freq)
if not isinstance(values, PeriodArray):
raise TypeError("PeriodIndex._simple_new only accepts PeriodArray")
result = object.__new__(cls)
result._data = values
result.name = name
result._reset_identity()
return result
|
def _simple_new(cls, values, name=None, freq=None, **kwargs):
"""
Create a new PeriodIndex.
Parameters
----------
values : PeriodArray, PeriodIndex, Index[int64], ndarray[int64]
Values that can be converted to a PeriodArray without inference
or coercion.
"""
# TODO: raising on floats is tested, but maybe not useful.
# Should the callers know not to pass floats?
# At the very least, I think we can ensure that lists aren't passed.
if isinstance(values, list):
values = np.asarray(values)
if is_float_dtype(values):
raise TypeError("PeriodIndex._simple_new does not accept floats.")
values = PeriodArray(values, freq=freq)
if not isinstance(values, PeriodArray):
raise TypeError("PeriodIndex._simple_new only accepts PeriodArray")
result = object.__new__(cls)
result._data = values
result.name = name
result._reset_identity()
return result
|
https://github.com/pandas-dev/pandas/issues/24135
|
In [18]: s = pd.Series(np.random.rand(5), index=pd.date_range('20130101', periods=5))
In [19]: s
Out[19]:
2013-01-01 0.130706
2013-01-02 0.232104
2013-01-03 0.506547
2013-01-04 0.155568
2013-01-05 0.873604
Freq: D, dtype: float64
In [20]: s.to_msgpack('test.msg')
In [22]: s2 = pd.read_msgpack('test.msg')
In [23]: s2
Out[23]: ---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/miniconda3/envs/dev/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
~/miniconda3/envs/dev/lib/python3.5/site-packages/IPython/lib/pretty.py in pretty(self, obj)
400 if cls is not object \
401 and callable(cls.__dict__.get('__repr__')):
--> 402 return _repr_pprint(obj, self, cycle)
403
404 return _default_pprint(obj, self, cycle)
~/miniconda3/envs/dev/lib/python3.5/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
695 """A pprint that just redirects to the normal repr function."""
696 # Find newlines and replace them with p.break_()
--> 697 output = repr(obj)
698 for idx,output_line in enumerate(output.splitlines()):
699 if idx:
~/scipy/pandas/pandas/core/base.py in __repr__(self)
75 Yields Bytestring in Py2, Unicode String in py3.
76 """
---> 77 return str(self)
78
79
~/scipy/pandas/pandas/core/base.py in __str__(self)
54
55 if compat.PY3:
---> 56 return self.__unicode__()
57 return self.__bytes__()
58
~/scipy/pandas/pandas/core/series.py in __unicode__(self)
1272
1273 self.to_string(buf=buf, name=self.name, dtype=self.dtype,
-> 1274 max_rows=max_rows, length=show_dimensions)
1275 result = buf.getvalue()
1276
~/scipy/pandas/pandas/core/series.py in to_string(self, buf, na_rep, float_format, header, index, length, dtype, name, max_rows)
1316 float_format=float_format,
1317 max_rows=max_rows)
-> 1318 result = formatter.to_string()
1319
1320 # catch contract violations
~/scipy/pandas/pandas/io/formats/format.py in to_string(self)
258 def to_string(self):
259 series = self.tr_series
--> 260 footer = self._get_footer()
261
262 if len(series) == 0:
~/scipy/pandas/pandas/io/formats/format.py in _get_footer(self)
205
206 if getattr(self.series.index, 'freq', None) is not None:
--> 207 footer += 'Freq: {freq}'.format(freq=self.series.index.freqstr)
208
209 if self.name is not False and name is not None:
~/scipy/pandas/pandas/core/arrays/datetimelike.py in freqstr(self)
502 if self.freq is None:
503 return None
--> 504 return self.freq.freqstr
505
506 @property # NB: override with cache_readonly in immutable subclasses
AttributeError: 'str' object has no attribute 'freqstr'
|
AttributeError
|
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get("typ")
if typ is None:
return obj
elif typ == "timestamp":
freq = obj["freq"] if "freq" in obj else obj["offset"]
return Timestamp(obj["value"], tz=obj["tz"], freq=freq)
elif typ == "nat":
return NaT
elif typ == "period":
return Period(ordinal=obj["ordinal"], freq=obj["freq"])
elif typ == "index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
return globals()[obj["klass"]](data, dtype=dtype, name=obj["name"])
elif typ == "range_index":
return globals()[obj["klass"]](
obj["start"], obj["stop"], obj["step"], name=obj["name"]
)
elif typ == "multi_index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
data = [tuple(x) for x in data]
return globals()[obj["klass"]].from_tuples(data, names=obj["names"])
elif typ == "period_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
freq = d.pop("freq", None)
return globals()[obj["klass"]](PeriodArray(data, freq), **d)
elif typ == "datetime_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
result = DatetimeIndex(data, **d)
tz = obj["tz"]
# reverse tz conversion
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
elif typ in ("interval_index", "interval_array"):
return globals()[obj["klass"]].from_arrays(
obj["left"], obj["right"], obj["closed"], name=obj["name"]
)
elif typ == "category":
from_codes = globals()[obj["klass"]].from_codes
return from_codes(
codes=obj["codes"], categories=obj["categories"], ordered=obj["ordered"]
)
elif typ == "interval":
return Interval(obj["left"], obj["right"], obj["closed"])
elif typ == "series":
dtype = dtype_for(obj["dtype"])
pd_dtype = pandas_dtype(dtype)
index = obj["index"]
result = globals()[obj["klass"]](
unconvert(obj["data"], dtype, obj["compress"]),
index=index,
dtype=pd_dtype,
name=obj["name"],
)
return result
elif typ == "block_manager":
axes = obj["axes"]
def create_block(b):
values = _safe_reshape(
unconvert(b["values"], dtype_for(b["dtype"]), b["compress"]), b["shape"]
)
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if "locs" in b:
placement = b["locs"]
else:
placement = axes[0].get_indexer(b["items"])
return make_block(
values=values,
klass=getattr(internals, b["klass"]),
placement=placement,
dtype=b["dtype"],
)
blocks = [create_block(b) for b in obj["blocks"]]
return globals()[obj["klass"]](BlockManager(blocks, axes))
elif typ == "datetime":
return parse(obj["data"])
elif typ == "datetime64":
return np.datetime64(parse(obj["data"]))
elif typ == "date":
return parse(obj["data"]).date()
elif typ == "timedelta":
return timedelta(*obj["data"])
elif typ == "timedelta64":
return np.timedelta64(int(obj["data"]))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == "block_index":
return globals()[obj["klass"]](obj["length"], obj["blocs"], obj["blengths"])
elif typ == "int_index":
return globals()[obj["klass"]](obj["length"], obj["indices"])
elif typ == "ndarray":
return unconvert(
obj["data"], np.typeDict[obj["dtype"]], obj.get("compress")
).reshape(obj["shape"])
elif typ == "np_scalar":
if obj.get("sub_typ") == "np_complex":
return c2f(obj["real"], obj["imag"], obj["dtype"])
else:
dtype = dtype_for(obj["dtype"])
try:
return dtype(obj["data"])
except (ValueError, TypeError):
return dtype.type(obj["data"])
elif typ == "np_complex":
return complex(obj["real"] + "+" + obj["imag"] + "j")
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
|
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get("typ")
if typ is None:
return obj
elif typ == "timestamp":
freq = obj["freq"] if "freq" in obj else obj["offset"]
return Timestamp(obj["value"], tz=obj["tz"], freq=freq)
elif typ == "nat":
return NaT
elif typ == "period":
return Period(ordinal=obj["ordinal"], freq=obj["freq"])
elif typ == "index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
return globals()[obj["klass"]](data, dtype=dtype, name=obj["name"])
elif typ == "range_index":
return globals()[obj["klass"]](
obj["start"], obj["stop"], obj["step"], name=obj["name"]
)
elif typ == "multi_index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
data = [tuple(x) for x in data]
return globals()[obj["klass"]].from_tuples(data, names=obj["names"])
elif typ == "period_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
freq = d.pop("freq", None)
return globals()[obj["klass"]](PeriodArray(data, freq), **d)
elif typ == "datetime_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
result = DatetimeIndex._simple_new(data, **d)
tz = obj["tz"]
# reverse tz conversion
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
elif typ in ("interval_index", "interval_array"):
return globals()[obj["klass"]].from_arrays(
obj["left"], obj["right"], obj["closed"], name=obj["name"]
)
elif typ == "category":
from_codes = globals()[obj["klass"]].from_codes
return from_codes(
codes=obj["codes"], categories=obj["categories"], ordered=obj["ordered"]
)
elif typ == "interval":
return Interval(obj["left"], obj["right"], obj["closed"])
elif typ == "series":
dtype = dtype_for(obj["dtype"])
pd_dtype = pandas_dtype(dtype)
index = obj["index"]
result = globals()[obj["klass"]](
unconvert(obj["data"], dtype, obj["compress"]),
index=index,
dtype=pd_dtype,
name=obj["name"],
)
return result
elif typ == "block_manager":
axes = obj["axes"]
def create_block(b):
values = _safe_reshape(
unconvert(b["values"], dtype_for(b["dtype"]), b["compress"]), b["shape"]
)
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if "locs" in b:
placement = b["locs"]
else:
placement = axes[0].get_indexer(b["items"])
return make_block(
values=values,
klass=getattr(internals, b["klass"]),
placement=placement,
dtype=b["dtype"],
)
blocks = [create_block(b) for b in obj["blocks"]]
return globals()[obj["klass"]](BlockManager(blocks, axes))
elif typ == "datetime":
return parse(obj["data"])
elif typ == "datetime64":
return np.datetime64(parse(obj["data"]))
elif typ == "date":
return parse(obj["data"]).date()
elif typ == "timedelta":
return timedelta(*obj["data"])
elif typ == "timedelta64":
return np.timedelta64(int(obj["data"]))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == "block_index":
return globals()[obj["klass"]](obj["length"], obj["blocs"], obj["blengths"])
elif typ == "int_index":
return globals()[obj["klass"]](obj["length"], obj["indices"])
elif typ == "ndarray":
return unconvert(
obj["data"], np.typeDict[obj["dtype"]], obj.get("compress")
).reshape(obj["shape"])
elif typ == "np_scalar":
if obj.get("sub_typ") == "np_complex":
return c2f(obj["real"], obj["imag"], obj["dtype"])
else:
dtype = dtype_for(obj["dtype"])
try:
return dtype(obj["data"])
except (ValueError, TypeError):
return dtype.type(obj["data"])
elif typ == "np_complex":
return complex(obj["real"] + "+" + obj["imag"] + "j")
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
|
https://github.com/pandas-dev/pandas/issues/24135
|
In [18]: s = pd.Series(np.random.rand(5), index=pd.date_range('20130101', periods=5))
In [19]: s
Out[19]:
2013-01-01 0.130706
2013-01-02 0.232104
2013-01-03 0.506547
2013-01-04 0.155568
2013-01-05 0.873604
Freq: D, dtype: float64
In [20]: s.to_msgpack('test.msg')
In [22]: s2 = pd.read_msgpack('test.msg')
In [23]: s2
Out[23]: ---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/miniconda3/envs/dev/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
~/miniconda3/envs/dev/lib/python3.5/site-packages/IPython/lib/pretty.py in pretty(self, obj)
400 if cls is not object \
401 and callable(cls.__dict__.get('__repr__')):
--> 402 return _repr_pprint(obj, self, cycle)
403
404 return _default_pprint(obj, self, cycle)
~/miniconda3/envs/dev/lib/python3.5/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
695 """A pprint that just redirects to the normal repr function."""
696 # Find newlines and replace them with p.break_()
--> 697 output = repr(obj)
698 for idx,output_line in enumerate(output.splitlines()):
699 if idx:
~/scipy/pandas/pandas/core/base.py in __repr__(self)
75 Yields Bytestring in Py2, Unicode String in py3.
76 """
---> 77 return str(self)
78
79
~/scipy/pandas/pandas/core/base.py in __str__(self)
54
55 if compat.PY3:
---> 56 return self.__unicode__()
57 return self.__bytes__()
58
~/scipy/pandas/pandas/core/series.py in __unicode__(self)
1272
1273 self.to_string(buf=buf, name=self.name, dtype=self.dtype,
-> 1274 max_rows=max_rows, length=show_dimensions)
1275 result = buf.getvalue()
1276
~/scipy/pandas/pandas/core/series.py in to_string(self, buf, na_rep, float_format, header, index, length, dtype, name, max_rows)
1316 float_format=float_format,
1317 max_rows=max_rows)
-> 1318 result = formatter.to_string()
1319
1320 # catch contract violations
~/scipy/pandas/pandas/io/formats/format.py in to_string(self)
258 def to_string(self):
259 series = self.tr_series
--> 260 footer = self._get_footer()
261
262 if len(series) == 0:
~/scipy/pandas/pandas/io/formats/format.py in _get_footer(self)
205
206 if getattr(self.series.index, 'freq', None) is not None:
--> 207 footer += 'Freq: {freq}'.format(freq=self.series.index.freqstr)
208
209 if self.name is not False and name is not None:
~/scipy/pandas/pandas/core/arrays/datetimelike.py in freqstr(self)
502 if self.freq is None:
503 return None
--> 504 return self.freq.freqstr
505
506 @property # NB: override with cache_readonly in immutable subclasses
AttributeError: 'str' object has no attribute 'freqstr'
|
AttributeError
|
def json_normalize(
data,
record_path=None,
meta=None,
meta_prefix=None,
record_prefix=None,
errors="raise",
sep=".",
):
"""
Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
meta_prefix : string, default None
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(y)] for y in data):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, compat.string_types):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if isinstance(data, dict):
data = [data]
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"{err} is not always present".format(err=e)
)
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result = result.rename(columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError(
"Conflicting metadata name {name}, need distinguishing prefix ".format(
name=k
)
)
result[k] = np.array(v).repeat(lengths)
return result
|
def json_normalize(
data,
record_path=None,
meta=None,
meta_prefix=None,
record_prefix=None,
errors="raise",
sep=".",
):
"""
Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
meta_prefix : string, default None
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(y)] for y in data):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, compat.string_types):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"{err} is not always present".format(err=e)
)
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result = result.rename(columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError(
"Conflicting metadata name {name}, need distinguishing prefix ".format(
name=k
)
)
result[k] = np.array(v).repeat(lengths)
return result
|
https://github.com/pandas-dev/pandas/issues/22706
|
Traceback (most recent call last):
File ".\test.py", line 15, in <module>
json_normalize(d, record_path = ["info", "phones"])
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 262, in json_normalize
_recursive_extract(data, record_path, {}, level=0)
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 235, in _recursive_extract
seen_meta, level=level + 1)
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 238, in _recursive_extract
recs = _pull_field(obj, path[0])
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 185, in _pull_field
result = result[spec]
TypeError: string indices must be integers
|
TypeError
|
def _recursive_extract(data, path, seen_meta, level=0):
if isinstance(data, dict):
data = [data]
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"{err} is not always present".format(err=e)
)
meta_vals[key].append(meta_val)
records.extend(recs)
|
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"{err} is not always present".format(err=e)
)
meta_vals[key].append(meta_val)
records.extend(recs)
|
https://github.com/pandas-dev/pandas/issues/22706
|
Traceback (most recent call last):
File ".\test.py", line 15, in <module>
json_normalize(d, record_path = ["info", "phones"])
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 262, in json_normalize
_recursive_extract(data, record_path, {}, level=0)
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 235, in _recursive_extract
seen_meta, level=level + 1)
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 238, in _recursive_extract
recs = _pull_field(obj, path[0])
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 185, in _pull_field
result = result[spec]
TypeError: string indices must be integers
|
TypeError
|
def str_extractall(arr, pat, flags=0):
r"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
.. versionadded:: 0.18.0
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndexClass):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.iteritems():
if isinstance(subject, compat.string_types):
if not is_mi:
subject_key = (subject_key,)
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, compat.string_types):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i,))
index_list.append(result_key)
from pandas import MultiIndex
index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
result = arr._constructor_expanddim(match_list, index=index, columns=columns)
return result
|
def str_extractall(arr, pat, flags=0):
r"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
.. versionadded:: 0.18.0
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndex):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.iteritems():
if isinstance(subject, compat.string_types):
if not is_mi:
subject_key = (subject_key,)
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, compat.string_types):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i,))
index_list.append(result_key)
from pandas import MultiIndex
index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
result = arr._constructor_expanddim(match_list, index=index, columns=columns)
return result
|
https://github.com/pandas-dev/pandas/issues/23556
|
import pandas as pd
pd.Index(['a', 'b', 'aa'], dtype='category').str.replace('a', 'c')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\ProgramData\Miniconda3\envs\pandas-dev\lib\site-packages\pandas\core\strings.py", line 2430, in replace
return self._wrap_result(result)
File "C:\ProgramData\Miniconda3\envs\pandas-dev\lib\site-packages\pandas\core\strings.py", line 1964, in _wrap_result
result = take_1d(result, self._orig.cat.codes)
AttributeError: 'CategoricalIndex' object has no attribute 'cat'
|
AttributeError
|
def _wrap_result(
self, result, use_codes=True, name=None, expand=None, fill_value=np.nan
):
from pandas import Index, Series, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
# if self._orig is a CategoricalIndex, there is no .cat-accessor
result = take_1d(
result, Series(self._orig, copy=False).cat.codes, fill_value=fill_value
)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = False if result.ndim == 1 else True
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if result:
# propagate nan values to match longest sequence (GH 18450)
max_len = max(len(x) for x in result)
result = [
x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, columns=name, index=index)
else:
# Must be a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
|
def _wrap_result(
self, result, use_codes=True, name=None, expand=None, fill_value=np.nan
):
from pandas.core.index import Index, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes, fill_value=fill_value)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = False if result.ndim == 1 else True
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if result:
# propagate nan values to match longest sequence (GH 18450)
max_len = max(len(x) for x in result)
result = [
x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, columns=name, index=index)
else:
# Must be a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
|
https://github.com/pandas-dev/pandas/issues/23556
|
import pandas as pd
pd.Index(['a', 'b', 'aa'], dtype='category').str.replace('a', 'c')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\ProgramData\Miniconda3\envs\pandas-dev\lib\site-packages\pandas\core\strings.py", line 2430, in replace
return self._wrap_result(result)
File "C:\ProgramData\Miniconda3\envs\pandas-dev\lib\site-packages\pandas\core\strings.py", line 1964, in _wrap_result
result = take_1d(result, self._orig.cat.codes)
AttributeError: 'CategoricalIndex' object has no attribute 'cat'
|
AttributeError
|
def _print_as_set(s):
return "{" + "{arg}".format(arg=", ".join(pprint_thing(el) for el in s)) + "}"
|
def _print_as_set(s):
return "{{arg}}".format(arg=", ".join(pprint_thing(el) for el in s))
|
https://github.com/pandas-dev/pandas/issues/23549
|
import pandas as pd
df_list = pd.read_html('https://google.com', flavor='unknown')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/achabot/envs/tmp-b79b7181308bb9c1/lib/python3.7/site-packages/pandas/io/html.py", line 987, in read_html
displayed_only=displayed_only)
File "/Users/achabot/envs/tmp-b79b7181308bb9c1/lib/python3.7/site-packages/pandas/io/html.py", line 787, in _parse
flavor = _validate_flavor(flavor)
File "/Users/achabot/envs/tmp-b79b7181308bb9c1/lib/python3.7/site-packages/pandas/io/html.py", line 782, in _validate_flavor
valid=_print_as_set(valid_flavors)))
ValueError: {arg} is not a valid set of flavors, valid flavors are {arg}
|
ValueError
|
def searchsorted(self, value, side="left", sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise IncompatibleFrequency(msg)
value = value.ordinal
elif isinstance(value, compat.string_types):
try:
value = Period(value, freq=self.freq).ordinal
except DateParseError:
raise KeyError("Cannot interpret '{}' as period".format(value))
return self._ndarray_values.searchsorted(value, side=side, sorter=sorter)
|
def searchsorted(self, value, side="left", sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise IncompatibleFrequency(msg)
value = value.ordinal
elif isinstance(value, compat.string_types):
value = Period(value, freq=self.freq).ordinal
return self._ndarray_values.searchsorted(value, side=side, sorter=sorter)
|
https://github.com/pandas-dev/pandas/issues/22803
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item()
TypeError: an integer is required
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/period.py in get_loc(self, key, method, tolerance)
881 try:
--> 882 return self._engine.get_loc(key)
883 except KeyError:
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
KeyError: '__next__'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_datetime_string_with_reso()
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.dateutil_parse()
ValueError: Unknown datetime string format, unable to parse: __next__
During handling of the above exception, another exception occurred:
DateParseError Traceback (most recent call last)
<ipython-input-9-81179fad4d42> in <module>()
3 index = pandas.MultiIndex.from_tuples(tuples)
4 s = pandas.Series([1.0], index=index)
----> 5 print(s[~s.isnull()])
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/series.py in __getitem__(self, key)
802 raise
803
--> 804 if is_iterator(key):
805 key = list(key)
806
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/dtypes/inference.py in is_iterator(obj)
153 # Python 3 generators have
154 # __next__ instead of next
--> 155 return hasattr(obj, '__next__')
156
157
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/generic.py in __getattr__(self, name)
4372 return object.__getattribute__(self, name)
4373 else:
-> 4374 if self._info_axis._can_hold_identifiers_and_holds_name(name):
4375 return self[name]
4376 return object.__getattribute__(self, name)
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/base.py in _can_hold_identifiers_and_holds_name(self, name)
2109 """
2110 if self.is_object() or self.is_categorical():
-> 2111 return name in self
2112 return False
2113
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/multi.py in __contains__(self, key)
547 hash(key)
548 try:
--> 549 self.get_loc(key)
550 return True
551 except (LookupError, TypeError):
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/multi.py in get_loc(self, key, method)
2235
2236 if not isinstance(key, tuple):
-> 2237 loc = self._get_level_indexer(key, level=0)
2238
2239 # _get_level_indexer returns an empty slice if the key has
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/multi.py in _get_level_indexer(self, key, level, indexer)
2494 else:
2495
-> 2496 loc = level_index.get_loc(key)
2497 if isinstance(loc, slice):
2498 return loc
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/period.py in get_loc(self, key, method, tolerance)
886
887 try:
--> 888 asdt, parsed, reso = parse_time_string(key, self.freq)
889 key = asdt
890 except TypeError:
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_time_string()
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_datetime_string_with_reso()
DateParseError: Unknown datetime string format, unable to parse: __next__
|
TypeError
|
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
except DateParseError:
# A string with invalid format
raise KeyError("Cannot interpret '{}' as period".format(key))
try:
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
# as we have an invalid type
raise KeyError(key)
try:
ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
return self._int64index.get_loc(ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
|
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
try:
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
# as we have an invalid type
raise KeyError(key)
try:
ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
return self._int64index.get_loc(ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
|
https://github.com/pandas-dev/pandas/issues/22803
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item()
TypeError: an integer is required
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/period.py in get_loc(self, key, method, tolerance)
881 try:
--> 882 return self._engine.get_loc(key)
883 except KeyError:
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
KeyError: '__next__'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_datetime_string_with_reso()
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.dateutil_parse()
ValueError: Unknown datetime string format, unable to parse: __next__
During handling of the above exception, another exception occurred:
DateParseError Traceback (most recent call last)
<ipython-input-9-81179fad4d42> in <module>()
3 index = pandas.MultiIndex.from_tuples(tuples)
4 s = pandas.Series([1.0], index=index)
----> 5 print(s[~s.isnull()])
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/series.py in __getitem__(self, key)
802 raise
803
--> 804 if is_iterator(key):
805 key = list(key)
806
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/dtypes/inference.py in is_iterator(obj)
153 # Python 3 generators have
154 # __next__ instead of next
--> 155 return hasattr(obj, '__next__')
156
157
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/generic.py in __getattr__(self, name)
4372 return object.__getattribute__(self, name)
4373 else:
-> 4374 if self._info_axis._can_hold_identifiers_and_holds_name(name):
4375 return self[name]
4376 return object.__getattribute__(self, name)
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/base.py in _can_hold_identifiers_and_holds_name(self, name)
2109 """
2110 if self.is_object() or self.is_categorical():
-> 2111 return name in self
2112 return False
2113
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/multi.py in __contains__(self, key)
547 hash(key)
548 try:
--> 549 self.get_loc(key)
550 return True
551 except (LookupError, TypeError):
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/multi.py in get_loc(self, key, method)
2235
2236 if not isinstance(key, tuple):
-> 2237 loc = self._get_level_indexer(key, level=0)
2238
2239 # _get_level_indexer returns an empty slice if the key has
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/multi.py in _get_level_indexer(self, key, level, indexer)
2494 else:
2495
-> 2496 loc = level_index.get_loc(key)
2497 if isinstance(loc, slice):
2498 return loc
~/.virtualenvs/shackleton3/lib/python3.7/site-packages/pandas/core/indexes/period.py in get_loc(self, key, method, tolerance)
886
887 try:
--> 888 asdt, parsed, reso = parse_time_string(key, self.freq)
889 key = asdt
890 except TypeError:
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_time_string()
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_datetime_string_with_reso()
DateParseError: Unknown datetime string format, unable to parse: __next__
|
TypeError
|
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if is_scalar(key):
key = [key]
elif not isinstance(key, (list, Series, np.ndarray)):
try:
key = list(key)
except Exception:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == "integer":
if self.index.inferred_type == "integer":
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == "boolean":
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
|
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if not isinstance(key, (list, Series, np.ndarray, Series)):
try:
key = list(key)
except Exception:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == "integer":
if self.index.inferred_type == "integer":
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == "boolean":
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
|
https://github.com/pandas-dev/pandas/issues/23451
|
import pandas
x = pandas.Series([1,2,3], index=['Date','b','other'])
x
Date 1
b 2
other 3
dtype: int64
from datetime import date
x.Date = date.today()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python37\lib\site-packages\pandas\core\generic.py", line 4405, in __setattr__
self[name] = value
File "C:\Python37\lib\site-packages\pandas\core\series.py", line 939, in __setitem__
setitem(key, value)
File "C:\Python37\lib\site-packages\pandas\core\series.py", line 935, in setitem
self._set_with(key, value)
File "C:\Python37\lib\site-packages\pandas\core\series.py", line 983, in _set_with
self._set_labels(key, value)
File "C:\Python37\lib\site-packages\pandas\core\series.py", line 993, in _set_labels
raise ValueError('%s not contained in the index' % str(key[mask]))
ValueError: ['D' 'a' 't' 'e'] not contained in the index
x.b = date.today()
x.b
datetime.date(2018, 11, 1)
x
Date 1
b 2018-11-01
other 3
dtype: object
|
ValueError
|
def _convert_string_array(data, encoding, errors, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
errors : handler for encoding errors
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = (
Series(data.ravel()).str.encode(encoding, errors).values.reshape(data.shape)
)
# create the sized dtype
if itemsize is None:
ensured = ensure_object(data.ravel())
itemsize = max(1, libwriters.max_len_string_array(ensured))
data = np.asarray(data, dtype="S%d" % itemsize)
return data
|
def _convert_string_array(data, encoding, errors, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
errors : handler for encoding errors
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = (
Series(data.ravel()).str.encode(encoding, errors).values.reshape(data.shape)
)
# create the sized dtype
if itemsize is None:
ensured = ensure_object(data.ravel())
itemsize = libwriters.max_len_string_array(ensured)
data = np.asarray(data, dtype="S%d" % itemsize)
return data
|
https://github.com/pandas-dev/pandas/issues/12242
|
In [3]: store = pd.HDFStore('teststore.h5', 'w')
In [4]: chunk = pd.DataFrame({'V1':['a','b','c','d','e'], 'data':np.arange(5)})
In [5]: store.append('df', chunk, min_itemsize={'V1': 4})
In [6]: chunk = pd.DataFrame({'V1':['', ''], 'data': [3, 5]})
In [7]: store.append('df', chunk, min_itemsize={'V1': 4})
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-c9bafa18ead0> in <module>()
----> 1 store.append('df', chunk, min_itemsize={'V1': 4})
/Users/amcpherson/Anaconda/lib/python2.7/site-packages/pandas/io/pytables.pyc in append(self, key, value, format, append, columns, dropna, **kwargs)
905 kwargs = self._validate_format(format, kwargs)
906 self._write_to_group(key, value, append=append, dropna=dropna,
--> 907 **kwargs)
908
909 def append_to_multiple(self, d, value, selector, data_columns=None,
/Users/amcpherson/Anaconda/lib/python2.7/site-packages/pandas/io/pytables.pyc in _write_to_group(self, key, value, format, index, append, complib, encoding, **kwargs)
1250
1251 # write the object
-> 1252 s.write(obj=value, append=append, complib=complib, **kwargs)
1253
1254 if s.is_table and index:
/Users/amcpherson/Anaconda/lib/python2.7/site-packages/pandas/io/pytables.pyc in write(self, obj, axes, append, complib, complevel, fletcher32, min_itemsize, chunksize, expectedrows, dropna, **kwargs)
3755 self.create_axes(axes=axes, obj=obj, validate=append,
3756 min_itemsize=min_itemsize,
-> 3757 **kwargs)
3758
3759 for a in self.axes:
/Users/amcpherson/Anaconda/lib/python2.7/site-packages/pandas/io/pytables.pyc in create_axes(self, axes, obj, validate, nan_rep, data_columns, min_itemsize, **kwargs)
3432 self.values_axes.append(col)
3433 except (NotImplementedError, ValueError, TypeError) as e:
-> 3434 raise e
3435 except Exception as detail:
3436 raise Exception(
ValueError: Trying to store a string with len [8] in [V1] column but
this column has a limit of [4]!
Consider using min_itemsize to preset the sizes on these columns
|
ValueError
|
def astype(self, dtype, copy=True, errors="raise", **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True.
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'.
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
raise_on_error : raise on invalid input
.. deprecated:: 0.20.0
Use ``errors`` instead
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : same type as caller
Examples
--------
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> ser.astype('category', ordered=True, categories=[2, 1])
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
See also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
pandas.to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
elif self.ndim > 2:
raise NotImplementedError(
"astype() only accepts a dtype arg of type dict when "
"invoked on Series and DataFrames. A single dtype must be "
"specified when invoked on a Panel."
)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype[col_name], copy=copy))
else:
results.append(results.append(col.copy() if copy else col))
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099: columnwise conversion to categorical
# and extension dtype
results = (self[col].astype(dtype, copy=copy) for col in self)
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, **kwargs)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
|
def astype(self, dtype, copy=True, errors="raise", **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True.
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'.
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
raise_on_error : raise on invalid input
.. deprecated:: 0.20.0
Use ``errors`` instead
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : same type as caller
Examples
--------
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> ser.astype('category', ordered=True, categories=[2, 1])
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
See also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
pandas.to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
elif self.ndim > 2:
raise NotImplementedError(
"astype() only accepts a dtype arg of type dict when "
"invoked on Series and DataFrames. A single dtype must be "
"specified when invoked on a Panel."
)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype[col_name], copy=copy))
else:
results.append(results.append(col.copy() if copy else col))
elif is_categorical_dtype(dtype) and self.ndim > 1:
# GH 18099: columnwise conversion to categorical
results = (self[col].astype(dtype, copy=copy) for col in self)
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, **kwargs)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
|
https://github.com/pandas-dev/pandas/issues/22578
|
In [8]: df = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]], columns=['a', 'b'])
In [9]: df
Out[9]:
a b
0 1.0 2.0
1 3.0 4.0
2 5.0 6.0
In [10]: df.astype('Int64')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-10-b9d2763e69d8> in <module>()
----> 1 df.astype('Int64')
~/scipy/pandas/pandas/util/_decorators.py in wrapper(*args, **kwargs)
175 else:
176 kwargs[new_arg_name] = new_arg_value
--> 177 return func(*args, **kwargs)
178 return wrapper
179 return _deprecate_kwarg
~/scipy/pandas/pandas/core/generic.py in astype(self, dtype, copy, errors, **kwargs)
5162 # else, only a single dtype is given
5163 new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
-> 5164 **kwargs)
5165 return self._constructor(new_data).__finalize__(self)
5166
~/scipy/pandas/pandas/core/internals/managers.py in astype(self, dtype, **kwargs)
554
555 def astype(self, dtype, **kwargs):
--> 556 return self.apply('astype', dtype=dtype, **kwargs)
557
558 def convert(self, **kwargs):
~/scipy/pandas/pandas/core/internals/managers.py in apply(self, f, axes, filter, do_integrity_check, consolidate, **kwargs)
421
422 kwargs['mgr'] = self
--> 423 applied = getattr(b, f)(**kwargs)
424 result_blocks = _extend_blocks(applied, result_blocks)
425
~/scipy/pandas/pandas/core/internals/blocks.py in astype(self, dtype, copy, errors, values, **kwargs)
562 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
563 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 564 **kwargs)
565
566 def _astype(self, dtype, copy=False, errors='raise', values=None,
~/scipy/pandas/pandas/core/internals/blocks.py in _astype(self, dtype, copy, errors, values, klass, mgr, **kwargs)
679 "current ({newb_dtype} [{newb_size}])".format(
680 copy=copy, dtype=self.dtype.name,
--> 681 itemsize=self.itemsize, newb_dtype=newb.dtype.name,
682 newb_size=newb.itemsize))
683 return newb
AttributeError: 'FloatBlock' object has no attribute 'itemsize'
|
AttributeError
|
def _astype(
self, dtype, copy=False, errors="raise", values=None, klass=None, mgr=None, **kwargs
):
"""Coerce to the new type
Parameters
----------
dtype : str, dtype convertible
copy : boolean, default False
copy if indicated
errors : str, {'raise', 'ignore'}, default 'ignore'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Returns
-------
Block
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(list(errors_legal_values), errors)
)
raise ValueError(invalid_arg)
if inspect.isclass(dtype) and issubclass(
dtype, (PandasExtensionDtype, ExtensionDtype)
):
msg = (
"Expected an instance of {}, but got the class instead. "
"Try instantiating 'dtype'.".format(dtype.__name__)
)
raise TypeError(msg)
# may need to convert to categorical
if self.is_categorical_astype(dtype):
# deprecated 17636
if "categories" in kwargs or "ordered" in kwargs:
if isinstance(dtype, CategoricalDtype):
raise TypeError(
"Cannot specify a CategoricalDtype and also "
"`categories` or `ordered`. Use "
"`dtype=CategoricalDtype(categories, ordered)`"
" instead."
)
warnings.warn(
"specifying 'categories' or 'ordered' in "
".astype() is deprecated; pass a "
"CategoricalDtype instead",
FutureWarning,
stacklevel=7,
)
categories = kwargs.get("categories", None)
ordered = kwargs.get("ordered", None)
if com._any_not_none(categories, ordered):
dtype = CategoricalDtype(categories, ordered)
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
return self.make_block(Categorical(self.values, dtype=dtype))
# convert dtypes if needed
dtype = pandas_dtype(dtype)
# astype processing
if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if self.is_extension:
values = self.values.astype(dtype)
else:
if issubclass(dtype.type, (compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.get_values()
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
# TODO(extension)
# should we make this attribute?
try:
values = values.reshape(self.shape)
except AttributeError:
pass
newb = make_block(values, placement=self.mgr_locs, klass=klass, ndim=self.ndim)
except Exception: # noqa: E722
if errors == "raise":
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
"cannot set astype for copy = [{copy}] for dtype "
"({dtype} [{shape}]) to different shape "
"({newb_dtype} [{newb_shape}])".format(
copy=copy,
dtype=self.dtype.name,
shape=self.shape,
newb_dtype=newb.dtype.name,
newb_shape=newb.shape,
)
)
return newb
|
def _astype(
self, dtype, copy=False, errors="raise", values=None, klass=None, mgr=None, **kwargs
):
"""Coerce to the new type
Parameters
----------
dtype : str, dtype convertible
copy : boolean, default False
copy if indicated
errors : str, {'raise', 'ignore'}, default 'ignore'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Returns
-------
Block
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(list(errors_legal_values), errors)
)
raise ValueError(invalid_arg)
if inspect.isclass(dtype) and issubclass(
dtype, (PandasExtensionDtype, ExtensionDtype)
):
msg = (
"Expected an instance of {}, but got the class instead. "
"Try instantiating 'dtype'.".format(dtype.__name__)
)
raise TypeError(msg)
# may need to convert to categorical
if self.is_categorical_astype(dtype):
# deprecated 17636
if "categories" in kwargs or "ordered" in kwargs:
if isinstance(dtype, CategoricalDtype):
raise TypeError(
"Cannot specify a CategoricalDtype and also "
"`categories` or `ordered`. Use "
"`dtype=CategoricalDtype(categories, ordered)`"
" instead."
)
warnings.warn(
"specifying 'categories' or 'ordered' in "
".astype() is deprecated; pass a "
"CategoricalDtype instead",
FutureWarning,
stacklevel=7,
)
categories = kwargs.get("categories", None)
ordered = kwargs.get("ordered", None)
if com._any_not_none(categories, ordered):
dtype = CategoricalDtype(categories, ordered)
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
return self.make_block(Categorical(self.values, dtype=dtype))
# convert dtypes if needed
dtype = pandas_dtype(dtype)
# astype processing
if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if self.is_extension:
values = self.values.astype(dtype)
else:
if issubclass(dtype.type, (compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.get_values()
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
# TODO(extension)
# should we make this attribute?
try:
values = values.reshape(self.shape)
except AttributeError:
pass
newb = make_block(values, placement=self.mgr_locs, klass=klass, ndim=self.ndim)
except Exception: # noqa: E722
if errors == "raise":
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
"cannot set astype for copy = [{copy}] for dtype "
"({dtype} [{itemsize}]) with smaller itemsize than "
"current ({newb_dtype} [{newb_size}])".format(
copy=copy,
dtype=self.dtype.name,
itemsize=self.itemsize,
newb_dtype=newb.dtype.name,
newb_size=newb.itemsize,
)
)
return newb
|
https://github.com/pandas-dev/pandas/issues/22578
|
In [8]: df = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]], columns=['a', 'b'])
In [9]: df
Out[9]:
a b
0 1.0 2.0
1 3.0 4.0
2 5.0 6.0
In [10]: df.astype('Int64')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-10-b9d2763e69d8> in <module>()
----> 1 df.astype('Int64')
~/scipy/pandas/pandas/util/_decorators.py in wrapper(*args, **kwargs)
175 else:
176 kwargs[new_arg_name] = new_arg_value
--> 177 return func(*args, **kwargs)
178 return wrapper
179 return _deprecate_kwarg
~/scipy/pandas/pandas/core/generic.py in astype(self, dtype, copy, errors, **kwargs)
5162 # else, only a single dtype is given
5163 new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
-> 5164 **kwargs)
5165 return self._constructor(new_data).__finalize__(self)
5166
~/scipy/pandas/pandas/core/internals/managers.py in astype(self, dtype, **kwargs)
554
555 def astype(self, dtype, **kwargs):
--> 556 return self.apply('astype', dtype=dtype, **kwargs)
557
558 def convert(self, **kwargs):
~/scipy/pandas/pandas/core/internals/managers.py in apply(self, f, axes, filter, do_integrity_check, consolidate, **kwargs)
421
422 kwargs['mgr'] = self
--> 423 applied = getattr(b, f)(**kwargs)
424 result_blocks = _extend_blocks(applied, result_blocks)
425
~/scipy/pandas/pandas/core/internals/blocks.py in astype(self, dtype, copy, errors, values, **kwargs)
562 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
563 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 564 **kwargs)
565
566 def _astype(self, dtype, copy=False, errors='raise', values=None,
~/scipy/pandas/pandas/core/internals/blocks.py in _astype(self, dtype, copy, errors, values, klass, mgr, **kwargs)
679 "current ({newb_dtype} [{newb_size}])".format(
680 copy=copy, dtype=self.dtype.name,
--> 681 itemsize=self.itemsize, newb_dtype=newb.dtype.name,
682 newb_size=newb.itemsize))
683 return newb
AttributeError: 'FloatBlock' object has no attribute 'itemsize'
|
AttributeError
|
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_na:
if getattr(self.block, "is_object", False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, "is_datetimetz", False) or is_datetimetz(
empty_dtype
):
if self.block is None:
array = empty_dtype.construct_array_type()
missing_arr = array([fill_value], dtype=empty_dtype)
return missing_arr.repeat(self.shape[1])
pass
elif getattr(self.block, "is_categorical", False):
pass
elif getattr(self.block, "is_sparse", False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool and not self.block.is_categorical:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_extension:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax, fill_value=fill_value)
return values
|
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_na:
if getattr(self.block, "is_object", False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, "is_datetimetz", False) or is_datetimetz(
empty_dtype
):
pass
elif getattr(self.block, "is_categorical", False):
pass
elif getattr(self.block, "is_sparse", False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool and not self.block.is_categorical:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_extension:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax, fill_value=fill_value)
return values
|
https://github.com/pandas-dev/pandas/issues/22796
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-42-457226d62f27> in <module>()
1 a = pd.DataFrame([[1, 2]], dtype='datetime64[ns, UTC]')
2 b = pd.DataFrame([[3]], dtype='datetime64[ns, UTC]')
----> 3 pd.concat([a, b])
~/.pyenv/versions/3.6.2/envs/general/lib/python3.6/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, join_axes, ignore_index, keys, levels, names, verify_integrity, sort, copy)
224 verify_integrity=verify_integrity,
225 copy=copy, sort=sort)
--> 226 return op.get_result()
227
228
~/.pyenv/versions/3.6.2/envs/general/lib/python3.6/site-packages/pandas/core/reshape/concat.py in get_result(self)
421 new_data = concatenate_block_managers(
422 mgrs_indexers, self.new_axes, concat_axis=self.axis,
--> 423 copy=self.copy)
424 if not self.copy:
425 new_data._consolidate_inplace()
~/.pyenv/versions/3.6.2/envs/general/lib/python3.6/site-packages/pandas/core/internals.py in concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy)
5419 else:
5420 b = make_block(
-> 5421 concatenate_join_units(join_units, concat_axis, copy=copy),
5422 placement=placement)
5423 blocks.append(b)
~/.pyenv/versions/3.6.2/envs/general/lib/python3.6/site-packages/pandas/core/internals.py in concatenate_join_units(join_units, concat_axis, copy)
5563 to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
5564 upcasted_na=upcasted_na)
-> 5565 for ju in join_units]
5566
5567 if len(to_concat) == 1:
~/.pyenv/versions/3.6.2/envs/general/lib/python3.6/site-packages/pandas/core/internals.py in <listcomp>(.0)
5563 to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
5564 upcasted_na=upcasted_na)
-> 5565 for ju in join_units]
5566
5567 if len(to_concat) == 1:
~/.pyenv/versions/3.6.2/envs/general/lib/python3.6/site-packages/pandas/core/internals.py in get_reindexed_values(self, empty_dtype, upcasted_na)
5849
5850 if not self.indexers:
-> 5851 if not self.block._can_consolidate:
5852 # preserve these for validation in _concat_compat
5853 return self.block.values
AttributeError: 'NoneType' object has no attribute '_can_consolidate'
|
AttributeError
|
def dispatch_to_extension_op(op, left, right):
"""
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
"""
# The op calls will raise TypeError if the op is not defined
# on the ExtensionArray
# TODO(jreback)
# we need to listify to avoid ndarray, or non-same-type extension array
# dispatching
if is_extension_array_dtype(left):
new_left = left.values
if isinstance(right, np.ndarray):
# handle numpy scalars, this is a PITA
# TODO(jreback)
new_right = lib.item_from_zerodim(right)
if is_scalar(new_right):
new_right = [new_right]
new_right = list(new_right)
elif is_extension_array_dtype(right) and type(left) != type(right):
new_right = list(right)
else:
new_right = right
else:
new_left = list(left.values)
new_right = right
res_values = op(new_left, new_right)
res_name = get_op_result_name(left, right)
if op.__name__ == "divmod":
return _construct_divmod_result(left, res_values, left.index, res_name)
return _construct_result(left, res_values, left.index, res_name)
|
def dispatch_to_extension_op(op, left, right):
"""
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
"""
# The op calls will raise TypeError if the op is not defined
# on the ExtensionArray
# TODO(jreback)
# we need to listify to avoid ndarray, or non-same-type extension array
# dispatching
if is_extension_array_dtype(left):
new_left = left.values
if isinstance(right, np.ndarray):
# handle numpy scalars, this is a PITA
# TODO(jreback)
new_right = lib.item_from_zerodim(right)
if is_scalar(new_right):
new_right = [new_right]
new_right = list(new_right)
elif is_extension_array_dtype(right) and type(left) != type(right):
new_right = list(new_right)
else:
new_right = right
else:
new_left = list(left.values)
new_right = right
res_values = op(new_left, new_right)
res_name = get_op_result_name(left, right)
if op.__name__ == "divmod":
return _construct_divmod_result(left, res_values, left.index, res_name)
return _construct_result(left, res_values, left.index, res_name)
|
https://github.com/pandas-dev/pandas/issues/22478
|
In [41]: s
Out[41]:
0 1
1 2
2 3
dtype: Int64
In [42]: s.values
Out[42]: IntegerArray([1, 2, 3], dtype='Int64')
In [43]: s + s.values
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-43-c3f015376225> in <module>()
----> 1 s + s.values
C:\Users\Public\pandas-peter\pandas\core\ops.py in wrapper(left, right)
1231 (is_extension_array_dtype(right) and not is_scalar(right
))):
1232 # GH#22378 disallow scalar to exclude e.g. "category", "Int6
4"
-> 1233 return dispatch_to_extension_op(op, left, right)
1234
1235 elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
C:\Users\Public\pandas-peter\pandas\core\ops.py in dispatch_to_extension_op(op,
left, right)
1152 new_right = list(new_right)
1153 elif is_extension_array_dtype(right) and type(left) != type(righ
t):
-> 1154 new_right = list(new_right)
1155 else:
1156 new_right = right
UnboundLocalError: local variable 'new_right' referenced before assignment
|
UnboundLocalError
|
def _ensure_localized(self, arg, ambiguous="raise", from_utc=False):
"""
ensure that we are re-localized
This is for compat as we can then call this on all datetimelike
indexes generally (ignored for Period/Timedelta)
Parameters
----------
arg : DatetimeIndex / i8 ndarray
ambiguous : str, bool, or bool-ndarray, default 'raise'
from_utc : bool, default False
If True, localize the i8 ndarray to UTC first before converting to
the appropriate tz. If False, localize directly to the tz.
Returns
-------
localized DTI
"""
# reconvert to local tz
if getattr(self, "tz", None) is not None:
if not isinstance(arg, ABCIndexClass):
arg = self._simple_new(arg)
if from_utc:
arg = arg.tz_localize("UTC").tz_convert(self.tz)
else:
arg = arg.tz_localize(self.tz, ambiguous=ambiguous)
return arg
|
def _ensure_localized(self, result, ambiguous="raise"):
"""
ensure that we are re-localized
This is for compat as we can then call this on all datetimelike
indexes generally (ignored for Period/Timedelta)
Parameters
----------
result : DatetimeIndex / i8 ndarray
ambiguous : str, bool, or bool-ndarray
default 'raise'
Returns
-------
localized DTI
"""
# reconvert to local tz
if getattr(self, "tz", None) is not None:
if not isinstance(result, ABCIndexClass):
result = self._simple_new(result)
result = result.tz_localize(self.tz, ambiguous=ambiguous)
return result
|
https://github.com/pandas-dev/pandas/issues/18885
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 58, in merge
return op.get_result()
File "(...)//venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 604, in get_result
self._maybe_add_join_keys(result, left_indexer, right_indexer)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 715, in _maybe_add_join_keys
key_col = Index(lvals).where(~mask, rvals)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 809, in where
result = self._ensure_localized(result)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)//venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other, to_utc=True)
values = _ensure_datetimelike_to_i8(self, to_utc=True)
result = np.where(cond, values, other).astype("i8")
result = self._ensure_localized(result, from_utc=True)
return self._shallow_copy(result, **self._get_attributes_dict())
|
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other)
values = _ensure_datetimelike_to_i8(self)
result = np.where(cond, values, other).astype("i8")
result = self._ensure_localized(result)
return self._shallow_copy(result, **self._get_attributes_dict())
|
https://github.com/pandas-dev/pandas/issues/18885
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 58, in merge
return op.get_result()
File "(...)//venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 604, in get_result
self._maybe_add_join_keys(result, left_indexer, right_indexer)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 715, in _maybe_add_join_keys
key_col = Index(lvals).where(~mask, rvals)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 809, in where
result = self._ensure_localized(result)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)//venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def _ensure_datetimelike_to_i8(other, to_utc=False):
"""
helper for coercing an input scalar or array to i8
Parameters
----------
other : 1d array
to_utc : bool, default False
If True, convert the values to UTC before extracting the i8 values
If False, extract the i8 values directly.
Returns
-------
i8 1d array
"""
if is_scalar(other) and isna(other):
return iNaT
elif isinstance(other, ABCIndexClass):
# convert tz if needed
if getattr(other, "tz", None) is not None:
if to_utc:
other = other.tz_convert("UTC")
else:
other = other.tz_localize(None)
else:
try:
return np.array(other, copy=False).view("i8")
except TypeError:
# period array cannot be coerces to int
other = Index(other)
return other.asi8
|
def _ensure_datetimelike_to_i8(other):
"""helper for coercing an input scalar or array to i8"""
if is_scalar(other) and isna(other):
other = iNaT
elif isinstance(other, ABCIndexClass):
# convert tz if needed
if getattr(other, "tz", None) is not None:
other = other.tz_localize(None).asi8
else:
other = other.asi8
else:
try:
other = np.array(other, copy=False).view("i8")
except TypeError:
# period array cannot be coerces to int
other = Index(other).asi8
return other
|
https://github.com/pandas-dev/pandas/issues/18885
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 58, in merge
return op.get_result()
File "(...)//venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 604, in get_result
self._maybe_add_join_keys(result, left_indexer, right_indexer)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/reshape/merge.py", line 715, in _maybe_add_join_keys
key_col = Index(lvals).where(~mask, rvals)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 809, in where
result = self._ensure_localized(result)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)//venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)//venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(
"Column(s) {selection} already selected".format(selection=self._selection)
)
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError(
"Columns not found: {missing}".format(missing=str(bad_keys)[1:-1])
)
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
|
def __getitem__(self, key):
if self._selection is not None:
raise Exception(
"Column(s) {selection} already selected".format(selection=self._selection)
)
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError(
"Columns not found: {missing}".format(missing=str(bad_keys)[1:-1])
)
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
|
https://github.com/pandas-dev/pandas/issues/15072
|
In [13]: df = pd.DataFrame({"A": pd.to_datetime(['2015', '2017']), "B": [1, 1]})
In [14]: df
Out[14]:
A B
0 2015-01-01 1
1 2017-01-01 1
In [15]: df.set_index("A").groupby([0, 0]).resample("AS")
Out[15]: DatetimeIndexResamplerGroupby [freq=<YearBegin: month=1>, axis=0, closed=left, label=left, convention=e, base=0]
In [16]: df.set_index("A").groupby([0, 0]).resample("AS").agg(['sum', 'count'])
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-16-5f1c18a8d4ac> in <module>()
----> 1 df.set_index("A").groupby([0, 0]).resample("AS").agg(['sum', 'count'])
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/resample.py in aggregate(self, arg, *args, **kwargs)
339
340 self._set_binner()
--> 341 result, how = self._aggregate(arg, *args, **kwargs)
342 if result is None:
343 result = self._groupby_and_aggregate(arg,
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate(self, arg, *args, **kwargs)
538 return self._aggregate_multiple_funcs(arg,
539 _level=_level,
--> 540 _axis=_axis), None
541 else:
542 result = None
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate_multiple_funcs(self, arg, _level, _axis)
583 try:
584 colg = self._gotitem(col, ndim=1, subset=obj[col])
--> 585 results.append(colg.aggregate(arg))
586 keys.append(col)
587 except (TypeError, DataError):
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/resample.py in aggregate(self, arg, *args, **kwargs)
339
340 self._set_binner()
--> 341 result, how = self._aggregate(arg, *args, **kwargs)
342 if result is None:
343 result = self._groupby_and_aggregate(arg,
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate(self, arg, *args, **kwargs)
538 return self._aggregate_multiple_funcs(arg,
539 _level=_level,
--> 540 _axis=_axis), None
541 else:
542 result = None
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate_multiple_funcs(self, arg, _level, _axis)
582 for col in obj:
583 try:
--> 584 colg = self._gotitem(col, ndim=1, subset=obj[col])
585 results.append(colg.aggregate(arg))
586 keys.append(col)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _gotitem(self, key, ndim, subset)
675 for attr in self._attributes])
676 self = self.__class__(subset,
--> 677 groupby=self._groupby[key],
678 parent=self,
679 **kwargs)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in __getitem__(self, key)
241 if self._selection is not None:
242 raise Exception('Column(s) {selection} already selected'
--> 243 .format(selection=self._selection))
244
245 if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
Exception: Column(s) B already selected
|
Exception
|
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = {attr: getattr(self, attr) for attr in self._attributes}
# Try to select from a DataFrame, falling back to a Series
try:
groupby = self._groupby[key]
except IndexError:
groupby = self._groupby
self = self.__class__(subset, groupby=groupby, parent=self, **kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
|
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = {attr: getattr(self, attr) for attr in self._attributes}
self = self.__class__(subset, groupby=self._groupby[key], parent=self, **kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
|
https://github.com/pandas-dev/pandas/issues/15072
|
In [13]: df = pd.DataFrame({"A": pd.to_datetime(['2015', '2017']), "B": [1, 1]})
In [14]: df
Out[14]:
A B
0 2015-01-01 1
1 2017-01-01 1
In [15]: df.set_index("A").groupby([0, 0]).resample("AS")
Out[15]: DatetimeIndexResamplerGroupby [freq=<YearBegin: month=1>, axis=0, closed=left, label=left, convention=e, base=0]
In [16]: df.set_index("A").groupby([0, 0]).resample("AS").agg(['sum', 'count'])
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-16-5f1c18a8d4ac> in <module>()
----> 1 df.set_index("A").groupby([0, 0]).resample("AS").agg(['sum', 'count'])
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/resample.py in aggregate(self, arg, *args, **kwargs)
339
340 self._set_binner()
--> 341 result, how = self._aggregate(arg, *args, **kwargs)
342 if result is None:
343 result = self._groupby_and_aggregate(arg,
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate(self, arg, *args, **kwargs)
538 return self._aggregate_multiple_funcs(arg,
539 _level=_level,
--> 540 _axis=_axis), None
541 else:
542 result = None
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate_multiple_funcs(self, arg, _level, _axis)
583 try:
584 colg = self._gotitem(col, ndim=1, subset=obj[col])
--> 585 results.append(colg.aggregate(arg))
586 keys.append(col)
587 except (TypeError, DataError):
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/resample.py in aggregate(self, arg, *args, **kwargs)
339
340 self._set_binner()
--> 341 result, how = self._aggregate(arg, *args, **kwargs)
342 if result is None:
343 result = self._groupby_and_aggregate(arg,
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate(self, arg, *args, **kwargs)
538 return self._aggregate_multiple_funcs(arg,
539 _level=_level,
--> 540 _axis=_axis), None
541 else:
542 result = None
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _aggregate_multiple_funcs(self, arg, _level, _axis)
582 for col in obj:
583 try:
--> 584 colg = self._gotitem(col, ndim=1, subset=obj[col])
585 results.append(colg.aggregate(arg))
586 keys.append(col)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in _gotitem(self, key, ndim, subset)
675 for attr in self._attributes])
676 self = self.__class__(subset,
--> 677 groupby=self._groupby[key],
678 parent=self,
679 **kwargs)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/base.py in __getitem__(self, key)
241 if self._selection is not None:
242 raise Exception('Column(s) {selection} already selected'
--> 243 .format(selection=self._selection))
244
245 if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
Exception: Column(s) B already selected
|
Exception
|
def _round(self, freq, rounder, ambiguous):
# round the local times
values = _ensure_datetimelike_to_i8(self)
result = round_ns(values, rounder, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
attribs = self._get_attributes_dict()
if "freq" in attribs:
attribs["freq"] = None
if "tz" in attribs:
attribs["tz"] = None
return self._ensure_localized(self._shallow_copy(result, **attribs), ambiguous)
|
def _round(self, freq, rounder):
# round the local times
values = _ensure_datetimelike_to_i8(self)
result = round_ns(values, rounder, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
attribs = self._get_attributes_dict()
if "freq" in attribs:
attribs["freq"] = None
if "tz" in attribs:
attribs["tz"] = None
return self._ensure_localized(self._shallow_copy(result, **attribs))
|
https://github.com/pandas-dev/pandas/issues/18946
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/accessor.py", line 115, in f
return self._delegate_method(name, *args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/accessors.py", line 131, in _delegate_method
result = method(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 118, in floor
return self._round(freq, np.floor)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 110, in _round
self._shallow_copy(result, **attribs))
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)/venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def round(self, freq, ambiguous="raise"):
return self._round(freq, np.round, ambiguous)
|
def round(self, freq, *args, **kwargs):
return self._round(freq, np.round)
|
https://github.com/pandas-dev/pandas/issues/18946
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/accessor.py", line 115, in f
return self._delegate_method(name, *args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/accessors.py", line 131, in _delegate_method
result = method(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 118, in floor
return self._round(freq, np.floor)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 110, in _round
self._shallow_copy(result, **attribs))
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)/venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def floor(self, freq, ambiguous="raise"):
return self._round(freq, np.floor, ambiguous)
|
def floor(self, freq):
return self._round(freq, np.floor)
|
https://github.com/pandas-dev/pandas/issues/18946
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/accessor.py", line 115, in f
return self._delegate_method(name, *args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/accessors.py", line 131, in _delegate_method
result = method(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 118, in floor
return self._round(freq, np.floor)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 110, in _round
self._shallow_copy(result, **attribs))
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)/venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def ceil(self, freq, ambiguous="raise"):
return self._round(freq, np.ceil, ambiguous)
|
def ceil(self, freq):
return self._round(freq, np.ceil)
|
https://github.com/pandas-dev/pandas/issues/18946
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/accessor.py", line 115, in f
return self._delegate_method(name, *args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/accessors.py", line 131, in _delegate_method
result = method(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 118, in floor
return self._round(freq, np.floor)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 110, in _round
self._shallow_copy(result, **attribs))
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)/venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def _ensure_localized(self, result, ambiguous="raise"):
"""
ensure that we are re-localized
This is for compat as we can then call this on all datetimelike
indexes generally (ignored for Period/Timedelta)
Parameters
----------
result : DatetimeIndex / i8 ndarray
ambiguous : str, bool, or bool-ndarray
default 'raise'
Returns
-------
localized DTI
"""
# reconvert to local tz
if getattr(self, "tz", None) is not None:
if not isinstance(result, ABCIndexClass):
result = self._simple_new(result)
result = result.tz_localize(self.tz, ambiguous=ambiguous)
return result
|
def _ensure_localized(self, result):
"""
ensure that we are re-localized
This is for compat as we can then call this on all datetimelike
indexes generally (ignored for Period/Timedelta)
Parameters
----------
result : DatetimeIndex / i8 ndarray
Returns
-------
localized DTI
"""
# reconvert to local tz
if getattr(self, "tz", None) is not None:
if not isinstance(result, ABCIndexClass):
result = self._simple_new(result)
result = result.tz_localize(self.tz)
return result
|
https://github.com/pandas-dev/pandas/issues/18946
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "(...)/venv/lib/python3.6/site-packages/pandas/core/accessor.py", line 115, in f
return self._delegate_method(name, *args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/accessors.py", line 131, in _delegate_method
result = method(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 118, in floor
return self._round(freq, np.floor)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 110, in _round
self._shallow_copy(result, **attribs))
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 230, in _ensure_localized
result = result.tz_localize(self.tz)
File "(...)/venv/lib/python3.6/site-packages/pandas/util/_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "(...)/venv/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py", line 1858, in tz_localize
errors=errors)
File "pandas/_libs/tslib.pyx", line 3593, in pandas._libs.tslib.tz_localize_to_utc
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2017-10-29 02:00:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
"an instance of %r" % type(ax).__name__
)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = _get_range_edges(
ax.min(), ax.max(), self.freq, closed=self.closed, base=self.base
)
tz = ax.tz
# GH #12037
# use first/last directly instead of call replace() on them
# because replace() will swallow the nanosecond part
# thus last bin maybe slightly before the end if the end contains
# nanosecond part and lead to `Values falls after last bin` error
binner = labels = DatetimeIndex(
freq=self.freq, start=first, end=last, tz=tz, name=ax.name
)
# GH 15549
# In edge case of tz-aware resapmling binner last index can be
# less than the last variable in data object, this happens because of
# DST time change
if len(binner) > 1 and binner[-1] < last:
extra_date_range = pd.date_range(
binner[-1], last + self.freq, freq=self.freq, tz=tz, name=ax.name
)
binner = labels = binner.append(extra_date_range[1:])
# a little hack
trimmed = False
if len(binner) > 2 and binner[-2] == last and self.closed == "right":
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == "right":
labels = binner
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[: len(bins)]
return binner, bins, labels
|
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError(
"axis must be a DatetimeIndex, but got "
"an instance of %r" % type(ax).__name__
)
if len(ax) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = ax.min(), ax.max()
first, last = _get_range_edges(
first, last, self.freq, closed=self.closed, base=self.base
)
tz = ax.tz
# GH #12037
# use first/last directly instead of call replace() on them
# because replace() will swallow the nanosecond part
# thus last bin maybe slightly before the end if the end contains
# nanosecond part and lead to `Values falls after last bin` error
binner = labels = DatetimeIndex(
freq=self.freq, start=first, end=last, tz=tz, name=ax.name
)
# GH 15549
# In edge case of tz-aware resapmling binner last index can be
# less than the last variable in data object, this happens because of
# DST time change
if len(binner) > 1 and binner[-1] < last:
extra_date_range = pd.date_range(
binner[-1], last + self.freq, freq=self.freq, tz=tz, name=ax.name
)
binner = labels = binner.append(extra_date_range[1:])
# a little hack
trimmed = False
if len(binner) > 2 and binner[-2] == last and self.closed == "right":
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == "right":
labels = binner
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == "right":
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[: len(bins)]
return binner, bins, labels
|
https://github.com/pandas-dev/pandas/issues/10117
|
In [27]: idx = pd.date_range("2014-10-25 22:00:00", "2014-10-26 00:30:00",
freq="30T", tz="Europe/London")
In [28]: series = pd.Series(np.random.randn(len(idx)), index=idx)
In [31]: series
Out[31]:
2014-10-25 22:00:00+01:00 -0.874014
2014-10-25 22:30:00+01:00 1.316258
2014-10-25 23:00:00+01:00 -1.334616
2014-10-25 23:30:00+01:00 -1.200390
2014-10-26 00:00:00+01:00 -0.341764
2014-10-26 00:30:00+01:00 1.509091
Freq: 30T, dtype: float64
In [29]: series.resample('30T')
---------------------------------------------------------------------------
AmbiguousTimeError Traceback (most recent call last)
<ipython-input-29-bb9e86068ce1> in <module>()
----> 1 series.resample('30T')
/usr/local/lib/python2.7/dist-packages/pandas/core/generic.pyc in resample(self, rule, how, axis, fill_method, closed, label, convention, kind, loffset, limit, base)
3195 fill_method=fill_method, convention=convention,
3196 limit=limit, base=base)
-> 3197 return sampler.resample(self).__finalize__(self)
3198
3199 def first(self, offset):
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in resample(self, obj)
83
84 if isinstance(ax, DatetimeIndex):
---> 85 rs = self._resample_timestamps()
86 elif isinstance(ax, PeriodIndex):
87 offset = to_offset(self.freq)
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _resample_timestamps(self, kind)
273 axlabels = self.ax
274
--> 275 self._get_binner_for_resample(kind=kind)
276 grouper = self.grouper
277 binner = self.binner
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_binner_for_resample(self, kind)
121 kind = self.kind
122 if kind is None or kind == 'timestamp':
--> 123 self.binner, bins, binlabels = self._get_time_bins(ax)
124 elif kind == 'timedelta':
125 self.binner, bins, binlabels = self._get_time_delta_bins(ax)
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_time_bins(self, ax)
162 first, last = ax.min(), ax.max()
163 first, last = _get_range_edges(first, last, self.freq, closed=self.closed,
--> 164 base=self.base)
165 tz = ax.tz
166 binner = labels = DatetimeIndex(freq=self.freq,
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_range_edges(first, last, offset, closed, base)
392 if (is_day and day_nanos % offset.nanos == 0) or not is_day:
393 return _adjust_dates_anchored(first, last, offset,
--> 394 closed=closed, base=base)
395
396 if not isinstance(offset, Tick): # and first.time() != last.time():
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _adjust_dates_anchored(first, last, offset, closed, base)
459
460 return (Timestamp(fresult).tz_localize(first_tzinfo),
--> 461 Timestamp(lresult).tz_localize(first_tzinfo))
462
463
pandas/tslib.pyx in pandas.tslib.Timestamp.tz_localize (pandas/tslib.c:10535)()
pandas/tslib.pyx in pandas.tslib.tz_localize_to_utc (pandas/tslib.c:50297)()
AmbiguousTimeError: Cannot infer dst time from Timestamp('2014-10-26 01:00:00'),
try using the 'ambiguous' argument
|
AmbiguousTimeError
|
def _get_range_edges(first, last, offset, closed="left", base=0):
if isinstance(offset, Tick):
is_day = isinstance(offset, Day)
day_nanos = delta_to_nanoseconds(timedelta(1))
# #1165
if (is_day and day_nanos % offset.nanos == 0) or not is_day:
return _adjust_dates_anchored(first, last, offset, closed=closed, base=base)
else:
first = first.normalize()
last = last.normalize()
if closed == "left":
first = Timestamp(offset.rollback(first))
else:
first = Timestamp(first - offset)
last = Timestamp(last + offset)
return first, last
|
def _get_range_edges(first, last, offset, closed="left", base=0):
if isinstance(offset, compat.string_types):
offset = to_offset(offset)
if isinstance(offset, Tick):
is_day = isinstance(offset, Day)
day_nanos = delta_to_nanoseconds(timedelta(1))
# #1165
if (is_day and day_nanos % offset.nanos == 0) or not is_day:
return _adjust_dates_anchored(first, last, offset, closed=closed, base=base)
if not isinstance(offset, Tick): # and first.time() != last.time():
# hack!
first = first.normalize()
last = last.normalize()
if closed == "left":
first = Timestamp(offset.rollback(first))
else:
first = Timestamp(first - offset)
last = Timestamp(last + offset)
return first, last
|
https://github.com/pandas-dev/pandas/issues/10117
|
In [27]: idx = pd.date_range("2014-10-25 22:00:00", "2014-10-26 00:30:00",
freq="30T", tz="Europe/London")
In [28]: series = pd.Series(np.random.randn(len(idx)), index=idx)
In [31]: series
Out[31]:
2014-10-25 22:00:00+01:00 -0.874014
2014-10-25 22:30:00+01:00 1.316258
2014-10-25 23:00:00+01:00 -1.334616
2014-10-25 23:30:00+01:00 -1.200390
2014-10-26 00:00:00+01:00 -0.341764
2014-10-26 00:30:00+01:00 1.509091
Freq: 30T, dtype: float64
In [29]: series.resample('30T')
---------------------------------------------------------------------------
AmbiguousTimeError Traceback (most recent call last)
<ipython-input-29-bb9e86068ce1> in <module>()
----> 1 series.resample('30T')
/usr/local/lib/python2.7/dist-packages/pandas/core/generic.pyc in resample(self, rule, how, axis, fill_method, closed, label, convention, kind, loffset, limit, base)
3195 fill_method=fill_method, convention=convention,
3196 limit=limit, base=base)
-> 3197 return sampler.resample(self).__finalize__(self)
3198
3199 def first(self, offset):
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in resample(self, obj)
83
84 if isinstance(ax, DatetimeIndex):
---> 85 rs = self._resample_timestamps()
86 elif isinstance(ax, PeriodIndex):
87 offset = to_offset(self.freq)
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _resample_timestamps(self, kind)
273 axlabels = self.ax
274
--> 275 self._get_binner_for_resample(kind=kind)
276 grouper = self.grouper
277 binner = self.binner
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_binner_for_resample(self, kind)
121 kind = self.kind
122 if kind is None or kind == 'timestamp':
--> 123 self.binner, bins, binlabels = self._get_time_bins(ax)
124 elif kind == 'timedelta':
125 self.binner, bins, binlabels = self._get_time_delta_bins(ax)
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_time_bins(self, ax)
162 first, last = ax.min(), ax.max()
163 first, last = _get_range_edges(first, last, self.freq, closed=self.closed,
--> 164 base=self.base)
165 tz = ax.tz
166 binner = labels = DatetimeIndex(freq=self.freq,
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_range_edges(first, last, offset, closed, base)
392 if (is_day and day_nanos % offset.nanos == 0) or not is_day:
393 return _adjust_dates_anchored(first, last, offset,
--> 394 closed=closed, base=base)
395
396 if not isinstance(offset, Tick): # and first.time() != last.time():
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _adjust_dates_anchored(first, last, offset, closed, base)
459
460 return (Timestamp(fresult).tz_localize(first_tzinfo),
--> 461 Timestamp(lresult).tz_localize(first_tzinfo))
462
463
pandas/tslib.pyx in pandas.tslib.Timestamp.tz_localize (pandas/tslib.c:10535)()
pandas/tslib.pyx in pandas.tslib.tz_localize_to_utc (pandas/tslib.c:50297)()
AmbiguousTimeError: Cannot infer dst time from Timestamp('2014-10-26 01:00:00'),
try using the 'ambiguous' argument
|
AmbiguousTimeError
|
def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
# First and last offsets should be calculated from the start day to fix an
# error cause by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
# GH 10117 & GH 19375. If first and last contain timezone information,
# Perform the calculation in UTC in order to avoid localizing on an
# Ambiguous or Nonexistent time.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
start_day_nanos = first.normalize().value
if first_tzinfo is not None:
first = first.tz_convert("UTC")
if last_tzinfo is not None:
last = last.tz_convert("UTC")
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
fresult = Timestamp(fresult)
lresult = Timestamp(lresult)
if first_tzinfo is not None:
fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo)
if last_tzinfo is not None:
lresult = lresult.tz_localize("UTC").tz_convert(last_tzinfo)
return fresult, lresult
|
def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
# First and last offsets should be calculated from the start day to fix an
# error cause by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
# 14682 - Since we need to drop the TZ information to perform
# the adjustment in the presence of a DST change,
# save TZ Info and the DST state of the first and last parameters
# so that we can accurately rebuild them at the end.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
first_dst = bool(first.dst())
last_dst = bool(last.dst())
first = first.tz_localize(None)
last = last.tz_localize(None)
start_day_nanos = first.normalize().value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (
Timestamp(fresult).tz_localize(first_tzinfo, ambiguous=first_dst),
Timestamp(lresult).tz_localize(last_tzinfo, ambiguous=last_dst),
)
|
https://github.com/pandas-dev/pandas/issues/10117
|
In [27]: idx = pd.date_range("2014-10-25 22:00:00", "2014-10-26 00:30:00",
freq="30T", tz="Europe/London")
In [28]: series = pd.Series(np.random.randn(len(idx)), index=idx)
In [31]: series
Out[31]:
2014-10-25 22:00:00+01:00 -0.874014
2014-10-25 22:30:00+01:00 1.316258
2014-10-25 23:00:00+01:00 -1.334616
2014-10-25 23:30:00+01:00 -1.200390
2014-10-26 00:00:00+01:00 -0.341764
2014-10-26 00:30:00+01:00 1.509091
Freq: 30T, dtype: float64
In [29]: series.resample('30T')
---------------------------------------------------------------------------
AmbiguousTimeError Traceback (most recent call last)
<ipython-input-29-bb9e86068ce1> in <module>()
----> 1 series.resample('30T')
/usr/local/lib/python2.7/dist-packages/pandas/core/generic.pyc in resample(self, rule, how, axis, fill_method, closed, label, convention, kind, loffset, limit, base)
3195 fill_method=fill_method, convention=convention,
3196 limit=limit, base=base)
-> 3197 return sampler.resample(self).__finalize__(self)
3198
3199 def first(self, offset):
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in resample(self, obj)
83
84 if isinstance(ax, DatetimeIndex):
---> 85 rs = self._resample_timestamps()
86 elif isinstance(ax, PeriodIndex):
87 offset = to_offset(self.freq)
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _resample_timestamps(self, kind)
273 axlabels = self.ax
274
--> 275 self._get_binner_for_resample(kind=kind)
276 grouper = self.grouper
277 binner = self.binner
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_binner_for_resample(self, kind)
121 kind = self.kind
122 if kind is None or kind == 'timestamp':
--> 123 self.binner, bins, binlabels = self._get_time_bins(ax)
124 elif kind == 'timedelta':
125 self.binner, bins, binlabels = self._get_time_delta_bins(ax)
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_time_bins(self, ax)
162 first, last = ax.min(), ax.max()
163 first, last = _get_range_edges(first, last, self.freq, closed=self.closed,
--> 164 base=self.base)
165 tz = ax.tz
166 binner = labels = DatetimeIndex(freq=self.freq,
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _get_range_edges(first, last, offset, closed, base)
392 if (is_day and day_nanos % offset.nanos == 0) or not is_day:
393 return _adjust_dates_anchored(first, last, offset,
--> 394 closed=closed, base=base)
395
396 if not isinstance(offset, Tick): # and first.time() != last.time():
/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.pyc in _adjust_dates_anchored(first, last, offset, closed, base)
459
460 return (Timestamp(fresult).tz_localize(first_tzinfo),
--> 461 Timestamp(lresult).tz_localize(first_tzinfo))
462
463
pandas/tslib.pyx in pandas.tslib.Timestamp.tz_localize (pandas/tslib.c:10535)()
pandas/tslib.pyx in pandas.tslib.tz_localize_to_utc (pandas/tslib.c:50297)()
AmbiguousTimeError: Cannot infer dst time from Timestamp('2014-10-26 01:00:00'),
try using the 'ambiguous' argument
|
AmbiguousTimeError
|
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, "values", values)
categories = getattr(categories, "values", categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
|
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if not is_dtype_equal(values.dtype, categories.dtype):
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
|
https://github.com/pandas-dev/pandas/issues/22702
|
In [16]: pd.Categorical([], categories=[True, False])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-16-8e79cd310199> in <module>()
----> 1 pd.Categorical([], categories=[True, False])
~/sandbox/pandas/pandas/core/arrays/categorical.py in __init__(self, values, categories, ordered, dtype, fastpath)
426
427 else:
--> 428 codes = _get_codes_for_values(values, dtype.categories)
429
430 if null_mask.any():
~/sandbox/pandas/pandas/core/arrays/categorical.py in _get_codes_for_values(values, categories)
2449 (_, _), cats = _get_data_algo(categories, _hashtables)
2450 t = hash_klass(len(cats))
-> 2451 t.map_locations(cats)
2452 return coerce_indexer_dtype(t.lookup(vals), cats)
2453
~/sandbox/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.map_locations()
1330 raise KeyError(key)
1331
-> 1332 def map_locations(self, ndarray[object] values):
1333 cdef:
1334 Py_ssize_t i, n = len(values)
ValueError: Buffer dtype mismatch, expected 'Python object' but got 'unsigned long'
|
ValueError
|
def _bool_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
if is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype):
result = op(x, y) # when would this be hit?
else:
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
# let null fall thru
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except:
raise TypeError(
"cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]".format(
dtype=x.dtype, typ=type(y).__name__
)
)
return result
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
self, other = _align_method_SERIES(self, other, align_asobject=True)
if isinstance(other, ABCDataFrame):
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, ABCSeries):
name = get_op_result_name(self, other)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
res_values = na_op(self.values, other.values)
unfilled = self._constructor(res_values, index=self.index, name=name)
return filler(unfilled)
else:
# scalars, list, tuple, np.array
filler = (
fill_int
if is_self_int_dtype and is_integer_dtype(np.asarray(other))
else fill_bool
)
res_values = na_op(self.values, other)
unfilled = self._constructor(res_values, index=self.index)
return filler(unfilled).__finalize__(self)
return wrapper
|
def _bool_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
if isinstance(y, (np.ndarray, ABCSeries)):
if is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype):
result = op(x, y) # when would this be hit?
else:
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
# let null fall thru
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except:
raise TypeError(
"cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]".format(
dtype=x.dtype, typ=type(y).__name__
)
)
return result
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
self, other = _align_method_SERIES(self, other, align_asobject=True)
if isinstance(other, ABCDataFrame):
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, ABCSeries):
name = get_op_result_name(self, other)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
res_values = na_op(self.values, other.values)
unfilled = self._constructor(res_values, index=self.index, name=name)
return filler(unfilled)
else:
# scalars, list, tuple, np.array
filler = (
fill_int
if is_self_int_dtype and is_integer_dtype(np.asarray(other))
else fill_bool
)
res_values = na_op(self.values, other)
unfilled = self._constructor(res_values, index=self.index)
return filler(unfilled).__finalize__(self)
return wrapper
|
https://github.com/pandas-dev/pandas/issues/22092
|
ser & idx
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/ops.py", line 1481, in wrapper
res_values = na_op(self.values, other)
File "pandas/core/ops.py", line 1439, in na_op
if not isna(y):
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
if is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype):
result = op(x, y) # when would this be hit?
else:
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
# let null fall thru
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except:
raise TypeError(
"cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]".format(
dtype=x.dtype, typ=type(y).__name__
)
)
return result
|
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
if isinstance(y, (np.ndarray, ABCSeries)):
if is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype):
result = op(x, y) # when would this be hit?
else:
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
# let null fall thru
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except:
raise TypeError(
"cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]".format(
dtype=x.dtype, typ=type(y).__name__
)
)
return result
|
https://github.com/pandas-dev/pandas/issues/22092
|
ser & idx
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/ops.py", line 1481, in wrapper
res_values = na_op(self.values, other)
File "pandas/core/ops.py", line 1439, in na_op
if not isna(y):
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _apply_index_days(self, i, roll):
"""Add days portion of offset to DatetimeIndex i
Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]
Returns
-------
result : DatetimeIndex
"""
nanos = (roll % 2) * Timedelta(days=self.day_of_month).value
i += nanos.astype("timedelta64[ns]")
return i + Timedelta(days=-1)
|
def _apply_index_days(self, i, roll):
i += (roll % 2) * Timedelta(days=self.day_of_month).value
return i + Timedelta(days=-1)
|
https://github.com/pandas-dev/pandas/issues/19123
|
dti = pd.date_range('2016-01-01', periods=3, freq='D')
tdi = pd.TimedeltaIndex(['1 day', '2days', '3 days'], freq='D')
arr = np.array([1, 2, 3], dtype=np.int64)
dti + arr
DatetimeIndex(['2016-01-01', '2016-01-02', '2016-01-03'], dtype='datetime64[ns]', freq='86400000000001N')
dti - arr
DatetimeIndex(['2015-12-31 23:59:59.999999999',
'2016-01-01 23:59:59.999999998',
'2016-01-02 23:59:59.999999997'],
dtype='datetime64[ns]', freq='86399999999999N')
arr + dti
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'DatetimeIndex'
tdi + arr
TimedeltaIndex(['1 days 00:00:00.000000', '2 days 00:00:00.000000',
'3 days 00:00:00.000000'],
dtype='timedelta64[ns]', freq='86400000000001N')
arr + tdi
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'TimedeltaIndex'
tdi - arr
TimedeltaIndex(['0 days 23:59:59.999999', '1 days 23:59:59.999999',
'2 days 23:59:59.999999'],
dtype='timedelta64[ns]', freq='86399999999999N')
arr - tdi
TimedeltaIndex(['-1 days +00:00:00.000000', '-2 days +00:00:00.000000',
'-3 days +00:00:00.000000'],
dtype='timedelta64[ns]', freq='-86399999999999N')
|
TypeError
|
def _apply_index_days(self, i, roll):
"""Add days portion of offset to DatetimeIndex i
Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]
Returns
-------
result : DatetimeIndex
"""
nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value
return i + nanos.astype("timedelta64[ns]")
|
def _apply_index_days(self, i, roll):
return i + (roll % 2) * Timedelta(days=self.day_of_month - 1).value
|
https://github.com/pandas-dev/pandas/issues/19123
|
dti = pd.date_range('2016-01-01', periods=3, freq='D')
tdi = pd.TimedeltaIndex(['1 day', '2days', '3 days'], freq='D')
arr = np.array([1, 2, 3], dtype=np.int64)
dti + arr
DatetimeIndex(['2016-01-01', '2016-01-02', '2016-01-03'], dtype='datetime64[ns]', freq='86400000000001N')
dti - arr
DatetimeIndex(['2015-12-31 23:59:59.999999999',
'2016-01-01 23:59:59.999999998',
'2016-01-02 23:59:59.999999997'],
dtype='datetime64[ns]', freq='86399999999999N')
arr + dti
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'DatetimeIndex'
tdi + arr
TimedeltaIndex(['1 days 00:00:00.000000', '2 days 00:00:00.000000',
'3 days 00:00:00.000000'],
dtype='timedelta64[ns]', freq='86400000000001N')
arr + tdi
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'TimedeltaIndex'
tdi - arr
TimedeltaIndex(['0 days 23:59:59.999999', '1 days 23:59:59.999999',
'2 days 23:59:59.999999'],
dtype='timedelta64[ns]', freq='86399999999999N')
arr - tdi
TimedeltaIndex(['-1 days +00:00:00.000000', '-2 days +00:00:00.000000',
'-3 days +00:00:00.000000'],
dtype='timedelta64[ns]', freq='-86399999999999N')
|
TypeError
|
def apply_index(self, i):
if self.weekday is None:
return (i.to_period("W") + self.n).to_timestamp() + i.to_perioddelta("W")
else:
return self._end_apply_index(i)
|
def apply_index(self, i):
if self.weekday is None:
return (i.to_period("W") + self.n).to_timestamp() + i.to_perioddelta("W")
else:
return self._end_apply_index(i, self.freqstr)
|
https://github.com/pandas-dev/pandas/issues/19123
|
dti = pd.date_range('2016-01-01', periods=3, freq='D')
tdi = pd.TimedeltaIndex(['1 day', '2days', '3 days'], freq='D')
arr = np.array([1, 2, 3], dtype=np.int64)
dti + arr
DatetimeIndex(['2016-01-01', '2016-01-02', '2016-01-03'], dtype='datetime64[ns]', freq='86400000000001N')
dti - arr
DatetimeIndex(['2015-12-31 23:59:59.999999999',
'2016-01-01 23:59:59.999999998',
'2016-01-02 23:59:59.999999997'],
dtype='datetime64[ns]', freq='86399999999999N')
arr + dti
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'DatetimeIndex'
tdi + arr
TimedeltaIndex(['1 days 00:00:00.000000', '2 days 00:00:00.000000',
'3 days 00:00:00.000000'],
dtype='timedelta64[ns]', freq='86400000000001N')
arr + tdi
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'TimedeltaIndex'
tdi - arr
TimedeltaIndex(['0 days 23:59:59.999999', '1 days 23:59:59.999999',
'2 days 23:59:59.999999'],
dtype='timedelta64[ns]', freq='86399999999999N')
arr - tdi
TimedeltaIndex(['-1 days +00:00:00.000000', '-2 days +00:00:00.000000',
'-3 days +00:00:00.000000'],
dtype='timedelta64[ns]', freq='-86399999999999N')
|
TypeError
|
def __init__(self, n=1, normalize=False, week=0, weekday=0):
self.n = self._validate_n(n)
self.normalize = normalize
self.weekday = weekday
self.week = week
if self.n == 0:
raise ValueError("N cannot be 0")
if self.weekday < 0 or self.weekday > 6:
raise ValueError("Day must be 0<=day<=6, got {day}".format(day=self.weekday))
if self.week < 0 or self.week > 3:
raise ValueError("Week must be 0<=week<=3, got {week}".format(week=self.week))
self.kwds = {"weekday": weekday, "week": week}
|
def __init__(self, n=1, normalize=False, week=None, weekday=None):
self.n = self._validate_n(n)
self.normalize = normalize
self.weekday = weekday
self.week = week
if self.n == 0:
raise ValueError("N cannot be 0")
if self.weekday < 0 or self.weekday > 6:
raise ValueError("Day must be 0<=day<=6, got {day}".format(day=self.weekday))
if self.week < 0 or self.week > 3:
raise ValueError("Week must be 0<=week<=3, got {week}".format(week=self.week))
self.kwds = {"weekday": weekday, "week": week}
|
https://github.com/pandas-dev/pandas/issues/19123
|
dti = pd.date_range('2016-01-01', periods=3, freq='D')
tdi = pd.TimedeltaIndex(['1 day', '2days', '3 days'], freq='D')
arr = np.array([1, 2, 3], dtype=np.int64)
dti + arr
DatetimeIndex(['2016-01-01', '2016-01-02', '2016-01-03'], dtype='datetime64[ns]', freq='86400000000001N')
dti - arr
DatetimeIndex(['2015-12-31 23:59:59.999999999',
'2016-01-01 23:59:59.999999998',
'2016-01-02 23:59:59.999999997'],
dtype='datetime64[ns]', freq='86399999999999N')
arr + dti
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'DatetimeIndex'
tdi + arr
TimedeltaIndex(['1 days 00:00:00.000000', '2 days 00:00:00.000000',
'3 days 00:00:00.000000'],
dtype='timedelta64[ns]', freq='86400000000001N')
arr + tdi
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'TimedeltaIndex'
tdi - arr
TimedeltaIndex(['0 days 23:59:59.999999', '1 days 23:59:59.999999',
'2 days 23:59:59.999999'],
dtype='timedelta64[ns]', freq='86399999999999N')
arr - tdi
TimedeltaIndex(['-1 days +00:00:00.000000', '-2 days +00:00:00.000000',
'-3 days +00:00:00.000000'],
dtype='timedelta64[ns]', freq='-86399999999999N')
|
TypeError
|
def __init__(self, n=1, normalize=False, weekday=0):
self.n = self._validate_n(n)
self.normalize = normalize
self.weekday = weekday
if self.n == 0:
raise ValueError("N cannot be 0")
if self.weekday < 0 or self.weekday > 6:
raise ValueError("Day must be 0<=day<=6, got {day}".format(day=self.weekday))
self.kwds = {"weekday": weekday}
|
def __init__(self, n=1, normalize=False, weekday=None):
self.n = self._validate_n(n)
self.normalize = normalize
self.weekday = weekday
if self.n == 0:
raise ValueError("N cannot be 0")
if self.weekday < 0 or self.weekday > 6:
raise ValueError("Day must be 0<=day<=6, got {day}".format(day=self.weekday))
self.kwds = {"weekday": weekday}
|
https://github.com/pandas-dev/pandas/issues/19123
|
dti = pd.date_range('2016-01-01', periods=3, freq='D')
tdi = pd.TimedeltaIndex(['1 day', '2days', '3 days'], freq='D')
arr = np.array([1, 2, 3], dtype=np.int64)
dti + arr
DatetimeIndex(['2016-01-01', '2016-01-02', '2016-01-03'], dtype='datetime64[ns]', freq='86400000000001N')
dti - arr
DatetimeIndex(['2015-12-31 23:59:59.999999999',
'2016-01-01 23:59:59.999999998',
'2016-01-02 23:59:59.999999997'],
dtype='datetime64[ns]', freq='86399999999999N')
arr + dti
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'DatetimeIndex'
tdi + arr
TimedeltaIndex(['1 days 00:00:00.000000', '2 days 00:00:00.000000',
'3 days 00:00:00.000000'],
dtype='timedelta64[ns]', freq='86400000000001N')
arr + tdi
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'numpy.ndarray' and 'TimedeltaIndex'
tdi - arr
TimedeltaIndex(['0 days 23:59:59.999999', '1 days 23:59:59.999999',
'2 days 23:59:59.999999'],
dtype='timedelta64[ns]', freq='86399999999999N')
arr - tdi
TimedeltaIndex(['-1 days +00:00:00.000000', '-2 days +00:00:00.000000',
'-3 days +00:00:00.000000'],
dtype='timedelta64[ns]', freq='-86399999999999N')
|
TypeError
|
def drop_duplicates(self, subset=None, keep="first", inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
duplicated = self.duplicated(subset, keep=keep)
if inplace:
(inds,) = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
|
def drop_duplicates(self, subset=None, keep="first", inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, "inplace")
duplicated = self.duplicated(subset, keep=keep)
if inplace:
(inds,) = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
|
https://github.com/pandas-dev/pandas/issues/20516
|
pd.DataFrame().drop_duplicates()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/analytical-monk/miniconda3/lib/python3.6/site-packages/pandas/core/frame.py", line 3098, in drop_duplicates
duplicated = self.duplicated(subset, keep=keep)
File "/home/analytical-monk/miniconda3/lib/python3.6/site-packages/pandas/core/frame.py", line 3144, in duplicated
labels, shape = map(list, zip(*map(f, vals)))
ValueError: not enough values to unpack (expected 2, got 0)
|
ValueError
|
def duplicated(self, subset=None, keep="first"):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series()
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, compat.string_types)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
|
def duplicated(self, subset=None, keep="first"):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, compat.string_types)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
|
https://github.com/pandas-dev/pandas/issues/20516
|
pd.DataFrame().drop_duplicates()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/analytical-monk/miniconda3/lib/python3.6/site-packages/pandas/core/frame.py", line 3098, in drop_duplicates
duplicated = self.duplicated(subset, keep=keep)
File "/home/analytical-monk/miniconda3/lib/python3.6/site-packages/pandas/core/frame.py", line 3144, in duplicated
labels, shape = map(list, zip(*map(f, vals)))
ValueError: not enough values to unpack (expected 2, got 0)
|
ValueError
|
def __init__(
self, obj, func, broadcast, raw, reduce, result_type, ignore_failures, args, kwds
):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
if broadcast is not None:
warnings.warn(
"The broadcast argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='broadcast' to broadcast the result "
"to the original dimensions",
FutureWarning,
stacklevel=4,
)
if broadcast:
result_type = "broadcast"
if reduce is not None:
warnings.warn(
"The reduce argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='reduce' to try to reduce the result "
"to the original dimensions",
FutureWarning,
stacklevel=4,
)
if reduce:
if result_type is not None:
raise ValueError("cannot pass both reduce=True and result_type")
result_type = "reduce"
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, compat.string_types)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
|
def __init__(
self, obj, func, broadcast, raw, reduce, result_type, ignore_failures, args, kwds
):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
if broadcast is not None:
warnings.warn(
"The broadcast argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='broadcast' to broadcast the result "
"to the original dimensions",
FutureWarning,
stacklevel=4,
)
if broadcast:
result_type = "broadcast"
if reduce is not None:
warnings.warn(
"The reduce argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='reduce' to try to reduce the result "
"to the original dimensions",
FutureWarning,
stacklevel=4,
)
if reduce:
if result_type is not None:
raise ValueError("cannot pass both reduce=True and result_type")
result_type = "reduce"
self.result_type = result_type
# curry if needed
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
|
https://github.com/pandas-dev/pandas/issues/22376
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<cut>/pandas/core/frame.py", line 6173, in apply
return op.get_result()
File "<cut>/pandas/core/apply.py", line 151, in get_result
return self.apply_standard()
File "<cut>/pandas/core/apply.py", line 257, in apply_standard
self.apply_series_generator()
File "<cut>/pandas/core/apply.py", line 286, in apply_series_generator
results[i] = self.f(v)
File "<cut>/pandas-dev/pandas/core/apply.py", line 78, in f
return func(x, *args, **kwds)
TypeError: ("'str' object is not callable", 'occurred at index 0')
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.