after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return ensure_object(np.asarray(values)), "object", "object"
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype("uint64"), "bool", "uint64"
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return ensure_int64(values), "int64", "int64"
elif is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype):
return ensure_uint64(values), "uint64", "uint64"
elif is_float_dtype(values) or is_float_dtype(dtype):
return ensure_float64(values), "float64", "float64"
elif is_object_dtype(values) and dtype is None:
return ensure_object(np.asarray(values)), "object", "object"
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings(record=True):
values = ensure_float64(values)
return values, "float64", "float64"
except (TypeError, ValueError, OverflowError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), "object", "object"
# datetimelike
if (
needs_i8_conversion(values)
or is_period_dtype(dtype)
or is_datetime64_any_dtype(dtype)
or is_timedelta64_dtype(dtype)
):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, "int64"
elif is_categorical_dtype(values) and (
is_categorical_dtype(dtype) or dtype is None
):
values = getattr(values, "values", values)
values = values.codes
dtype = "category"
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = ensure_int64(values)
return values, dtype, "int64"
# we have failed, return object
values = np.asarray(values)
return ensure_object(values), "object", "object"
|
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return ensure_object(np.asarray(values)), "object", "object"
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype("uint64"), "bool", "uint64"
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return ensure_int64(values), "int64", "int64"
elif is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype):
return ensure_uint64(values), "uint64", "uint64"
elif is_float_dtype(values) or is_float_dtype(dtype):
return ensure_float64(values), "float64", "float64"
elif is_object_dtype(values) and dtype is None:
return ensure_object(np.asarray(values)), "object", "object"
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings(record=True):
values = ensure_float64(values)
return values, "float64", "float64"
except (TypeError, ValueError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), "object", "object"
# datetimelike
if (
needs_i8_conversion(values)
or is_period_dtype(dtype)
or is_datetime64_any_dtype(dtype)
or is_timedelta64_dtype(dtype)
):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, "int64"
elif is_categorical_dtype(values) and (
is_categorical_dtype(dtype) or dtype is None
):
values = getattr(values, "values", values)
values = values.codes
dtype = "category"
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = ensure_int64(values)
return values, dtype, "int64"
# we have failed, return object
values = np.asarray(values)
return ensure_object(values), "object", "object"
|
https://github.com/pandas-dev/pandas/issues/17128
|
Traceback (most recent call last):
File "run.py", line 9, in <module>
df = pd.read_csv(data, na_values=na_values, index_col=index_col)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 660, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 416, in _read
data = parser.read(nrows)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1010, in read
ret = self._engine.read(nrows)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1837, in read
index, names = self._make_index(data, alldata, names)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1347, in _make_index
index = self._agg_index(index)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1440, in _agg_index
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1524, in _infer_types
mask = algorithms.isin(values, list(na_values))
File "/home/liauys/Code/pandas/pandas/core/algorithms.py", line 408, in isin
values, _, _ = _ensure_data(values, dtype=dtype)
File "/home/liauys/Code/pandas/pandas/core/algorithms.py", line 74, in _ensure_data
return _ensure_int64(values), 'int64', 'int64'
File "pandas/_libs/algos_common_helper.pxi", line 3227, in pandas._libs.algos.ensure_int64
File "pandas/_libs/algos_common_helper.pxi", line 3232, in pandas._libs.algos.ensure_int64
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{comps_type}]".format(
comps_type=type(comps).__name__
)
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]".format(
values_type=type(values).__name__
)
)
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = construct_1d_object_array_from_listlike(list(values))
if is_categorical_dtype(comps):
# TODO(extension)
# handle categoricals
return comps._values.isin(values)
comps = com.values_from_object(comps)
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if len(comps) > 1000000 and not is_object_dtype(comps):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype("int64", copy=False)
comps = comps.astype("int64", copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError, OverflowError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype("float64", copy=False)
comps = comps.astype("float64", copy=False)
checknull = isna(values).any()
f = lambda x, y: htable.ismember_float64(x, y, checknull)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
|
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{comps_type}]".format(
comps_type=type(comps).__name__
)
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]".format(
values_type=type(values).__name__
)
)
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = construct_1d_object_array_from_listlike(list(values))
if is_categorical_dtype(comps):
# TODO(extension)
# handle categoricals
return comps._values.isin(values)
comps = com.values_from_object(comps)
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if len(comps) > 1000000 and not is_object_dtype(comps):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype("int64", copy=False)
comps = comps.astype("int64", copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype("float64", copy=False)
comps = comps.astype("float64", copy=False)
checknull = isna(values).any()
f = lambda x, y: htable.ismember_float64(x, y, checknull)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
|
https://github.com/pandas-dev/pandas/issues/17128
|
Traceback (most recent call last):
File "run.py", line 9, in <module>
df = pd.read_csv(data, na_values=na_values, index_col=index_col)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 660, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 416, in _read
data = parser.read(nrows)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1010, in read
ret = self._engine.read(nrows)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1837, in read
index, names = self._make_index(data, alldata, names)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1347, in _make_index
index = self._agg_index(index)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1440, in _agg_index
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
File "/home/liauys/Code/pandas/pandas/io/parsers.py", line 1524, in _infer_types
mask = algorithms.isin(values, list(na_values))
File "/home/liauys/Code/pandas/pandas/core/algorithms.py", line 408, in isin
values, _, _ = _ensure_data(values, dtype=dtype)
File "/home/liauys/Code/pandas/pandas/core/algorithms.py", line 74, in _ensure_data
return _ensure_int64(values), 'int64', 'int64'
File "pandas/_libs/algos_common_helper.pxi", line 3227, in pandas._libs.algos.ensure_int64
File "pandas/_libs/algos_common_helper.pxi", line 3232, in pandas._libs.algos.ensure_int64
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in favor of errors='raise|ignore'",
FutureWarning,
stacklevel=2,
)
if raise_on_error:
errors = "raise"
else:
errors = "ignore"
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com._apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
|
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in favor of errors='raise|ignore'",
FutureWarning,
stacklevel=2,
)
if raise_on_error:
errors = "raise"
else:
errors = "ignore"
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com._apply_if_callable(cond, self)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
|
https://github.com/pandas-dev/pandas/issues/21891
|
pd.Series([1, 2]).where([True, False])
# 0 1.0
# 1 NaN
# dtype: float64
pd.Series([1, 2]).mask([True, False])
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "/usr/local/lib/python3.7/site-packages/pandas/core/generic.py", line 7792, in mask
# return self.where(~cond, other=other, inplace=inplace, axis=axis,
# TypeError: bad operand type for unary ~: 'list'
|
TypeError
|
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series) and _level <= 1:
warnings.warn(
(
"using a dict on a Series for aggregation\n"
"is deprecated and will be removed in a future "
"version"
),
FutureWarning,
stacklevel=3,
)
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError(
"Function names must be unique, found multiple named %s" % name
)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if any(isinstance(x, DataFrame) for x in compat.itervalues(results)):
# let higher level handle
if _level:
return results
return DataFrame(results, columns=columns)
|
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series) and _level <= 1:
warnings.warn(
(
"using a dict on a Series for aggregation\n"
"is deprecated and will be removed in a future "
"version"
),
FutureWarning,
stacklevel=3,
)
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError(
"Function names must be unique, found multiple named %s" % name
)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if isinstance(list(compat.itervalues(results))[0], DataFrame):
# let higher level handle
if _level:
return results
return list(compat.itervalues(results))[0]
return DataFrame(results, columns=columns)
|
https://github.com/pandas-dev/pandas/issues/21716
|
In [12]: np.random.seed(1234)
In [13]: df = pd.DataFrame(np.random.rand(10, 1),
index=pd.date_range(start='2018-01-01', freq='5H', periods=10))
# This call works fine
In [14]: df.groupby(pd.Grouper(freq='D')).agg(['ohlc', 'sum'])
Out[14]:
0
ohlc sum
open high low close 0
2018-01-01 0.191519 0.785359 0.191519 0.779976 2.816690
2018-01-02 0.272593 0.958139 0.272593 0.875933 3.185001
# Switching the order of ops in agg doesn't work
In [16]: df.groupby(pd.Grouper(freq='D')).agg(['sum', 'ohlc'])
ValueError: no results
|
ValueError
|
def json_normalize(
data,
record_path=None,
meta=None,
meta_prefix=None,
record_prefix=None,
errors="raise",
sep=".",
):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([[isinstance(x, dict) for x in compat.itervalues(y)] for y in data]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, compat.string_types):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"{err} is not always present".format(err=e)
)
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result = result.rename(columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError(
"Conflicting metadata name {name}, need distinguishing prefix ".format(
name=k
)
)
result[k] = np.array(v).repeat(lengths)
return result
|
def json_normalize(
data,
record_path=None,
meta=None,
meta_prefix=None,
record_prefix=None,
errors="raise",
sep=".",
):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([[isinstance(x, dict) for x in compat.itervalues(y)] for y in data]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, compat.string_types):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"{err} is not always present".format(err=e)
)
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError(
"Conflicting metadata name {name}, need distinguishing prefix ".format(
name=k
)
)
result[k] = np.array(v).repeat(lengths)
return result
|
https://github.com/pandas-dev/pandas/issues/21536
|
Traceback (most recent call last):
File "c:\Users\levu\Desktop\tmp\json_normalize\main.py", line 3, in <module>
df = json_normalize({'A': [1, 2]}, 'A', record_prefix='Prefix.')
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 262, in json_normalize
result.rename(columns=lambda x: record_prefix + x, inplace=True)
File "C:\Python36\lib\site-packages\pandas\util\_decorators.py", line 187, in wrapper
return func(*args, **kwargs)
File "C:\Python36\lib\site-packages\pandas\core\frame.py", line 3781, in rename
return super(DataFrame, self).rename(**kwargs)
File "C:\Python36\lib\site-packages\pandas\core\generic.py", line 973, in rename
level=level)
File "C:\Python36\lib\site-packages\pandas\core\internals.py", line 3340, in rename_axis
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
File "C:\Python36\lib\site-packages\pandas\core\internals.py", line 5298, in _transform_index
items = [func(x) for x in index]
File "C:\Python36\lib\site-packages\pandas\core\internals.py", line 5298, in <listcomp>
items = [func(x) for x in index]
File "C:\Python36\lib\site-packages\pandas\io\json\normalize.py", line 262, in <lambda>
result.rename(columns=lambda x: record_prefix + x, inplace=True)
TypeError: must be str, not int
|
TypeError
|
def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be np.object.
default_dtype = dtype or np.object
dtype = defaultdict(lambda: default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: np.object)
# Convert column indexes to column names.
for k, v in compat.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
# Even though we have no data, the "index" of the empty DataFrame
# could for example still be an empty MultiIndex. Thus, we need to
# check whether we have any index columns specified, via either:
#
# 1) index_col (column indices)
# 2) index_names (column names)
#
# Both must be non-null to ensure a successful construction. Otherwise,
# we have to create a generic emtpy Index.
if (index_col is None or index_col is False) or index_names is None:
index = Index([])
else:
data = [Series([], dtype=dtype[name]) for name in index_names]
index = _ensure_index_from_sequences(data, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
col_dict = {col_name: Series([], dtype=dtype[col_name]) for col_name in columns}
return index, columns, col_dict
|
def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be np.object.
default_dtype = dtype or np.object
dtype = defaultdict(lambda: default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: np.object)
# Convert column indexes to column names.
for k, v in compat.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
if index_col is None or index_col is False:
index = Index([])
else:
data = [Series([], dtype=dtype[name]) for name in index_names]
index = _ensure_index_from_sequences(data, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
col_dict = {col_name: Series([], dtype=dtype[col_name]) for col_name in columns}
return index, columns, col_dict
|
https://github.com/pandas-dev/pandas/issues/21141
|
Traceback (most recent call last):
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/io/parsers.py", line 1848, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 876, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 916, in pandas._libs.parsers.TextReader._read_low_memory
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "pd_bug.py", line 12, in <module>
df = pd.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/io/parsers.py", line 678, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/io/parsers.py", line 446, in _read
data = parser.read(nrows)
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/io/parsers.py", line 1036, in read
ret = self._engine.read(nrows)
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/io/parsers.py", line 1855, in read
dtype=self.kwds.get('dtype'))
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/io/parsers.py", line 3215, in _get_empty_meta
data = [Series([], dtype=dtype[name]) for name in index_names]
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
try:
if obj.name != name:
name = None
except ValueError:
name = None
return name
|
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
|
https://github.com/pandas-dev/pandas/issues/21015
|
In [2]: s1 = pd.Series({'a': 1.5}, name=np.int64(190))
In [3]: s2 = pd.Series([], name=(43, 0))
In [4]: pd.concat([s1, s2])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-5f754290d56c> in <module>()
----> 1 pd.concat([s1, s2])
~/scipy/pandas/pandas/core/reshape/concat.py in concat(objs, axis, join, join_axes, ignore_index, keys, levels, names, verify_integrity, sort, copy)
224 verify_integrity=verify_integrity,
225 copy=copy, sort=sort)
--> 226 return op.get_result()
227
228
~/scipy/pandas/pandas/core/reshape/concat.py in get_result(self)
385 # stack blocks
386 if self.axis == 0:
--> 387 name = com._consensus_name_attr(self.objs)
388
389 mgr = self.objs[0]._data.concat([x._data for x in self.objs],
~/scipy/pandas/pandas/core/common.py in _consensus_name_attr(objs)
56 name = objs[0].name
57 for obj in objs[1:]:
---> 58 if obj.name != name:
59 return None
60 return name
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {"key": obj}
raise
except TypeError:
pass
except ValueError:
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == "loc":
return {"key": obj}
# a positional
if obj >= self.obj.shape[axis] and not isinstance(labels, MultiIndex):
raise ValueError("cannot set by positional indexing with enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
(inds,) = obj.nonzero()
return inds
else:
# Have the index compute an indexer or return None
# if it cannot handle
indexer, objarr = labels._convert_listlike_indexer(obj, kind=self.name)
if indexer is not None:
return indexer
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
(indexer, missing) = labels.get_indexer_non_unique(objarr)
# 'indexer' has dupes, create 'check' using 'missing'
check = np.zeros(len(objarr))
check[missing] = -1
mask = check == -1
if mask.any():
raise KeyError("{mask} not in index".format(mask=objarr[mask]))
return com._values_from_object(indexer)
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {"key": obj}
raise
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {"key": obj}
raise
except TypeError:
pass
except ValueError:
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == "loc":
return {"key": obj}
# a positional
if obj >= self.obj.shape[axis] and not isinstance(labels, MultiIndex):
raise ValueError("cannot set by positional indexing with enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
(inds,) = obj.nonzero()
return inds
else:
# Have the index compute an indexer or return None
# if it cannot handle
indexer, objarr = labels._convert_listlike_indexer(obj, kind=self.name)
if indexer is not None:
return indexer
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
(indexer, missing) = labels.get_indexer_non_unique(objarr)
# 'indexer' has dupes, create 'check' using 'missing'
check = np.zeros_like(objarr)
check[missing] = -1
mask = check == -1
if mask.any():
raise KeyError("{mask} not in index".format(mask=objarr[mask]))
return com._values_from_object(indexer)
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {"key": obj}
raise
|
https://github.com/pandas-dev/pandas/issues/17105
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-32-b0e70145e9a6> in <module>()
----> 1 df.loc[trange[bool_idx], "A"] += 1
/usr/local/lib/python3.6/site-packages/pandas/core/indexing.py in __setitem__(self, key, value)
176 else:
177 key = com._apply_if_callable(key, self.obj)
--> 178 indexer = self._get_setitem_indexer(key)
179 self._setitem_with_indexer(indexer, value)
180
/usr/local/lib/python3.6/site-packages/pandas/core/indexing.py in _get_setitem_indexer(self, key)
155 if isinstance(key, tuple):
156 try:
--> 157 return self._convert_tuple(key, is_setter=True)
158 except IndexingError:
159 pass
/usr/local/lib/python3.6/site-packages/pandas/core/indexing.py in _convert_tuple(self, key, is_setter)
222 if i >= self.obj.ndim:
223 raise IndexingError('Too many indexers')
--> 224 idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
225 keyidx.append(idx)
226 return tuple(keyidx)
/usr/local/lib/python3.6/site-packages/pandas/core/indexing.py in _convert_to_indexer(self, obj, axis, is_setter)
1228
1229 mask = check == -1
-> 1230 if mask.any():
1231 raise KeyError('%s not in index' % objarr[mask])
1232
AttributeError: 'bool' object has no attribute 'any'
|
AttributeError
|
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError(
"can only get slices from an IntervalIndex if "
"bounds are non-overlapping and all monotonic "
"increasing or decreasing"
)
if isinstance(label, IntervalMixin):
raise NotImplementedError
# GH 20921: "not is_monotonic_increasing" for the second condition
# instead of "is_monotonic_decreasing" to account for single element
# indexes being both increasing and decreasing
if (side == "left" and self.left.is_monotonic_increasing) or (
side == "right" and not self.left.is_monotonic_increasing
):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
|
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError(
"can only get slices from an IntervalIndex if "
"bounds are non-overlapping and all monotonic "
"increasing or decreasing"
)
if isinstance(label, IntervalMixin):
raise NotImplementedError
if (side == "left" and self.left.is_monotonic_increasing) or (
side == "right" and self.left.is_monotonic_decreasing
):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
|
https://github.com/pandas-dev/pandas/issues/20921
|
import pandas as pd
pd.IntervalIndex.from_tuples([(1,100)]).get_loc(50)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/imre/code/pandas/pandas/core/indexes/interval.py", line 1049, in get_loc
raise KeyError(original_key)
KeyError: 50
|
KeyError
|
def _ensure_index(index_like, copy=False):
"""
Ensure that we have an index from some index-like object
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> _ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
>>> _ensure_index([('a', 'a'), ('b', 'c')])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> _ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex(levels=[['a'], ['b', 'c']],
labels=[[0, 0], [0, 1]])
See Also
--------
_ensure_index_from_sequences
"""
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, "name"):
return Index(index_like, name=index_like.name, copy=copy)
if is_iterator(index_like):
index_like = list(index_like)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
|
def _ensure_index(index_like, copy=False):
"""
Ensure that we have an index from some index-like object
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> _ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
>>> _ensure_index([('a', 'a'), ('b', 'c')])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> _ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex(levels=[['a'], ['b', 'c']],
labels=[[0, 0], [0, 1]])
See Also
--------
_ensure_index_from_sequences
"""
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, "name"):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _has_valid_tuple(self, key):
"""check the key for valid keys across my indexer"""
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError("Too many indexers")
try:
self._validate_key(k, i)
except ValueError:
raise ValueError(
"Location based indexing can only have [{types}] types".format(
types=self._valid_types
)
)
|
def _has_valid_tuple(self, key):
"""check the key for valid keys across my indexer"""
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError("Too many indexers")
if not self._has_valid_type(k, i):
raise ValueError(
"Location based indexing can only have [{types}] types".format(
types=self._valid_types
)
)
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _multi_take(self, tup):
"""create the reindex map for our objects, raise the _exception if we
can't create the indexer
"""
try:
o = self.obj
d = {}
for key, axis in zip(tup, o._AXIS_ORDERS):
ax = o._get_axis(axis)
# Have the index compute an indexer or return None
# if it cannot handle:
indexer, keyarr = ax._convert_listlike_indexer(key, kind=self.name)
# We only act on all found values:
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(key, indexer, axis)
d[axis] = (ax[indexer], indexer)
continue
# If we are trying to get actual keys from empty Series, we
# patiently wait for a KeyError later on - otherwise, convert
if len(ax) or not len(key):
key = self._convert_for_reindex(key, axis)
indexer = ax.get_indexer_for(key)
keyarr = ax.reindex(keyarr)[0]
self._validate_read_indexer(keyarr, indexer, o._get_axis_number(axis))
d[axis] = (keyarr, indexer)
return o._reindex_with_indexers(d, copy=True, allow_dups=True)
except (KeyError, IndexingError) as detail:
raise self._exception(detail)
|
def _multi_take(self, tup):
"""create the reindex map for our objects, raise the _exception if we
can't create the indexer
"""
try:
o = self.obj
d = {
a: self._convert_for_reindex(t, axis=o._get_axis_number(a))
for t, a in zip(tup, o._AXIS_ORDERS)
}
return o.reindex(**d)
except (KeyError, IndexingError):
raise self._exception
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _convert_for_reindex(self, key, axis=None):
return key
|
def _convert_for_reindex(self, key, axis=None):
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
else:
if isinstance(key, Index):
keyarr = labels._convert_index_indexer(key)
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = com._asarray_tuplesafe(key)
if is_integer_dtype(keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = labels._convert_arr_indexer(keyarr)
if not labels.is_integer():
keyarr = _ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if is_iterator(key):
key = list(key)
self._validate_key(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif is_list_like_indexer(key) and not (
isinstance(key, tuple) and isinstance(labels, MultiIndex)
):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
else:
# maybe coerce a float scalar to integer
key = labels._maybe_cast_indexer(key)
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
|
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif is_list_like_indexer(key) and not (
isinstance(key, tuple) and isinstance(labels, MultiIndex)
):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
else:
# maybe coerce a float scalar to integer
key = labels._maybe_cast_indexer(key)
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _getitem_iterable(self, key, axis=None):
if axis is None:
axis = self.axis or 0
self._validate_key(key, axis)
labels = self.obj._get_axis(axis)
if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
(inds,) = key.nonzero()
return self.obj._take(inds, axis=axis, convert=False)
else:
# Have the index compute an indexer or return None
# if it cannot handle; we only act on all found values
indexer, keyarr = labels._convert_listlike_indexer(key, kind=self.name)
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(key, indexer, axis)
return self.obj.take(indexer, axis=axis)
ax = self.obj._get_axis(axis)
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
indexer = ax.get_indexer_for(key)
self._validate_read_indexer(key, indexer, axis)
d = {axis: [ax.reindex(keyarr)[0], indexer]}
return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)
# existing labels are non-unique
else:
# reindex with the specified axis
if axis + 1 > self.obj.ndim:
raise AssertionError("invalid indexing error with non-unique index")
new_target, indexer, new_indexer = labels._reindex_non_unique(keyarr)
if new_indexer is not None:
result = self.obj._take(
indexer[indexer != -1], axis=axis, convert=False
)
self._validate_read_indexer(key, new_indexer, axis)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]}, copy=True, allow_dups=True
)
else:
self._validate_read_indexer(key, indexer, axis)
result = self.obj._take(indexer, axis=axis)
return result
|
def _getitem_iterable(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
(inds,) = key.nonzero()
return self.obj._take(inds, axis=axis, convert=False)
else:
# Have the index compute an indexer or return None
# if it cannot handle; we only act on all found values
indexer, keyarr = labels._convert_listlike_indexer(key, kind=self.name)
if indexer is not None and (indexer != -1).all():
return self.obj.take(indexer, axis=axis)
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
try:
return self.obj.reindex(keyarr, axis=axis)
except AttributeError:
# Series
if axis != 0:
raise AssertionError("axis must be 0")
return self.obj.reindex(keyarr)
# existing labels are non-unique
else:
# reindex with the specified axis
if axis + 1 > self.obj.ndim:
raise AssertionError("invalid indexing error with non-unique index")
new_target, indexer, new_indexer = labels._reindex_non_unique(keyarr)
if new_indexer is not None:
result = self.obj._take(
indexer[indexer != -1], axis=axis, convert=False
)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]}, copy=True, allow_dups=True
)
else:
result = self.obj._take(indexer, axis=axis)
return result
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if is_iterator(key):
key = list(key)
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._validate_key(key, axis)
return self._get_slice_axis(key, axis=axis)
elif com.is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError(
"Indexing a MultiIndex with a DataFrame key is not implemented"
)
elif hasattr(key, "ndim") and key.ndim > 1:
raise NotImplementedError(
"Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented"
)
if (
not isinstance(key, tuple)
and len(key) > 1
and not isinstance(key[0], tuple)
):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._validate_key(key, axis)
return self._get_label(key, axis=axis)
|
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif com.is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError(
"Indexing a MultiIndex with a DataFrame key is not implemented"
)
elif hasattr(key, "ndim") and key.ndim > 1:
raise NotImplementedError(
"Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented"
)
if (
not isinstance(key, tuple)
and len(key) > 1
and not isinstance(key[0], tuple)
):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
key = np.asarray(key)
if com.is_bool_indexer(key):
self._validate_key(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = self._convert_scalar_indexer(key, axis)
if not is_integer(key):
raise TypeError("Cannot index by location index with a non-integer key")
# validate the location
self._validate_integer(key, axis)
return self._get_loc(key, axis=axis)
|
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
try:
key = np.asarray(key)
except TypeError: # pragma: no cover
pass
if com.is_bool_indexer(key):
self._has_valid_type(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = self._convert_scalar_indexer(key, axis)
if not is_integer(key):
raise TypeError("Cannot index by location index with a non-integer key")
# validate the location
self._is_valid_integer(key, axis)
return self._get_loc(key, axis=axis)
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
"""much simpler as we only have to deal with our valid types"""
if axis is None:
axis = self.axis or 0
# make need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
try:
self._validate_key(obj, axis)
return obj
except ValueError:
raise ValueError(
"Can only index by location with a [{types}]".format(
types=self._valid_types
)
)
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
"""much simpler as we only have to deal with our valid types"""
if axis is None:
axis = self.axis or 0
# make need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
elif self._has_valid_type(obj, axis):
return obj
raise ValueError(
"Can only index by location with a [{types}]".format(types=self._valid_types)
)
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def _reindex_with_indexers(
self,
reindexers,
method=None,
fill_value=None,
limit=None,
copy=False,
allow_dups=False,
):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit with sparse")
if fill_value is None:
fill_value = np.nan
reindexers = {
self._get_axis_number(a): val for (a, val) in compat.iteritems(reindexers)
}
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(
self[col].get_values(), row_indexer, fill_value=fill_value
)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index, columns=columns).__finalize__(
self
)
|
def _reindex_with_indexers(
self,
reindexers,
method=None,
fill_value=None,
limit=None,
copy=False,
allow_dups=False,
):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit with sparse")
if fill_value is None:
fill_value = np.nan
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(
self[col].get_values(), row_indexer, fill_value=fill_value
)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index, columns=columns).__finalize__(
self
)
|
https://github.com/pandas-dev/pandas/issues/20753
|
In [2]: pd.Series(range(3)).ix[5]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3017 try:
-> 3018 return self._engine.get_loc(key)
3019 except KeyError:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
131 try:
--> 132 return self.obj._xs(label, axis=axis)
133 except:
/home/nobackup/repo/pandas/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
2991 else:
-> 2992 loc = self.index.get_loc(key)
2993
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
3019 except KeyError:
-> 3020 return self._engine.get_loc(self._maybe_cast_indexer(key))
3021
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5729)()
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
963 else:
--> 964 raise KeyError(val)
965
KeyError: 5
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-bba00cd402c6> in <module>()
----> 1 pd.Series(range(3)).ix[5]
/home/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
118
119 key = com._apply_if_callable(key, self.obj)
--> 120 return self._getitem_axis(key, axis=axis)
121
122 def _get_label(self, label, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1104 return self._get_loc(key, axis=axis)
1105
-> 1106 return self._get_label(key, axis=axis)
1107
1108 def _getitem_iterable(self, key, axis=None):
/home/nobackup/repo/pandas/pandas/core/indexing.py in _get_label(self, label, axis)
132 return self.obj._xs(label, axis=axis)
133 except:
--> 134 return self.obj[label]
135 elif isinstance(label, tuple) and isinstance(label[axis], slice):
136 raise IndexingError('no slices here, handle elsewhere')
/home/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
753 key = com._apply_if_callable(key, self)
754 try:
--> 755 result = self.index.get_value(self, key)
756
757 if not is_scalar(result):
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in get_value(self, series, key)
3051 try:
3052 return self._engine.get_value(s, k,
-> 3053 tz=getattr(series.dtype, 'tz', None))
3054 except KeyError as e1:
3055 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4856)()
103 return val in self.mapping
104
--> 105 cpdef get_value(self, ndarray arr, object key, object tz=None):
106 """
107 arr : 1-dimensional ndarray
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_value (pandas/_libs/index.c:4539)()
111 void* data_ptr
112
--> 113 loc = self.get_loc(key)
114 if PySlice_Check(loc) or cnp.PyArray_Check(loc):
115 return arr[loc]
/home/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc (pandas/_libs/index.c:5575)()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15398)()
956 sizeof(uint32_t)) # flags
957
--> 958 cpdef get_item(self, int64_t val):
959 cdef khiter_t k
960 k = kh_get_int64(self.table, val)
/home/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item (pandas/_libs/hashtable.c:15342)()
962 return self.table.vals[k]
963 else:
--> 964 raise KeyError(val)
965
966 cpdef set_item(self, int64_t key, Py_ssize_t val):
KeyError: 5
In [3]: pd.Series(range(3)).ix[[5]]
/usr/bin/ipython3:1: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
#! /bin/sh
Out[3]:
5 NaN
dtype: float64
|
KeyError
|
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = u("Empty {name}\nColumns: {col}\nIndex: {idx}").format(
name=type(self.frame).__name__, col=self.frame.columns, idx=self.frame.index
)
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
else:
return "l"
# reestablish the MultiIndex that has been joined by _to_str_column
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
out = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in out]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = "".join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = "l" * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format, compat.string_types): # pragma: no cover
raise AssertionError(
"column_format must be str or unicode, not {typ}".format(
typ=type(column_format)
)
)
if not self.longtable:
buf.write("\\begin{{tabular}}{{{fmt}}}\n".format(fmt=column_format))
buf.write("\\toprule\n")
else:
buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format))
buf.write("\\toprule\n")
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write("\\midrule\n") # End of header
if self.longtable:
buf.write("\\endhead\n")
buf.write("\\midrule\n")
buf.write(
"\\multicolumn{{{n}}}{{r}}{{{{Continued on next "
"page}}}} \\\\\n".format(n=len(row))
)
buf.write("\\midrule\n")
buf.write("\\endfoot\n\n")
buf.write("\\bottomrule\n")
buf.write("\\endlastfoot\n")
if self.fmt.kwds.get("escape", True):
# escape backslashes first
crow = [
(
x.replace("\\", "\\textbackslash")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde")
.replace("^", "\\textasciicircum")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
else:
crow = [x if x else "{}" for x in row]
if self.bold_rows and self.fmt.index:
# bold row labels
crow = [
"\\textbf{{{x}}}".format(x=x)
if j < ilevels and x.strip() not in ["", "{}"]
else x
for j, x in enumerate(crow)
]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if i >= nlevels and self.fmt.index and self.multirow and ilevels > 1:
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(" & ".join(crow))
buf.write(" \\\\\n")
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write("\\bottomrule\n")
buf.write("\\end{tabular}\n")
else:
buf.write("\\end{longtable}\n")
|
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = u("Empty {name}\nColumns: {col}\nIndex: {idx}").format(
name=type(self.frame).__name__, col=self.frame.columns, idx=self.frame.index
)
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
else:
return "l"
# reestablish the MultiIndex that has been joined by _to_str_column
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.index.names)
cname = any(self.frame.columns.names)
lastcol = self.frame.index.nlevels - 1
previous_lev3 = None
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format()
blank = " " * len(lev2[0])
# display column names in last index-column
if cname and i == lastcol:
lev3 = [x if x else "{}" for x in self.frame.columns.names]
else:
lev3 = [blank] * clevels
if name:
lev3.append(lev.name)
current_idx_val = None
for level_idx in self.frame.index.labels[i]:
if (
previous_lev3 is None or previous_lev3[len(lev3)].isspace()
) and lev2[level_idx] == current_idx_val:
# same index as above row and left index was the same
lev3.append(blank)
else:
# different value than above or left index different
lev3.append(lev2[level_idx])
current_idx_val = lev2[level_idx]
strcols.insert(i, lev3)
previous_lev3 = lev3
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = "".join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = "l" * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format, compat.string_types): # pragma: no cover
raise AssertionError(
"column_format must be str or unicode, not {typ}".format(
typ=type(column_format)
)
)
if not self.longtable:
buf.write("\\begin{{tabular}}{{{fmt}}}\n".format(fmt=column_format))
buf.write("\\toprule\n")
else:
buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format))
buf.write("\\toprule\n")
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if any(self.frame.index.names):
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write("\\midrule\n") # End of header
if self.longtable:
buf.write("\\endhead\n")
buf.write("\\midrule\n")
buf.write(
"\\multicolumn{{{n}}}{{r}}{{{{Continued on next "
"page}}}} \\\\\n".format(n=len(row))
)
buf.write("\\midrule\n")
buf.write("\\endfoot\n\n")
buf.write("\\bottomrule\n")
buf.write("\\endlastfoot\n")
if self.fmt.kwds.get("escape", True):
# escape backslashes first
crow = [
(
x.replace("\\", "\\textbackslash")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde")
.replace("^", "\\textasciicircum")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
else:
crow = [x if x else "{}" for x in row]
if self.bold_rows and self.fmt.index:
# bold row labels
crow = [
"\\textbf{{{x}}}".format(x=x)
if j < ilevels and x.strip() not in ["", "{}"]
else x
for j, x in enumerate(crow)
]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if i >= nlevels and self.fmt.index and self.multirow and ilevels > 1:
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(" & ".join(crow))
buf.write(" \\\\\n")
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write("\\bottomrule\n")
buf.write("\\end{tabular}\n")
else:
buf.write("\\end{longtable}\n")
|
https://github.com/pandas-dev/pandas/issues/14249
|
test.set_index(['robustness', 'value']).to_latex()
Traceback (most recent call last):
File "/usr/local/anaconda2/envs/myenv2/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2885, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-35-901bbd119ed6>", line 1, in <module>
test.set_index(['robustness', 'value']).to_latex()
File "/usr/local/anaconda2/envs/myenv2/lib/python2.7/site-packages/pandas/core/frame.py", line 1601, in to_latex
encoding=encoding)
File "/usr/local/anaconda2/envs/myenv2/lib/python2.7/site-packages/pandas/core/format.py", line 638, in to_latex
latex_renderer.write_result(self.buf)
File "/usr/local/anaconda2/envs/myenv2/lib/python2.7/site-packages/pandas/core/format.py", line 824, in write_result
blank = ' ' * len(lev2[0])
IndexError: list index out of range
|
IndexError
|
def str_get(arr, i):
"""
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Integer index (location)
Returns
-------
items : Series/Index of objects
"""
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
return _na_map(f, arr)
|
def str_get(arr, i):
"""
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Integer index (location)
Returns
-------
items : Series/Index of objects
"""
f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan
return _na_map(f, arr)
|
https://github.com/pandas-dev/pandas/issues/20671
|
s = pandas.Series([{0: 'a', 1: 'b'}])
s
0 {0: 'a', 1: 'b'}
dtype: object
s.str.get(-1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 1556, in get
result = str_get(self._data, i)
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 1264, in str_get
return _na_map(f, arr)
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 156, in _na_map
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 171, in _map
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
File "pandas/_libs/src/inference.pyx", line 1482, in pandas._libs.lib.map_infer_mask
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 1263, in <lambda>
f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan
KeyError: -1
|
KeyError
|
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
|
def f(x):
if x[start:stop] == "":
local_stop = start
else:
local_stop = stop
y = ""
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
|
https://github.com/pandas-dev/pandas/issues/20671
|
s = pandas.Series([{0: 'a', 1: 'b'}])
s
0 {0: 'a', 1: 'b'}
dtype: object
s.str.get(-1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 1556, in get
result = str_get(self._data, i)
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 1264, in str_get
return _na_map(f, arr)
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 156, in _na_map
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 171, in _map
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
File "pandas/_libs/src/inference.pyx", line 1482, in pandas._libs.lib.map_infer_mask
File "/home/mgarcia/.anaconda3/lib/python3.6/site-packages/pandas/core/strings.py", line 1263, in <lambda>
f = lambda x: x[i] if len(x) > i >= -len(x) else np.nan
KeyError: -1
|
KeyError
|
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g
for g in self._handle.walk_nodes()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != u("table"))
)
)
]
|
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g
for g in self._handle.walk_nodes()
if (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != u("table"))
)
]
|
https://github.com/pandas-dev/pandas/issues/20523
|
#! /path/to/python3.6
import pandas as pd
df = pd.DataFrame({ "a": [1], "b": [2] })
print(df.to_string())
hdf = pd.HDFStore("/tmp/test.hdf", mode="w")
hdf.put("/test/key", df)
#Brittle
hdf._handle.create_soft_link(hdf._handle.root.test, "symlink", "/test/key")
hdf.close()
print("Successful write")
hdf = pd.HDFStore("/tmp/test.hdf", mode="r")
'''
Traceback (most recent call last):
File "snippet.py", line 31, in <module>
print(hdf.keys())
File "python3.6.3/lib/python3.6/site-packages/pandas/io/pytables.py", line 529, in keys
return [n._v_pathname for n in self.groups()]
File "python3.6.3/lib/python3.6/site-packages/pandas/io/pytables.py", line 1077, in groups
g for g in self._handle.walk_nodes()
File "python3.6.3/lib/python3.6/site-packages/pandas/io/pytables.py", line 1078, in <listcomp>
if (getattr(g._v_attrs, 'pandas_type', None) or
File "python3.6.3/lib/python3.6/site-packages/tables/link.py", line 79, in __getattr__
"`%s` instance" % self.__class__.__name__)
KeyError: 'you cannot get attributes from this `NoAttrs` instance'
'''
print(hdf.keys()) #causes exception
hdf.close()
print("Successful read")
|
KeyError
|
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
names = kwargs.pop("names", kwargs.pop("name", self.names))
# discards freq
kwargs.pop("freq", None)
return MultiIndex.from_tuples(values, names=names, **kwargs)
return self.view()
|
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
if "name" in kwargs:
kwargs["names"] = kwargs.pop("name", None)
# discards freq
kwargs.pop("freq", None)
return MultiIndex.from_tuples(values, **kwargs)
return self.view()
|
https://github.com/pandas-dev/pandas/issues/20308
|
import pandas as pd
import numpy as np
x = pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=['a', 'b'])
y = x[np.array([False, False])] # multiindex y is now empty
y.unique()
-->
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-1-290d71fd5bc3> in <module>()
3 x = pd.MultiIndex.from_tuples([(1, 2), (3, 4)]. names=['a', 'b'])
4 y = x[np.array([False, False])]
----> 5 y.unique()
C:\projects\pandas-dk\pandas\core\indexes\multi.py in unique(self, level)
1072
1073 if level is None:
-> 1074 return super(MultiIndex, self).unique()
1075 else:
1076 level = self._get_level_number(level)
C:\projects\pandas-dk\pandas\core\indexes\base.py in unique(self, level)
3971 self._validate_index_level(level)
3972 result = super(Index, self).unique()
-> 3973 return self._shallow_copy(result)
3974
3975 @Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs)
C:\projects\pandas-dk\pandas\core\indexes\multi.py in _shallow_copy(self, values, **kwargs)
559 # discards freq
560 kwargs.pop('freq', None)
--> 561 return MultiIndex.from_tuples(values, **kwargs)
562 return self.view()
563
C:\projects\pandas-dk\pandas\core\indexes\multi.py in from_tuples(cls, tuples, sortorder, names)
1316 if names is None:
1317 msg = 'Cannot infer number of levels from empty list'
-> 1318 raise TypeError(msg)
1319 arrays = [[]] * len(names)
1320 elif isinstance(tuples, (np.ndarray, Index)):
TypeError: Cannot infer number of levels from empty list
|
TypeError
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = maybe_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
# that Categorical is the only array type for 'category'.
subarr = Categorical(arr, dtype.categories, ordered=dtype.ordered)
elif is_extension_array_dtype(dtype):
# We don't allow casting to third party dtypes, since we don't
# know what array belongs to which type.
msg = (
"Cannot cast data to extension dtype '{}'. "
"Pass the extension array directly.".format(dtype)
)
raise ValueError(msg)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isna(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=copy)
else:
# we will try to copy be-definition here
subarr = _try_cast(data, True)
elif isinstance(data, ExtensionArray):
subarr = data
if dtype is not None and not data.dtype.is_dtype(dtype):
msg = (
"Cannot coerce extension array to dtype '{typ}'. "
"Do the coercion before passing to the constructor "
"instead.".format(typ=dtype)
)
raise ValueError(msg)
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH 16804
start, stop, step = get_range_parameters(data)
arr = np.arange(start, stop, step, dtype="int64")
subarr = _try_cast(arr, False)
else:
subarr = _try_cast(data, False)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = com._asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
# GH 16605
# If not empty convert the data to dtype
# GH 19853: If data is a scalar, subarr has already the result
if not is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = maybe_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
# that Categorical is the only array type for 'category'.
subarr = Categorical(arr, dtype.categories, ordered=dtype.ordered)
elif is_extension_array_dtype(dtype):
# We don't allow casting to third party dtypes, since we don't
# know what array belongs to which type.
msg = (
"Cannot cast data to extension dtype '{}'. "
"Pass the extension array directly.".format(dtype)
)
raise ValueError(msg)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isna(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=copy)
else:
# we will try to copy be-definition here
subarr = _try_cast(data, True)
elif isinstance(data, ExtensionArray):
subarr = data
if dtype is not None and not data.dtype.is_dtype(dtype):
msg = (
"Cannot coerce extension array to dtype '{typ}'. "
"Do the coercion before passing to the constructor "
"instead.".format(typ=dtype)
)
raise ValueError(msg)
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH 16804
start, stop, step = get_range_parameters(data)
arr = np.arange(start, stop, step, dtype="int64")
subarr = _try_cast(arr, False)
else:
subarr = _try_cast(data, False)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = com._asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
# GH 16605
# If not empty convert the data to dtype
if not isna(data).all():
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
https://github.com/pandas-dev/pandas/issues/19853
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\james\AppData\Local\Programs\Python\Python36-32\lib\site-packages\pandas\core\series.py", line 266, in __init__
data = SingleBlockManager(data, index, fastpath=True)
File "C:\Users\james\AppData\Local\Programs\Python\Python36-32\lib\site-packages\pandas\core\internals.py", line 4402, in __init__
fastpath=True)
File "C:\Users\james\AppData\Local\Programs\Python\Python36-32\lib\site-packages\pandas\core\internals.py", line 2957, in make_block
return klass(values, ndim=ndim, fastpath=fastpath, placement=placement)
File "C:\Users\james\AppData\Local\Programs\Python\Python36-32\lib\site-packages\pandas\core\internals.py", line 2082, in __init__
placement=placement, **kwargs)
File "C:\Users\james\AppData\Local\Programs\Python\Python36-32\lib\site-packages\pandas\core\internals.py", line 111, in __init__
raise ValueError('Wrong number of dimensions')
ValueError: Wrong number of dimensions
|
ValueError
|
def melt(
frame,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
# TODO: what about the existing index?
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list):
raise ValueError(
"id_vars must be a list of tuples when columns are a MultiIndex"
)
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(
value_vars, list
):
raise ValueError(
"value_vars must be a list of tuples when columns are a MultiIndex"
)
else:
value_vars = list(value_vars)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, ABCMultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = [
"variable_{i}".format(i=i) for i in range(len(frame.columns.names))
]
else:
var_name = [
frame.columns.name if frame.columns.name is not None else "variable"
]
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
id_data = frame.pop(col)
if is_extension_type(id_data):
id_data = concat([id_data] * K, ignore_index=True)
else:
id_data = np.tile(id_data.values, K)
mdata[col] = id_data
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel("F")
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N)
return frame._constructor(mdata, columns=mcolumns)
|
def melt(
frame,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
# TODO: what about the existing index?
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list):
raise ValueError(
"id_vars must be a list of tuples when columns are a MultiIndex"
)
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(
value_vars, list
):
raise ValueError(
"value_vars must be a list of tuples when columns are a MultiIndex"
)
else:
value_vars = list(value_vars)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, ABCMultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = [
"variable_{i}".format(i=i) for i in range(len(frame.columns.names))
]
else:
var_name = [
frame.columns.name if frame.columns.name is not None else "variable"
]
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel("F")
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N)
return frame._constructor(mdata, columns=mcolumns)
|
https://github.com/pandas-dev/pandas/issues/15785
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.5/dist-packages/pandas/core/indexing.py", line 140, in __setitem__
indexer = self._get_setitem_indexer(key)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/indexing.py", line 127, in _get_setitem_indexer
return self._convert_to_indexer(key, is_setter=True)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/indexing.py", line 1230, in _convert_to_indexer
raise KeyError('%s not in index' % objarr[mask])
KeyError: "MultiIndex(levels=[[2017-03-23 07:22:42.163378, 2017-03-23 07:22:42.173378, 2017-03-23 07:22:42.173578, 2017-03-23 07:22:42.178378, 2017-03-23 07:22:42.178578], [0, 1, 2, 3, 4]],\n labels=[[3, 0], [3, 4]],\n names=['ts', 'klass']) not in index"
|
KeyError
|
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {
"typ": "range_index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"start": getattr(obj, "_start", None),
"stop": getattr(obj, "_stop", None),
"step": getattr(obj, "_step", None),
}
elif isinstance(obj, PeriodIndex):
return {
"typ": "period_index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"freq": u_safe(getattr(obj, "freqstr", None)),
"dtype": u(obj.dtype.name),
"data": convert(obj.asi8),
"compress": compressor,
}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, "tz", None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert("UTC")
return {
"typ": "datetime_index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"dtype": u(obj.dtype.name),
"data": convert(obj.asi8),
"freq": u_safe(getattr(obj, "freqstr", None)),
"tz": tz,
"compress": compressor,
}
elif isinstance(obj, IntervalIndex):
return {
"typ": "interval_index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"left": getattr(obj, "_left", None),
"right": getattr(obj, "_right", None),
"closed": getattr(obj, "_closed", None),
}
elif isinstance(obj, MultiIndex):
return {
"typ": "multi_index",
"klass": u(obj.__class__.__name__),
"names": getattr(obj, "names", None),
"dtype": u(obj.dtype.name),
"data": convert(obj.values),
"compress": compressor,
}
else:
return {
"typ": "index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"dtype": u(obj.dtype.name),
"data": convert(obj.values),
"compress": compressor,
}
elif isinstance(obj, Categorical):
return {
"typ": "category",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"codes": obj.codes,
"categories": obj.categories,
"ordered": obj.ordered,
"compress": compressor,
}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError("msgpack sparse series is not implemented")
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {
"typ": "series",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"index": obj.index,
"dtype": u(obj.dtype.name),
"data": convert(obj.values),
"compress": compressor,
}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError("msgpack sparse frame is not implemented")
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {
"typ": "block_manager",
"klass": u(obj.__class__.__name__),
"axes": data.axes,
"blocks": [
{
"locs": b.mgr_locs.as_array,
"values": convert(b.values),
"shape": b.values.shape,
"dtype": u(b.dtype.name),
"klass": u(b.__class__.__name__),
"compress": compressor,
}
for b in data.blocks
],
}
elif (
isinstance(obj, (datetime, date, np.datetime64, timedelta, np.timedelta64))
or obj is NaT
):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
freq = obj.freq
if freq is not None:
freq = u(freq.freqstr)
return {"typ": "timestamp", "value": obj.value, "freq": freq, "tz": tz}
if obj is NaT:
return {"typ": "nat"}
elif isinstance(obj, np.timedelta64):
return {"typ": "timedelta64", "data": obj.view("i8")}
elif isinstance(obj, timedelta):
return {
"typ": "timedelta",
"data": (obj.days, obj.seconds, obj.microseconds),
}
elif isinstance(obj, np.datetime64):
return {"typ": "datetime64", "data": u(str(obj))}
elif isinstance(obj, datetime):
return {"typ": "datetime", "data": u(obj.isoformat())}
elif isinstance(obj, date):
return {"typ": "date", "data": u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {"typ": "period", "ordinal": obj.ordinal, "freq": u_safe(obj.freqstr)}
elif isinstance(obj, Interval):
return {
"typ": "interval",
"left": obj.left,
"right": obj.right,
"closed": obj.closed,
}
elif isinstance(obj, BlockIndex):
return {
"typ": "block_index",
"klass": u(obj.__class__.__name__),
"blocs": obj.blocs,
"blengths": obj.blengths,
"length": obj.length,
}
elif isinstance(obj, IntIndex):
return {
"typ": "int_index",
"klass": u(obj.__class__.__name__),
"indices": obj.indices,
"length": obj.length,
}
elif isinstance(obj, np.ndarray):
return {
"typ": "ndarray",
"shape": obj.shape,
"ndim": obj.ndim,
"dtype": u(obj.dtype.name),
"data": convert(obj),
"compress": compressor,
}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {
"typ": "np_scalar",
"sub_typ": "np_complex",
"dtype": u(obj.dtype.name),
"real": u(obj.real.__repr__()),
"imag": u(obj.imag.__repr__()),
}
else:
return {
"typ": "np_scalar",
"dtype": u(obj.dtype.name),
"data": u(obj.__repr__()),
}
elif isinstance(obj, complex):
return {
"typ": "np_complex",
"real": u(obj.real.__repr__()),
"imag": u(obj.imag.__repr__()),
}
return obj
|
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {
"typ": "range_index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"start": getattr(obj, "_start", None),
"stop": getattr(obj, "_stop", None),
"step": getattr(obj, "_step", None),
}
elif isinstance(obj, PeriodIndex):
return {
"typ": "period_index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"freq": u_safe(getattr(obj, "freqstr", None)),
"dtype": u(obj.dtype.name),
"data": convert(obj.asi8),
"compress": compressor,
}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, "tz", None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert("UTC")
return {
"typ": "datetime_index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"dtype": u(obj.dtype.name),
"data": convert(obj.asi8),
"freq": u_safe(getattr(obj, "freqstr", None)),
"tz": tz,
"compress": compressor,
}
elif isinstance(obj, MultiIndex):
return {
"typ": "multi_index",
"klass": u(obj.__class__.__name__),
"names": getattr(obj, "names", None),
"dtype": u(obj.dtype.name),
"data": convert(obj.values),
"compress": compressor,
}
else:
return {
"typ": "index",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"dtype": u(obj.dtype.name),
"data": convert(obj.values),
"compress": compressor,
}
elif isinstance(obj, Categorical):
return {
"typ": "category",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"codes": obj.codes,
"categories": obj.categories,
"ordered": obj.ordered,
"compress": compressor,
}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError("msgpack sparse series is not implemented")
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {
"typ": "series",
"klass": u(obj.__class__.__name__),
"name": getattr(obj, "name", None),
"index": obj.index,
"dtype": u(obj.dtype.name),
"data": convert(obj.values),
"compress": compressor,
}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError("msgpack sparse frame is not implemented")
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {
"typ": "block_manager",
"klass": u(obj.__class__.__name__),
"axes": data.axes,
"blocks": [
{
"locs": b.mgr_locs.as_array,
"values": convert(b.values),
"shape": b.values.shape,
"dtype": u(b.dtype.name),
"klass": u(b.__class__.__name__),
"compress": compressor,
}
for b in data.blocks
],
}
elif (
isinstance(obj, (datetime, date, np.datetime64, timedelta, np.timedelta64))
or obj is NaT
):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
freq = obj.freq
if freq is not None:
freq = u(freq.freqstr)
return {"typ": "timestamp", "value": obj.value, "freq": freq, "tz": tz}
if obj is NaT:
return {"typ": "nat"}
elif isinstance(obj, np.timedelta64):
return {"typ": "timedelta64", "data": obj.view("i8")}
elif isinstance(obj, timedelta):
return {
"typ": "timedelta",
"data": (obj.days, obj.seconds, obj.microseconds),
}
elif isinstance(obj, np.datetime64):
return {"typ": "datetime64", "data": u(str(obj))}
elif isinstance(obj, datetime):
return {"typ": "datetime", "data": u(obj.isoformat())}
elif isinstance(obj, date):
return {"typ": "date", "data": u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {"typ": "period", "ordinal": obj.ordinal, "freq": u(obj.freq)}
elif isinstance(obj, BlockIndex):
return {
"typ": "block_index",
"klass": u(obj.__class__.__name__),
"blocs": obj.blocs,
"blengths": obj.blengths,
"length": obj.length,
}
elif isinstance(obj, IntIndex):
return {
"typ": "int_index",
"klass": u(obj.__class__.__name__),
"indices": obj.indices,
"length": obj.length,
}
elif isinstance(obj, np.ndarray):
return {
"typ": "ndarray",
"shape": obj.shape,
"ndim": obj.ndim,
"dtype": u(obj.dtype.name),
"data": convert(obj),
"compress": compressor,
}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {
"typ": "np_scalar",
"sub_typ": "np_complex",
"dtype": u(obj.dtype.name),
"real": u(obj.real.__repr__()),
"imag": u(obj.imag.__repr__()),
}
else:
return {
"typ": "np_scalar",
"dtype": u(obj.dtype.name),
"data": u(obj.__repr__()),
}
elif isinstance(obj, complex):
return {
"typ": "np_complex",
"real": u(obj.real.__repr__()),
"imag": u(obj.imag.__repr__()),
}
return obj
|
https://github.com/pandas-dev/pandas/issues/13463
|
import pandas as pd
pd.DataFrame({'C': pd.period_range('2015-01-01', periods=3)}).to_msgpack()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-65-8e5ad9cd6036> in <module>()
----> 1 pd.DataFrame({'C': pd.period_range('2015-01-01', periods=3)}).to_msgpack()
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/core/generic.pyc in to_msgpack(self, path_or_buf, encoding, **kwargs)
1120 from pandas.io import packers
1121 return packers.to_msgpack(path_or_buf, self, encoding=encoding,
-> 1122 **kwargs)
1123
1124 def to_sql(self, name, con, flavor='sqlite', schema=None, if_exists='fail',
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in to_msgpack(path_or_buf, *args, **kwargs)
152 elif path_or_buf is None:
153 buf = compat.BytesIO()
--> 154 writer(buf)
155 return buf.getvalue()
156 else:
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in writer(fh)
145 def writer(fh):
146 for a in args:
--> 147 fh.write(pack(a, **kwargs))
148
149 if isinstance(path_or_buf, compat.string_types):
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in pack(o, default, encoding, unicode_errors, use_single_float, autoreset, use_bin_type)
699 use_single_float=use_single_float,
700 autoreset=autoreset,
--> 701 use_bin_type=use_bin_type).pack(o)
702
703
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer.pack (pandas/msgpack/_packer.cpp:3434)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer.pack (pandas/msgpack/_packer.cpp:3276)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:2433)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:3006)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:2433)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:3006)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:3088)()
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in encode(obj)
510 return {u'typ': u'period',
511 u'ordinal': obj.ordinal,
--> 512 u'freq': u(obj.freq)}
513 elif isinstance(obj, BlockIndex):
514 return {u'typ': u'block_index',
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/compat/__init__.pyc in u(s)
262
263 def u(s):
--> 264 return unicode(s, "unicode_escape")
265
266 def u_safe(s):
TypeError: coercing to Unicode: need string or buffer, Day found
|
TypeError
|
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get("typ")
if typ is None:
return obj
elif typ == "timestamp":
freq = obj["freq"] if "freq" in obj else obj["offset"]
return Timestamp(obj["value"], tz=obj["tz"], freq=freq)
elif typ == "nat":
return NaT
elif typ == "period":
return Period(ordinal=obj["ordinal"], freq=obj["freq"])
elif typ == "index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
return globals()[obj["klass"]](data, dtype=dtype, name=obj["name"])
elif typ == "range_index":
return globals()[obj["klass"]](
obj["start"], obj["stop"], obj["step"], name=obj["name"]
)
elif typ == "multi_index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
data = [tuple(x) for x in data]
return globals()[obj["klass"]].from_tuples(data, names=obj["names"])
elif typ == "period_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
return globals()[obj["klass"]]._from_ordinals(data, **d)
elif typ == "datetime_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"], verify_integrity=False)
result = globals()[obj["klass"]](data, **d)
tz = obj["tz"]
# reverse tz conversion
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
elif typ == "interval_index":
return globals()[obj["klass"]].from_arrays(
obj["left"], obj["right"], obj["closed"], name=obj["name"]
)
elif typ == "category":
from_codes = globals()[obj["klass"]].from_codes
return from_codes(
codes=obj["codes"], categories=obj["categories"], ordered=obj["ordered"]
)
elif typ == "interval":
return Interval(obj["left"], obj["right"], obj["closed"])
elif typ == "series":
dtype = dtype_for(obj["dtype"])
pd_dtype = pandas_dtype(dtype)
index = obj["index"]
result = globals()[obj["klass"]](
unconvert(obj["data"], dtype, obj["compress"]),
index=index,
dtype=pd_dtype,
name=obj["name"],
)
return result
elif typ == "block_manager":
axes = obj["axes"]
def create_block(b):
values = _safe_reshape(
unconvert(b["values"], dtype_for(b["dtype"]), b["compress"]), b["shape"]
)
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if "locs" in b:
placement = b["locs"]
else:
placement = axes[0].get_indexer(b["items"])
return make_block(
values=values,
klass=getattr(internals, b["klass"]),
placement=placement,
dtype=b["dtype"],
)
blocks = [create_block(b) for b in obj["blocks"]]
return globals()[obj["klass"]](BlockManager(blocks, axes))
elif typ == "datetime":
return parse(obj["data"])
elif typ == "datetime64":
return np.datetime64(parse(obj["data"]))
elif typ == "date":
return parse(obj["data"]).date()
elif typ == "timedelta":
return timedelta(*obj["data"])
elif typ == "timedelta64":
return np.timedelta64(int(obj["data"]))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == "block_index":
return globals()[obj["klass"]](obj["length"], obj["blocs"], obj["blengths"])
elif typ == "int_index":
return globals()[obj["klass"]](obj["length"], obj["indices"])
elif typ == "ndarray":
return unconvert(
obj["data"], np.typeDict[obj["dtype"]], obj.get("compress")
).reshape(obj["shape"])
elif typ == "np_scalar":
if obj.get("sub_typ") == "np_complex":
return c2f(obj["real"], obj["imag"], obj["dtype"])
else:
dtype = dtype_for(obj["dtype"])
try:
return dtype(obj["data"])
except:
return dtype.type(obj["data"])
elif typ == "np_complex":
return complex(obj["real"] + "+" + obj["imag"] + "j")
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
|
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get("typ")
if typ is None:
return obj
elif typ == "timestamp":
freq = obj["freq"] if "freq" in obj else obj["offset"]
return Timestamp(obj["value"], tz=obj["tz"], freq=freq)
elif typ == "nat":
return NaT
elif typ == "period":
return Period(ordinal=obj["ordinal"], freq=obj["freq"])
elif typ == "index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
return globals()[obj["klass"]](data, dtype=dtype, name=obj["name"])
elif typ == "range_index":
return globals()[obj["klass"]](
obj["start"], obj["stop"], obj["step"], name=obj["name"]
)
elif typ == "multi_index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
data = [tuple(x) for x in data]
return globals()[obj["klass"]].from_tuples(data, names=obj["names"])
elif typ == "period_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
return globals()[obj["klass"]]._from_ordinals(data, **d)
elif typ == "datetime_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"], verify_integrity=False)
result = globals()[obj["klass"]](data, **d)
tz = obj["tz"]
# reverse tz conversion
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
elif typ == "category":
from_codes = globals()[obj["klass"]].from_codes
return from_codes(
codes=obj["codes"], categories=obj["categories"], ordered=obj["ordered"]
)
elif typ == "series":
dtype = dtype_for(obj["dtype"])
pd_dtype = pandas_dtype(dtype)
index = obj["index"]
result = globals()[obj["klass"]](
unconvert(obj["data"], dtype, obj["compress"]),
index=index,
dtype=pd_dtype,
name=obj["name"],
)
return result
elif typ == "block_manager":
axes = obj["axes"]
def create_block(b):
values = _safe_reshape(
unconvert(b["values"], dtype_for(b["dtype"]), b["compress"]), b["shape"]
)
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if "locs" in b:
placement = b["locs"]
else:
placement = axes[0].get_indexer(b["items"])
return make_block(
values=values,
klass=getattr(internals, b["klass"]),
placement=placement,
dtype=b["dtype"],
)
blocks = [create_block(b) for b in obj["blocks"]]
return globals()[obj["klass"]](BlockManager(blocks, axes))
elif typ == "datetime":
return parse(obj["data"])
elif typ == "datetime64":
return np.datetime64(parse(obj["data"]))
elif typ == "date":
return parse(obj["data"]).date()
elif typ == "timedelta":
return timedelta(*obj["data"])
elif typ == "timedelta64":
return np.timedelta64(int(obj["data"]))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == "block_index":
return globals()[obj["klass"]](obj["length"], obj["blocs"], obj["blengths"])
elif typ == "int_index":
return globals()[obj["klass"]](obj["length"], obj["indices"])
elif typ == "ndarray":
return unconvert(
obj["data"], np.typeDict[obj["dtype"]], obj.get("compress")
).reshape(obj["shape"])
elif typ == "np_scalar":
if obj.get("sub_typ") == "np_complex":
return c2f(obj["real"], obj["imag"], obj["dtype"])
else:
dtype = dtype_for(obj["dtype"])
try:
return dtype(obj["data"])
except:
return dtype.type(obj["data"])
elif typ == "np_complex":
return complex(obj["real"] + "+" + obj["imag"] + "j")
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
|
https://github.com/pandas-dev/pandas/issues/13463
|
import pandas as pd
pd.DataFrame({'C': pd.period_range('2015-01-01', periods=3)}).to_msgpack()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-65-8e5ad9cd6036> in <module>()
----> 1 pd.DataFrame({'C': pd.period_range('2015-01-01', periods=3)}).to_msgpack()
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/core/generic.pyc in to_msgpack(self, path_or_buf, encoding, **kwargs)
1120 from pandas.io import packers
1121 return packers.to_msgpack(path_or_buf, self, encoding=encoding,
-> 1122 **kwargs)
1123
1124 def to_sql(self, name, con, flavor='sqlite', schema=None, if_exists='fail',
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in to_msgpack(path_or_buf, *args, **kwargs)
152 elif path_or_buf is None:
153 buf = compat.BytesIO()
--> 154 writer(buf)
155 return buf.getvalue()
156 else:
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in writer(fh)
145 def writer(fh):
146 for a in args:
--> 147 fh.write(pack(a, **kwargs))
148
149 if isinstance(path_or_buf, compat.string_types):
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in pack(o, default, encoding, unicode_errors, use_single_float, autoreset, use_bin_type)
699 use_single_float=use_single_float,
700 autoreset=autoreset,
--> 701 use_bin_type=use_bin_type).pack(o)
702
703
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer.pack (pandas/msgpack/_packer.cpp:3434)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer.pack (pandas/msgpack/_packer.cpp:3276)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:2433)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:3006)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:2433)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:3006)()
pandas/msgpack/_packer.pyx in pandas.msgpack._packer.Packer._pack (pandas/msgpack/_packer.cpp:3088)()
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/io/packers.pyc in encode(obj)
510 return {u'typ': u'period',
511 u'ordinal': obj.ordinal,
--> 512 u'freq': u(obj.freq)}
513 elif isinstance(obj, BlockIndex):
514 return {u'typ': u'block_index',
/auto/energymdl2/anaconda/envs/commod_20160516_pd18/lib/python2.7/site-packages/pandas/compat/__init__.pyc in u(s)
262
263 def u(s):
--> 264 return unicode(s, "unicode_escape")
265
266 def u_safe(s):
TypeError: coercing to Unicode: need string or buffer, Day found
|
TypeError
|
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to numeric so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.datetime64
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
|
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to integer so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
|
https://github.com/pandas-dev/pandas/issues/19768
|
Traceback (most recent call last):
File "mve.py", line 26, in <module>
pd.qcut(df["Date"], 2)
File "/tmp/test/env/lib/python3.5/site-packages/pandas/core/reshape/tile.py", line 208, in qcut
dtype=dtype, duplicates=duplicates)
File "/tmp/test/env/lib/python3.5/site-packages/pandas/core/reshape/tile.py", line 251, in _bins_to_cuts
dtype=dtype)
File "/tmp/test/env/lib/python3.5/site-packages/pandas/core/reshape/tile.py", line 344, in _format_labels
labels = IntervalIndex.from_breaks(breaks, closed=closed)
File "/tmp/test/env/lib/python3.5/site-packages/pandas/core/indexes/interval.py", line 370, in from_breaks
name=name, copy=copy)
File "/tmp/test/env/lib/python3.5/site-packages/pandas/core/indexes/interval.py", line 411, in from_arrays
copy=copy, verify_integrity=True)
File "/tmp/test/env/lib/python3.5/site-packages/pandas/core/indexes/interval.py", line 225, in _simple_new
result._validate()
File "/tmp/test/env/lib/python3.5/site-packages/pandas/core/indexes/interval.py", line 265, in _validate
raise ValueError('missing values must be missing in the same '
ValueError: missing values must be missing in the same location both left and right sides
|
ValueError
|
def to_stata(
self,
fname,
convert_dates=None,
write_index=True,
encoding="latin-1",
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> data.to_stata('./data_file.dta')
Or with dates
>>> data.to_stata('./date_data_file.dta', {2 : 'tw'})
Alternatively you can create an instance of the StataWriter class
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
With dates:
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(
fname,
self,
convert_dates=convert_dates,
encoding=encoding,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
)
writer.write_file()
|
def to_stata(
self,
fname,
convert_dates=None,
write_index=True,
encoding="latin-1",
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(
fname,
self,
convert_dates=convert_dates,
encoding=encoding,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
)
writer.write_file()
|
https://github.com/pandas-dev/pandas/issues/19817
|
import pandas as pd
df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
df.to_stata('test.dta', dataset_label='data label')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: to_stata() got an unexpected keyword argument 'dataset_label'
df.to_stata('test.dta', data_label='data label')
|
TypeError
|
def __init__(
self,
data=None,
index=None,
columns=None,
default_kind=None,
default_fill_value=None,
dtype=None,
copy=False,
):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, "name"):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = "block"
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(
data, index, columns, dtype=dtype, fill_value=default_fill_value
)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(
data._data, dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, Series):
mgr = self._init_dict(data.to_frame(), data.index, columns=None, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(
data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(
np.nan,
index=index,
kind=self._default_kind,
fill_value=self._default_fill_value,
)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
else:
msg = 'SparseDataFrame called with unkown type "{data_type}" for data argument'
raise TypeError(msg.format(data_type=type(data).__name__))
generic.NDFrame.__init__(self, mgr)
|
def __init__(
self,
data=None,
index=None,
columns=None,
default_kind=None,
default_fill_value=None,
dtype=None,
copy=False,
):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, "name"):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = "block"
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(
data, index, columns, dtype=dtype, fill_value=default_fill_value
)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(
data._data, dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(
data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(
np.nan,
index=index,
kind=self._default_kind,
fill_value=self._default_fill_value,
)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
generic.NDFrame.__init__(self, mgr)
|
https://github.com/pandas-dev/pandas/issues/19374
|
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-6-b9708ff05a7c> in <module>()
1 import pandas
2
----> 3 pandas.SparseDataFrame(pandas.Series())
~/anaconda3/lib/python3.6/site-packages/pandas/core/sparse/frame.py in __init__(self, data, index, columns, default_kind, default_fill_value, dtype, copy)
120 mgr = mgr.astype(dtype)
121
--> 122 generic.NDFrame.__init__(self, mgr)
123
124 @property
UnboundLocalError: local variable 'mgr' referenced before assignment
|
UnboundLocalError
|
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Return new object with labels in requested axis removed.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : int or axis name
Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns : single label or list-like
Alternative to specifying `axis` (``labels, axis=1`` is
equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, default None
For MultiIndex
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : type of caller
Raises
------
KeyError
If none of the labels are found in the selected axis
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Notes
-----
Specifying both `labels` and `index` or `columns` will raise a
ValueError.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
|
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Return new object with labels in requested axis removed.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : int or axis name
Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns : single label or list-like
Alternative to specifying `axis` (``labels, axis=1`` is
equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, default None
For MultiIndex
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : type of caller
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Notes
-----
Specifying both `labels` and `index` or `columns` will raise a
ValueError.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
|
https://github.com/pandas-dev/pandas/issues/19186
|
items.drop('abcd',axis=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2530, in drop
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2562, in _drop_axis
new_axis = axis.drop(labels, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 3744, in drop
labels[mask])
ValueError: labels ['abcd'] not contained in axis
|
ValueError
|
def _drop_axis(self, labels, axis, level=None, errors="raise"):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = _ensure_object(com._index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
else:
indexer = ~axis.isin(labels)
if errors == "raise" and indexer.all():
raise KeyError("{} not found in axis".format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
|
def _drop_axis(self, labels, axis, level=None, errors="raise"):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = _ensure_object(com._index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
else:
indexer = ~axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
|
https://github.com/pandas-dev/pandas/issues/19186
|
items.drop('abcd',axis=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2530, in drop
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2562, in _drop_axis
new_axis = axis.drop(labels, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 3744, in drop
labels[mask])
ValueError: labels ['abcd'] not contained in axis
|
ValueError
|
def drop(self, labels, errors="raise"):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If none of the labels are found in the selected axis
"""
arr_dtype = "object" if self.dtype == "object" else None
labels = _index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError("labels %s not contained in axis" % labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
|
def drop(self, labels, errors="raise"):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
"""
arr_dtype = "object" if self.dtype == "object" else None
labels = _index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise ValueError("labels %s not contained in axis" % labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
|
https://github.com/pandas-dev/pandas/issues/19186
|
items.drop('abcd',axis=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2530, in drop
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2562, in _drop_axis
new_axis = axis.drop(labels, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 3744, in drop
labels[mask])
ValueError: labels ['abcd'] not contained in axis
|
ValueError
|
def pivot_table(
data,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
):
index = _convert_by(index)
columns = _convert_by(columns)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(
data,
values=values,
index=index,
columns=columns,
fill_value=fill_value,
aggfunc=func,
margins=margins,
margins_name=margins_name,
)
pieces.append(table)
keys.append(getattr(func, "__name__", func))
return concat(pieces, keys=keys, axis=1)
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
table = agged
if table.index.nlevels > 1:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[: len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
from pandas import MultiIndex
try:
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
)
table = table.reindex(m, axis=0)
except AttributeError:
pass # it's a single level
try:
m = MultiIndex.from_arrays(
cartesian_product(table.columns.levels), names=table.columns.names
)
table = table.reindex(m, axis=1)
except AttributeError:
pass # it's a single level or a series
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value, downcast="infer")
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(
table,
data,
values,
rows=index,
cols=columns,
aggfunc=aggfunc,
margins_name=margins_name,
fill_value=fill_value,
)
# discard the top level
if (
values_passed
and not values_multi
and not table.empty
and (table.columns.nlevels > 1)
):
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how="all", axis=1)
return table
|
def pivot_table(
data,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
):
index = _convert_by(index)
columns = _convert_by(columns)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(
data,
values=values,
index=index,
columns=columns,
fill_value=fill_value,
aggfunc=func,
margins=margins,
margins_name=margins_name,
)
pieces.append(table)
keys.append(getattr(func, "__name__", func))
return concat(pieces, keys=keys, axis=1)
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError):
pass
values = list(values)
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
table = agged
if table.index.nlevels > 1:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[: len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
from pandas import MultiIndex
try:
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
)
table = table.reindex(m, axis=0)
except AttributeError:
pass # it's a single level
try:
m = MultiIndex.from_arrays(
cartesian_product(table.columns.levels), names=table.columns.names
)
table = table.reindex(m, axis=1)
except AttributeError:
pass # it's a single level or a series
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value, downcast="infer")
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(
table,
data,
values,
rows=index,
cols=columns,
aggfunc=aggfunc,
margins_name=margins_name,
fill_value=fill_value,
)
# discard the top level
if (
values_passed
and not values_multi
and not table.empty
and (table.columns.nlevels > 1)
):
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how="all", axis=1)
return table
|
https://github.com/pandas-dev/pandas/issues/19186
|
items.drop('abcd',axis=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2530, in drop
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/generic.py", line 2562, in _drop_axis
new_axis = axis.drop(labels, errors=errors)
File "/usr/local/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 3744, in drop
labels[mask])
ValueError: labels ['abcd'] not contained in axis
|
ValueError
|
def __init__(
self, values, index, level=-1, value_columns=None, fill_value=None, constructor=None
):
self.is_categorical = None
self.is_sparse = is_sparse(values)
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
elif self.is_sparse:
# XXX: Makes SparseArray *dense*, but it's supposedly
# a single column at a time, so it's "doable"
values = values.values
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
if self.is_sparse:
self.constructor = SparseDataFrame
else:
self.constructor = DataFrame
else:
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError("must pass column labels for multi-column data")
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
self._make_sorted_values_labels()
self._make_selectors()
|
def __init__(
self, values, index, level=-1, value_columns=None, fill_value=None, constructor=None
):
self.is_categorical = None
self.is_sparse = is_sparse(values)
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
elif self.is_sparse:
# XXX: Makes SparseArray *dense*, but it's supposedly
# a single column at a time, so it's "doable"
values = values.values
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
if self.is_sparse:
self.constructor = SparseDataFrame
else:
self.constructor = DataFrame
else:
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError("must pass column labels for multi-column data")
self.index = index
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
|
https://github.com/pandas-dev/pandas/issues/18562
|
In [2]: df = pd.DataFrame([[2009, 1, 15, 'City1', 'Housing'],
...: [2010, 2, np.nan, 'City2', 'Housing']],
...: columns=['year', 'month', 'day', 'city', 'sector'])
...:
In [3]: df['day'] = 15
...: df['date'] = pd.to_datetime(df[['year', 'month', 'day']])
...:
In [4]: ind = df.set_index(['date', 'sector', 'city'], drop=False).sort_index()
In [5]:
In [5]: cols = ['month', 'year', 'sector']
...: per_city = ind.loc[(slice(None), slice(None), 'City1'), cols].dropna(how='any')
...:
In [6]: per_city.unstack()
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-6-97028d1092eb> in <module>()
----> 1 per_city.unstack()
/home/nobackup/repo/pandas/pandas/core/frame.py in unstack(self, level, fill_value)
4575 """
4576 from pandas.core.reshape.reshape import unstack
-> 4577 return unstack(self, level, fill_value)
4578
4579 _shared_docs['melt'] = ("""
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in unstack(obj, level, fill_value)
463 if isinstance(obj, DataFrame):
464 if isinstance(obj.index, MultiIndex):
--> 465 return _unstack_frame(obj, level, fill_value=fill_value)
466 else:
467 return obj.T.stack(dropna=False)
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in _unstack_frame(obj, level, fill_value)
476 unstacker = partial(_Unstacker, index=obj.index,
477 level=level, fill_value=fill_value)
--> 478 blocks = obj._data.unstack(unstacker)
479 klass = type(obj)
480 return klass(blocks)
/home/nobackup/repo/pandas/pandas/core/internals.py in unstack(self, unstacker_func)
4374 new_columns = new_columns[columns_mask]
4375
-> 4376 bm = BlockManager(new_blocks, [new_columns, new_index])
4377 return bm
4378
/home/nobackup/repo/pandas/pandas/core/internals.py in __init__(self, blocks, axes, do_integrity_check, fastpath)
3042 self._consolidate_check()
3043
-> 3044 self._rebuild_blknos_and_blklocs()
3045
3046 def make_empty(self, axes=None):
/home/nobackup/repo/pandas/pandas/core/internals.py in _rebuild_blknos_and_blklocs(self)
3134
3135 if (new_blknos == -1).any():
-> 3136 raise AssertionError("Gaps in blk ref_locs")
3137
3138 self._blknos = new_blknos
AssertionError: Gaps in blk ref_locs
|
AssertionError
|
def get_result(self):
values, _ = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [
Categorical(values[:, i], categories=categories, ordered=ordered)
for i in range(values.shape[-1])
]
return self.constructor(values, index=index, columns=columns)
|
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = algos.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [
Categorical(values[:, i], categories=categories, ordered=ordered)
for i in range(values.shape[-1])
]
return self.constructor(values, index=index, columns=columns)
|
https://github.com/pandas-dev/pandas/issues/18562
|
In [2]: df = pd.DataFrame([[2009, 1, 15, 'City1', 'Housing'],
...: [2010, 2, np.nan, 'City2', 'Housing']],
...: columns=['year', 'month', 'day', 'city', 'sector'])
...:
In [3]: df['day'] = 15
...: df['date'] = pd.to_datetime(df[['year', 'month', 'day']])
...:
In [4]: ind = df.set_index(['date', 'sector', 'city'], drop=False).sort_index()
In [5]:
In [5]: cols = ['month', 'year', 'sector']
...: per_city = ind.loc[(slice(None), slice(None), 'City1'), cols].dropna(how='any')
...:
In [6]: per_city.unstack()
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-6-97028d1092eb> in <module>()
----> 1 per_city.unstack()
/home/nobackup/repo/pandas/pandas/core/frame.py in unstack(self, level, fill_value)
4575 """
4576 from pandas.core.reshape.reshape import unstack
-> 4577 return unstack(self, level, fill_value)
4578
4579 _shared_docs['melt'] = ("""
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in unstack(obj, level, fill_value)
463 if isinstance(obj, DataFrame):
464 if isinstance(obj.index, MultiIndex):
--> 465 return _unstack_frame(obj, level, fill_value=fill_value)
466 else:
467 return obj.T.stack(dropna=False)
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in _unstack_frame(obj, level, fill_value)
476 unstacker = partial(_Unstacker, index=obj.index,
477 level=level, fill_value=fill_value)
--> 478 blocks = obj._data.unstack(unstacker)
479 klass = type(obj)
480 return klass(blocks)
/home/nobackup/repo/pandas/pandas/core/internals.py in unstack(self, unstacker_func)
4374 new_columns = new_columns[columns_mask]
4375
-> 4376 bm = BlockManager(new_blocks, [new_columns, new_index])
4377 return bm
4378
/home/nobackup/repo/pandas/pandas/core/internals.py in __init__(self, blocks, axes, do_integrity_check, fastpath)
3042 self._consolidate_check()
3043
-> 3044 self._rebuild_blknos_and_blklocs()
3045
3046 def make_empty(self, axes=None):
/home/nobackup/repo/pandas/pandas/core/internals.py in _rebuild_blknos_and_blklocs(self)
3134
3135 if (new_blknos == -1).any():
-> 3136 raise AssertionError("Gaps in blk ref_locs")
3137
3138 self._blknos = new_blknos
AssertionError: Gaps in blk ref_locs
|
AssertionError
|
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level_full,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator) for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level_full]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new labels to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_labels.append(np.tile(repeater, width))
return MultiIndex(
levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False
)
|
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator) for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(
levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False
)
|
https://github.com/pandas-dev/pandas/issues/18562
|
In [2]: df = pd.DataFrame([[2009, 1, 15, 'City1', 'Housing'],
...: [2010, 2, np.nan, 'City2', 'Housing']],
...: columns=['year', 'month', 'day', 'city', 'sector'])
...:
In [3]: df['day'] = 15
...: df['date'] = pd.to_datetime(df[['year', 'month', 'day']])
...:
In [4]: ind = df.set_index(['date', 'sector', 'city'], drop=False).sort_index()
In [5]:
In [5]: cols = ['month', 'year', 'sector']
...: per_city = ind.loc[(slice(None), slice(None), 'City1'), cols].dropna(how='any')
...:
In [6]: per_city.unstack()
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-6-97028d1092eb> in <module>()
----> 1 per_city.unstack()
/home/nobackup/repo/pandas/pandas/core/frame.py in unstack(self, level, fill_value)
4575 """
4576 from pandas.core.reshape.reshape import unstack
-> 4577 return unstack(self, level, fill_value)
4578
4579 _shared_docs['melt'] = ("""
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in unstack(obj, level, fill_value)
463 if isinstance(obj, DataFrame):
464 if isinstance(obj.index, MultiIndex):
--> 465 return _unstack_frame(obj, level, fill_value=fill_value)
466 else:
467 return obj.T.stack(dropna=False)
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in _unstack_frame(obj, level, fill_value)
476 unstacker = partial(_Unstacker, index=obj.index,
477 level=level, fill_value=fill_value)
--> 478 blocks = obj._data.unstack(unstacker)
479 klass = type(obj)
480 return klass(blocks)
/home/nobackup/repo/pandas/pandas/core/internals.py in unstack(self, unstacker_func)
4374 new_columns = new_columns[columns_mask]
4375
-> 4376 bm = BlockManager(new_blocks, [new_columns, new_index])
4377 return bm
4378
/home/nobackup/repo/pandas/pandas/core/internals.py in __init__(self, blocks, axes, do_integrity_check, fastpath)
3042 self._consolidate_check()
3043
-> 3044 self._rebuild_blknos_and_blklocs()
3045
3046 def make_empty(self, axes=None):
/home/nobackup/repo/pandas/pandas/core/internals.py in _rebuild_blknos_and_blklocs(self)
3134
3135 if (new_blknos == -1).any():
-> 3136 raise AssertionError("Gaps in blk ref_locs")
3137
3138 self._blknos = new_blknos
AssertionError: Gaps in blk ref_locs
|
AssertionError
|
def _dt_index_cmp(opname, cls, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if isinstance(other, (datetime, compat.string_types)):
if isinstance(other, datetime):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
other = _to_m8(other, tz=self.tz)
result = func(other)
if isna(other):
result.fill(nat_result)
else:
if isinstance(other, list):
other = DatetimeIndex(other)
elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
other = _ensure_datetime64(other)
if is_datetimelike(other):
self._assert_tzawareness_compat(other)
result = func(np.asarray(other))
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view("i8") == libts.iNaT
else:
o_mask = other.view("i8") == libts.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return compat.set_function_name(wrapper, opname, cls)
|
def _dt_index_cmp(opname, cls, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if isinstance(other, datetime) or isinstance(other, compat.string_types):
other = _to_m8(other, tz=self.tz)
result = func(other)
if isna(other):
result.fill(nat_result)
else:
if isinstance(other, list):
other = DatetimeIndex(other)
elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
other = _ensure_datetime64(other)
result = func(np.asarray(other))
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view("i8") == libts.iNaT
else:
o_mask = other.view("i8") == libts.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return compat.set_function_name(wrapper, opname, cls)
|
https://github.com/pandas-dev/pandas/issues/18162
|
dr = pd.date_range('2016-01-01', periods=6)
dz = dr.tz_localize('US/Pacific')
dr < dz
array([ True, True, True, True, True, True], dtype=bool)
dr[0] < dz[0]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/_libs/tslib.pyx", line 1169, in pandas._libs.tslib._Timestamp.__richcmp__
File "pandas/_libs/tslib.pyx", line 1230, in pandas._libs.tslib._Timestamp._assert_tzawareness_compat
TypeError: Cannot compare tz-naive and tz-aware timestamps
|
TypeError
|
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if isinstance(other, (datetime, compat.string_types)):
if isinstance(other, datetime):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
other = _to_m8(other, tz=self.tz)
result = func(other)
if isna(other):
result.fill(nat_result)
else:
if isinstance(other, list):
other = DatetimeIndex(other)
elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
other = _ensure_datetime64(other)
if is_datetimelike(other):
self._assert_tzawareness_compat(other)
result = func(np.asarray(other))
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view("i8") == libts.iNaT
else:
o_mask = other.view("i8") == libts.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
|
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if isinstance(other, datetime) or isinstance(other, compat.string_types):
other = _to_m8(other, tz=self.tz)
result = func(other)
if isna(other):
result.fill(nat_result)
else:
if isinstance(other, list):
other = DatetimeIndex(other)
elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
other = _ensure_datetime64(other)
result = func(np.asarray(other))
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view("i8") == libts.iNaT
else:
o_mask = other.view("i8") == libts.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
|
https://github.com/pandas-dev/pandas/issues/18162
|
dr = pd.date_range('2016-01-01', periods=6)
dz = dr.tz_localize('US/Pacific')
dr < dz
array([ True, True, True, True, True, True], dtype=bool)
dr[0] < dz[0]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/_libs/tslib.pyx", line 1169, in pandas._libs.tslib._Timestamp.__richcmp__
File "pandas/_libs/tslib.pyx", line 1230, in pandas._libs.tslib._Timestamp._assert_tzawareness_compat
TypeError: Cannot compare tz-naive and tz-aware timestamps
|
TypeError
|
def _add_datetimelike_methods(cls):
"""
add in the datetimelike methods (as we may have to override the
superclass)
"""
def __add__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
other = lib.item_from_zerodim(other)
if is_timedelta64_dtype(other):
return self._add_delta(other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if hasattr(other, "_add_delta"):
return other._add_delta(self)
raise TypeError(
"cannot add TimedeltaIndex and {typ}".format(typ=type(other))
)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif is_integer(other):
return self.shift(other)
elif isinstance(other, (datetime, np.datetime64)):
return self._add_datelike(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._add_offset_array(other)
elif isinstance(other, Index):
return self._add_datelike(other)
else: # pragma: no cover
return NotImplemented
cls.__add__ = __add__
cls.__radd__ = __add__
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
other = lib.item_from_zerodim(other)
if is_timedelta64_dtype(other):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, (datetime, np.datetime64)):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._sub_offset_array(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
else: # pragma: no cover
return NotImplemented
cls.__sub__ = __sub__
def __rsub__(self, other):
return -(self - other)
cls.__rsub__ = __rsub__
cls.__iadd__ = __add__
cls.__isub__ = __sub__
|
def _add_datetimelike_methods(cls):
"""
add in the datetimelike methods (as we may have to override the
superclass)
"""
def __add__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if is_timedelta64_dtype(other):
return self._add_delta(other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if hasattr(other, "_add_delta"):
return other._add_delta(self)
raise TypeError(
"cannot add TimedeltaIndex and {typ}".format(typ=type(other))
)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif is_integer(other):
return self.shift(other)
elif isinstance(other, (datetime, np.datetime64)):
return self._add_datelike(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._add_offset_array(other)
elif isinstance(other, Index):
return self._add_datelike(other)
else: # pragma: no cover
return NotImplemented
cls.__add__ = __add__
cls.__radd__ = __add__
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if is_timedelta64_dtype(other):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, (datetime, np.datetime64)):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._sub_offset_array(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
else: # pragma: no cover
return NotImplemented
cls.__sub__ = __sub__
def __rsub__(self, other):
return -(self - other)
cls.__rsub__ = __rsub__
cls.__iadd__ = __add__
cls.__isub__ = __sub__
|
https://github.com/pandas-dev/pandas/issues/19012
|
dti + one
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/indexes/datetimelike.py", line 685, in __add__
elif is_offsetlike(other):
File "pandas/core/dtypes/common.py", line 294, in is_offsetlike
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
TypeError: len() of unsized object
|
TypeError
|
def __add__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
other = lib.item_from_zerodim(other)
if is_timedelta64_dtype(other):
return self._add_delta(other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if hasattr(other, "_add_delta"):
return other._add_delta(self)
raise TypeError("cannot add TimedeltaIndex and {typ}".format(typ=type(other)))
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif is_integer(other):
return self.shift(other)
elif isinstance(other, (datetime, np.datetime64)):
return self._add_datelike(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._add_offset_array(other)
elif isinstance(other, Index):
return self._add_datelike(other)
else: # pragma: no cover
return NotImplemented
|
def __add__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if is_timedelta64_dtype(other):
return self._add_delta(other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if hasattr(other, "_add_delta"):
return other._add_delta(self)
raise TypeError("cannot add TimedeltaIndex and {typ}".format(typ=type(other)))
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif is_integer(other):
return self.shift(other)
elif isinstance(other, (datetime, np.datetime64)):
return self._add_datelike(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._add_offset_array(other)
elif isinstance(other, Index):
return self._add_datelike(other)
else: # pragma: no cover
return NotImplemented
|
https://github.com/pandas-dev/pandas/issues/19012
|
dti + one
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/indexes/datetimelike.py", line 685, in __add__
elif is_offsetlike(other):
File "pandas/core/dtypes/common.py", line 294, in is_offsetlike
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
TypeError: len() of unsized object
|
TypeError
|
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
other = lib.item_from_zerodim(other)
if is_timedelta64_dtype(other):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, (datetime, np.datetime64)):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._sub_offset_array(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
else: # pragma: no cover
return NotImplemented
|
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if is_timedelta64_dtype(other):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, (datetime, np.datetime64)):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
return self._sub_offset_array(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
else: # pragma: no cover
return NotImplemented
|
https://github.com/pandas-dev/pandas/issues/19012
|
dti + one
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/indexes/datetimelike.py", line 685, in __add__
elif is_offsetlike(other):
File "pandas/core/dtypes/common.py", line 294, in is_offsetlike
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
TypeError: len() of unsized object
|
TypeError
|
def _add_delta_td(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view("i8")
if self.hasnans:
new_values[self._isnan] = iNaT
return new_values.view("i8")
|
def _add_delta_td(self, other):
# add a delta of a timedeltalike
# return the i8 result view
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view("i8")
if self.hasnans:
new_values[self._isnan] = iNaT
return new_values.view("i8")
|
https://github.com/pandas-dev/pandas/issues/19012
|
dti + one
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/indexes/datetimelike.py", line 685, in __add__
elif is_offsetlike(other):
File "pandas/core/dtypes/common.py", line 294, in is_offsetlike
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
TypeError: len() of unsized object
|
TypeError
|
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
# delta operation
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view("i8")
|
def _add_delta_tdi(self, other):
# add a delta of a TimedeltaIndex
# return the i8 result view
# delta operation
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view("i8")
|
https://github.com/pandas-dev/pandas/issues/19012
|
dti + one
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/indexes/datetimelike.py", line 685, in __add__
elif is_offsetlike(other):
File "pandas/core/dtypes/common.py", line 294, in is_offsetlike
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
TypeError: len() of unsized object
|
TypeError
|
def _sort_levels_monotonic(self):
"""
.. versionadded:: 0.20.0
This is an *internal* function.
create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i.sort_monotonic()
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]])
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_labels = []
for lev, lab in zip(self.levels, self.labels):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the labels
indexer = _ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
lab = algos.take_1d(ri, lab)
new_levels.append(lev)
new_labels.append(lab)
return MultiIndex(
new_levels,
new_labels,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
|
def _sort_levels_monotonic(self):
"""
.. versionadded:: 0.20.0
This is an *internal* function.
create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i.sort_monotonic()
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]])
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_labels = []
for lev, lab in zip(self.levels, self.labels):
if lev.is_monotonic:
new_levels.append(lev)
new_labels.append(lab)
continue
# indexer to reorder the levels
indexer = lev.argsort()
lev = lev.take(indexer)
# indexer to reorder the labels
indexer = _ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
lab = algos.take_1d(ri, lab)
new_levels.append(lev)
new_labels.append(lab)
return MultiIndex(
new_levels,
new_labels,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
|
https://github.com/pandas-dev/pandas/issues/18310
|
In [2]: df = pd.DataFrame(1, index=range(3), columns=pd.MultiIndex.from_product([['a', 'b'], [1, 2]]))
In [3]: df['c'] = 2
In [4]: df.stack(0)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-a437419299b6> in <module>()
----> 1 df.stack(0)
/home/nobackup/repo/pandas/pandas/core/frame.py in stack(self, level, dropna)
4500 return stack_multiple(self, level, dropna=dropna)
4501 else:
-> 4502 return stack(self, level, dropna=dropna)
4503
4504 def unstack(self, level=-1, fill_value=None):
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in stack(frame, level, dropna)
513
514 if isinstance(frame.columns, MultiIndex):
--> 515 return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
516 elif isinstance(frame.index, MultiIndex):
517 new_levels = list(frame.index.levels)
/home/nobackup/repo/pandas/pandas/core/reshape/reshape.py in _stack_multi_columns(frame, level_num, dropna)
619 # level
620 level_to_sort = _convert_level_number(0, this.columns)
--> 621 this = this.sort_index(level=level_to_sort, axis=1)
622
623 # tuple list excluding level for grouping columns
/home/nobackup/repo/pandas/pandas/core/frame.py in sort_index(self, axis, level, ascending, inplace, kind, na_position, sort_remaining, by)
3673 # make sure that the axis is lexsorted to start
3674 # if not we need to reconstruct to get the correct indexer
-> 3675 labels = labels._sort_levels_monotonic()
3676 indexer = lexsort_indexer(labels._get_labels_for_sorting(),
3677 orders=ascending,
/home/nobackup/repo/pandas/pandas/core/indexes/multi.py in _sort_levels_monotonic(self)
1275
1276 # indexer to reorder the levels
-> 1277 indexer = lev.argsort()
1278 lev = lev.take(indexer)
1279
/home/nobackup/repo/pandas/pandas/core/indexes/base.py in argsort(self, *args, **kwargs)
2146 if result is None:
2147 result = np.array(self)
-> 2148 return result.argsort(*args, **kwargs)
2149
2150 def __add__(self, other):
TypeError: unorderable types: str() > int()
|
TypeError
|
def to_dict(self, orient="dict", into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
>>> df = pd.DataFrame(
{'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = standardize_mapping(into)
if orient.lower().startswith("d"):
return into_c((k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith("l"):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith("sp"):
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
lib.map_infer(self.values.ravel(), _maybe_box_datetimelike)
.reshape(self.values.shape)
.tolist(),
),
)
)
elif orient.lower().startswith("s"):
return into_c(
(k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)
)
elif orient.lower().startswith("r"):
return [
into_c(
(k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row))
)
for row in self.values
]
elif orient.lower().startswith("i"):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
|
def to_dict(self, orient="dict", into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
>>> df = pd.DataFrame(
{'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = standardize_mapping(into)
if orient.lower().startswith("d"):
return into_c((k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith("l"):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith("sp"):
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
lib.map_infer(self.values.ravel(), _maybe_box_datetimelike)
.reshape(self.values.shape)
.tolist(),
),
)
)
elif orient.lower().startswith("s"):
return into_c(
(k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)
)
elif orient.lower().startswith("r"):
return [
into_c((k, _maybe_box_datetimelike(v)) for k, v in zip(self.columns, row))
for row in self.values
]
elif orient.lower().startswith("i"):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
|
https://github.com/pandas-dev/pandas/issues/18372
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-30-34cf441e3f50> in <module>()
----> 1 df.to_dict(orient='records')
/Users/bolke/Documents/dev/airflow_env/lib/python2.7/site-packages/pandas-0.21.0-py2.7-macosx-10.12-x86_64.egg/pandas/core/frame.pyc in to_dict(self, orient)
897 return [dict((k, _maybe_box_datetimelike(v))
898 for k, v in zip(self.columns, row))
--> 899 for row in self.values]
900 elif orient.lower().startswith('i'):
901 return dict((k, v.to_dict()) for k, v in self.iterrows())
TypeError: izip argument #2 must support iteration
|
TypeError
|
def to_dict(self, orient="dict", into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
>>> df = pd.DataFrame(
{'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = standardize_mapping(into)
if orient.lower().startswith("d"):
return into_c((k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith("l"):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith("sp"):
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
lib.map_infer(self.values.ravel(), _maybe_box_datetimelike)
.reshape(self.values.shape)
.tolist(),
),
)
)
elif orient.lower().startswith("s"):
return into_c(
(k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)
)
elif orient.lower().startswith("r"):
return [
into_c(
(k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row))
)
for row in self.values
]
elif orient.lower().startswith("i"):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
|
def to_dict(self, orient="dict", into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
>>> df = pd.DataFrame(
{'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = standardize_mapping(into)
if orient.lower().startswith("d"):
return into_c((k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith("l"):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith("sp"):
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
lib.map_infer(self.values.ravel(), _maybe_box_datetimelike)
.reshape(self.values.shape)
.tolist(),
),
)
)
elif orient.lower().startswith("s"):
return into_c(
(k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)
)
elif orient.lower().startswith("r"):
return [
into_c((k, _maybe_box_datetimelike(v)) for k, v in zip(self.columns, row))
for row in self.values
]
elif orient.lower().startswith("i"):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
|
https://github.com/pandas-dev/pandas/issues/18372
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-30-34cf441e3f50> in <module>()
----> 1 df.to_dict(orient='records')
/Users/bolke/Documents/dev/airflow_env/lib/python2.7/site-packages/pandas-0.21.0-py2.7-macosx-10.12-x86_64.egg/pandas/core/frame.pyc in to_dict(self, orient)
897 return [dict((k, _maybe_box_datetimelike(v))
898 for k, v in zip(self.columns, row))
--> 899 for row in self.values]
900 elif orient.lower().startswith('i'):
901 return dict((k, v.to_dict()) for k, v in self.iterrows())
TypeError: izip argument #2 must support iteration
|
TypeError
|
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
List of info axis to restrict to (must not all be present)
like : string
Keep info axis where "arg in col == True"
regex : string (regular expression)
Keep info axis with re.search(regex, col) == True
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame
Returns
-------
same type as input object
Examples
--------
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
See Also
--------
pandas.DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
"""
import re
nkw = _count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in to_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
|
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
List of info axis to restrict to (must not all be present)
like : string
Keep info axis where "arg in col == True"
regex : string (regular expression)
Keep info axis with re.search(regex, col) == True
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame
Returns
-------
same type as input object
Examples
--------
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
See Also
--------
pandas.DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
"""
import re
nkw = _count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
if not isinstance(x, string_types):
x = str(x)
return like in x
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
matcher = re.compile(regex)
values = labels.map(lambda x: matcher.search(str(x)) is not None)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
|
https://github.com/pandas-dev/pandas/issues/13101
|
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-10-9de5a19c260e> in <module>()
----> 1 df.filter(regex=u'a')
C:\Users\...\AppData\Local\Continuum\32bit\Anaconda\envs\test\lib\site-packages\pandas\core\generic.pyc in filter(self, items, like, regex, axis)
2013 matcher = re.compile(regex)
2014 return self.select(lambda x: matcher.search(str(x)) is not None,
-> 2015 axis=axis_name)
2016 else:
2017 raise TypeError('Must pass either `items`, `like`, or `regex`')
C:\Users\...\AppData\Local\Continuum\32bit\Anaconda\envs\test\lib\site-packages\pandas\core\generic.pyc in select(self, crit, axis)
1545 if len(axis_values) > 0:
1546 new_axis = axis_values[
-> 1547 np.asarray([bool(crit(label)) for label in axis_values])]
1548 else:
1549 new_axis = axis_values
C:\Users\...\AppData\Local\Continuum\32bit\Anaconda\envs\test\lib\site-packages\pandas\core\generic.pyc in <lambda>(x)
2012 elif regex:
2013 matcher = re.compile(regex)
-> 2014 return self.select(lambda x: matcher.search(str(x)) is not None,
2015 axis=axis_name)
2016 else:
UnicodeEncodeError: 'ascii' codec can't encode character u'\xe4' in position 0: ordinal not in range(128)
|
UnicodeEncodeError
|
def f(x):
return matcher.search(to_str(x)) is not None
|
def f(x):
if not isinstance(x, string_types):
x = str(x)
return like in x
|
https://github.com/pandas-dev/pandas/issues/13101
|
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-10-9de5a19c260e> in <module>()
----> 1 df.filter(regex=u'a')
C:\Users\...\AppData\Local\Continuum\32bit\Anaconda\envs\test\lib\site-packages\pandas\core\generic.pyc in filter(self, items, like, regex, axis)
2013 matcher = re.compile(regex)
2014 return self.select(lambda x: matcher.search(str(x)) is not None,
-> 2015 axis=axis_name)
2016 else:
2017 raise TypeError('Must pass either `items`, `like`, or `regex`')
C:\Users\...\AppData\Local\Continuum\32bit\Anaconda\envs\test\lib\site-packages\pandas\core\generic.pyc in select(self, crit, axis)
1545 if len(axis_values) > 0:
1546 new_axis = axis_values[
-> 1547 np.asarray([bool(crit(label)) for label in axis_values])]
1548 else:
1549 new_axis = axis_values
C:\Users\...\AppData\Local\Continuum\32bit\Anaconda\envs\test\lib\site-packages\pandas\core\generic.pyc in <lambda>(x)
2012 elif regex:
2013 matcher = re.compile(regex)
-> 2014 return self.select(lambda x: matcher.search(str(x)) is not None,
2015 axis=axis_name)
2016 else:
UnicodeEncodeError: 'ascii' codec can't encode character u'\xe4' in position 0: ordinal not in range(128)
|
UnicodeEncodeError
|
def count(self):
"""Compute count of group, excluding missing values"""
from functools import partial
from pandas.core.dtypes.missing import _isna_ndarraylike as isna
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~isna(np.atleast_2d(blk.get_values()))) for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
|
def count(self):
"""Compute count of group, excluding missing values"""
from functools import partial
from pandas.core.dtypes.missing import _isna_ndarraylike as isna
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~isna(blk.get_values())) for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
|
https://github.com/pandas-dev/pandas/issues/13393
|
ValueError Traceback (most recent call last)
<ipython-input-5-3119045de5b1> in <module>()
2 df = pd.DataFrame({'x': ['a', 'a', 'b'],
3 'y': [pd.Timestamp('2016-05-07 20:09:25+00:00'), pd.Timestamp('2016-05-07 20:09:29+00:00'), pd.Timestamp('2016-05-07 20:09:29+00:00')]})
----> 4 print df.groupby('x').count()
/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.pyc in count(self)
3754 blk = map(make_block, map(counter, val), loc)
3755
-> 3756 return self._wrap_agged_blocks(data.items, list(blk))
3757
3758
pandas/lib.pyx in pandas.lib.count_level_2d (pandas/lib.c:23068)()
ValueError: Buffer has wrong number of dimensions (expected 2, got 1)
|
ValueError
|
def _add_datetimelike_methods(cls):
"""
add in the datetimelike methods (as we may have to override the
superclass)
"""
def __add__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if hasattr(other, "_add_delta"):
return other._add_delta(self)
raise TypeError(
"cannot add TimedeltaIndex and {typ}".format(typ=type(other))
)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
return self._add_delta(other)
elif is_integer(other):
return self.shift(other)
elif isinstance(other, (Index, datetime, np.datetime64)):
return self._add_datelike(other)
else: # pragma: no cover
return NotImplemented
cls.__add__ = __add__
cls.__radd__ = __add__
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, (datetime, np.datetime64)):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
else: # pragma: no cover
return NotImplemented
cls.__sub__ = __sub__
def __rsub__(self, other):
return -(self - other)
cls.__rsub__ = __rsub__
cls.__iadd__ = __add__
cls.__isub__ = __sub__
|
def _add_datetimelike_methods(cls):
"""
add in the datetimelike methods (as we may have to override the
superclass)
"""
def __add__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if hasattr(other, "_add_delta"):
return other._add_delta(self)
raise TypeError(
"cannot add TimedeltaIndex and {typ}".format(typ=type(other))
)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
return self._add_delta(other)
elif is_integer(other):
return self.shift(other)
elif isinstance(other, (Index, datetime, np.datetime64)):
return self._add_datelike(other)
else: # pragma: no cover
return NotImplemented
cls.__add__ = __add__
cls.__radd__ = __add__
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, datetime):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
else: # pragma: no cover
return NotImplemented
cls.__sub__ = __sub__
def __rsub__(self, other):
return -(self - other)
cls.__rsub__ = __rsub__
cls.__iadd__ = __add__
cls.__isub__ = __sub__
|
https://github.com/pandas-dev/pandas/issues/17991
|
In [9]: pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
---------------------------------------------------------------------------
OverflowError Traceback (most recent call last)
<ipython-input-9-cc287bf4c401> in <module>()
----> 1 pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py in __add__(self, other)
658 return self.shift(other)
659 elif isinstance(other, (Timestamp, datetime)):
--> 660 return self._add_datelike(other)
661 else: # pragma: no cover
662 return NotImplemented
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/timedeltas.py in _add_datelike(self, other)
354 other = Timestamp(other)
355 i8 = self.asi8
--> 356 result = checked_add_with_arr(i8, other.value)
357 result = self._maybe_mask_results(result, fill_value=iNaT)
358 return DatetimeIndex(result, name=self.name, copy=False)
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/algorithms.py in checked_add_with_arr(arr, b, arr_mask, b_mask)
889
890 if to_raise:
--> 891 raise OverflowError("Overflow in int64 addition")
892 return arr + b
893
OverflowError: Overflow in int64 addition
|
OverflowError
|
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, (datetime, np.datetime64)):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
else: # pragma: no cover
return NotImplemented
|
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError(
"cannot subtract TimedeltaIndex and {typ}".format(
typ=type(other).__name__
)
)
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, Index):
raise TypeError(
"cannot subtract {typ1} and {typ2}".format(
typ1=type(self).__name__, typ2=type(other).__name__
)
)
elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, datetime):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
else: # pragma: no cover
return NotImplemented
|
https://github.com/pandas-dev/pandas/issues/17991
|
In [9]: pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
---------------------------------------------------------------------------
OverflowError Traceback (most recent call last)
<ipython-input-9-cc287bf4c401> in <module>()
----> 1 pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py in __add__(self, other)
658 return self.shift(other)
659 elif isinstance(other, (Timestamp, datetime)):
--> 660 return self._add_datelike(other)
661 else: # pragma: no cover
662 return NotImplemented
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/timedeltas.py in _add_datelike(self, other)
354 other = Timestamp(other)
355 i8 = self.asi8
--> 356 result = checked_add_with_arr(i8, other.value)
357 result = self._maybe_mask_results(result, fill_value=iNaT)
358 return DatetimeIndex(result, name=self.name, copy=False)
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/algorithms.py in checked_add_with_arr(arr, b, arr_mask, b_mask)
889
890 if to_raise:
--> 891 raise OverflowError("Overflow in int64 addition")
892 return arr + b
893
OverflowError: Overflow in int64 addition
|
OverflowError
|
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
if isinstance(other, DatetimeIndex):
# require tz compat
if not self._has_same_tz(other):
raise TypeError(
"DatetimeIndex subtraction must have the same timezones or no timezones"
)
result = self._sub_datelike_dti(other)
elif isinstance(other, (datetime, np.datetime64)):
other = Timestamp(other)
if other is libts.NaT:
result = self._nat_new(box=False)
# require tz compat
elif not self._has_same_tz(other):
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
else:
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result, fill_value=libts.iNaT)
else:
raise TypeError(
"cannot subtract DatetimeIndex and {typ}".format(typ=type(other).__name__)
)
return TimedeltaIndex(result, name=self.name, copy=False)
|
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
if isinstance(other, DatetimeIndex):
# require tz compat
if not self._has_same_tz(other):
raise TypeError(
"DatetimeIndex subtraction must have the same timezones or no timezones"
)
result = self._sub_datelike_dti(other)
elif isinstance(other, datetime):
other = Timestamp(other)
if other is libts.NaT:
result = self._nat_new(box=False)
# require tz compat
elif not self._has_same_tz(other):
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
else:
i8 = self.asi8
result = i8 - other.value
result = self._maybe_mask_results(result, fill_value=libts.iNaT)
else:
raise TypeError(
"cannot subtract DatetimeIndex and {typ}".format(typ=type(other).__name__)
)
return TimedeltaIndex(result, name=self.name, copy=False)
|
https://github.com/pandas-dev/pandas/issues/17991
|
In [9]: pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
---------------------------------------------------------------------------
OverflowError Traceback (most recent call last)
<ipython-input-9-cc287bf4c401> in <module>()
----> 1 pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py in __add__(self, other)
658 return self.shift(other)
659 elif isinstance(other, (Timestamp, datetime)):
--> 660 return self._add_datelike(other)
661 else: # pragma: no cover
662 return NotImplemented
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/timedeltas.py in _add_datelike(self, other)
354 other = Timestamp(other)
355 i8 = self.asi8
--> 356 result = checked_add_with_arr(i8, other.value)
357 result = self._maybe_mask_results(result, fill_value=iNaT)
358 return DatetimeIndex(result, name=self.name, copy=False)
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/algorithms.py in checked_add_with_arr(arr, b, arr_mask, b_mask)
889
890 if to_raise:
--> 891 raise OverflowError("Overflow in int64 addition")
892 return arr + b
893
OverflowError: Overflow in int64 addition
|
OverflowError
|
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
other = Timestamp(other)
i8 = self.asi8
result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result, fill_value=iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
|
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
other = Timestamp(other)
i8 = self.asi8
result = checked_add_with_arr(i8, other.value)
result = self._maybe_mask_results(result, fill_value=iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
|
https://github.com/pandas-dev/pandas/issues/17991
|
In [9]: pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
---------------------------------------------------------------------------
OverflowError Traceback (most recent call last)
<ipython-input-9-cc287bf4c401> in <module>()
----> 1 pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + pd.Timestamp('1950-01-01')
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py in __add__(self, other)
658 return self.shift(other)
659 elif isinstance(other, (Timestamp, datetime)):
--> 660 return self._add_datelike(other)
661 else: # pragma: no cover
662 return NotImplemented
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/indexes/timedeltas.py in _add_datelike(self, other)
354 other = Timestamp(other)
355 i8 = self.asi8
--> 356 result = checked_add_with_arr(i8, other.value)
357 result = self._maybe_mask_results(result, fill_value=iNaT)
358 return DatetimeIndex(result, name=self.name, copy=False)
~/conda/envs/xarray-py36/lib/python3.6/site-packages/pandas/core/algorithms.py in checked_add_with_arr(arr, b, arr_mask, b_mask)
889
890 if to_raise:
--> 891 raise OverflowError("Overflow in int64 addition")
892 return arr + b
893
OverflowError: Overflow in int64 addition
|
OverflowError
|
def __init__(self, src, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
if kwds.get("compression") is None and "utf-16" in (kwds.get("encoding") or ""):
# if source is utf-16 plain text, convert source to utf-8
if isinstance(src, compat.string_types):
src = open(src, "rb")
self.handles.append(src)
src = UTF8Recoder(src, kwds["encoding"])
kwds["encoding"] = "utf-8"
# #2442
kwds["allow_leading_cols"] = self.index_col is not False
self._reader = parsers.TextReader(src, **kwds)
# XXX
self.usecols, self.usecols_dtype = _validate_usecols_arg(self._reader.usecols)
passed_names = self.names is None
if self._reader.header is None:
self.names = None
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
self.names, self.index_names, self.col_names, passed_names = (
self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names, passed_names
)
)
else:
self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
self.names = [
"%s%d" % (self.prefix, i) for i in range(self._reader.table_width)
]
else:
self.names = lrange(self._reader.table_width)
# gh-9755
#
# need to set orig_names here first
# so that proper indexing can be done
# with _set_noconvert_columns
#
# once names has been filtered, we will
# then set orig_names again to names
self.orig_names = self.names[:]
if self.usecols:
usecols = _evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
if self.usecols_dtype == "string" and not set(usecols).issubset(
self.orig_names
):
raise ValueError("Usecols do not match names.")
if len(self.names) > len(usecols):
self.names = [
n for i, n in enumerate(self.names) if (i in usecols or n in usecols)
]
if len(self.names) < len(usecols):
raise ValueError("Usecols do not match names.")
self._set_noconvert_columns()
self.orig_names = self.names
if not self._has_complex_date_col:
if self._reader.leading_cols == 0 and _is_index_col(self.index_col):
self._name_processed = True
(index_names, self.names, self.index_col) = _clean_index_names(
self.names, self.index_col
)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
|
def __init__(self, src, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
if "utf-16" in (kwds.get("encoding") or ""):
if isinstance(src, compat.string_types):
src = open(src, "rb")
self.handles.append(src)
src = UTF8Recoder(src, kwds["encoding"])
kwds["encoding"] = "utf-8"
# #2442
kwds["allow_leading_cols"] = self.index_col is not False
self._reader = parsers.TextReader(src, **kwds)
# XXX
self.usecols, self.usecols_dtype = _validate_usecols_arg(self._reader.usecols)
passed_names = self.names is None
if self._reader.header is None:
self.names = None
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
self.names, self.index_names, self.col_names, passed_names = (
self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names, passed_names
)
)
else:
self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
self.names = [
"%s%d" % (self.prefix, i) for i in range(self._reader.table_width)
]
else:
self.names = lrange(self._reader.table_width)
# gh-9755
#
# need to set orig_names here first
# so that proper indexing can be done
# with _set_noconvert_columns
#
# once names has been filtered, we will
# then set orig_names again to names
self.orig_names = self.names[:]
if self.usecols:
usecols = _evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
if self.usecols_dtype == "string" and not set(usecols).issubset(
self.orig_names
):
raise ValueError("Usecols do not match names.")
if len(self.names) > len(usecols):
self.names = [
n for i, n in enumerate(self.names) if (i in usecols or n in usecols)
]
if len(self.names) < len(usecols):
raise ValueError("Usecols do not match names.")
self._set_noconvert_columns()
self.orig_names = self.names
if not self._has_complex_date_col:
if self._reader.leading_cols == 0 and _is_index_col(self.index_col):
self._name_processed = True
(index_names, self.names, self.index_col) = _clean_index_names(
self.names, self.index_col
)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
|
https://github.com/pandas-dev/pandas/issues/18071
|
Traceback (most recent call last):
File "<ipython-input-4-6daea174e70a>", line 3, in <module>
encoding='utf-16')
File "/Users/Nick/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 705, in parser_f
return _read(filepath_or_buffer, kwds)
File "/Users/Nick/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 445, in _read
parser = TextFileReader(filepath_or_buffer, **kwds)
File "/Users/Nick/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 814, in __init__
self._make_engine(self.engine)
File "/Users/Nick/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 1045, in _make_engine
self._engine = CParserWrapper(self.f, **self.options)
File "/Users/Nick/anaconda/lib/python3.5/site-packages/pandas/io/parsers.py", line 1684, in __init__
self._reader = parsers.TextReader(src, **kwds)
File "pandas/_libs/parsers.pyx", line 391, in pandas._libs.parsers.TextReader.__cinit__
File "pandas/_libs/parsers.pyx", line 664, in pandas._libs.parsers.TextReader._setup_parser_source
File "/Users/Nick/anaconda/lib/python3.5/zipfile.py", line 1026, in __init__
self._RealGetContents()
File "/Users/Nick/anaconda/lib/python3.5/zipfile.py", line 1090, in _RealGetContents
endrec = _EndRecData(fp)
File "/Users/Nick/anaconda/lib/python3.5/zipfile.py", line 241, in _EndRecData
fpin.seek(0, 2)
AttributeError: 'UTF8Recoder' object has no attribute 'seek'
|
AttributeError
|
def first_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[0] if len(valid_indices) else None
|
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][0]
|
https://github.com/pandas-dev/pandas/issues/17400
|
Traceback (most recent call last):
File "<pyshell#10>", line 1, in <module>
df.first_valid_index()
File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 3859, in first_valid_index
return self.index[self.count(1) > 0][0]
File "C:\Python27\lib\site-packages\pandas\indexes\base.py", line 1423, in __getitem__
return getitem(key)
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def last_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[-1] if len(valid_indices) else None
|
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][-1]
|
https://github.com/pandas-dev/pandas/issues/17400
|
Traceback (most recent call last):
File "<pyshell#10>", line 1, in <module>
df.first_valid_index()
File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 3859, in first_valid_index
return self.index[self.count(1) > 0][0]
File "C:\Python27\lib\site-packages\pandas\indexes\base.py", line 1423, in __getitem__
return getitem(key)
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def first_valid_index(self):
if len(self) == 0:
return None
mask = isna(self._values)
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[i]
|
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
if len(self) == 0:
return None
mask = isna(self._values)
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[i]
|
https://github.com/pandas-dev/pandas/issues/17400
|
Traceback (most recent call last):
File "<pyshell#10>", line 1, in <module>
df.first_valid_index()
File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 3859, in first_valid_index
return self.index[self.count(1) > 0][0]
File "C:\Python27\lib\site-packages\pandas\indexes\base.py", line 1423, in __getitem__
return getitem(key)
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def last_valid_index(self):
if len(self) == 0:
return None
mask = isna(self._values[::-1])
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[len(self) - i - 1]
|
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
mask = isna(self._values[::-1])
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[len(self) - i - 1]
|
https://github.com/pandas-dev/pandas/issues/17400
|
Traceback (most recent call last):
File "<pyshell#10>", line 1, in <module>
df.first_valid_index()
File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 3859, in first_valid_index
return self.index[self.count(1) > 0][0]
File "C:\Python27\lib\site-packages\pandas\indexes\base.py", line 1423, in __getitem__
return getitem(key)
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def setup(self):
self.ts = Timestamp("2017-08-25 08:16:14")
self.ts_tz = Timestamp("2017-08-25 08:16:14", tz="US/Eastern")
dt = datetime.datetime(2016, 3, 27, 1)
self.tzinfo = pytz.timezone("CET").localize(dt, is_dst=False).tzinfo
self.ts2 = Timestamp(dt)
|
def setup(self):
self.ts = Timestamp("2017-08-25 08:16:14")
|
https://github.com/pandas-dev/pandas/issues/15683
|
pytz.timezone('CET').localize(pd.Timestamp(datetime(2016, 3, 27, 1)), is_dst=None)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/localhome/stefan/emsconda/envs/popeye/lib/python3.6/site-packages/pytz/tzinfo.py", line 327, in localize
raise NonExistentTimeError(dt)
pytz.exceptions.NonExistentTimeError: 2016-03-27 01:00:00
|
pytz.exceptions.NonExistentTimeError
|
def setitem(self, indexer, value, mgr=None):
"""set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
try:
values, _, value, _ = self._try_coerce_args(values, value)
# can keep its own dtype
if hasattr(value, "dtype") and is_dtype_equal(values.dtype, value.dtype):
dtype = self.dtype
else:
dtype = "infer"
except (TypeError, ValueError):
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, "dtype"):
dtype = value.dtype
find_dtype = True
elif is_scalar(value):
if isna(value):
# NaN promotion is handled in latter path
dtype = False
else:
dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
find_dtype = True
else:
dtype = "infer"
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value, mgr=mgr)
# value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(
isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer
)
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could
# be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (
len(arr_value.shape)
and arr_value.shape[0] == values.shape[0]
and np.prod(arr_value.shape) == np.prod(values.shape)
):
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
if _np_version_under1p9:
# Work around GH 6168 to support old numpy
indexer = getattr(indexer, "values", indexer)
values[indexer] = value
# coerce and try to infer the dtypes of the result
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values), fastpath=True)
return block
|
def setitem(self, indexer, value, mgr=None):
"""set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
try:
values, _, value, _ = self._try_coerce_args(values, value)
# can keep its own dtype
if hasattr(value, "dtype") and is_dtype_equal(values.dtype, value.dtype):
dtype = self.dtype
else:
dtype = "infer"
except (TypeError, ValueError):
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, "dtype"):
dtype = value.dtype
find_dtype = True
elif is_scalar(value):
if isna(value):
# NaN promotion is handled in latter path
dtype = False
else:
dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
find_dtype = True
else:
dtype = "infer"
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value, mgr=mgr)
# value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(
isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer
)
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could
# be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (
len(arr_value.shape)
and arr_value.shape[0] == values.shape[0]
and np.prod(arr_value.shape) == np.prod(values.shape)
):
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values), fastpath=True)
return block
|
https://github.com/pandas-dev/pandas/issues/17193
|
s = pd.Series([1,2])
s.iloc[pd.Series([0])] = 2
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pandas/core/indexing.py", line 198, in __setitem__
self._setitem_with_indexer(indexer, value)
File "pandas/core/indexing.py", line 619, in _setitem_with_indexer
value=value)
File "pandas/core/internals.py", line 3313, in setitem
return self.apply('setitem', **kwargs)
File "pandas/core/internals.py", line 3201, in apply
applied = getattr(b, f)(**kwargs)
File "pandas/core/internals.py", line 864, in setitem
values[indexer] = value
IndexError: unsupported iterator index
np.version.version
'1.7.0'
|
IndexError
|
def _maybe_coerce_merge_keys(self):
# we have valid mergees but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(self.left_join_keys, self.right_join_keys, self.join_names):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
lk_is_cat = is_categorical_dtype(lk)
rk_is_cat = is_categorical_dtype(rk)
# if either left or right is a categorical
# then the must match exactly in categories & ordered
if lk_is_cat and rk_is_cat:
if lk.is_dtype_equal(rk):
continue
elif lk_is_cat or rk_is_cat:
pass
elif is_dtype_equal(lk.dtype, rk.dtype):
continue
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8
# further if we are object, but we infer to
# the same, then proceed
if is_numeric_dtype(lk) and is_numeric_dtype(rk):
if lk.dtype.kind == rk.dtype.kind:
continue
# let's infer and see if we are ok
if lib.infer_dtype(lk) == lib.infer_dtype(rk):
continue
# Houston, we have a problem!
# let's coerce to object if the dtypes aren't
# categorical, otherwise coerce to the category
# dtype. If we coerced categories to object,
# then we would lose type information on some
# columns, and end up trying to merge
# incompatible dtypes. See GH 16900.
if name in self.left.columns:
typ = lk.categories.dtype if lk_is_cat else object
self.left = self.left.assign(**{name: self.left[name].astype(typ)})
if name in self.right.columns:
typ = rk.categories.dtype if rk_is_cat else object
self.right = self.right.assign(**{name: self.right[name].astype(typ)})
|
def _maybe_coerce_merge_keys(self):
# we have valid mergee's but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(self.left_join_keys, self.right_join_keys, self.join_names):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
# if either left or right is a categorical
# then the must match exactly in categories & ordered
if is_categorical_dtype(lk) and is_categorical_dtype(rk):
if lk.is_dtype_equal(rk):
continue
elif is_categorical_dtype(lk) or is_categorical_dtype(rk):
pass
elif is_dtype_equal(lk.dtype, rk.dtype):
continue
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8
# further if we are object, but we infer to
# the same, then proceed
if is_numeric_dtype(lk) and is_numeric_dtype(rk):
if lk.dtype.kind == rk.dtype.kind:
continue
# let's infer and see if we are ok
if lib.infer_dtype(lk) == lib.infer_dtype(rk):
continue
# Houston, we have a problem!
# let's coerce to object
if name in self.left.columns:
self.left = self.left.assign(**{name: self.left[name].astype(object)})
if name in self.right.columns:
self.right = self.right.assign(**{name: self.right[name].astype(object)})
|
https://github.com/pandas-dev/pandas/issues/16900
|
Traceback (most recent call last):
File "blah.py", line 20, in <module>
df, df2, how='outer', on=['date']
File "/Users/dave/code/pandas/pandas/core/reshape/merge.py", line 57, in merge
return op.get_result()
File "/Users/dave/code/pandas/pandas/core/reshape/merge.py", line 604, in get_result
self._maybe_add_join_keys(result, left_indexer, right_indexer)
File "/Users/dave/code/pandas/pandas/core/reshape/merge.py", line 714, in _maybe_add_join_keys
key_col = Index(lvals).where(~mask, rvals)
File "/Users/dave/code/pandas/pandas/core/indexes/base.py", line 613, in where
values = np.where(cond, self.values, other)
TypeError: invalid type promotion
|
TypeError
|
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError(
"argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
"argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
"argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase._ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes):
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an inital Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
|
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError(
"argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
"argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
"argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase._ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
new_target = self.take(indexer)
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an inital Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
|
https://github.com/pandas-dev/pandas/issues/16770
|
import pandas as pd
index = pd.CategoricalIndex([], [0])
pd.Series(index=index).reindex(index=[0])
Traceback (most recent call last):
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2733, in reindex
**kwargs)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/generic.py", line 2515, in reindex
fill_value, copy).__finalize__(self)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2679, in _reindex_axes
fill_value, limit, tolerance)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2687, in _reindex_index
tolerance=tolerance)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/category.py", line 422, in reindex
new_target = self.take(indexer)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/category.py", line 560, in take
na_value=-1)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 1806, in _assert_take_fillable
taken = values.take(indices)
IndexError: cannot do a non-empty take from an empty axes.
pd.DataFrame(index=index).reindex(index=[0])
Traceback (most recent call last):
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2733, in reindex
**kwargs)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/generic.py", line 2515, in reindex
fill_value, copy).__finalize__(self)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2679, in _reindex_axes
fill_value, limit, tolerance)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2687, in _reindex_index
tolerance=tolerance)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/category.py", line 422, in reindex
new_target = self.take(indexer)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/category.py", line 560, in take
na_value=-1)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 1806, in _assert_take_fillable
taken = values.take(indices)
IndexError: cannot do a non-empty take from an empty axes.
pd.DataFrame(columns=index).reindex(columns=[0])
Traceback (most recent call last):
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2733, in reindex
**kwargs)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/generic.py", line 2515, in reindex
fill_value, copy).__finalize__(self)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2679, in _reindex_axes
fill_value, limit, tolerance)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/frame.py", line 2687, in _reindex_index
tolerance=tolerance)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/category.py", line 422, in reindex
new_target = self.take(indexer)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/category.py", line 560, in take
na_value=-1)
File "~/miniconda3/envs/pantheon/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 1806, in _assert_take_fillable
taken = values.take(indices)
IndexError: cannot do a non-empty take from an empty axes.
|
IndexError
|
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(comps).__name__)
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(values).__name__)
)
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = lib.list_to_object_array(list(values))
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# GH11232
# work-around for numpy < 1.8 and comparisions on py3
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if (
(_np_version_under1p8 and compat.PY3)
or len(comps) > 1000000
and not is_object_dtype(comps)
):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype("int64", copy=False)
comps = comps.astype("int64", copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype("float64", copy=False)
comps = comps.astype("float64", copy=False)
checknull = isnull(values).any()
f = lambda x, y: htable.ismember_float64(x, y, checknull)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
|
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(comps).__name__)
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(values).__name__)
)
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = lib.list_to_object_array(list(values))
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# GH11232
# work-around for numpy < 1.8 and comparisions on py3
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype("int64", copy=False)
comps = comps.astype("int64", copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype("float64", copy=False)
comps = comps.astype("float64", copy=False)
checknull = isnull(values).any()
f = lambda x, y: htable.ismember_float64(x, y, checknull)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
|
https://github.com/pandas-dev/pandas/issues/16012
|
In [2]: i = pd.Series(list('abcdefghijk'*10**5))
In [3]: alt = [-1, 'AT', 'BE', 'BG', 'CY', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GR']*6
In [4]: res = i[:10**6].isin(alt)
In [5]: res = i[:10**6+1].isin(alt)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-f3e21d855671> in <module>()
----> 1 res = i[:10**6+1].isin(alt)
/home/pietro/nobackup/repo/pandas/pandas/core/series.py in isin(self, values)
2458
2459 """
-> 2460 result = algorithms.isin(_values_from_object(self), values)
2461 return self._constructor(result, index=self.index).__finalize__(self)
2462
/home/pietro/nobackup/repo/pandas/pandas/core/algorithms.py in isin(comps, values)
421 comps = comps.astype(object)
422
--> 423 return f(comps, values)
424
425
/home/pietro/nobackup/repo/pandas/pandas/core/algorithms.py in <lambda>(x, y)
401 f = lambda x, y: htable.ismember_object(x, values)
402 if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
--> 403 f = lambda x, y: np.in1d(x, y)
404 elif is_integer_dtype(comps):
405 try:
/usr/lib/python3/dist-packages/numpy/lib/arraysetops.py in in1d(ar1, ar2, assume_unique, invert)
399 if not assume_unique:
400 ar1, rev_idx = np.unique(ar1, return_inverse=True)
--> 401 ar2 = np.unique(ar2)
402
403 ar = np.concatenate((ar1, ar2))
/usr/lib/python3/dist-packages/numpy/lib/arraysetops.py in unique(ar, return_index, return_inverse, return_counts)
212 aux = ar[perm]
213 else:
--> 214 ar.sort()
215 aux = ar
216 flag = np.concatenate(([True], aux[1:] != aux[:-1]))
TypeError: unorderable types: str() > int()
|
TypeError
|
def _join_non_unique(self, other, how="left", return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers(
[self._values], [other._values], how=how, sort=True
)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self._values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
|
def _join_non_unique(self, other, how="left", return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers(
[self.values], [other._values], how=how, sort=True
)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = np.asarray(self.values.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, other._values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
|
https://github.com/pandas-dev/pandas/issues/16871
|
TypeError Traceback (most recent call last)
<ipython-input-2-c7c6bdf18c3f> in <module>()
3 index=perindex, columns=['pnum'])
4 df2 = pd.concat([perdf, perdf])
----> 5 perdf.merge(df2, left_index=True, right_index=True, how='outer')
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\frame.py in merge(self, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator)
4720 right_on=right_on, left_index=left_index,
4721 right_index=right_index, sort=sort, suffixes=suffixes,
-> 4722 copy=copy, indicator=indicator)
4723
4724 def round(self, decimals=0, *args, **kwargs):
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\reshape\merge.py in merge(left, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator)
52 right_index=right_index, sort=sort, suffixes=suffixes,
53 copy=copy, indicator=indicator)
---> 54 return op.get_result()
55
56
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\reshape\merge.py in get_result(self)
567 self.left, self.right)
568
--> 569 join_index, left_indexer, right_indexer = self._get_join_info()
570
571 ldata, rdata = self.left._data, self.right._data
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\reshape\merge.py in _get_join_info(self)
720 join_index, left_indexer, right_indexer = \
721 left_ax.join(right_ax, how=self.how, return_indexers=True,
--> 722 sort=self.sort)
723 elif self.right_index and self.how == 'left':
724 join_index, left_indexer, right_indexer = \
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\indexes\period.py in join(self, other, how, level, return_indexers, sort)
929 result = Int64Index.join(self, other, how=how, level=level,
930 return_indexers=return_indexers,
--> 931 sort=sort)
932
933 if return_indexers:
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\indexes\base.py in join(self, other, how, level, return_indexers, sort)
3044 else:
3045 return self._join_non_unique(other, how=how,
-> 3046 return_indexers=return_indexers)
3047 elif self.is_monotonic and other.is_monotonic:
3048 try:
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\indexes\base.py in _join_non_unique(self, other, how, return_indexers)
3125 left_idx, right_idx = _get_join_indexers([self.values],
3126 [other._values], how=how,
-> 3127 sort=True)
3128
3129 left_idx = _ensure_platform_int(left_idx)
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\reshape\merge.py in _get_join_indexers(left_keys, right_keys, sort, how, **kwargs)
980
981 # get left & right join labels and num. of levels at each location
--> 982 llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
983
984 # get flat i8 keys from label lists
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\reshape\merge.py in _factorize_keys(lk, rk, sort)
1410 if sort:
1411 uniques = rizer.uniques.to_array()
-> 1412 llab, rlab = _sort_labels(uniques, llab, rlab)
1413
1414 # NA group
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\reshape\merge.py in _sort_labels(uniques, left, right)
1436 labels = np.concatenate([left, right])
1437
-> 1438 _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
1439 new_labels = _ensure_int64(new_labels)
1440 new_left, new_right = new_labels[:l], new_labels[l:]
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\algorithms.py in safe_sort(values, labels, na_sentinel, assume_unique)
481 if compat.PY3 and lib.infer_dtype(values) == 'mixed-integer':
482 # unorderable in py3 if mixed str/int
--> 483 ordered = sort_mixed(values)
484 else:
485 try:
C:\Anaconda3\envs\py36\lib\site-packages\pandas\core\algorithms.py in sort_mixed(values)
474 str_pos = np.array([isinstance(x, string_types) for x in values],
475 dtype=bool)
--> 476 nums = np.sort(values[~str_pos])
477 strs = np.sort(values[str_pos])
478 return _ensure_object(np.concatenate([nums, strs]))
C:\Anaconda3\envs\py36\lib\site-packages\numpy\core\fromnumeric.py in sort(a, axis, kind, order)
820 else:
821 a = asanyarray(a).copy(order="K")
--> 822 a.sort(axis=axis, kind=kind, order=order)
823 return a
824
pandas/_libs/period.pyx in pandas._libs.period._Period.__richcmp__ (pandas\_libs\period.c:12067)()
TypeError: Cannot compare type 'Period' with type 'int'
|
TypeError
|
def _astype(
self,
dtype,
copy=False,
errors="raise",
values=None,
klass=None,
mgr=None,
raise_on_error=False,
**kwargs,
):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(list(errors_legal_values), errors)
)
raise ValueError(invalid_arg)
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if issubclass(dtype.type, (compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.values
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, dtype=dtype, klass=klass)
except:
if errors == "raise":
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
"cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])"
% (copy, self.dtype.name, self.itemsize, newb.dtype.name, newb.itemsize)
)
return newb
|
def _astype(
self, dtype, copy=False, errors="raise", values=None, klass=None, mgr=None, **kwargs
):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(list(errors_legal_values), errors)
)
raise ValueError(invalid_arg)
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if issubclass(dtype.type, (compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.values
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, dtype=dtype, klass=klass)
except:
if errors == "raise":
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
"cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])"
% (copy, self.dtype.name, self.itemsize, newb.dtype.name, newb.itemsize)
)
return newb
|
https://github.com/pandas-dev/pandas/issues/16524
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-32-6c903a3f0db3> in <module>()
----> 1 pd.Series(pd.Series(['a', 'b', 'c']), dtype='category')
/Users/ttylec/.virtualenvs/zettafox/lib/python3.6/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
241 if dtype is not None:
242 data = data.astype(dtype=dtype, raise_on_error=False,
--> 243 copy=copy)
244 elif copy:
245 data = data.copy()
/Users/ttylec/.virtualenvs/zettafox/lib/python3.6/site-packages/pandas/core/internals.py in astype(self, dtype, **kwargs)
3222
3223 def astype(self, dtype, **kwargs):
-> 3224 return self.apply('astype', dtype=dtype, **kwargs)
3225
3226 def convert(self, **kwargs):
/Users/ttylec/.virtualenvs/zettafox/lib/python3.6/site-packages/pandas/core/internals.py in apply(self, f, axes, filter, do_integrity_check, consolidate, **kwargs)
3089
3090 kwargs['mgr'] = self
-> 3091 applied = getattr(b, f)(**kwargs)
3092 result_blocks = _extend_blocks(applied, result_blocks)
3093
/Users/ttylec/.virtualenvs/zettafox/lib/python3.6/site-packages/pandas/core/internals.py in astype(self, dtype, copy, errors, values, **kwargs)
469 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
470 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 471 **kwargs)
472
473 def _astype(self, dtype, copy=False, errors='raise', values=None,
/Users/ttylec/.virtualenvs/zettafox/lib/python3.6/site-packages/pandas/core/internals.py in _astype(self, dtype, copy, errors, values, klass, mgr, **kwargs)
488 # this is only called for non-categoricals
489 if self.is_categorical_astype(dtype):
--> 490 return self.make_block(Categorical(self.values, **kwargs))
491
492 # astype processing
TypeError: __init__() got an unexpected keyword argument 'raise_on_error'
|
TypeError
|
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(arg))
|
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
return f(*args, **kwargs)
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(arg))
|
https://github.com/pandas-dev/pandas/issues/16405
|
pd.DataFrame({'g': [0, 0, 1], 'v': [1, 2, None]}).agg({'v': 'size'})
---------------------------------------------------------------------------
RecursionError Traceback (most recent call last)
<ipython-input-15-5fc5c38cdb5f> in <module>()
----> 1 pd.DataFrame({'g': [0, 0, 1], 'v': [1, 2, 3]}).agg({'v': 'size'})
/Volumes/Home/venvs/py3-data-science/lib/python3.5/site-packages/pandas/core/frame.py in aggregate(self, func, axis, *args, **kwargs)
4250 pass
4251 if result is None:
-> 4252 return self.apply(func, axis=axis, args=args, **kwargs)
4253 return result
4254
/Volumes/Home/venvs/py3-data-science/lib/python3.5/site-packages/pandas/core/frame.py in apply(self, func, axis, broadcast, raw, reduce, args, **kwds)
4322 # dispatch to agg
4323 if axis == 0 and isinstance(func, (list, dict)):
-> 4324 return self.aggregate(func, axis=axis, *args, **kwds)
4325
4326 if len(self.columns) == 0 and len(self.index) == 0:
... last 2 frames repeated, from the frame below ...
/Volumes/Home/venvs/py3-data-science/lib/python3.5/site-packages/pandas/core/frame.py in aggregate(self, func, axis, *args, **kwargs)
4250 pass
4251 if result is None:
-> 4252 return self.apply(func, axis=axis, args=args, **kwargs)
4253 return result
4254
RecursionError: maximum recursion depth exceeded
|
RecursionError
|
def drop(self, labels, axis=0, level=None, inplace=False, errors="raise"):
"""
Return new object with labels in requested axis removed.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
.. versionadded:: 0.16.1
Returns
-------
dropped : type of caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = _ensure_object(com._index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
else:
indexer = ~axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
if inplace:
self._update_inplace(result)
else:
return result
|
def drop(self, labels, axis=0, level=None, inplace=False, errors="raise"):
"""
Return new object with labels in requested axis removed.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
.. versionadded:: 0.16.1
Returns
-------
dropped : type of caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = com._index_labels_to_array(labels)
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
else:
indexer = ~axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
if inplace:
self._update_inplace(result)
else:
return result
|
https://github.com/pandas-dev/pandas/issues/16398
|
In [2]: pd.DataFrame([[1,2], [3,4], [5,6]], index=['a','a','b']).drop(['a'])
Out[2]:
0 1
b 5 6
In [3]: pd.DataFrame([[1,2], [3,4], [5,6]], index=['a','a','b']).drop(['b'])
Out[3]:
0 1
a 1 2
a 3 4
In [4]: pd.DataFrame([[1,2], [3,4], [5,6]], index=['a','a','b']).drop([])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-ae217de2434b> in <module>()
----> 1 pd.DataFrame([[1,2], [3,4], [5,6]], index=['a','a','b']).drop([])
/home/pietro/.local/lib/python3.5/site-packages/pandas/core/generic.py in drop(self, labels, axis, level, inplace, errors)
2063 indexer = ~axis.get_level_values(level).isin(labels)
2064 else:
-> 2065 indexer = ~axis.isin(labels)
2066
2067 slicer = [slice(None)] * self.ndim
/home/pietro/.local/lib/python3.5/site-packages/pandas/core/indexes/base.py in isin(self, values, level)
2766 if level is not None:
2767 self._validate_index_level(level)
-> 2768 return algos.isin(np.array(self), values)
2769
2770 def _can_reindex(self, indexer):
/home/pietro/.local/lib/python3.5/site-packages/pandas/core/algorithms.py in isin(comps, values)
419 comps = comps.astype(object)
420
--> 421 return f(comps, values)
422
423
/home/pietro/.local/lib/python3.5/site-packages/pandas/core/algorithms.py in <lambda>(x, y)
397 # work-around for numpy < 1.8 and comparisions on py3
398 # faster for larger cases to use np.in1d
--> 399 f = lambda x, y: htable.ismember_object(x, values)
400 if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
401 f = lambda x, y: np.in1d(x, y)
/home/pietro/nobackup/repo/pandas/pandas/_libs/hashtable_func_helper.pxi in pandas._libs.hashtable.ismember_object (pandas/_libs/hashtable.c:29677)()
ValueError: Buffer dtype mismatch, expected 'Python object' but got 'double'
|
ValueError
|
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [
{
"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": True,
"class": " ".join([BLANK_CLASS]),
}
] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS, "level%s" % r]
name = BLANK_VALUE if name is None else name
row_es.append(
{
"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": True,
}
)
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level%s" % r, "col%s" % c]
cs.extend(cell_context.get("col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if self.data.index.names and not all(x is None for x in self.data.index.names):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS, "level%s" % c]
name = "" if name is None else name
index_header_row.append(
{"type": "th", "value": name, "class": " ".join(cs)}
)
index_header_row.extend(
[{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}]
* len(clabels[0])
)
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
es = {
"type": "th",
"is_visible": _is_visible(r, c, idx_lengths),
"value": value,
"display_value": value,
"class": " ".join([ROW_HEADING_CLASS, "level%s" % c, "row%s" % r]),
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [format_attr({"key": "rowspan", "value": rowspan})]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_es.append(
{
"type": "td",
"value": value,
"class": " ".join(cs),
"id": "_".join(cs[1:]),
"display_value": formatter(value),
}
)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(["", ""])
cellstyle.append({"props": props, "selector": "row%s_col%s" % (r, c)})
body.append(row_es)
return dict(
head=head,
cellstyle=cellstyle,
body=body,
uuid=uuid,
precision=precision,
table_styles=table_styles,
caption=caption,
table_attributes=self.table_attributes,
)
|
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [
{
"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": True,
"class": " ".join([BLANK_CLASS]),
}
] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS, "level%s" % r]
name = BLANK_VALUE if name is None else name
row_es.append(
{
"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": True,
}
)
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level%s" % r, "col%s" % c]
cs.extend(cell_context.get("col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [format_attr({"key": "colspan", "value": colspan})]
row_es.append(es)
head.append(row_es)
if self.data.index.names and not all(x is None for x in self.data.index.names):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS, "level%s" % c]
name = "" if name is None else name
index_header_row.append(
{"type": "th", "value": name, "class": " ".join(cs)}
)
index_header_row.extend(
[{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}]
* len(clabels[0])
)
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
es = {
"type": "th",
"is_visible": _is_visible(r, c, idx_lengths),
"value": value,
"display_value": value,
"class": " ".join([ROW_HEADING_CLASS, "level%s" % c, "row%s" % r]),
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [format_attr({"key": "rowspan", "value": rowspan})]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_es.append(
{
"type": "td",
"value": value,
"class": " ".join(cs),
"id": "_".join(cs[1:]),
"display_value": formatter(value),
}
)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(["", ""])
cellstyle.append({"props": props, "selector": "row%s_col%s" % (r, c)})
body.append(row_es)
return dict(
head=head,
cellstyle=cellstyle,
body=body,
uuid=uuid,
precision=precision,
table_styles=table_styles,
caption=caption,
table_attributes=self.table_attributes,
)
|
https://github.com/pandas-dev/pandas/issues/15953
|
In [6]: pd.DataFrame().style.render()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-6-345e1b37074e> in <module>()
----> 1 pd.DataFrame().style.render()
/Users/taugspurger/Envs/dask-dev/lib/python3.6/site-packages/pandas/formats/style.py in render(self)
416 """
417 self._compute()
--> 418 d = self._translate()
419 # filter out empty styles, every cell will have a class
420 # but the list of props may just be [['', '']].
/Users/taugspurger/Envs/dask-dev/lib/python3.6/site-packages/pandas/formats/style.py in _translate(self)
252 "is_visible": True})
253
--> 254 for c in range(len(clabels[0])):
255 cs = [COL_HEADING_CLASS, "level%s" % r, "col%s" % c]
256 cs.extend(cell_context.get(
IndexError: list index out of range
|
IndexError
|
def to_clipboard(self, excel=None, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
Parameters
----------
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows: none
- OS X: none
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
|
def to_clipboard(self, excel=None, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
Parameters
----------
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows: none
- OS X: none
"""
from pandas.io.clipboard import clipboard
clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs)
|
https://github.com/pandas-dev/pandas/issues/16288
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/smiel/.venvs/foo/local/lib/python2.7/site-packages/pandas/__init__.py", line 58, in <module>
from pandas.io.api import *
File "/home/smiel/.venvs/foo/local/lib/python2.7/site-packages/pandas/io/api.py", line 8, in <module>
from pandas.io.clipboard.clipboard import read_clipboard
File "/home/smiel/.venvs/foo/local/lib/python2.7/site-packages/pandas/io/clipboard/__init__.py", line 103, in <module>
copy, paste = determine_clipboard()
File "/home/smiel/.venvs/foo/local/lib/python2.7/site-packages/pandas/io/clipboard/__init__.py", line 76, in determine_clipboard
return init_qt_clipboard()
File "/home/smiel/.venvs/foo/local/lib/python2.7/site-packages/pandas/io/clipboard/clipboards.py", line 49, in init_qt_clipboard
from PyQt4.QtGui import QApplication
ImportError: No module named sip
|
ImportError
|
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=""):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
resetted : DataFrame
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [
n if n is not None else ("level_%d" % i)
for (i, n) in enumerate(self.index.names)
]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name)
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
if level is None or i in level:
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
|
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=""):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
resetted : DataFrame
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if not drop:
names = self.index.names
zipped = lzip(self.index.levels, self.index.labels)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(zipped))):
col_name = names[i]
if col_name is None:
col_name = "level_%d" % i
if multi_col:
if col_fill is None:
col_name = tuple([col_name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = col_name
col_name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
if level is None or i in level:
new_obj.insert(0, col_name, level_values)
elif not drop:
name = self.index.name
if name is None or name == "index":
name = "index" if "index" not in self else "level_0"
if isinstance(self.columns, MultiIndex):
if col_fill is None:
name = tuple([name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = name
name = tuple(name_lst)
values = _maybe_casted_values(self.index)
new_obj.insert(0, name, values)
new_obj.index = new_index
if not inplace:
return new_obj
|
https://github.com/pandas-dev/pandas/issues/16164
|
In [2]: df = pd.DataFrame([[1, 2]], columns=pd.MultiIndex.from_product([['A'], ['a', 'b']]))
In [3]: reind = df.set_index([('A', 'a')])
In [4]: reind.reset_index()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/pietro/nobackup/repo/pandas/pandas/util/hashing.py in hash_array(vals, encoding, hash_key, categorize)
265 try:
--> 266 vals = _hash.hash_object_array(vals, hash_key, encoding)
267 except TypeError:
as/pandas/util/hashing.pyx in pandas.util.libhashing.hash_object_array (pandas/util/hashing.c:2372)()
TypeError: ('A', 'a') of type <class 'tuple'> is not a valid type for hashing, must be string or null
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-4-985862d3b27d> in <module>()
----> 1 reind.reset_index()
/home/pietro/nobackup/repo/pandas/pandas/core/frame.py in reset_index(self, level, drop, inplace, col_level, col_fill)
3057 name = tuple(name_lst)
3058 values = _maybe_casted_values(self.index)
-> 3059 new_obj.insert(0, name, values)
3060
3061 new_obj.index = new_index
/home/pietro/nobackup/repo/pandas/pandas/core/frame.py in insert(self, loc, column, value, allow_duplicates)
2518 value = self._sanitize_column(column, value, broadcast=False)
2519 self._data.insert(loc, column, value,
-> 2520 allow_duplicates=allow_duplicates)
2521
2522 def assign(self, **kwargs):
/home/pietro/nobackup/repo/pandas/pandas/core/internals.py in insert(self, loc, item, value, allow_duplicates)
3806
3807 """
-> 3808 if not allow_duplicates and item in self.items:
3809 # Should this be a different kind of error??
3810 raise ValueError('cannot insert {}, already exists'.format(item))
/home/pietro/nobackup/repo/pandas/pandas/core/indexes/multi.py in __contains__(self, key)
1326 hash(key)
1327 try:
-> 1328 self.get_loc(key)
1329 return True
1330 except LookupError:
/home/pietro/nobackup/repo/pandas/pandas/core/indexes/multi.py in get_loc(self, key, method)
1984 key = _values_from_object(key)
1985 key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
-> 1986 return self._engine.get_loc(key)
1987
1988 # -- partial selection or non-unique index
/home/pietro/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.MultiIndexEngine.get_loc (pandas/_libs/index.c:13171)()
/home/pietro/nobackup/repo/pandas/pandas/_libs/index.pyx in pandas._libs.index.MultiIndexEngine.get_loc (pandas/_libs/index.c:13018)()
/home/pietro/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.MultiIndexHashTable.get_item (pandas/_libs/hashtable.c:23625)()
/home/pietro/nobackup/repo/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.MultiIndexHashTable.get_item (pandas/_libs/hashtable.c:23374)()
/home/pietro/nobackup/repo/pandas/pandas/core/indexes/multi.py in _hashed_indexing_key(self, key)
755 key = tuple([f(k, stringify)
756 for k, stringify in zip(key, self._have_mixed_levels)])
--> 757 return hash_tuples(key)
758
759 @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
/home/pietro/nobackup/repo/pandas/pandas/util/hashing.py in hash_tuples(vals, encoding, hash_key)
159 hash_key=hash_key)
160 for cat in vals)
--> 161 h = _combine_hash_arrays(hashes, len(vals))
162 if is_tuple:
163 h = h[0]
/home/pietro/nobackup/repo/pandas/pandas/util/hashing.py in _combine_hash_arrays(arrays, num_items)
31 """
32 try:
---> 33 first = next(arrays)
34 except StopIteration:
35 return np.array([], dtype=np.uint64)
/home/pietro/nobackup/repo/pandas/pandas/util/hashing.py in <genexpr>(.0)
158 encoding=encoding,
159 hash_key=hash_key)
--> 160 for cat in vals)
161 h = _combine_hash_arrays(hashes, len(vals))
162 if is_tuple:
/home/pietro/nobackup/repo/pandas/pandas/util/hashing.py in _hash_categorical(c, encoding, hash_key)
182 """
183 hashed = hash_array(c.categories.values, encoding, hash_key,
--> 184 categorize=False)
185
186 # we have uint64, as we don't directly support missing values
/home/pietro/nobackup/repo/pandas/pandas/util/hashing.py in hash_array(vals, encoding, hash_key, categorize)
267 except TypeError:
268 # we have mixed types
--> 269 vals = _hash.hash_object_array(vals.astype(str).astype(object),
270 hash_key, encoding)
271
ValueError: setting an array element with a sequence
|
TypeError
|
def _ipython_display_(self):
try:
from IPython.display import display
except ImportError:
return None
# Series doesn't define _repr_html_ or _repr_latex_
latex = self._repr_latex_() if hasattr(self, "_repr_latex_") else None
html = self._repr_html_() if hasattr(self, "_repr_html_") else None
try:
table_schema = self._repr_table_schema_()
except Exception as e:
warnings.warn(
"Cannot create table schema representation. {}".format(e),
UnserializableWarning,
)
table_schema = None
# We need the inital newline since we aren't going through the
# usual __repr__. See
# https://github.com/pandas-dev/pandas/pull/14904#issuecomment-277829277
text = "\n" + repr(self)
reprs = {
"text/plain": text,
"text/html": html,
"text/latex": latex,
"application/vnd.dataresource+json": table_schema,
}
reprs = {k: v for k, v in reprs.items() if v}
display(reprs, raw=True)
|
def _ipython_display_(self):
try:
from IPython.display import display
except ImportError:
return None
# Series doesn't define _repr_html_ or _repr_latex_
latex = self._repr_latex_() if hasattr(self, "_repr_latex_") else None
html = self._repr_html_() if hasattr(self, "_repr_html_") else None
table_schema = self._repr_table_schema_()
# We need the inital newline since we aren't going through the
# usual __repr__. See
# https://github.com/pandas-dev/pandas/pull/14904#issuecomment-277829277
text = "\n" + repr(self)
reprs = {
"text/plain": text,
"text/html": html,
"text/latex": latex,
"application/vnd.dataresource+json": table_schema,
}
reprs = {k: v for k, v in reprs.items() if v}
display(reprs, raw=True)
|
https://github.com/pandas-dev/pandas/issues/15996
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in _convert_can_do_setop(self, other)
2513 try:
-> 2514 other = MultiIndex.from_tuples(other)
2515 except:
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in from_tuples(cls, tuples, sortorder, names)
1128 elif isinstance(tuples, list):
-> 1129 arrays = list(lib.to_object_array_tuples(tuples).T)
1130 else:
TypeError: Argument 'rows' has incorrect type (expected list, got FrozenList)
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
/usr/local/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
880 method = get_real_method(obj, self.print_method)
881 if method is not None:
--> 882 method()
883 return True
884
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/core/generic.py in _ipython_display_(self)
138 latex = self._repr_latex_() if hasattr(self, '_repr_latex_') else None
139 html = self._repr_html_() if hasattr(self, '_repr_html_') else None
--> 140 table_schema = self._repr_table_schema_()
141 # We need the inital newline since we aren't going through the
142 # usual __repr__. See
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/core/generic.py in _repr_table_schema_(self)
156 if config.get_option("display.html.table_schema"):
157 data = self.head(config.get_option('display.max_rows'))
--> 158 payload = json.loads(data.to_json(orient='table'),
159 object_pairs_hook=collections.OrderedDict)
160 return payload
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/core/generic.py in to_json(self, path_or_buf, orient, date_format, double_precision, force_ascii, date_unit, default_handler, lines)
1232 force_ascii=force_ascii, date_unit=date_unit,
1233 default_handler=default_handler,
-> 1234 lines=lines)
1235
1236 def to_hdf(self, path_or_buf, key, **kwargs):
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/io/json/json.py in to_json(path_or_buf, obj, orient, date_format, double_precision, force_ascii, date_unit, default_handler, lines)
44 obj, orient=orient, date_format=date_format,
45 double_precision=double_precision, ensure_ascii=force_ascii,
---> 46 date_unit=date_unit, default_handler=default_handler).write()
47
48 if lines:
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/io/json/json.py in __init__(self, obj, orient, date_format, double_precision, ensure_ascii, date_unit, default_handler)
141 # TODO: Do this timedelta properly in objToJSON.c See GH #15137
142 if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
--> 143 len(obj.columns & obj.index.names)):
144 msg = "Overlapping names between the index and columns"
145 raise ValueError(msg)
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/base.py in __and__(self, other)
2046
2047 def __and__(self, other):
-> 2048 return self.intersection(other)
2049
2050 def __or__(self, other):
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in intersection(self, other)
2447 """
2448 self._assert_can_do_setop(other)
-> 2449 other, result_names = self._convert_can_do_setop(other)
2450
2451 if self.equals(other):
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in _convert_can_do_setop(self, other)
2514 other = MultiIndex.from_tuples(other)
2515 except:
-> 2516 raise TypeError(msg)
2517 else:
2518 result_names = self.names if self.names == other.names else None
TypeError: other must be a MultiIndex or a list of tuples
|
TypeError
|
def __init__(
self,
obj,
orient,
date_format,
double_precision,
ensure_ascii,
date_unit,
default_handler=None,
):
"""
Adds a `schema` attribut with the Table Schema, resets
the index (can't do in caller, because the schema inference needs
to know what the index is, forces orient to records, and forces
date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
obj,
orient,
date_format,
double_precision,
ensure_ascii,
date_unit,
default_handler=default_handler,
)
if date_format != "iso":
msg = (
"Trying to write with `orient='table'` and "
"`date_format='%s'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`" % date_format
)
raise ValueError(msg)
self.schema = build_table_schema(obj)
# NotImplementd on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
raise NotImplementedError("orient='table' is not supported for MultiIndex")
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if (
(obj.ndim == 1)
and (obj.name in set(obj.index.names))
or len(obj.columns & obj.index.names)
):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=["timedelta"]).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
# Convert PeriodIndex to datetimes before serialzing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
self.obj = obj.reset_index()
self.date_format = "iso"
self.orient = "records"
|
def __init__(
self,
obj,
orient,
date_format,
double_precision,
ensure_ascii,
date_unit,
default_handler=None,
):
"""
Adds a `schema` attribut with the Table Schema, resets
the index (can't do in caller, because the schema inference needs
to know what the index is, forces orient to records, and forces
date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
obj,
orient,
date_format,
double_precision,
ensure_ascii,
date_unit,
default_handler=default_handler,
)
if date_format != "iso":
msg = (
"Trying to write with `orient='table'` and "
"`date_format='%s'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`" % date_format
)
raise ValueError(msg)
self.schema = build_table_schema(obj)
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if (
(obj.ndim == 1)
and (obj.name in set(obj.index.names))
or len(obj.columns & obj.index.names)
):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=["timedelta"]).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
# Convert PeriodIndex to datetimes before serialzing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
self.obj = obj.reset_index()
self.date_format = "iso"
self.orient = "records"
|
https://github.com/pandas-dev/pandas/issues/15996
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in _convert_can_do_setop(self, other)
2513 try:
-> 2514 other = MultiIndex.from_tuples(other)
2515 except:
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in from_tuples(cls, tuples, sortorder, names)
1128 elif isinstance(tuples, list):
-> 1129 arrays = list(lib.to_object_array_tuples(tuples).T)
1130 else:
TypeError: Argument 'rows' has incorrect type (expected list, got FrozenList)
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
/usr/local/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
880 method = get_real_method(obj, self.print_method)
881 if method is not None:
--> 882 method()
883 return True
884
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/core/generic.py in _ipython_display_(self)
138 latex = self._repr_latex_() if hasattr(self, '_repr_latex_') else None
139 html = self._repr_html_() if hasattr(self, '_repr_html_') else None
--> 140 table_schema = self._repr_table_schema_()
141 # We need the inital newline since we aren't going through the
142 # usual __repr__. See
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/core/generic.py in _repr_table_schema_(self)
156 if config.get_option("display.html.table_schema"):
157 data = self.head(config.get_option('display.max_rows'))
--> 158 payload = json.loads(data.to_json(orient='table'),
159 object_pairs_hook=collections.OrderedDict)
160 return payload
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/core/generic.py in to_json(self, path_or_buf, orient, date_format, double_precision, force_ascii, date_unit, default_handler, lines)
1232 force_ascii=force_ascii, date_unit=date_unit,
1233 default_handler=default_handler,
-> 1234 lines=lines)
1235
1236 def to_hdf(self, path_or_buf, key, **kwargs):
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/io/json/json.py in to_json(path_or_buf, obj, orient, date_format, double_precision, force_ascii, date_unit, default_handler, lines)
44 obj, orient=orient, date_format=date_format,
45 double_precision=double_precision, ensure_ascii=force_ascii,
---> 46 date_unit=date_unit, default_handler=default_handler).write()
47
48 if lines:
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/io/json/json.py in __init__(self, obj, orient, date_format, double_precision, ensure_ascii, date_unit, default_handler)
141 # TODO: Do this timedelta properly in objToJSON.c See GH #15137
142 if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
--> 143 len(obj.columns & obj.index.names)):
144 msg = "Overlapping names between the index and columns"
145 raise ValueError(msg)
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/base.py in __and__(self, other)
2046
2047 def __and__(self, other):
-> 2048 return self.intersection(other)
2049
2050 def __or__(self, other):
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in intersection(self, other)
2447 """
2448 self._assert_can_do_setop(other)
-> 2449 other, result_names = self._convert_can_do_setop(other)
2450
2451 if self.equals(other):
/Users/kylek/code/src/github.com/pandas-dev/pandas/pandas/indexes/multi.py in _convert_can_do_setop(self, other)
2514 other = MultiIndex.from_tuples(other)
2515 except:
-> 2516 raise TypeError(msg)
2517 else:
2518 result_names = self.names if self.names == other.names else None
TypeError: other must be a MultiIndex or a list of tuples
|
TypeError
|
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError("cannot insert {}, already exists".format(item))
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
|
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError("cannot insert %s, already exists" % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
|
https://github.com/pandas-dev/pandas/issues/16120
|
In [2]: df = pd.DataFrame(index=range(2), columns=pd.MultiIndex.from_tuples([['A', 'a'], ['B', '']]))
In [3]: df.index.name = 'B'
In [4]: df.reset_index()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-6983677cc901> in <module>()
----> 1 df.reset_index()
/home/pietro/nobackup/repo/pandas/pandas/core/frame.py in reset_index(self, level, drop, inplace, col_level, col_fill)
3056 name = tuple(name_lst)
3057 values = _maybe_casted_values(self.index)
-> 3058 new_obj.insert(0, name, values)
3059
3060 new_obj.index = new_index
/home/pietro/nobackup/repo/pandas/pandas/core/frame.py in insert(self, loc, column, value, allow_duplicates)
2517 value = self._sanitize_column(column, value, broadcast=False)
2518 self._data.insert(loc, column, value,
-> 2519 allow_duplicates=allow_duplicates)
2520
2521 def assign(self, **kwargs):
/home/pietro/nobackup/repo/pandas/pandas/core/internals.py in insert(self, loc, item, value, allow_duplicates)
3808 if not allow_duplicates and item in self.items:
3809 # Should this be a different kind of error??
-> 3810 raise ValueError('cannot insert %s, already exists' % item)
3811
3812 if not isinstance(loc, int):
TypeError: not all arguments converted during string formatting
|
TypeError
|
def _get_labels_for_sorting(self):
"""
we categorizing our labels by using the
available catgories (all, not just observed)
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
a valid valid
"""
from pandas.core.categorical import Categorical
def cats(label):
return np.arange(
np.array(label).max() + 1 if len(label) else 0, dtype=label.dtype
)
return [
Categorical.from_codes(label, cats(label), ordered=True)
for label in self.labels
]
|
def _get_labels_for_sorting(self):
"""
we categorizing our labels by using the
available catgories (all, not just observed)
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
a valid valid
"""
from pandas.core.categorical import Categorical
return [
Categorical.from_codes(
label, np.arange(np.array(label).max() + 1, dtype=label.dtype), ordered=True
)
for label in self.labels
]
|
https://github.com/pandas-dev/pandas/issues/16064
|
import pandas as pd
df = pd.DataFrame(index=[0], columns=['a', 'b', 'c'])
df.groupby('a').nth(10)
Empty DataFrame
Columns: [b, c]
Index: []
df.groupby(['a', 'b']).nth(10)
Traceback (most recent call last):
File "<ipython-input-3-ae8299c3984e>", line 1, in <module>
df.groupby(['a', 'b']).nth(10)
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/groupby.py", line 1390, in nth
return out.sort_index() if self.sort else out
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/frame.py", line 3344, in sort_index
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/indexes/multi.py", line 1652, in _get_labels_for_sorting
for label in self.labels]
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/indexes/multi.py", line 1652, in <listcomp>
for label in self.labels]
File "~/anaconda3/lib/python3.5/site-packages/numpy/core/_methods.py", line 26, in _amax
return umr_maximum(a, axis, None, out, keepdims)
ValueError: zero-size array to reduction operation maximum which has no identity
|
ValueError
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
by=None,
):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, "inplace")
# 10726
if by is not None:
warnings.warn(
"by argument to sort_index is deprecated, pls use .sort_values(by=...)",
FutureWarning,
stacklevel=2,
)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(
labels.labels, orders=ascending, na_position=na_position
)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if (ascending and labels.is_monotonic_increasing) or (
not ascending and labels.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
labels, kind=kind, ascending=ascending, na_position=na_position
)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer, axis=baxis, convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
by=None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
# 10726
if by is not None:
warnings.warn(
"by argument to sort_index is deprecated, pls use .sort_values(by=...)",
FutureWarning,
stacklevel=2,
)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# sort by the index
if level is not None:
new_axis, indexer = labels.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
if not labels.is_lexsorted():
labels = MultiIndex.from_tuples(labels.values)
indexer = lexsort_indexer(
labels.labels, orders=ascending, na_position=na_position
)
else:
from pandas.core.sorting import nargsort
# GH11080 - Check monotonic-ness before sort an index
# if monotonic (already sorted), return None or copy() according
# to 'inplace'
if (ascending and labels.is_monotonic_increasing) or (
not ascending and labels.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
labels, kind=kind, ascending=ascending, na_position=na_position
)
new_data = self._data.take(
indexer, axis=self._get_block_manager_axis(axis), convert=False, verify=False
)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/13431
|
df2.sort_index().loc['b':'d']
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
# a lot of lines omitted here.
/Users/yimengzh/miniconda/envs/cafferc3/lib/python2.7/site-packages/pandas/indexes/multi.py in _partial_tup_index(self, tup, side)
1488 raise KeyError('Key length (%d) was greater than MultiIndex'
1489 ' lexsort depth (%d)' %
-> 1490 (len(tup), self.lexsort_depth))
1491
1492 n = len(tup)
KeyError: 'Key length (1) was greater than MultiIndex lexsort depth (0)'
|
KeyError
|
def _aggregate_series_fast(self, obj, func):
func = self._is_builtin_func(func)
if obj.index._has_complex_internals:
raise TypeError("Incompatible index for Cython grouper")
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False).to_dense()
group_index = algorithms.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups, dummy)
result, counts = grouper.get_result()
return result, counts
|
def _aggregate_series_fast(self, obj, func):
func = self._is_builtin_func(func)
if obj.index._has_complex_internals:
raise TypeError("Incompatible index for Cython grouper")
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False)
group_index = algorithms.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups, dummy)
result, counts = grouper.get_result()
return result, counts
|
https://github.com/pandas-dev/pandas/issues/13431
|
df2.sort_index().loc['b':'d']
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
# a lot of lines omitted here.
/Users/yimengzh/miniconda/envs/cafferc3/lib/python2.7/site-packages/pandas/indexes/multi.py in _partial_tup_index(self, tup, side)
1488 raise KeyError('Key length (%d) was greater than MultiIndex'
1489 ' lexsort depth (%d)' %
-> 1490 (len(tup), self.lexsort_depth))
1491
1492 n = len(tup)
KeyError: 'Key length (1) was greater than MultiIndex lexsort depth (0)'
|
KeyError
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
):
# TODO: this can be combined with DataFrame.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
index = self.index
if level:
new_index, indexer = index.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
labels = index._sort_levels_monotonic()
indexer = lexsort_indexer(labels.labels, orders=ascending)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if (ascending and index.is_monotonic_increasing) or (
not ascending and index.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
index, kind=kind, ascending=ascending, na_position=na_position
)
indexer = _ensure_platform_int(indexer)
new_index = index.take(indexer)
new_values = self._values.take(indexer)
result = self._constructor(new_values, index=new_index)
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
index = self.index
if level is not None:
new_index, indexer = index.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(index.labels, orders=ascending)
else:
from pandas.core.sorting import nargsort
indexer = nargsort(
index, kind=kind, ascending=ascending, na_position=na_position
)
indexer = _ensure_platform_int(indexer)
new_index = index.take(indexer)
new_values = self._values.take(indexer)
result = self._constructor(new_values, index=new_index)
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/13431
|
df2.sort_index().loc['b':'d']
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
# a lot of lines omitted here.
/Users/yimengzh/miniconda/envs/cafferc3/lib/python2.7/site-packages/pandas/indexes/multi.py in _partial_tup_index(self, tup, side)
1488 raise KeyError('Key length (%d) was greater than MultiIndex'
1489 ' lexsort depth (%d)' %
-> 1490 (len(tup), self.lexsort_depth))
1491
1492 n = len(tup)
KeyError: 'Key length (1) was greater than MultiIndex lexsort depth (0)'
|
KeyError
|
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import _factorize_from_iterables
from pandas.tools.util import cartesian_product
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
return MultiIndex(levels, labels, sortorder=sortorder, names=names)
|
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import _factorize_from_iterables
from pandas.tools.util import cartesian_product
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
return MultiIndex(levels=levels, labels=labels, sortorder=sortorder, names=names)
|
https://github.com/pandas-dev/pandas/issues/13431
|
df2.sort_index().loc['b':'d']
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
# a lot of lines omitted here.
/Users/yimengzh/miniconda/envs/cafferc3/lib/python2.7/site-packages/pandas/indexes/multi.py in _partial_tup_index(self, tup, side)
1488 raise KeyError('Key length (%d) was greater than MultiIndex'
1489 ' lexsort depth (%d)' %
-> 1490 (len(tup), self.lexsort_depth))
1491
1492 n = len(tup)
KeyError: 'Key length (1) was greater than MultiIndex lexsort depth (0)'
|
KeyError
|
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
"Key length (%d) was greater than MultiIndex"
" lexsort depth (%d)" % (len(tup), self.lexsort_depth)
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError("Level type mismatch: %s" % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
|
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise KeyError(
"Key length (%d) was greater than MultiIndex"
" lexsort depth (%d)" % (len(tup), self.lexsort_depth)
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError("Level type mismatch: %s" % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
|
https://github.com/pandas-dev/pandas/issues/13431
|
df2.sort_index().loc['b':'d']
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
# a lot of lines omitted here.
/Users/yimengzh/miniconda/envs/cafferc3/lib/python2.7/site-packages/pandas/indexes/multi.py in _partial_tup_index(self, tup, side)
1488 raise KeyError('Key length (%d) was greater than MultiIndex'
1489 ' lexsort depth (%d)' %
-> 1490 (len(tup), self.lexsort_depth))
1491
1492 n = len(tup)
KeyError: 'Key length (1) was greater than MultiIndex lexsort depth (0)'
|
KeyError
|
def _next_line(self):
if isinstance(self.data, list):
while self.skipfunc(self.pos):
self.pos += 1
while True:
try:
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
if not self.skip_blank_lines and (
self._empty(self.data[self.pos - 1]) or line
):
break
elif self.skip_blank_lines:
ret = self._check_empty([line])
if ret:
line = ret[0]
break
except IndexError:
raise StopIteration
else:
while self.skipfunc(self.pos):
self.pos += 1
next(self.data)
while True:
orig_line = self._next_iter_line()
line = self._check_comments([orig_line])[0]
self.pos += 1
if not self.skip_blank_lines and (self._empty(orig_line) or line):
break
elif self.skip_blank_lines:
ret = self._check_empty([line])
if ret:
line = ret[0]
break
# This was the first line of the file,
# which could contain the BOM at the
# beginning of it.
if self.pos == 1:
line = self._check_for_bom(line)
self.line_pos += 1
self.buf.append(line)
return line
|
def _next_line(self):
if isinstance(self.data, list):
while self.skipfunc(self.pos):
self.pos += 1
while True:
try:
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
if not self.skip_blank_lines and (
self._empty(self.data[self.pos - 1]) or line
):
break
elif self.skip_blank_lines:
ret = self._check_empty([line])
if ret:
line = ret[0]
break
except IndexError:
raise StopIteration
else:
while self.skipfunc(self.pos):
self.pos += 1
next(self.data)
while True:
try:
orig_line = next(self.data)
except csv.Error as e:
msg = str(e)
if "NULL byte" in str(e):
msg = (
"NULL byte detected. This byte "
"cannot be processed in Python's "
"native csv library at the moment, "
"so please pass in engine='c' instead"
)
if self.skipfooter > 0:
reason = (
"Error could possibly be due to "
"parsing errors in the skipped footer rows "
"(the skipfooter keyword is only applied "
"after Python's csv library has parsed "
"all rows)."
)
msg += ". " + reason
raise csv.Error(msg)
line = self._check_comments([orig_line])[0]
self.pos += 1
if not self.skip_blank_lines and (self._empty(orig_line) or line):
break
elif self.skip_blank_lines:
ret = self._check_empty([line])
if ret:
line = ret[0]
break
# This was the first line of the file,
# which could contain the BOM at the
# beginning of it.
if self.pos == 1:
line = self._check_for_bom(line)
self.line_pos += 1
self.buf.append(line)
return line
|
https://github.com/pandas-dev/pandas/issues/15910
|
from pandas.compat import StringIO
pd.read_csv(StringIO('''Date,Value
1/1/2012,100.00
1/2/2012,102.00
"a quoted junk row"morejunk'''), skipfooter=1)
Out[21]
ERROR:root:An unexpected error occurred while tokenizing input
The following traceback may be corrupted or invalid
The error message is: ('EOF in multi-line string', (1, 20))
---------------------------------------------------------------------------
Error Traceback (most recent call last)
<ipython-input-34-d8dff6b9f4a7> in <module>()
2 1/1/2012,100.00
3 1/2/2012,102.00
----> 4 "a quoted junk row" '''), skipfooter=1)
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skipfooter, skip_footer, doublequote, delim_whitespace, as_recarray, compact_ints, use_unsigned, low_memory, buffer_lines, memory_map, float_precision)
651 skip_blank_lines=skip_blank_lines)
652
--> 653 return _read(filepath_or_buffer, kwds)
654
655 parser_f.__name__ = name
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in _read(filepath_or_buffer, kwds)
404
405 try:
--> 406 data = parser.read()
407 finally:
408 parser.close()
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in read(self, nrows)
977 raise ValueError('skipfooter not supported for iteration')
978
--> 979 ret = self._engine.read(nrows)
980
981 if self.options.get('as_recarray'):
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in read(self, rows)
2066 def read(self, rows=None):
2067 try:
-> 2068 content = self._get_lines(rows)
2069 except StopIteration:
2070 if self._first_chunk:
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in _get_lines(self, rows)
2717 while True:
2718 try:
-> 2719 new_rows.append(next(source))
2720 rows += 1
2721 except csv.Error as inst:
Error: ',' expected after '"'
|
ValueError
|
def _get_lines(self, rows=None):
lines = self.buf
new_rows = None
# already fetched some number
if rows is not None:
# we already have the lines in the buffer
if len(self.buf) >= rows:
new_rows, self.buf = self.buf[:rows], self.buf[rows:]
# need some lines
else:
rows -= len(self.buf)
if new_rows is None:
if isinstance(self.data, list):
if self.pos > len(self.data):
raise StopIteration
if rows is None:
new_rows = self.data[self.pos :]
new_pos = len(self.data)
else:
new_rows = self.data[self.pos : self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
new_rows = [
row
for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)
]
lines.extend(new_rows)
self.pos = new_pos
else:
new_rows = []
try:
if rows is not None:
for _ in range(rows):
new_rows.append(next(self.data))
lines.extend(new_rows)
else:
rows = 0
while True:
new_row = self._next_iter_line(row_num=self.pos + rows)
new_rows.append(new_row)
rows += 1
except StopIteration:
if self.skiprows:
new_rows = [
row
for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)
]
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
else:
lines = new_rows
if self.skipfooter:
lines = lines[: -self.skipfooter]
lines = self._check_comments(lines)
if self.skip_blank_lines:
lines = self._check_empty(lines)
lines = self._check_thousands(lines)
return self._check_decimal(lines)
|
def _get_lines(self, rows=None):
source = self.data
lines = self.buf
new_rows = None
# already fetched some number
if rows is not None:
# we already have the lines in the buffer
if len(self.buf) >= rows:
new_rows, self.buf = self.buf[:rows], self.buf[rows:]
# need some lines
else:
rows -= len(self.buf)
if new_rows is None:
if isinstance(source, list):
if self.pos > len(source):
raise StopIteration
if rows is None:
new_rows = source[self.pos :]
new_pos = len(source)
else:
new_rows = source[self.pos : self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
new_rows = [
row
for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)
]
lines.extend(new_rows)
self.pos = new_pos
else:
new_rows = []
try:
if rows is not None:
for _ in range(rows):
new_rows.append(next(source))
lines.extend(new_rows)
else:
rows = 0
while True:
try:
new_rows.append(next(source))
rows += 1
except csv.Error as inst:
if "newline inside string" in str(inst):
row_num = str(self.pos + rows)
msg = "EOF inside string starting with line " + row_num
raise Exception(msg)
raise
except StopIteration:
if self.skiprows:
new_rows = [
row
for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)
]
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
else:
lines = new_rows
if self.skipfooter:
lines = lines[: -self.skipfooter]
lines = self._check_comments(lines)
if self.skip_blank_lines:
lines = self._check_empty(lines)
lines = self._check_thousands(lines)
return self._check_decimal(lines)
|
https://github.com/pandas-dev/pandas/issues/15910
|
from pandas.compat import StringIO
pd.read_csv(StringIO('''Date,Value
1/1/2012,100.00
1/2/2012,102.00
"a quoted junk row"morejunk'''), skipfooter=1)
Out[21]
ERROR:root:An unexpected error occurred while tokenizing input
The following traceback may be corrupted or invalid
The error message is: ('EOF in multi-line string', (1, 20))
---------------------------------------------------------------------------
Error Traceback (most recent call last)
<ipython-input-34-d8dff6b9f4a7> in <module>()
2 1/1/2012,100.00
3 1/2/2012,102.00
----> 4 "a quoted junk row" '''), skipfooter=1)
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skipfooter, skip_footer, doublequote, delim_whitespace, as_recarray, compact_ints, use_unsigned, low_memory, buffer_lines, memory_map, float_precision)
651 skip_blank_lines=skip_blank_lines)
652
--> 653 return _read(filepath_or_buffer, kwds)
654
655 parser_f.__name__ = name
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in _read(filepath_or_buffer, kwds)
404
405 try:
--> 406 data = parser.read()
407 finally:
408 parser.close()
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in read(self, nrows)
977 raise ValueError('skipfooter not supported for iteration')
978
--> 979 ret = self._engine.read(nrows)
980
981 if self.options.get('as_recarray'):
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in read(self, rows)
2066 def read(self, rows=None):
2067 try:
-> 2068 content = self._get_lines(rows)
2069 except StopIteration:
2070 if self._first_chunk:
C:\Users\chris.bartak\Documents\python-dev\pandas\pandas\io\parsers.py in _get_lines(self, rows)
2717 while True:
2718 try:
-> 2719 new_rows.append(next(source))
2720 rows += 1
2721 except csv.Error as inst:
Error: ',' expected after '"'
|
ValueError
|
def scatter_matrix(
frame,
alpha=0.5,
figsize=None,
ax=None,
grid=False,
diagonal="hist",
marker=".",
density_kwds=None,
hist_kwds=None,
range_padding=0.05,
**kwds,
):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# GH 14855
kwds.setdefault("edgecolors", "none")
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.0
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == "hist":
ax.hist(values, **hist_kwds)
elif diagonal in ("kde", "density"):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(
df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds
)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
|
def scatter_matrix(
frame,
alpha=0.5,
figsize=None,
ax=None,
grid=False,
diagonal="hist",
marker=".",
density_kwds=None,
hist_kwds=None,
range_padding=0.05,
**kwds,
):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault("c", plt.rcParams["patch.facecolor"])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.0
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == "hist":
ax.hist(values, **hist_kwds)
elif diagonal in ("kde", "density"):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(
df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds
)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
|
https://github.com/pandas-dev/pandas/issues/14855
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-f3d30057e124> in <module>()
14 columns=iris.feature_names)
15 scatterplot = pd.scatter_matrix(dataframe, alpha=0.3,
---> 16 figsize=(10, 10), diagonal='hist', color=colors, marker='o', grid=True)
/Users/e/anaconda/lib/python3.5/site-packages/pandas/tools/plotting.py in scatter_matrix(frame, alpha, figsize, ax, grid, diagonal, marker, density_kwds, hist_kwds, range_padding, **kwds)
393
394 ax.scatter(df[b][common], df[a][common],
--> 395 marker=marker, alpha=alpha, **kwds)
396
397 ax.set_xlim(boundaries_list[j])
/Users/e/anaconda/lib/python3.5/site-packages/matplotlib/__init__.py in inner(ax, *args, **kwargs)
1817 warnings.warn(msg % (label_namer, func.__name__),
1818 RuntimeWarning, stacklevel=2)
-> 1819 return func(ax, *args, **kwargs)
1820 pre_doc = inner.__doc__
1821 if pre_doc is None:
/Users/e/anaconda/lib/python3.5/site-packages/matplotlib/axes/_axes.py in scatter(self, x, y, s, c, marker, cmap, norm, vmin, vmax, alpha, linewidths, verts, edgecolors, **kwargs)
3787 facecolors = co
3788 if c is not None:
-> 3789 raise ValueError("Supply a 'c' kwarg or a 'color' kwarg"
3790 " but not both; they differ but"
3791 " their functionalities overlap.")
ValueError: Supply a 'c' kwarg or a 'color' kwarg but not both; they differ but their functionalities overlap.
|
ValueError
|
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
kwargs.setdefault("edgecolors", "none")
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig
|
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwargs.setdefault("c", plt.rcParams["patch.facecolor"])
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig
|
https://github.com/pandas-dev/pandas/issues/14855
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-f3d30057e124> in <module>()
14 columns=iris.feature_names)
15 scatterplot = pd.scatter_matrix(dataframe, alpha=0.3,
---> 16 figsize=(10, 10), diagonal='hist', color=colors, marker='o', grid=True)
/Users/e/anaconda/lib/python3.5/site-packages/pandas/tools/plotting.py in scatter_matrix(frame, alpha, figsize, ax, grid, diagonal, marker, density_kwds, hist_kwds, range_padding, **kwds)
393
394 ax.scatter(df[b][common], df[a][common],
--> 395 marker=marker, alpha=alpha, **kwds)
396
397 ax.set_xlim(boundaries_list[j])
/Users/e/anaconda/lib/python3.5/site-packages/matplotlib/__init__.py in inner(ax, *args, **kwargs)
1817 warnings.warn(msg % (label_namer, func.__name__),
1818 RuntimeWarning, stacklevel=2)
-> 1819 return func(ax, *args, **kwargs)
1820 pre_doc = inner.__doc__
1821 if pre_doc is None:
/Users/e/anaconda/lib/python3.5/site-packages/matplotlib/axes/_axes.py in scatter(self, x, y, s, c, marker, cmap, norm, vmin, vmax, alpha, linewidths, verts, edgecolors, **kwargs)
3787 facecolors = co
3788 if c is not None:
-> 3789 raise ValueError("Supply a 'c' kwarg or a 'color' kwarg"
3790 " but not both; they differ but"
3791 " their functionalities overlap.")
ValueError: Supply a 'c' kwarg or a 'color' kwarg but not both; they differ but their functionalities overlap.
|
ValueError
|
def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
na_values = _NA_VALUES
else:
na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
na_values = na_values.copy() # Prevent aliasing.
if keep_default_na:
for k, v in compat.iteritems(na_values):
if not is_list_like(v):
v = [v]
v = set(v) | _NA_VALUES
na_values[k] = v
na_fvalues = dict(
[
(k, _floatify_na_values(v))
for k, v in na_values.items() # noqa
]
)
else:
if not is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | _NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
|
def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
na_values = _NA_VALUES
else:
na_values = []
na_fvalues = set()
elif isinstance(na_values, dict):
na_values = na_values.copy() # Prevent aliasing.
if keep_default_na:
for k, v in compat.iteritems(na_values):
if not is_list_like(v):
v = [v]
v = set(v) | _NA_VALUES
na_values[k] = v
na_fvalues = dict(
[
(k, _floatify_na_values(v))
for k, v in na_values.items() # noqa
]
)
else:
if not is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | _NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
|
https://github.com/pandas-dev/pandas/issues/15835
|
Traceback (most recent call last):
File "/home/alexey.lisicyn/testPand.py", line 22, in <module>
header_v = read_excel(file, **pre_kwargs).columns.values
File "/tmp/opt/linux-CentOS_4.4-x64/P7/python-2.7.7-dbg/lib/python2.7/site-packages/pandas/io/excel.py", line 170, in read_excel
skip_footer=skip_footer, converters=converters, **kwds)
File "/tmp/opt/linux-CentOS_4.4-x64/P7/python-2.7.7-dbg/lib/python2.7/site-packages/pandas/io/excel.py", line 438, in _parse_excel
output[asheetname] = parser.read()
File "/tmp/opt/linux-CentOS_4.4-x64/P7/python-2.7.7-dbg/lib/python2.7/site-packages/pandas/io/parsers.py", line 747, in read
ret = self._engine.read(nrows)
File "/tmp/opt/linux-CentOS_4.4-x64/P7/python-2.7.7-dbg/lib/python2.7/site-packages/pandas/io/parsers.py", line 1611, in read
index, columns = self._make_index(data, alldata, columns, indexnamerow)
File "/tmp/opt/linux-CentOS_4.4-x64/P7/python-2.7.7-dbg/lib/python2.7/site-packages/pandas/io/parsers.py", line 920, in _make_index
index = self._agg_index(index)
File "/tmp/opt/linux-CentOS_4.4-x64/P7/python-2.7.7-dbg/lib/python2.7/site-packages/pandas/io/parsers.py", line 1012, in _agg_index
arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues)
TypeError: unsupported operand type(s) for |: 'list' and 'set'
|
TypeError
|
def asof(self, where, subset=None):
"""
The last row without any NaN is taken (or the last row without
NaN considering only the subset of columns in the case of a DataFrame)
.. versionadded:: 0.19.0 For DataFrame
If there is no good value, NaN is returned for a Series
a Series of NaN values for a DataFrame
Parameters
----------
where : date or array of dates
subset : string or list of strings, default None
if not None use these columns for NaN propagation
Notes
-----
Dates are assumed to be sorted
Raises if this is not the case
Returns
-------
where is scalar
- value or NaN if input is Series
- Series if input is DataFrame
where is Index: same shape object as input
See Also
--------
merge_asof
"""
if isinstance(where, compat.string_types):
from pandas import to_datetime
where = to_datetime(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
elif self.ndim > 2:
raise NotImplementedError(
"asof is not implemented for {type}".format(type(self))
)
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isnull(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isnull() if is_series else self[subset].isnull().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
|
def asof(self, where, subset=None):
"""
The last row without any NaN is taken (or the last row without
NaN considering only the subset of columns in the case of a DataFrame)
.. versionadded:: 0.19.0 For DataFrame
If there is no good value, NaN is returned for a Series
a Series of NaN values for a DataFrame
Parameters
----------
where : date or array of dates
subset : string or list of strings, default None
if not None use these columns for NaN propagation
Notes
-----
Dates are assumed to be sorted
Raises if this is not the case
Returns
-------
where is scalar
- value or NaN if input is Series
- Series if input is DataFrame
where is Index: same shape object as input
See Also
--------
merge_asof
"""
if isinstance(where, compat.string_types):
from pandas import to_datetime
where = to_datetime(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
elif self.ndim > 2:
raise NotImplementedError(
"asof is not implemented for {type}".format(type(self))
)
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isnull(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isnull() if is_series else self[subset].isnull().any(1)
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
|
https://github.com/pandas-dev/pandas/issues/15713
|
import pandas as pd
import numpy as np
pd.Series([np.nan]).asof([0])
Traceback (most recent call last):
File "<ipython-input-268-2b0f8084a393>", line 1, in <module>
pd.Series([np.nan]).asof([0])
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/generic.py", line 3985, in asof
locs = self.index.asof_locs(where, ~(nulls.values))
File "~/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 1837, in asof_locs
result = np.arange(len(self))[mask].take(locs)
IndexError: cannot do a non-empty take from an empty axes.
|
IndexError
|
def cut(
x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False
):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int or sequence of scalars
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int
The precision at which to store and display the bins labels
include_lowest : bool
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
([(0.191, 3.367], (0.191, 3.367], (0.191, 3.367], (3.367, 6.533],
(6.533, 9.7], (0.191, 3.367]]
Categories (3, object): [(0.191, 3.367] < (3.367, 6.533] < (6.533, 9.7]],
array([ 0.1905 , 3.36666667, 6.53333333, 9.7 ]))
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3,
labels=["good","medium","bad"])
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1], dtype=int64)
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
sz = x.size
if sz == 0:
raise ValueError("Cannot cut empty array")
# handle empty arrays. Can't determine range, so use 0-1.
# rng = (0, 1)
else:
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= 0.001 * abs(mn) if mn != 0 else 0.001
mx += 0.001 * abs(mx) if mx != 0 else 0.001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins)
if (np.diff(bins) < 0).any():
raise ValueError("bins must increase monotonically.")
fac, bins = _bins_to_cuts(
x,
bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
)
return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name)
|
def cut(
x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False
):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int or sequence of scalars
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int
The precision at which to store and display the bins labels
include_lowest : bool
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
([(0.191, 3.367], (0.191, 3.367], (0.191, 3.367], (3.367, 6.533],
(6.533, 9.7], (0.191, 3.367]]
Categories (3, object): [(0.191, 3.367] < (3.367, 6.533] < (6.533, 9.7]],
array([ 0.1905 , 3.36666667, 6.53333333, 9.7 ]))
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3,
labels=["good","medium","bad"])
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1], dtype=int64)
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
sz = x.size
if sz == 0:
raise ValueError("Cannot cut empty array")
# handle empty arrays. Can't determine range, so use 0-1.
# rng = (0, 1)
else:
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= 0.001 * abs(mn)
mx += 0.001 * abs(mx)
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins)
if (np.diff(bins) < 0).any():
raise ValueError("bins must increase monotonically.")
fac, bins = _bins_to_cuts(
x,
bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
)
return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name)
|
https://github.com/pandas-dev/pandas/issues/15428
|
import pandas as pd
pd.__version__
u'0.19.2'
s = pd.Series([0,0,0])
pd.cut(s, 1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/luca/.local/lib/python2.7/site-packages/pandas/tools/tile.py", line 119, in cut
include_lowest=include_lowest)
File "/home/luca/.local/lib/python2.7/site-packages/pandas/tools/tile.py", line 194, in _bins_to_cuts
raise ValueError('Bin edges must be unique: %s' % repr(bins))
ValueError: Bin edges must be unique: array([ 0., 0.])
s = pd.Series([-1,-1,-1])
pd.cut(s, 1)
0 (-1.001, -0.999]
1 (-1.001, -0.999]
2 (-1.001, -0.999]
s = pd.Series([1,1,1])
pd.cut(s, 1)
0 (0.999, 1.001]
1 (0.999, 1.001]
2 (0.999, 1.001]
|
ValueError
|
def _bins_to_cuts(
x,
bins,
right=True,
labels=None,
precision=3,
include_lowest=False,
dtype=None,
duplicates="raise",
):
if duplicates not in ["raise", "drop"]:
raise ValueError(
"invalid value for 'duplicates' parameter, valid options are: raise, drop"
)
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == "raise":
raise ValueError(
"Bin edges must be unique: {}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(repr(bins))
)
else:
bins = unique_bins
side = "left" if right else "right"
ids = bins.searchsorted(x, side=side)
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isnull(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
increases = 0
while True:
try:
levels = _format_levels(
bins,
precision,
right=right,
include_lowest=include_lowest,
dtype=dtype,
)
except ValueError:
increases += 1
precision += 1
if increases >= 20:
raise
else:
break
else:
if len(labels) != len(bins) - 1:
raise ValueError(
"Bin labels must be one fewer than the number of bin edges"
)
levels = labels
levels = np.asarray(levels, dtype=object)
np.putmask(ids, na_mask, 0)
fac = Categorical(ids - 1, levels, ordered=True, fastpath=True)
else:
fac = ids - 1
if has_nas:
fac = fac.astype(np.float64)
np.putmask(fac, na_mask, np.nan)
return fac, bins
|
def _bins_to_cuts(
x,
bins,
right=True,
labels=None,
precision=3,
include_lowest=False,
dtype=None,
duplicates="raise",
):
if duplicates not in ["raise", "drop"]:
raise ValueError(
"invalid value for 'duplicates' parameter, valid options are: raise, drop"
)
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins):
if duplicates == "raise":
raise ValueError(
"Bin edges must be unique: {}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(repr(bins))
)
else:
bins = unique_bins
side = "left" if right else "right"
ids = bins.searchsorted(x, side=side)
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isnull(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
increases = 0
while True:
try:
levels = _format_levels(
bins,
precision,
right=right,
include_lowest=include_lowest,
dtype=dtype,
)
except ValueError:
increases += 1
precision += 1
if increases >= 20:
raise
else:
break
else:
if len(labels) != len(bins) - 1:
raise ValueError(
"Bin labels must be one fewer than the number of bin edges"
)
levels = labels
levels = np.asarray(levels, dtype=object)
np.putmask(ids, na_mask, 0)
fac = Categorical(ids - 1, levels, ordered=True, fastpath=True)
else:
fac = ids - 1
if has_nas:
fac = fac.astype(np.float64)
np.putmask(fac, na_mask, np.nan)
return fac, bins
|
https://github.com/pandas-dev/pandas/issues/15428
|
import pandas as pd
pd.__version__
u'0.19.2'
s = pd.Series([0,0,0])
pd.cut(s, 1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/luca/.local/lib/python2.7/site-packages/pandas/tools/tile.py", line 119, in cut
include_lowest=include_lowest)
File "/home/luca/.local/lib/python2.7/site-packages/pandas/tools/tile.py", line 194, in _bins_to_cuts
raise ValueError('Bin edges must be unique: %s' % repr(bins))
ValueError: Bin edges must be unique: array([ 0., 0.])
s = pd.Series([-1,-1,-1])
pd.cut(s, 1)
0 (-1.001, -0.999]
1 (-1.001, -0.999]
2 (-1.001, -0.999]
s = pd.Series([1,1,1])
pd.cut(s, 1)
0 (0.999, 1.001]
1 (0.999, 1.001]
2 (0.999, 1.001]
|
ValueError
|
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform("cummin", numeric_only=False)
|
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform("cummin", **kwargs)
|
https://github.com/pandas-dev/pandas/issues/15561
|
import pandas as pd
x = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
x.groupby('a').b.cummax()
Traceback (most recent call last):
File "<ipython-input-9-316257648d5f>", line 1, in <module>
x.groupby('a').b.cummax()
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/groupby.py", line 1454, in cummax
return self._cython_transform('cummax', **kwargs)
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/groupby.py", line 806, in _cython_transform
raise DataError('No numeric types to aggregate')
DataError: No numeric types to aggregate
|
DataError
|
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform("cummax", numeric_only=False)
|
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform("cummax", **kwargs)
|
https://github.com/pandas-dev/pandas/issues/15561
|
import pandas as pd
x = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
x.groupby('a').b.cummax()
Traceback (most recent call last):
File "<ipython-input-9-316257648d5f>", line 1, in <module>
x.groupby('a').b.cummax()
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/groupby.py", line 1454, in cummax
return self._cython_transform('cummax', **kwargs)
File "~/anaconda3/lib/python3.5/site-packages/pandas/core/groupby.py", line 806, in _cython_transform
raise DataError('No numeric types to aggregate')
DataError: No numeric types to aggregate
|
DataError
|
def json_normalize(
data,
record_path=None,
meta=None,
meta_prefix=None,
record_prefix=None,
errors="raise",
):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> from pandas.io.json import json_normalize
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and len(data) is 0:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
for i, x in enumerate(meta):
if not isinstance(x, list):
meta[i] = [x]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
meta_keys = [".".join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"%s is not always present",
e,
)
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError(
"Conflicting metadata name %s, need distinguishing prefix " % k
)
result[k] = np.array(v).repeat(lengths)
return result
|
def json_normalize(
data,
record_path=None,
meta=None,
meta_prefix=None,
record_prefix=None,
errors="raise",
):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> from pandas.io.json import json_normalize
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
for i, x in enumerate(meta):
if not isinstance(x, list):
meta[i] = [x]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
meta_keys = [".".join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with "
"errors='ignore' as key "
"%s is not always present",
e,
)
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError(
"Conflicting metadata name %s, need distinguishing prefix " % k
)
result[k] = np.array(v).repeat(lengths)
return result
|
https://github.com/pandas-dev/pandas/issues/15534
|
In[21]: pandas.io.json.json_normalize([])
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-21-1bd834af8a9b> in <module>()
----> 1 pandas.io.json.json_normalize([])
/usr/local/lib/python3.6/site-packages/pandas/io/json.py in json_normalize(data, record_path, meta, meta_prefix, record_prefix)
791
792 if record_path is None:
--> 793 if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
794 # naive normalization, this is idempotent for flat records
795 # and potentially will inflate the data considerably for
IndexError: list index out of range
|
IndexError
|
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
"""
if index:
if is_datetime64_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = "level_%d" % count
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = lmap(compat.text_type, index_names) + lmap(
compat.text_type, self.columns
)
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
|
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
"""
if index:
if is_datetime64_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = "level_%d" % count
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = lmap(str, index_names) + lmap(str, self.columns)
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(str, self.columns)
dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])
return np.rec.fromarrays(arrays, dtype=dtype, names=names)
|
https://github.com/pandas-dev/pandas/issues/11879
|
In [322]: df = pandas.DataFrame({u'c/\u03c3':[1,2,3]})
In [323]: df
Out[323]:
c/σ
0 1
1 2
2 3
In [324]: df.to_records()
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-324-6d3142e97d2d> in <module>()
----> 1 df.to_records()
/redacted/python2.7/site-packages/pandas/core/frame.pyc in to_records(self, index, convert_datetime64)
1013 elif index_names[0] is None:
1014 index_names = ['index']
-> 1015 names = index_names + lmap(str, self.columns)
1016 else:
1017 arrays = [self[c].get_values() for c in self.columns]
UnicodeEncodeError: 'ascii' codec can't encode character u'\u03c3' in position 2: ordinal not in range(128)
|
UnicodeEncodeError
|
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError(
"Indexing a MultiIndex with a DataFrame key is not implemented"
)
elif hasattr(key, "ndim") and key.ndim > 1:
raise NotImplementedError(
"Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented"
)
if (
not isinstance(key, tuple)
and len(key) > 1
and not isinstance(key[0], tuple)
):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
|
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# GH 7349
# possibly convert a list-like into a nested tuple
# but don't convert a list-like of tuples
if isinstance(labels, MultiIndex):
if (
not isinstance(key, tuple)
and len(key) > 1
and not isinstance(key[0], tuple)
):
if isinstance(key, ABCSeries):
# GH 14730
key = list(key)
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
|
https://github.com/pandas-dev/pandas/issues/15424
|
In [2]: d = pd.DataFrame([[1, 1, 3],
...: [1, 2, 4],
...: [2, 2, 5]], columns=['a', 'b', 'c'])
In [3]: d.set_index(['a', 'b']).loc[pd.Series([1, 2])]
Out[3]:
c
a b
1 1 3
2 4
2 2 5
In [4]: d.set_index(['a', 'b']).loc[pd.Series([1, 2], index=[2,0])]
Out[4]:
c
a b
1 1 3
2 4
2 2 5
In [5]: d.set_index(['a', 'b']).loc[pd.Series([1, 2], index=[2,3])]
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-5-a23ad44827f6> in <module>()
----> 1 d.set_index(['a', 'b']).loc[pd.Series([1, 2], index=[2,3])]
/home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key)
1339 else:
1340 key = com._apply_if_callable(key, self.obj)
-> 1341 return self._getitem_axis(key, axis=0)
1342
1343 def _is_scalar_access(self, key):
/home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1527 if isinstance(labels, MultiIndex):
1528 if (not isinstance(key, tuple) and len(key) > 1 and
-> 1529 not isinstance(key[0], tuple)):
1530 if isinstance(key, ABCSeries):
1531 # GH 14730
/home/pietro/nobackup/repo/pandas/pandas/core/series.py in __getitem__(self, key)
604 key = com._apply_if_callable(key, self)
605 try:
--> 606 result = self.index.get_value(self, key)
607
608 if not is_scalar(result):
/home/pietro/nobackup/repo/pandas/pandas/indexes/base.py in get_value(self, series, key)
2304 try:
2305 return self._engine.get_value(s, k,
-> 2306 tz=getattr(series.dtype, 'tz', None))
2307 except KeyError as e1:
2308 if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
/home/pietro/nobackup/repo/pandas/pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3992)()
/home/pietro/nobackup/repo/pandas/pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3689)()
/home/pietro/nobackup/repo/pandas/pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:4688)()
/home/pietro/nobackup/repo/pandas/pandas/src/hashtable_class_helper.pxi in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:13370)()
/home/pietro/nobackup/repo/pandas/pandas/src/hashtable_class_helper.pxi in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:13308)()
KeyError: 0
|
KeyError
|
def _cython_operation(self, kind, values, how, axis):
assert kind in ["transform", "aggregate"]
# can we do this operation with our cython functions
# if not raise NotImplementedError
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
# categoricals are only 1d, so we
# are not setup for dim transforming
if is_categorical_dtype(values):
raise NotImplementedError("categoricals are not support in cython ops ATM")
elif is_datetime64_any_dtype(values):
if how in ["add", "prod", "cumsum", "cumprod"]:
raise NotImplementedError(
"datetime64 type does not support {} operations".format(how)
)
elif is_timedelta64_dtype(values):
if how in ["prod", "cumprod"]:
raise NotImplementedError(
"timedelta64 type does not support {} operations".format(how)
)
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
out_shape = (self.ngroups,) + values.shape[1:]
is_datetimelike = needs_i8_conversion(values.dtype)
is_numeric = is_numeric_dtype(values.dtype)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values):
# we use iNaT for the missing value on ints
# so pre-convert to guard this condition
if (values == tslib.iNaT).any():
values = _ensure_float64(values)
else:
values = values.astype("int64", copy=False)
elif is_numeric and not is_complex_dtype(values):
values = _ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _ensure_float64(values)
func, dtype_str = self._get_cython_function(kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = "%s%d" % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = "object"
labels, _, _ = self.group_info
if kind == "aggregate":
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype), fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
result, counts, values, labels, func, is_numeric, is_datetimelike
)
elif kind == "transform":
result = _maybe_fill(np.empty_like(values, dtype=out_dtype), fill_value=np.nan)
result = self._transform(
result, values, labels, func, is_numeric, is_datetimelike
)
if is_integer_dtype(result):
mask = result == tslib.iNaT
if mask.any():
result = result.astype("float64")
result[mask] = np.nan
if kind == "aggregate" and self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
_ensure_object(result), (counts > 0).view(np.uint8)
)
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
|
def _cython_operation(self, kind, values, how, axis):
assert kind in ["transform", "aggregate"]
# can we do this operation with our cython functions
# if not raise NotImplementedError
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
# categoricals are only 1d, so we
# are not setup for dim transforming
if is_categorical_dtype(values):
raise NotImplementedError("categoricals are not support in cython ops ATM")
elif is_datetime64_any_dtype(values):
if how in ["add", "prod", "cumsum", "cumprod"]:
raise NotImplementedError(
"datetime64 type does not support {} operations".format(how)
)
elif is_timedelta64_dtype(values):
if how in ["prod", "cumprod"]:
raise NotImplementedError(
"timedelta64 type does not support {} operations".format(how)
)
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
out_shape = (self.ngroups,) + values.shape[1:]
is_numeric = is_numeric_dtype(values.dtype)
if is_datetime_or_timedelta_dtype(values.dtype):
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values):
values = values.astype("int64", copy=False)
elif is_numeric and not is_complex_dtype(values):
values = _ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _ensure_float64(values)
func, dtype_str = self._get_cython_function(kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = "%s%d" % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = "object"
labels, _, _ = self.group_info
if kind == "aggregate":
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype), fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, labels, func, is_numeric)
elif kind == "transform":
result = _maybe_fill(np.empty_like(values, dtype=out_dtype), fill_value=np.nan)
# temporary storange for running-total type tranforms
accum = np.empty(out_shape, dtype=out_dtype)
result = self._transform(result, accum, values, labels, func, is_numeric)
if is_integer_dtype(result):
if len(result[result == tslib.iNaT]) > 0:
result = result.astype("float64")
result[result == tslib.iNaT] = np.nan
if kind == "aggregate" and self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
_ensure_object(result), (counts > 0).view(np.uint8)
)
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
|
https://github.com/pandas-dev/pandas/issues/15109
|
(pandas) bash-3.2$ nosetests pandas/tests/groupby/test_groupby.py -s -m cummin_cummax
F
======================================================================
FAIL: test_cummin_cummax (test_groupby.TestGroupBy)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jreback/pandas/pandas/tests/groupby/test_groupby.py", line 5798, in test_cummin_cummax
tm.assert_frame_equal(result, expected)
File "/Users/jreback/pandas/pandas/util/testing.py", line 1313, in assert_frame_equal
obj='DataFrame.iloc[:, {0}]'.format(i))
File "/Users/jreback/pandas/pandas/util/testing.py", line 1154, in assert_series_equal
assert_attr_equal('dtype', left, right)
File "/Users/jreback/pandas/pandas/util/testing.py", line 878, in assert_attr_equal
left_attr, right_attr)
File "/Users/jreback/pandas/pandas/util/testing.py", line 1018, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: float64
[right]: int64
----------------------------------------------------------------------
Ran 1 test in 0.046s
FAILED (failures=1)
|
AssertionError
|
def _aggregate(
self, result, counts, values, comp_ids, agg_func, is_numeric, is_datetimelike
):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
|
def _aggregate(self, result, counts, values, comp_ids, agg_func, is_numeric):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
|
https://github.com/pandas-dev/pandas/issues/15109
|
(pandas) bash-3.2$ nosetests pandas/tests/groupby/test_groupby.py -s -m cummin_cummax
F
======================================================================
FAIL: test_cummin_cummax (test_groupby.TestGroupBy)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jreback/pandas/pandas/tests/groupby/test_groupby.py", line 5798, in test_cummin_cummax
tm.assert_frame_equal(result, expected)
File "/Users/jreback/pandas/pandas/util/testing.py", line 1313, in assert_frame_equal
obj='DataFrame.iloc[:, {0}]'.format(i))
File "/Users/jreback/pandas/pandas/util/testing.py", line 1154, in assert_series_equal
assert_attr_equal('dtype', left, right)
File "/Users/jreback/pandas/pandas/util/testing.py", line 878, in assert_attr_equal
left_attr, right_attr)
File "/Users/jreback/pandas/pandas/util/testing.py", line 1018, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: float64
[right]: int64
----------------------------------------------------------------------
Ran 1 test in 0.046s
FAILED (failures=1)
|
AssertionError
|
def _transform(
self, result, values, comp_ids, transform_func, is_numeric, is_datetimelike
):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
transform_func(result[:, :, i], values, comp_ids, is_datetimelike)
else:
transform_func(result, values, comp_ids, is_datetimelike)
return result
|
def _transform(self, result, accum, values, comp_ids, transform_func, is_numeric):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
transform_func(result[:, :, i], values, comp_ids, accum)
else:
transform_func(result, values, comp_ids, accum)
return result
|
https://github.com/pandas-dev/pandas/issues/15109
|
(pandas) bash-3.2$ nosetests pandas/tests/groupby/test_groupby.py -s -m cummin_cummax
F
======================================================================
FAIL: test_cummin_cummax (test_groupby.TestGroupBy)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jreback/pandas/pandas/tests/groupby/test_groupby.py", line 5798, in test_cummin_cummax
tm.assert_frame_equal(result, expected)
File "/Users/jreback/pandas/pandas/util/testing.py", line 1313, in assert_frame_equal
obj='DataFrame.iloc[:, {0}]'.format(i))
File "/Users/jreback/pandas/pandas/util/testing.py", line 1154, in assert_series_equal
assert_attr_equal('dtype', left, right)
File "/Users/jreback/pandas/pandas/util/testing.py", line 878, in assert_attr_equal
left_attr, right_attr)
File "/Users/jreback/pandas/pandas/util/testing.py", line 1018, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: float64
[right]: int64
----------------------------------------------------------------------
Ran 1 test in 0.046s
FAILED (failures=1)
|
AssertionError
|
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isnull(arr)
try:
convert = not all(mask)
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError) as e:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
if compat.PY2:
p_err = r"takes (no|(exactly|at (least|most)) ?\d+) arguments?"
else:
p_err = (
r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
r"(?(3)required )positional arguments?"
)
if len(e.args) >= 1 and re.search(p_err, e.args[0]):
raise e
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
|
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isnull(arr)
try:
convert = not all(mask)
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError):
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
|
https://github.com/pandas-dev/pandas/issues/15055
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-26-bc58c5c60bd2> in <module>()
----> 1 s.str.replace(r'[aeiou]', lambda m: m.group().upper())
/anaconda3/lib/python3.4/site-packages/pandas/core/strings.py in replace(self, pat, repl, n, case, flags)
1504 def replace(self, pat, repl, n=-1, case=True, flags=0):
1505 result = str_replace(self._data, pat, repl, n=n, case=case,
-> 1506 flags=flags)
1507 return self._wrap_result(result)
1508
/anaconda3/lib/python3.4/site-packages/pandas/core/strings.py in str_replace(arr, pat, repl, n, case, flags)
320 # Check whether repl is valid (GH 13438)
321 if not is_string_like(repl):
--> 322 raise TypeError("repl must be a string")
323 use_re = not case or len(pat) > 1 or flags
324
TypeError: repl must be a string
|
TypeError
|
def str_replace(arr, pat, repl, n=-1, case=True, flags=0):
"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : string
Character sequence or regular expression
repl : string or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
.. versionadded:: 0.20.0
n : int, default -1 (all)
Number of replacements to make from start
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
replaced : Series/Index of objects
Examples
--------
When ``repl`` is a string, every ``pat`` is replaced as with
:meth:`str.replace`. NaN value(s) in the Series are left as is.
>>> Series(['foo', 'fuz', np.nan]).str.replace('f', 'b')
0 boo
1 buz
2 NaN
dtype: object
When ``repl`` is a callable, it is called on every ``pat`` using
:func:`re.sub`. The callable should expect one positional argument
(a regex object) and return a string.
To get the idea:
>>> Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo
1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups:
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> Series(['Foo Bar Baz', np.nan]).str.replace(pat, repl)
0 bAR
1 NaN
dtype: object
"""
# Check whether repl is valid (GH 13438, GH 15055)
if not (is_string_like(repl) or callable(repl)):
raise TypeError("repl must be a string or callable")
use_re = not case or len(pat) > 1 or flags or callable(repl)
if use_re:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
n = n if n >= 0 else 0
def f(x):
return regex.sub(repl, x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
|
def str_replace(arr, pat, repl, n=-1, case=True, flags=0):
"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : string
Character sequence or regular expression
repl : string
Replacement sequence
n : int, default -1 (all)
Number of replacements to make from start
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
replaced : Series/Index of objects
"""
# Check whether repl is valid (GH 13438)
if not is_string_like(repl):
raise TypeError("repl must be a string")
use_re = not case or len(pat) > 1 or flags
if use_re:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
n = n if n >= 0 else 0
def f(x):
return regex.sub(repl, x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
|
https://github.com/pandas-dev/pandas/issues/15055
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-26-bc58c5c60bd2> in <module>()
----> 1 s.str.replace(r'[aeiou]', lambda m: m.group().upper())
/anaconda3/lib/python3.4/site-packages/pandas/core/strings.py in replace(self, pat, repl, n, case, flags)
1504 def replace(self, pat, repl, n=-1, case=True, flags=0):
1505 result = str_replace(self._data, pat, repl, n=n, case=case,
-> 1506 flags=flags)
1507 return self._wrap_result(result)
1508
/anaconda3/lib/python3.4/site-packages/pandas/core/strings.py in str_replace(arr, pat, repl, n, case, flags)
320 # Check whether repl is valid (GH 13438)
321 if not is_string_like(repl):
--> 322 raise TypeError("repl must be a string")
323 use_re = not case or len(pat) > 1 or flags
324
TypeError: repl must be a string
|
TypeError
|
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if self.is_datetimelike and isinstance(
self.window, (compat.string_types, DateOffset)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented for datetimelike and offset based windows"
)
# this will raise ValueError on non-fixed freqs
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
|
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if self.is_datetimelike and isinstance(
self.window, (compat.string_types, DateOffset)
):
# must be monotonic for on
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be monotonic".format(formatted))
from pandas.tseries.frequencies import to_offset
try:
freq = to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} in not compat with a datetimelike index".format(
self.window
)
)
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented for datetimelike and offset based windows"
)
# this will raise ValueError on non-fixed freqs
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
|
https://github.com/pandas-dev/pandas/issues/15130
|
In [7]: dates_df.groupby('name').rolling('180D', on='date')['amount'].sum()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-8896cb99a66a> in <module>()
----> 1 dates_df.groupby('name').rolling('180D', on='date')['amount'].sum()
/Users/jreback/pandas/pandas/core/groupby.py in rolling(self, *args, **kwargs)
1148 """
1149 from pandas.core.window import RollingGroupby
-> 1150 return RollingGroupby(self, *args, **kwargs)
1151
1152 @Substitution(name='groupby')
/Users/jreback/pandas/pandas/core/window.py in __init__(self, obj, *args, **kwargs)
635 self._groupby.mutated = True
636 self._groupby.grouper.mutated = True
--> 637 super(GroupByMixin, self).__init__(obj, *args, **kwargs)
638
639 count = GroupByMixin._dispatch('count')
/Users/jreback/pandas/pandas/core/window.py in __init__(self, obj, window, min_periods, freq, center, win_type, axis, on, **kwargs)
76 self.win_type = win_type
77 self.axis = obj._get_axis_number(axis) if axis is not None else None
---> 78 self.validate()
79
80 @property
/Users/jreback/pandas/pandas/core/window.py in validate(self)
1030 formatted = self.on or 'index'
1031 raise ValueError("{0} must be "
-> 1032 "monotonic".format(formatted))
1033
1034 from pandas.tseries.frequencies import to_offset
ValueError: date must be monotonic
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.