after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
|
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if isscalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
|
https://github.com/pandas-dev/pandas/issues/15130
|
In [7]: dates_df.groupby('name').rolling('180D', on='date')['amount'].sum()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-8896cb99a66a> in <module>()
----> 1 dates_df.groupby('name').rolling('180D', on='date')['amount'].sum()
/Users/jreback/pandas/pandas/core/groupby.py in rolling(self, *args, **kwargs)
1148 """
1149 from pandas.core.window import RollingGroupby
-> 1150 return RollingGroupby(self, *args, **kwargs)
1151
1152 @Substitution(name='groupby')
/Users/jreback/pandas/pandas/core/window.py in __init__(self, obj, *args, **kwargs)
635 self._groupby.mutated = True
636 self._groupby.grouper.mutated = True
--> 637 super(GroupByMixin, self).__init__(obj, *args, **kwargs)
638
639 count = GroupByMixin._dispatch('count')
/Users/jreback/pandas/pandas/core/window.py in __init__(self, obj, window, min_periods, freq, center, win_type, axis, on, **kwargs)
76 self.win_type = win_type
77 self.axis = obj._get_axis_number(axis) if axis is not None else None
---> 78 self.validate()
79
80 @property
/Users/jreback/pandas/pandas/core/window.py in validate(self)
1030 formatted = self.on or 'index'
1031 raise ValueError("{0} must be "
-> 1032 "monotonic".format(formatted))
1033
1034 from pandas.tseries.frequencies import to_offset
ValueError: date must be monotonic
|
ValueError
|
def _infer_columns(self):
names = self.names
num_original_columns = 0
clear_buffer = True
if self.header is not None:
header = self.header
# we have a mi columns, so read an extra line
if isinstance(header, (list, tuple, np.ndarray)):
have_mi_columns = True
header = list(header) + [header[-1] + 1]
else:
have_mi_columns = False
header = [header]
columns = []
for level, hr in enumerate(header):
try:
line = self._buffered_line()
while self.line_pos <= hr:
line = self._next_line()
except StopIteration:
if self.line_pos < hr:
raise ValueError(
"Passed header=%s but only %d lines in file"
% (hr, self.line_pos + 1)
)
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
if have_mi_columns and hr > 0:
if clear_buffer:
self._clear_buffer()
columns.append([None] * len(columns[-1]))
return columns, num_original_columns
if not self.names:
raise EmptyDataError("No columns to parse from file")
line = self.names[:]
unnamed_count = 0
this_columns = []
for i, c in enumerate(line):
if c == "":
if have_mi_columns:
this_columns.append("Unnamed: %d_level_%d" % (i, level))
else:
this_columns.append("Unnamed: %d" % i)
unnamed_count += 1
else:
this_columns.append(c)
if not have_mi_columns and self.mangle_dupe_cols:
counts = {}
for i, col in enumerate(this_columns):
cur_count = counts.get(col, 0)
if cur_count > 0:
this_columns[i] = "%s.%d" % (col, cur_count)
counts[col] = cur_count + 1
elif have_mi_columns:
# if we have grabbed an extra line, but its not in our
# format so save in the buffer, and create an blank extra
# line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
ic = len(self.index_col) if self.index_col is not None else 0
if lc != unnamed_count and lc - ic > unnamed_count:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self._clear_buffer()
if names is not None:
if (self.usecols is not None and len(names) != len(self.usecols)) or (
self.usecols is None and len(names) != len(columns[0])
):
raise ValueError(
"Number of passed names did not match "
"number of header fields in the file"
)
if len(columns) > 1:
raise TypeError("Cannot pass names with multi-index columns")
if self.usecols is not None:
# Set _use_cols. We don't store columns because they are
# overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
num_original_columns = len(names)
columns = [names]
else:
columns = self._handle_usecols(columns, columns[0])
else:
try:
line = self._buffered_line()
except StopIteration:
if not names:
raise EmptyDataError("No columns to parse from file")
line = names[:]
ncols = len(line)
num_original_columns = ncols
if not names:
if self.prefix:
columns = [["%s%d" % (self.prefix, i) for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) >= num_original_columns:
columns = self._handle_usecols([names], names)
num_original_columns = len(names)
else:
if not callable(self.usecols) and len(names) != len(self.usecols):
raise ValueError(
"Number of passed names did not match number of "
"header fields in the file"
)
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
num_original_columns = ncols
return columns, num_original_columns
|
def _infer_columns(self):
names = self.names
num_original_columns = 0
clear_buffer = True
if self.header is not None:
header = self.header
# we have a mi columns, so read an extra line
if isinstance(header, (list, tuple, np.ndarray)):
have_mi_columns = True
header = list(header) + [header[-1] + 1]
else:
have_mi_columns = False
header = [header]
columns = []
for level, hr in enumerate(header):
try:
line = self._buffered_line()
while self.line_pos <= hr:
line = self._next_line()
except StopIteration:
if self.line_pos < hr:
raise ValueError(
"Passed header=%s but only %d lines in file"
% (hr, self.line_pos + 1)
)
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
if have_mi_columns and hr > 0:
if clear_buffer:
self._clear_buffer()
columns.append([None] * len(columns[-1]))
return columns, num_original_columns
if not self.names:
raise EmptyDataError("No columns to parse from file")
line = self.names[:]
unnamed_count = 0
this_columns = []
for i, c in enumerate(line):
if c == "":
if have_mi_columns:
this_columns.append("Unnamed: %d_level_%d" % (i, level))
else:
this_columns.append("Unnamed: %d" % i)
unnamed_count += 1
else:
this_columns.append(c)
if not have_mi_columns and self.mangle_dupe_cols:
counts = {}
for i, col in enumerate(this_columns):
cur_count = counts.get(col, 0)
if cur_count > 0:
this_columns[i] = "%s.%d" % (col, cur_count)
counts[col] = cur_count + 1
elif have_mi_columns:
# if we have grabbed an extra line, but its not in our
# format so save in the buffer, and create an blank extra
# line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
ic = len(self.index_col) if self.index_col is not None else 0
if lc != unnamed_count and lc - ic > unnamed_count:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self._clear_buffer()
if names is not None:
if (self.usecols is not None and len(names) != len(self.usecols)) or (
self.usecols is None and len(names) != len(columns[0])
):
raise ValueError(
"Number of passed names did not match "
"number of header fields in the file"
)
if len(columns) > 1:
raise TypeError("Cannot pass names with multi-index columns")
if self.usecols is not None:
# Set _use_cols. We don't store columns because they are
# overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
num_original_columns = len(names)
columns = [names]
else:
columns = self._handle_usecols(columns, columns[0])
else:
try:
line = self._buffered_line()
except StopIteration:
if not names:
raise EmptyDataError("No columns to parse from file")
line = names[:]
ncols = len(line)
num_original_columns = ncols
if not names:
if self.prefix:
columns = [["%s%d" % (self.prefix, i) for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) == num_original_columns:
columns = self._handle_usecols([names], names)
num_original_columns = len(names)
else:
if self.usecols and len(names) != len(self.usecols):
raise ValueError(
"Number of passed names did not match number of "
"header fields in the file"
)
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
num_original_columns = ncols
return columns, num_original_columns
|
https://github.com/pandas-dev/pandas/issues/6710
|
In [98]: mydata='1,2,3\n1,2\n1,2\n'
In [99]: pd.read_csv(StringIO(mydata), names=['a', 'b', 'c'], usecols=['a', 'c'])
Out[99]:
a c
0 1 3
1 1 NaN
2 1 NaN
[3 rows x 2 columns]
In [100]: mydata='1,2\n1,2,3\n4,5,6\n'
In [101]: pd.read_csv(StringIO(mydata), names=['a', 'b', 'c'], usecols=['a', 'c'])
---------------------------------------------------------------------------
CParserError Traceback (most recent call last)
<ipython-input-101-f60127771eeb> in <module>()
----> 1 pd.read_csv(StringIO(mydata), names=['a', 'b', 'c'], usecols=['a', 'c'])
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/io/parsers.pyc in parser_f(filepath_or_buffer, sep, dialect, compression, doublequote, escapechar, quotechar, quoting, skipinitialspace, lineterminator, header, index_col, names, prefix, skiprows, skipfooter, skip_footer, na_values, na_fvalues, true_values, false_values, delimiter, converters, dtype, usecols, engine, delim_whitespace, as_recarray, na_filter, compact_ints, use_unsigned, low_memory, buffer_lines, warn_bad_lines, error_bad_lines, keep_default_na, thousands, comment, decimal, parse_dates, keep_date_col, dayfirst, date_parser, memory_map, nrows, iterator, chunksize, verbose, encoding, squeeze, mangle_dupe_cols, tupleize_cols, infer_datetime_format)
418 infer_datetime_format=infer_datetime_format)
419
--> 420 return _read(filepath_or_buffer, kwds)
421
422 parser_f.__name__ = name
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/io/parsers.pyc in _read(filepath_or_buffer, kwds)
223 return parser
224
--> 225 return parser.read()
226
227 _parser_defaults = {
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/io/parsers.pyc in read(self, nrows)
624 raise ValueError('skip_footer not supported for iteration')
625
--> 626 ret = self._engine.read(nrows)
627
628 if self.options.get('as_recarray'):
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/io/parsers.pyc in read(self, nrows)
1068
1069 try:
-> 1070 data = self._reader.read(nrows)
1071 except StopIteration:
1072 if nrows is None:
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/parser.so in pandas.parser.TextReader.read (pandas/parser.c:6866)()
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/parser.so in pandas.parser.TextReader._read_low_memory (pandas/parser.c:7086)()
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/parser.so in pandas.parser.TextReader._read_rows (pandas/parser.c:7691)()
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/parser.so in pandas.parser.TextReader._tokenize_rows (pandas/parser.c:7575)()
/home/altaurog/venv/p27/local/lib/python2.7/site-packages/pandas/parser.so in pandas.parser.raise_parser_error (pandas/parser.c:19038)()
CParserError: Error tokenizing data. C error: Expected 2 fields in line 2, saw 3
In [102]: pd.show_versions()
INSTALLED VERSIONS
------------------
commit: None
python: 2.7.3.final.0
python-bits: 64
OS: Linux
OS-release: 3.2.0-4-amd64
machine: x86_64
processor:
byteorder: little
LC_ALL: None
LANG: en_US.UTF-8
pandas: 0.13.1
Cython: 0.20
numpy: 1.8.0
scipy: None
statsmodels: None
IPython: 1.2.1
sphinx: 1.1.3
patsy: None
scikits.timeseries: None
dateutil: 1.5
pytz: 2012c
bottleneck: 0.8.0
tables: 3.1.0
numexpr: 2.3.1
matplotlib: 1.3.1
openpyxl: None
xlrd: None
xlwt: 0.7.4
xlsxwriter: None
sqlalchemy: None
lxml: None
bs4: None
html5lib: 0.999
bq: None
apiclient: None
|
CParserError
|
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# GH 7349
# possibly convert a list-like into a nested tuple
# but don't convert a list-like of tuples
if isinstance(labels, MultiIndex):
if (
not isinstance(key, tuple)
and len(key) > 1
and not isinstance(key[0], tuple)
):
if isinstance(key, ABCSeries):
# GH 14730
key = list(key)
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
|
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# GH 7349
# possibly convert a list-like into a nested tuple
# but don't convert a list-like of tuples
if isinstance(labels, MultiIndex):
if (
not isinstance(key, tuple)
and len(key) > 1
and not isinstance(key[0], tuple)
):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
|
https://github.com/pandas-dev/pandas/issues/14730
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/indexing.py in _get_label(self, label, axis)
94 try:
---> 95 return self.obj._xs(label, axis=axis)
96 except:
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/generic.py in xs(self, key, axis, level, drop_level)
1776 loc, new_index = self.index.get_loc_level(key,
-> 1777 drop_level=drop_level)
1778 else:
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in get_loc_level(self, key, level, drop_level)
1795 else:
-> 1796 return partial_selection(key)
1797 else:
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in partial_selection(key, indexer)
1761 if indexer is None:
-> 1762 indexer = self.get_loc(key)
1763 ilevels = [i for i in range(len(key))
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in get_loc(self, key, method)
1671 start, stop = (self.slice_locs(lead_key, lead_key)
-> 1672 if lead_key else (0, len(self)))
1673
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in slice_locs(self, start, end, step, kind)
1577 # happens in get_slice_bound method), but it adds meaningful doc.
-> 1578 return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
1579
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/base.py in slice_locs(self, start, end, step, kind)
3175 if start is not None:
-> 3176 start_slice = self.get_slice_bound(start, 'left', kind)
3177 if start_slice is None:
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in get_slice_bound(self, label, side, kind)
1548 label = label,
-> 1549 return self._partial_tup_index(label, side=side)
1550
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in _partial_tup_index(self, tup, side)
1591
-> 1592 if lab not in lev:
1593 if not lev.is_type_compatible(lib.infer_dtype([lab])):
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/base.py in __contains__(self, key)
1392 def __contains__(self, key):
-> 1393 hash(key)
1394 # work around some kind of odd cython bug
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/generic.py in __hash__(self)
830 raise TypeError('{0!r} objects are mutable, thus they cannot be'
--> 831 ' hashed'.format(self.__class__.__name__))
832
TypeError: 'Series' objects are mutable, thus they cannot be hashed
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-7-5927dd14b2bf> in <module>()
4 y = pd.Series([1,3])
5 x.loc[[1,3]] # can index series with list
----> 6 x.loc[y] # cannot index series with another series
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1309 return self._getitem_tuple(key)
1310 else:
-> 1311 return self._getitem_axis(key, axis=0)
1312
1313 def _getitem_axis(self, key, axis=0):
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1480 # fall thru to straight lookup
1481 self._has_valid_type(key, axis)
-> 1482 return self._get_label(key, axis=axis)
1483
1484
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/indexing.py in _get_label(self, label, axis)
95 return self.obj._xs(label, axis=axis)
96 except:
---> 97 return self.obj[label]
98 elif isinstance(label, tuple) and isinstance(label[axis], slice):
99 raise IndexingError('no slices here, handle elsewhere')
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
640 key = check_bool_indexer(self.index, key)
641
--> 642 return self._get_with(key)
643
644 def _get_with(self, key):
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/series.py in _get_with(self, key)
653 if isinstance(key, tuple):
654 try:
--> 655 return self._get_values_tuple(key)
656 except:
657 if len(key) == 1:
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/series.py in _get_values_tuple(self, key)
701
702 # If key is contained, would have returned by now
--> 703 indexer, new_index = self.index.get_loc_level(key)
704 return self._constructor(self._values[indexer],
705 index=new_index).__finalize__(self)
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in get_loc_level(self, key, level, drop_level)
1794 return partial_selection(key)
1795 else:
-> 1796 return partial_selection(key)
1797 else:
1798 indexer = None
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in partial_selection(key, indexer)
1760 def partial_selection(key, indexer=None):
1761 if indexer is None:
-> 1762 indexer = self.get_loc(key)
1763 ilevels = [i for i in range(len(key))
1764 if key[i] != slice(None, None)]
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in get_loc(self, key, method)
1670 lead_key, follow_key = key[:i], key[i:]
1671 start, stop = (self.slice_locs(lead_key, lead_key)
-> 1672 if lead_key else (0, len(self)))
1673
1674 if start == stop:
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in slice_locs(self, start, end, step, kind)
1576 # This function adds nothing to its parent implementation (the magic
1577 # happens in get_slice_bound method), but it adds meaningful doc.
-> 1578 return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
1579
1580 def _partial_tup_index(self, tup, side='left'):
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/base.py in slice_locs(self, start, end, step, kind)
3174 start_slice = None
3175 if start is not None:
-> 3176 start_slice = self.get_slice_bound(start, 'left', kind)
3177 if start_slice is None:
3178 start_slice = 0
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in get_slice_bound(self, label, side, kind)
1547 if not isinstance(label, tuple):
1548 label = label,
-> 1549 return self._partial_tup_index(label, side=side)
1550
1551 def slice_locs(self, start=None, end=None, step=None, kind=None):
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/multi.py in _partial_tup_index(self, tup, side)
1590 section = labs[start:end]
1591
-> 1592 if lab not in lev:
1593 if not lev.is_type_compatible(lib.infer_dtype([lab])):
1594 raise TypeError('Level type mismatch: %s' % lab)
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/indexes/base.py in __contains__(self, key)
1391
1392 def __contains__(self, key):
-> 1393 hash(key)
1394 # work around some kind of odd cython bug
1395 try:
/home/owl/miniconda3/envs/p3/lib/python3.5/site-packages/pandas/core/generic.py in __hash__(self)
829 def __hash__(self):
830 raise TypeError('{0!r} objects are mutable, thus they cannot be'
--> 831 ' hashed'.format(self.__class__.__name__))
832
833 def __iter__(self):
TypeError: 'Series' objects are mutable, thus they cannot be hashed
|
TypeError
|
def _write_body(self, indent):
self.write("<tbody>", indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(min(len(self.frame), self.max_rows)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write("</tbody>", indent)
indent -= self.indent_delta
return indent
|
def _write_body(self, indent):
self.write("<tbody>", indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(len(self.frame)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write("</tbody>", indent)
indent -= self.indent_delta
return indent
|
https://github.com/pandas-dev/pandas/issues/14998
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-7-c16b7151f39c> in <module>()
----> 1 df.to_html(max_rows=10, index=False)
/Users/tom.augspurger/Envs/py3/lib/python3.6/site-packages/pandas-0.19.0+265.gaba7d255a-py3.6-macosx-10.12-x86_64.egg/pandas/core/frame.py in to_html(self, buf, columns, col_space, header, index, na_rep, formatters, float_format, sparsify, index_names, justify, bold_rows, classes, escape, max_rows, max_cols, show_dimensions, notebook, decimal, border)
1553 decimal=decimal)
1554 # TODO: a generic formatter wld b in DataFrameFormatter
-> 1555 formatter.to_html(classes=classes, notebook=notebook, border=border)
1556
1557 if buf is None:
/Users/tom.augspurger/Envs/py3/lib/python3.6/site-packages/pandas-0.19.0+265.gaba7d255a-py3.6-macosx-10.12-x86_64.egg/pandas/formats/format.py in to_html(self, classes, notebook, border)
698 border=border)
699 if hasattr(self.buf, 'write'):
--> 700 html_renderer.write_result(self.buf)
701 elif isinstance(self.buf, compat.string_types):
702 with open(self.buf, 'w') as f:
/Users/tom.augspurger/Envs/py3/lib/python3.6/site-packages/pandas-0.19.0+265.gaba7d255a-py3.6-macosx-10.12-x86_64.egg/pandas/formats/format.py in write_result(self, buf)
1022 indent += self.indent_delta
1023 indent = self._write_header(indent)
-> 1024 indent = self._write_body(indent)
1025
1026 self.write('</table>', indent)
/Users/tom.augspurger/Envs/py3/lib/python3.6/site-packages/pandas-0.19.0+265.gaba7d255a-py3.6-macosx-10.12-x86_64.egg/pandas/formats/format.py in _write_body(self, indent)
1184 else:
1185 for i in range(len(self.frame)):
-> 1186 row = [fmt_values[j][i] for j in range(len(self.columns))]
1187 self.write_tr(row, indent, self.indent_delta, tags=None)
1188
/Users/tom.augspurger/Envs/py3/lib/python3.6/site-packages/pandas-0.19.0+265.gaba7d255a-py3.6-macosx-10.12-x86_64.egg/pandas/formats/format.py in <listcomp>(.0)
1184 else:
1185 for i in range(len(self.frame)):
-> 1186 row = [fmt_values[j][i] for j in range(len(self.columns))]
1187 self.write_tr(row, indent, self.indent_delta, tags=None)
1188
IndexError: list index out of range
|
IndexError
|
def create_axes(
self,
axes,
obj,
validate=True,
nan_rep=None,
data_columns=None,
min_itemsize=None,
**kwargs,
):
"""create and return the axes
leagcy tables create an indexable column, indexable index,
non-indexable fields
Parameters:
-----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a values to use for string column nan_rep
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except:
raise TypeError(
"cannot properly create the storer for: "
"[group->%s,value->%s]" % (self.group._v_name, type(obj))
)
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.info = copy.copy(existing_table.info)
else:
existing_table = None
# currently support on ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError("currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = "nan"
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = (
_convert_index(a, self.encoding, self.format_type)
.set_name(name)
.set_axis(i)
)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if append_axis != exist_axis:
# ahah! -> reindex
if sorted(append_axis) == sorted(exist_axis):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info["names"] = list(a.names)
info["type"] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info) for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get out blocks
block_obj = self.get_object(obj).consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex_axis(
Index(axis_labels).difference(Index(data_columns)), axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex_axis([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = dict(
[
(tuple(b_items.tolist()), (b, b_items))
for b, b_items in zip(blocks, blk_items)
]
)
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except:
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ",".join(pprint_thing(item) for item in items)
)
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except:
raise ValueError(
"Incompatible appended table [%s] with "
"existing table [%s]" % (blocks, existing_table.values_axes)
)
else:
existing_col = None
try:
col = klass.create_for_block(i=i, name=name, version=self.version)
col.set_atom(
block=b,
block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
info=self.info,
**kwargs,
)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->%s,items->%s] %s" % (b.dtype.name, b_items, str(detail))
)
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
|
def create_axes(
self,
axes,
obj,
validate=True,
nan_rep=None,
data_columns=None,
min_itemsize=None,
**kwargs,
):
"""create and return the axes
leagcy tables create an indexable column, indexable index,
non-indexable fields
Parameters:
-----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a values to use for string column nan_rep
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except:
raise TypeError(
"cannot properly create the storer for: "
"[group->%s,value->%s]" % (self.group._v_name, type(obj))
)
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.info = copy.copy(existing_table.info)
else:
existing_table = None
# currently support on ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError("currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = "nan"
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = (
_convert_index(a, self.encoding, self.format_type)
.set_name(name)
.set_axis(i)
)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if append_axis != exist_axis:
# ahah! -> reindex
if sorted(append_axis) == sorted(exist_axis):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info["names"] = list(a.names)
info["type"] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info) for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
if validate:
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get out blocks
block_obj = self.get_object(obj).consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex_axis(
Index(axis_labels).difference(Index(data_columns)), axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex_axis([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = dict(
[
(tuple(b_items.tolist()), (b, b_items))
for b, b_items in zip(blocks, blk_items)
]
)
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except:
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ",".join(pprint_thing(item) for item in items)
)
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except:
raise ValueError(
"Incompatible appended table [%s] with "
"existing table [%s]" % (blocks, existing_table.values_axes)
)
else:
existing_col = None
try:
col = klass.create_for_block(i=i, name=name, version=self.version)
col.set_atom(
block=b,
block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
info=self.info,
**kwargs,
)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->%s,items->%s] %s" % (b.dtype.name, b_items, str(detail))
)
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
|
https://github.com/pandas-dev/pandas/issues/10381
|
In [25]: df.index.name = 'theindex'
In [26]: df.to_hdf('store.h5', 'test2', format='table', min_itemsize={'theindex': 10})
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
|
ValueError
|
def nunique(self, dropna=True):
"""Returns number of unique elements in the group"""
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, "val.dtype must be object, got %s" % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isnull = lambda a: a == -1
else:
_isnull = isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = _isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
res = out if ids[0] != -1 else out[1:]
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids] = out
return Series(res, index=ri, name=self.name)
|
def nunique(self, dropna=True):
"""Returns number of unique elements in the group"""
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, "val.dtype must be object, got %s" % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isnull = lambda a: a == -1
else:
_isnull = isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = _isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
res = out if ids[0] != -1 else out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids] = out
return Series(res, index=ri, name=self.name)
|
https://github.com/pandas-dev/pandas/issues/12553
|
In [18]: b = pandas.Series()
In [19]: g = b.groupby(level = 0)
In [20]: g.nunique()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-20-fbbfc3108eac> in <module>()
----> 1 g.nunique()
/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.pyc in nunique(self, dropna)
2693
2694 out = np.add.reduceat(inc, idx).astype('int64', copy=False)
-> 2695 return Series(out if ids[0] != -1 else out[1:],
2696 index=self.grouper.result_index,
2697 name=self.name)
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if self._current_row_in_file_index >= self.row_count:
return None
m = self.row_count - self._current_row_in_file_index
if nrows > m:
nrows = m
nd = (self.column_types == b"d").sum()
ns = (self.column_types == b"s").sum()
self._string_chunk = np.empty((ns, nrows), dtype=np.object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
|
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if self._current_row_in_file_index >= self.row_count:
return None
nd = (self.column_types == b"d").sum()
ns = (self.column_types == b"s").sum()
self._string_chunk = np.empty((ns, nrows), dtype=np.object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
|
https://github.com/pandas-dev/pandas/issues/13654
|
import pandas as pd
f = pd.read_sas('test.sas7bdat', iterator=True)
for chunk in f:
print(f)
---------------------------------------------------------------------------
AbstractMethodError Traceback (most recent call last)
<ipython-input-3-bedf2769bffe> in <module>()
----> 1 for chunk in f:
2 print(chunk)
C:\Anaconda3\lib\site-packages\pandas\io\common.py in __next__(self)
99
100 def __next__(self):
--> 101 raise AbstractMethodError(self)
102
103 if not compat.PY3:
AbstractMethodError: This method must be defined in the concrete class of SAS7BDATReader
|
AbstractMethodError
|
def _generate(
cls,
start,
end,
periods,
name,
offset,
tz=None,
normalize=False,
ambiguous="raise",
closed=None,
):
if com._count_not_none(start, end, periods) != 2:
raise ValueError("Must specify two of start, end, or periods")
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError(
"Closed has to be None if not both of startand end are defined"
)
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
inferred_tz = tools._infer_tzinfo(start, end)
except:
raise TypeError(
"Start and end cannot both be tz-aware with different timezones"
)
inferred_tz = tslib.maybe_get_tz(inferred_tz)
# these may need to be localized
tz = tslib.maybe_get_tz(tz)
if tz is not None:
date = start or end
if date.tzinfo is not None and hasattr(tz, "localize"):
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
if not tslib.get_timezone(inferred_tz) == tslib.get_timezone(tz):
raise AssertionError("Inferred time zone not equal to passed time zone")
elif inferred_tz is not None:
tz = inferred_tz
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, "delta") and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz, ambiguous=False)
if end is not None and end.tz is None:
end = end.tz_localize(tz, ambiguous=False)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz, ambiguous=False)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz, ambiguous=False)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, "tz", None) is None:
index = tslib.tz_localize_to_utc(
_ensure_int64(index), tz, ambiguous=ambiguous
)
index = index.view(_NS_DTYPE)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz).asm8
if end is not None:
end = end.tz_localize(tz).asm8
if not left_closed and len(index) and index[0] == start:
index = index[1:]
if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
return index
|
def _generate(
cls,
start,
end,
periods,
name,
offset,
tz=None,
normalize=False,
ambiguous="raise",
closed=None,
):
if com._count_not_none(start, end, periods) != 2:
raise ValueError("Must specify two of start, end, or periods")
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError(
"Closed has to be None if not both of startand end are defined"
)
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
inferred_tz = tools._infer_tzinfo(start, end)
except:
raise TypeError(
"Start and end cannot both be tz-aware with different timezones"
)
inferred_tz = tslib.maybe_get_tz(inferred_tz)
# these may need to be localized
tz = tslib.maybe_get_tz(tz)
if tz is not None:
date = start or end
if date.tzinfo is not None and hasattr(tz, "localize"):
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
if not inferred_tz == tz:
raise AssertionError("Inferred time zone not equal to passed time zone")
elif inferred_tz is not None:
tz = inferred_tz
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, "delta") and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz, ambiguous=False)
if end is not None and end.tz is None:
end = end.tz_localize(tz, ambiguous=False)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz, ambiguous=False)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz, ambiguous=False)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, "tz", None) is None:
index = tslib.tz_localize_to_utc(
_ensure_int64(index), tz, ambiguous=ambiguous
)
index = index.view(_NS_DTYPE)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz).asm8
if end is not None:
end = end.tz_localize(tz).asm8
if not left_closed and len(index) and index[0] == start:
index = index[1:]
if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
return index
|
https://github.com/pandas-dev/pandas/issues/14682
|
Traceback (most recent call last):
File "./t.py", line 7, in <module>
dfo = df.groupby(pd.TimeGrouper('5min'))
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 3984, in groupby
**kwargs)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.py", line 1501, in groupby
return klass(obj, by, **kwds)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.py", line 370, in __init__
mutated=self.mutated)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.py", line 2382, in _get_grouper
binner, grouper, obj = key._get_grouper(obj)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1062, in _get_grouper
r._set_binner()
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 237, in _set_binner
self.binner, self.grouper = self._get_binner()
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 245, in _get_binner
binner, bins, binlabels = self._get_binner_for_time()
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 660, in _get_binner_for_time
return self.groupby._get_time_bins(self.ax)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1118, in _get_time_bins
base=self.base)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1262, in _get_range_edges
closed=closed, base=base)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1326, in _adjust_dates_anchored
return (Timestamp(fresult).tz_localize(first_tzinfo),
File "pandas/tslib.pyx", line 621, in pandas.tslib.Timestamp.tz_localize (pandas/tslib.c:13694)
File "pandas/tslib.pyx", line 4308, in pandas.tslib.tz_localize_to_utc (pandas/tslib.c:74816)
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2016-10-30 02:20:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
# First and last offsets should be calculated from the start day to fix an
# error cause by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
# 14682 - Since we need to drop the TZ information to perform
# the adjustment in the presence of a DST change,
# save TZ Info and the DST state of the first and last parameters
# so that we can accurately rebuild them at the end.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
first_dst = bool(first.dst())
last_dst = bool(last.dst())
first = first.tz_localize(None)
last = last.tz_localize(None)
start_day_nanos = first.normalize().value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (
Timestamp(fresult).tz_localize(first_tzinfo, ambiguous=first_dst),
Timestamp(lresult).tz_localize(last_tzinfo, ambiguous=last_dst),
)
|
def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
# First and last offsets should be calculated from the start day to fix an
# error cause by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
first_tzinfo = first.tzinfo
first = first.tz_localize(None)
last = last.tz_localize(None)
start_day_nanos = first.normalize().value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
# return (Timestamp(fresult, tz=first.tz),
# Timestamp(lresult, tz=last.tz))
return (
Timestamp(fresult).tz_localize(first_tzinfo),
Timestamp(lresult).tz_localize(first_tzinfo),
)
|
https://github.com/pandas-dev/pandas/issues/14682
|
Traceback (most recent call last):
File "./t.py", line 7, in <module>
dfo = df.groupby(pd.TimeGrouper('5min'))
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 3984, in groupby
**kwargs)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.py", line 1501, in groupby
return klass(obj, by, **kwds)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.py", line 370, in __init__
mutated=self.mutated)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/groupby.py", line 2382, in _get_grouper
binner, grouper, obj = key._get_grouper(obj)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1062, in _get_grouper
r._set_binner()
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 237, in _set_binner
self.binner, self.grouper = self._get_binner()
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 245, in _get_binner
binner, bins, binlabels = self._get_binner_for_time()
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 660, in _get_binner_for_time
return self.groupby._get_time_bins(self.ax)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1118, in _get_time_bins
base=self.base)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1262, in _get_range_edges
closed=closed, base=base)
File "/usr/local/lib/python2.7/dist-packages/pandas/tseries/resample.py", line 1326, in _adjust_dates_anchored
return (Timestamp(fresult).tz_localize(first_tzinfo),
File "pandas/tslib.pyx", line 621, in pandas.tslib.Timestamp.tz_localize (pandas/tslib.c:13694)
File "pandas/tslib.pyx", line 4308, in pandas.tslib.tz_localize_to_utc (pandas/tslib.c:74816)
pytz.exceptions.AmbiguousTimeError: Cannot infer dst time from Timestamp('2016-10-30 02:20:00'), try using the 'ambiguous' argument
|
pytz.exceptions.AmbiguousTimeError
|
def read_clipboard(sep="\s+", **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
Parameters
----------
sep : str, default '\s+'.
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
"""
encoding = kwargs.pop("encoding", "utf-8")
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace("-", "") != "utf8":
raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text,
encoding=(kwargs.get("encoding") or get_option("display.encoding")),
)
except:
pass
# Excel copies into clipboard with \t separation
# inspect no more then the 10 first lines, if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split("\n")[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count("\t") for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = "\t"
if sep is None and kwargs.get("delim_whitespace") is None:
sep = "\s+"
return read_table(StringIO(text), sep=sep, **kwargs)
|
def read_clipboard(sep="\s+", **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
Parameters
----------
sep : str, default '\s+'.
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
"""
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text,
encoding=(kwargs.get("encoding") or get_option("display.encoding")),
)
except:
pass
# Excel copies into clipboard with \t separation
# inspect no more then the 10 first lines, if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split("\n")[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count("\t") for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = "\t"
if sep is None and kwargs.get("delim_whitespace") is None:
sep = "\s+"
return read_table(StringIO(text), sep=sep, **kwargs)
|
https://github.com/pandas-dev/pandas/issues/14362
|
# Your code here
In [1]: df = pd.DataFrame(np.random.randn(10, 2))
In [2]: df.to_clipboard()
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-2-1f8b11f0ff98> in <module>()
----> 1 df.to_clipboard()
c:\users\chris\documents\python-dev\pandas\pandas\core\generic.py in to_clipboard(self, excel, sep, **kwargs)
1236 """
1237 from pandas.io import clipboard
-> 1238 clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs)
1239
1240 def to_xarray(self):
c:\users\chris\documents\python-dev\pandas\pandas\io\clipboard.py in to_clipboard(obj, excel, sep, **kwargs)
96 else:
97 objstr = str(obj)
---> 98 clipboard_set(objstr)
c:\users\chris\documents\python-dev\pandas\pandas\util\clipboard.py in _copyWindows(text)
83 len(text.encode('utf-16-le')) + 2)
84 pchData = d.kernel32.GlobalLock(hCd)
---> 85 ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
86 d.kernel32.GlobalUnlock(hCd)
87 d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
OSError: exception: access violation writing 0x0000000000000000
In [4]: pd.read_clipboard()
<segfault>
|
OSError
|
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
encoding = kwargs.pop("encoding", "utf-8")
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace("-", "") != "utf8":
raise ValueError("clipboard only supports utf-8 encoding")
from pandas.util.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = "\t"
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs)
text = buf.getvalue()
if PY2:
text = text.decode("utf-8")
clipboard_set(text)
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context("display.max_colwidth", 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
|
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
from pandas.util.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = "\t"
buf = StringIO()
obj.to_csv(buf, sep=sep, **kwargs)
clipboard_set(buf.getvalue())
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context("display.max_colwidth", 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
|
https://github.com/pandas-dev/pandas/issues/14362
|
# Your code here
In [1]: df = pd.DataFrame(np.random.randn(10, 2))
In [2]: df.to_clipboard()
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-2-1f8b11f0ff98> in <module>()
----> 1 df.to_clipboard()
c:\users\chris\documents\python-dev\pandas\pandas\core\generic.py in to_clipboard(self, excel, sep, **kwargs)
1236 """
1237 from pandas.io import clipboard
-> 1238 clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs)
1239
1240 def to_xarray(self):
c:\users\chris\documents\python-dev\pandas\pandas\io\clipboard.py in to_clipboard(obj, excel, sep, **kwargs)
96 else:
97 objstr = str(obj)
---> 98 clipboard_set(objstr)
c:\users\chris\documents\python-dev\pandas\pandas\util\clipboard.py in _copyWindows(text)
83 len(text.encode('utf-16-le')) + 2)
84 pchData = d.kernel32.GlobalLock(hCd)
---> 85 ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
86 d.kernel32.GlobalUnlock(hCd)
87 d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
OSError: exception: access violation writing 0x0000000000000000
In [4]: pd.read_clipboard()
<segfault>
|
OSError
|
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
If unspecified, `sep` defaults to '\s+'
Returns
-------
parsed : DataFrame
"""
encoding = kwargs.pop("encoding", "utf-8")
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace("-", "") != "utf8":
raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text,
encoding=(kwargs.get("encoding") or get_option("display.encoding")),
)
except:
pass
# Excel copies into clipboard with \t seperation
# inspect no more then the 10 first lines, if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split("\n")[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count("\t") for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
kwargs["sep"] = "\t"
if kwargs.get("sep") is None and kwargs.get("delim_whitespace") is None:
kwargs["sep"] = "\s+"
return read_table(StringIO(text), **kwargs)
|
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
If unspecified, `sep` defaults to '\s+'
Returns
-------
parsed : DataFrame
"""
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text,
encoding=(kwargs.get("encoding") or get_option("display.encoding")),
)
except:
pass
# Excel copies into clipboard with \t seperation
# inspect no more then the 10 first lines, if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split("\n")[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count("\t") for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
kwargs["sep"] = "\t"
if kwargs.get("sep") is None and kwargs.get("delim_whitespace") is None:
kwargs["sep"] = "\s+"
return read_table(StringIO(text), **kwargs)
|
https://github.com/pandas-dev/pandas/issues/14362
|
# Your code here
In [1]: df = pd.DataFrame(np.random.randn(10, 2))
In [2]: df.to_clipboard()
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-2-1f8b11f0ff98> in <module>()
----> 1 df.to_clipboard()
c:\users\chris\documents\python-dev\pandas\pandas\core\generic.py in to_clipboard(self, excel, sep, **kwargs)
1236 """
1237 from pandas.io import clipboard
-> 1238 clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs)
1239
1240 def to_xarray(self):
c:\users\chris\documents\python-dev\pandas\pandas\io\clipboard.py in to_clipboard(obj, excel, sep, **kwargs)
96 else:
97 objstr = str(obj)
---> 98 clipboard_set(objstr)
c:\users\chris\documents\python-dev\pandas\pandas\util\clipboard.py in _copyWindows(text)
83 len(text.encode('utf-16-le')) + 2)
84 pchData = d.kernel32.GlobalLock(hCd)
---> 85 ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
86 d.kernel32.GlobalUnlock(hCd)
87 d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
OSError: exception: access violation writing 0x0000000000000000
In [4]: pd.read_clipboard()
<segfault>
|
OSError
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
https://github.com/pandas-dev/pandas/issues/14381
|
In [3]: pd.DataFrame(dict(a=None),index=[0])
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-20b65f605ca3> in <module>()
----> 1 pd.DataFrame(dict(a=None),index=[0])
miniconda2/envs/readout2/lib/python2.7/site-packages/pandas/core/frame.pyc in __init__(self, data, index, columns, dtype, copy)
264 dtype=dtype, copy=copy)
265 elif isinstance(data, dict):
--> 266 mgr = self._init_dict(data, index, columns, dtype=dtype)
267 elif isinstance(data, ma.MaskedArray):
268 import numpy.ma.mrecords as mrecords
miniconda2/envs/readout2/lib/python2.7/site-packages/pandas/core/frame.pyc in _init_dict(self, data, index, columns, dtype)
400 arrays = [data[k] for k in keys]
401
--> 402 return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
403
404 def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
miniconda2/envs/readout2/lib/python2.7/site-packages/pandas/core/frame.pyc in _arrays_to_mgr(arrays, arr_names, index, columns, dtype)
5382
5383 # don't force copy because getting jammed in an ndarray anyway
-> 5384 arrays = _homogenize(arrays, index, dtype)
5385
5386 # from BlockManager perspective
miniconda2/envs/readout2/lib/python2.7/site-packages/pandas/core/frame.pyc in _homogenize(data, index, dtype)
5693 v = lib.fast_multiget(v, oindex.values, default=NA)
5694 v = _sanitize_array(v, index, dtype=dtype, copy=False,
-> 5695 raise_cast_failure=False)
5696
5697 homogenized.append(v)
miniconda2/envs/readout2/lib/python2.7/site-packages/pandas/core/series.pyc in _sanitize_array(data, index, dtype, copy, raise_cast_failure)
2917
2918 # scalar like
-> 2919 if subarr.ndim == 0:
2920 if isinstance(data, list): # pragma: no cover
2921 subarr = np.array(data, dtype=object)
AttributeError: 'NoneType' object has no attribute 'ndim'
|
AttributeError
|
def __init__(
self, index, grouper=None, obj=None, name=None, level=None, sort=True, in_axis=False
):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._should_compress = True
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passing in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError("Level %s not in index" % str(level))
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
self.grouper, self._labels, self._group_index = index._get_grouper_for_level(
self.grouper, level
)
else:
if self.grouper is None and self.name is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
# must have an ordered categorical
if self.sort:
if not self.grouper.ordered:
# technically we cannot group on an unordered
# Categorical
# but this a user convenience to do so; the ordering
# is preserved and if it's a reduction it doesn't make
# any difference
pass
# fix bug #GH8868 sort=False being ignored in categorical
# groupby
else:
cat = self.grouper.unique()
self.grouper = self.grouper.reorder_categories(cat.categories)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
c = self.grouper.categories
self._group_index = CategoricalIndex(
Categorical.from_codes(
np.arange(len(c)), categories=c, ordered=self.grouper.ordered
)
)
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper, (Series, Index, Categorical, np.ndarray)):
if getattr(self.grouper, "ndim", 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (
hasattr(self.grouper, "__len__")
and len(self.grouper) == len(self.index)
):
errmsg = (
"Grouper result violates len(labels) == "
"len(data)\nresult: %s" % pprint_thing(self.grouper)
)
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, "dtype", None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
|
def __init__(
self, index, grouper=None, obj=None, name=None, level=None, sort=True, in_axis=False
):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._should_compress = True
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passing in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError("Level %s not in index" % str(level))
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
self.grouper, self._labels, self._group_index = index._get_grouper_for_level(
self.grouper, level
)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
# must have an ordered categorical
if self.sort:
if not self.grouper.ordered:
# technically we cannot group on an unordered
# Categorical
# but this a user convenience to do so; the ordering
# is preserved and if it's a reduction it doesn't make
# any difference
pass
# fix bug #GH8868 sort=False being ignored in categorical
# groupby
else:
cat = self.grouper.unique()
self.grouper = self.grouper.reorder_categories(cat.categories)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
c = self.grouper.categories
self._group_index = CategoricalIndex(
Categorical.from_codes(
np.arange(len(c)), categories=c, ordered=self.grouper.ordered
)
)
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper, (Series, Index, Categorical, np.ndarray)):
if getattr(self.grouper, "ndim", 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (
hasattr(self.grouper, "__len__")
and len(self.grouper) == len(self.index)
):
errmsg = (
"Grouper result violates len(labels) == "
"len(data)\nresult: %s" % pprint_thing(self.grouper)
)
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, "dtype", None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
|
https://github.com/pandas-dev/pandas/issues/14334
|
In [27]: df.groupby([pd.Grouper(key='A')]).count()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-27-f4f86763ebfc> in <module>()
----> 1 df.groupby([pd.Grouper(key='A')]).count()
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/core/generic.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, **kwargs)
3776 return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
3777 sort=sort, group_keys=group_keys, squeeze=squeeze,
-> 3778 **kwargs)
3779
3780 def asfreq(self, freq, method=None, how=None, normalize=False):
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/core/groupby.py in groupby(obj, by, **kwds)
1425 raise TypeError('invalid type: %s' % type(obj))
1426
-> 1427 return klass(obj, by, **kwds)
1428
1429
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/core/groupby.py in __init__(self, obj, keys, axis, level, grouper, exclusions, selection, as_index, sort, group_keys, squeeze, **kwargs)
352 level=level,
353 sort=sort,
--> 354 mutated=self.mutated)
355
356 self.obj = obj
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/core/groupby.py in _get_grouper(obj, key, axis, level, sort, mutated)
2400 sort=sort,
2401 in_axis=in_axis) \
-> 2402 if not isinstance(gpr, Grouping) else gpr
2403
2404 groupings.append(ping)
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/core/groupby.py in __init__(self, index, grouper, obj, name, level, sort, in_axis)
2197
2198 # get the new grouper
-> 2199 grouper = self.grouper._get_binner_for_grouping(self.obj)
2200 self.obj = self.grouper.obj
2201 self.grouper = grouper
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/core/groupby.py in _get_binner_for_grouping(self, obj)
290 group_axis = obj._get_axis(self.axis)
291 return Grouping(group_axis, None, obj=obj, name=self.key,
--> 292 level=self.level, sort=self.sort, in_axis=False)
293
294 @property
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/core/groupby.py in __init__(self, index, grouper, obj, name, level, sort, in_axis)
2213 t = self.name or str(type(self.grouper))
2214 raise ValueError("Grouper for '%s' not 1-dimensional" % t)
-> 2215 self.grouper = self.index.map(self.grouper)
2216 if not (hasattr(self.grouper, "__len__") and
2217 len(self.grouper) == len(self.index)):
/Users/measejm1/anaconda/lib/python3.5/site-packages/pandas/indexes/base.py in map(self, mapper)
2238 applied : array
2239 """
-> 2240 return self._arrmap(self.values, mapper)
2241
2242 def isin(self, values, level=None):
pandas/src/generated.pyx in pandas.algos.arrmap_int64 (pandas/algos.c:94003)()
TypeError: 'NoneType' object is not callable
|
TypeError
|
def _groupby_indices(values):
if is_categorical_dtype(values):
# we have a categorical, so we can do quite a bit
# bit better than factorizing again
reverse = dict(enumerate(values.categories))
codes = values.codes.astype("int64")
mask = 0 <= codes
counts = np.bincount(codes[mask], minlength=values.categories.size)
else:
reverse, codes, counts = _algos.group_labels(
_values_from_object(_ensure_object(values))
)
return _algos.groupby_indices(reverse, codes, counts)
|
def _groupby_indices(values):
if is_categorical_dtype(values):
# we have a categorical, so we can do quite a bit
# bit better than factorizing again
reverse = dict(enumerate(values.categories))
codes = values.codes.astype("int64")
_, counts = _hash.value_count_int64(codes, False)
else:
reverse, codes, counts = _algos.group_labels(
_values_from_object(_ensure_object(values))
)
return _algos.groupby_indices(reverse, codes, counts)
|
https://github.com/pandas-dev/pandas/issues/13629
|
0
0
(0, 5] 3.333333
(5, 10] 7.500000
(10, 15] 11.000000
(15, 20] NaN
(20, 25] 24.500000
(25, 30] NaN
(30, 35] NaN
(35, 40] 36.000000
(40, 45] NaN
(45, 50] NaN
(50, 55] NaN
0
0
(0, 5] 3.5
(5, 10] 7.5
(10, 15] 11.0
(15, 20] 18.0
(20, 25] 24.5
(25, 30] 30.5
(30, 35] 30.5
(35, 40] 36.0
(40, 45] 18.0
(45, 50] 18.0
(50, 55] 18.0
0 3.5
dtype: float64
Traceback (most recent call last):
File "<ipython-input-9-0663486889da>", line 1, in <module>
runfile('C:/PythonDir/test04.py', wdir='C:/PythonDir')
File "C:\Anaconda2\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 714, in runfile
execfile(filename, namespace)
File "C:\Anaconda2\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 74, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "C:/PythonDir/test04.py", line 20, in <module>
print g.get_group('(40, 45]').median()
File "C:\Anaconda2\lib\site-packages\pandas\core\groupby.py", line 587, in get_group
raise KeyError(name)
KeyError: '(40, 45]'
|
KeyError
|
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we maybe be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, MultiIndex) and self.name != "iloc":
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1 :]
else:
new_key = tup[:i] + tup[i + 1 :]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if isinstance(section, ABCDataFrame) and i > 0 and len(new_key) == 2:
a, b = new_key
new_key = b, a
if len(new_key) == 1:
(new_key,) = new_key
# This is an elided recursive call to iloc/loc/etc'
return getattr(section, self.name)[new_key]
raise IndexingError("not applicable")
|
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we maybe be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
if isinstance(ax0, MultiIndex):
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1 :]
else:
new_key = tup[:i] + tup[i + 1 :]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if isinstance(section, ABCDataFrame) and i > 0 and len(new_key) == 2:
a, b = new_key
new_key = b, a
if len(new_key) == 1:
(new_key,) = new_key
# This is an elided recursive call to iloc/loc/etc'
return getattr(section, self.name)[new_key]
raise IndexingError("not applicable")
|
https://github.com/pandas-dev/pandas/issues/13797
|
df1.iloc[0,0]
C:\Users\rikuhiro\Anaconda3\envs\pd-check\lib\site-packages\ipykernel\__main__.py:1: PerformanceWarning: indexing past lexsort depth may impact performance.
if __name__ == '__main__':
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-37-884ae2904642> in <module>()
----> 1 df1.iloc[0,0]
C:\Users\rikuhiro\Anaconda3\envs\pd-check\lib\site-packages\pandas\core\indexing.py in __getitem__(self, key)
1292
1293 if type(key) is tuple:
-> 1294 return self._getitem_tuple(key)
1295 else:
1296 return self._getitem_axis(key, axis=0)
C:\Users\rikuhiro\Anaconda3\envs\pd-check\lib\site-packages\pandas\core\indexing.py in _getitem_tuple(self, tup)
1561
1562 # if the dim was reduced, then pass a lower-dim the next time
-> 1563 if retval.ndim < self.ndim:
1564 axis -= 1
1565
AttributeError: 'str' object has no attribute 'ndim'
|
AttributeError
|
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
ovalues = values
supplied_dtype = None
if not is_list_like(values):
values = np.array([values])
# if this is a Series that contains relevant dtype info, then use this
# instead of the inferred type; this avoids coercing Series([NaT],
# dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]')
elif isinstance(values, pd.Series) and (
is_timedelta64_dtype(values) or is_datetime64_dtype(values)
):
supplied_dtype = values.dtype
inferred_type = supplied_dtype or lib.infer_dtype(values)
if inferred_type in ("datetime64", "datetime", "date", "time") or is_datetimetz(
inferred_type
):
# if we have a other of timedelta, but use pd.NaT here we
# we are in the wrong path
if (
supplied_dtype is None
and other is not None
and (other.dtype in ("timedelta64[ns]", "datetime64[ns]"))
and isnull(values).all()
):
values = np.empty(values.shape, dtype="timedelta64[ns]")
values[:] = iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
# datetime with tz
elif isinstance(ovalues, datetime.datetime) and hasattr(ovalues, "tzinfo"):
values = pd.DatetimeIndex(values)
# datetime array with tz
elif is_datetimetz(values):
if isinstance(values, ABCSeries):
values = values._values
elif not (
isinstance(values, (np.ndarray, ABCSeries)) and is_datetime64_dtype(values)
):
values = tslib.array_to_datetime(values)
elif inferred_type in ("timedelta", "timedelta64"):
# have a timedelta, convert to to ns here
values = to_timedelta(values, errors="coerce", box=False)
elif inferred_type == "integer":
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == "m":
values = values.astype("timedelta64[ns]")
elif isinstance(values, pd.PeriodIndex):
values = values.to_timestamp().to_series()
elif name not in ("__truediv__", "__div__", "__mul__", "__rmul__"):
raise TypeError(
"incompatible type for a datetime/timedelta operation [{0}]".format(
name
)
)
elif inferred_type == "floating":
if isnull(values).all() and name in (
"__add__",
"__radd__",
"__sub__",
"__rsub__",
):
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
return values
elif self._is_offset(values):
return values
else:
raise TypeError(
"incompatible type [{0}] for a datetime/timedelta operation".format(
np.array(values).dtype
)
)
return values
|
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
ovalues = values
supplied_dtype = None
if not is_list_like(values):
values = np.array([values])
# if this is a Series that contains relevant dtype info, then use this
# instead of the inferred type; this avoids coercing Series([NaT],
# dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]')
elif isinstance(values, pd.Series) and (
is_timedelta64_dtype(values) or is_datetime64_dtype(values)
):
supplied_dtype = values.dtype
inferred_type = supplied_dtype or lib.infer_dtype(values)
if inferred_type in ("datetime64", "datetime", "date", "time") or is_datetimetz(
inferred_type
):
# if we have a other of timedelta, but use pd.NaT here we
# we are in the wrong path
if (
supplied_dtype is None
and other is not None
and (other.dtype in ("timedelta64[ns]", "datetime64[ns]"))
and isnull(values).all()
):
values = np.empty(values.shape, dtype="timedelta64[ns]")
values[:] = iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
# datetime with tz
elif isinstance(ovalues, datetime.datetime) and hasattr(ovalues, "tz"):
values = pd.DatetimeIndex(values)
# datetime array with tz
elif is_datetimetz(values):
if isinstance(values, ABCSeries):
values = values._values
elif not (
isinstance(values, (np.ndarray, ABCSeries)) and is_datetime64_dtype(values)
):
values = tslib.array_to_datetime(values)
elif inferred_type in ("timedelta", "timedelta64"):
# have a timedelta, convert to to ns here
values = to_timedelta(values, errors="coerce", box=False)
elif inferred_type == "integer":
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == "m":
values = values.astype("timedelta64[ns]")
elif isinstance(values, pd.PeriodIndex):
values = values.to_timestamp().to_series()
elif name not in ("__truediv__", "__div__", "__mul__", "__rmul__"):
raise TypeError(
"incompatible type for a datetime/timedelta operation [{0}]".format(
name
)
)
elif inferred_type == "floating":
if isnull(values).all() and name in (
"__add__",
"__radd__",
"__sub__",
"__rsub__",
):
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
return values
elif self._is_offset(values):
return values
else:
raise TypeError(
"incompatible type [{0}] for a datetime/timedelta operation".format(
np.array(values).dtype
)
)
return values
|
https://github.com/pandas-dev/pandas/issues/14088
|
import pytz
import datetime
import pandas as pd
foo = pd.Series(datetime.datetime(2016, 8, 23, 12, tzinfo=pytz.utc))
foo - datetime.datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-52-0639980e7d31> in <module>()
----> 1 foo - datetime.datetime(2016, 8, 1, 12, tzinfo=pytz.utc)
/Users/charon/.virtualenvs/iwoca-django/lib/python2.7/site-packages/pandas/core/ops.pyc in wrapper(left, right, name, na_op)
607
608 time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name,
--> 609 na_op)
610
611 if time_converted is None:
/Users/charon/.virtualenvs/iwoca-django/lib/python2.7/site-packages/pandas/core/ops.pyc in maybe_convert_for_time_op(cls, left, right, name, na_op)
567 return None
568
--> 569 return cls(left, right, name, na_op)
570
571
/Users/charon/.virtualenvs/iwoca-django/lib/python2.7/site-packages/pandas/core/ops.pyc in __init__(self, left, right, name, na_op)
281
282 lvalues = self._convert_to_array(left, name=name)
--> 283 rvalues = self._convert_to_array(right, name=name, other=lvalues)
284
285 self.name = name
/Users/charon/.virtualenvs/iwoca-django/lib/python2.7/site-packages/pandas/core/ops.pyc in _convert_to_array(self, values, name, other)
419 elif not (isinstance(values, (np.ndarray, ABCSeries)) and
420 is_datetime64_dtype(values)):
--> 421 values = tslib.array_to_datetime(values)
422 elif inferred_type in ('timedelta', 'timedelta64'):
423 # have a timedelta, convert to to ns here
/Users/charon/.virtualenvs/iwoca-django/lib/python2.7/site-packages/pandas/tslib.so in pandas.tslib.array_to_datetime (pandas/tslib.c:41972)()
/Users/charon/.virtualenvs/iwoca-django/lib/python2.7/site-packages/pandas/tslib.so in pandas.tslib.array_to_datetime (pandas/tslib.c:38943)()
ValueError: Tz-aware datetime.datetime cannot be converted to datetime64 unless utc=True
|
ValueError
|
def fillna(self, value=None, method=None, limit=None):
"""Fill NA/NaN values using the specified method.
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar
Value to use to fill holes (e.g. 0)
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
values = self._codes
# Make sure that we also get NA in categories
if self.categories.dtype.kind in ["S", "O", "f"]:
if np.nan in self.categories:
values = values.copy()
nan_pos = np.where(isnull(self.categories))[0]
# we only have one NA in categories
values[values == nan_pos] = -1
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
values = _get_codes_for_values(values, self.categories)
else:
if not isnull(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = values == -1
if mask.any():
values = values.copy()
if isnull(value):
values[mask] = -1
else:
values[mask] = self.categories.get_loc(value)
return self._constructor(
values, categories=self.categories, ordered=self.ordered, fastpath=True
)
|
def fillna(self, value=None, method=None, limit=None):
"""Fill NA/NaN values using the specified method.
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar
Value to use to fill holes (e.g. 0)
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
values = self._codes
# Make sure that we also get NA in categories
if self.categories.dtype.kind in ["S", "O", "f"]:
if np.nan in self.categories:
values = values.copy()
nan_pos = np.where(isnull(self.categories))[0]
# we only have one NA in categories
values[values == nan_pos] = -1
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
values = _get_codes_for_values(values, self.categories)
else:
if not isnull(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = values == -1
if mask.any():
values = values.copy()
values[mask] = self.categories.get_loc(value)
return self._constructor(
values, categories=self.categories, ordered=self.ordered, fastpath=True
)
|
https://github.com/pandas-dev/pandas/issues/14021
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/indexes/base.py in get_loc(self, key, method, tolerance)
1875 try:
-> 1876 return self._engine.get_loc(key)
1877 except KeyError:
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:4027)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3891)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12408)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12359)()
KeyError: <class 'object'>
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-127-6817ed5716b0> in <module>()
2 tst = pd.DataFrame({'a':[1,2,1,np.nan],
3 'b':[np.nan, np.nan, np.nan, np.nan]}, dtype='category')
----> 4 tst.fillna(value=tst.median())
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/frame.py in fillna(self, value, method, axis, inplace, limit, downcast, **kwargs)
2754 self).fillna(value=value, method=method, axis=axis,
2755 inplace=inplace, limit=limit,
-> 2756 downcast=downcast, **kwargs)
2757
2758 @Appender(_shared_docs['shift'] % _shared_doc_kwargs)
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/generic.py in fillna(self, value, method, axis, inplace, limit, downcast)
3164 continue
3165 obj = result[k]
-> 3166 obj.fillna(v, limit=limit, inplace=True)
3167 return result
3168 elif not com.is_list_like(value):
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/series.py in fillna(self, value, method, axis, inplace, limit, downcast, **kwargs)
2350 axis=axis, inplace=inplace,
2351 limit=limit, downcast=downcast,
-> 2352 **kwargs)
2353
2354 @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/generic.py in fillna(self, value, method, axis, inplace, limit, downcast)
3151 new_data = self._data.fillna(value=value, limit=limit,
3152 inplace=inplace,
-> 3153 downcast=downcast)
3154
3155 elif isinstance(value, (dict, com.ABCSeries)):
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/internals.py in fillna(self, **kwargs)
2865
2866 def fillna(self, **kwargs):
-> 2867 return self.apply('fillna', **kwargs)
2868
2869 def downcast(self, **kwargs):
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/internals.py in apply(self, f, axes, filter, do_integrity_check, consolidate, **kwargs)
2830
2831 kwargs['mgr'] = self
-> 2832 applied = getattr(b, f)(**kwargs)
2833 result_blocks = _extend_blocks(applied, result_blocks)
2834
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/internals.py in fillna(self, value, limit, inplace, downcast, mgr)
1884 values = self.values if inplace else self.values.copy()
1885 values = self._try_coerce_result(values.fillna(value=value,
-> 1886 limit=limit))
1887 return [self.make_block(values=values)]
1888
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/util/decorators.py in wrapper(*args, **kwargs)
89 else:
90 kwargs[new_arg_name] = new_arg_value
---> 91 return func(*args, **kwargs)
92 return wrapper
93 return _deprecate_kwarg
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/core/categorical.py in fillna(self, value, method, limit)
1415 if mask.any():
1416 values = values.copy()
-> 1417 values[mask] = self.categories.get_loc(value)
1418
1419 return Categorical(values, categories=self.categories,
/home/dan/.local/opt/miniconda3/envs/mathbs/lib/python3.5/site-packages/pandas/indexes/base.py in get_loc(self, key, method, tolerance)
1876 return self._engine.get_loc(key)
1877 except KeyError:
-> 1878 return self._engine.get_loc(self._maybe_cast_indexer(key))
1879
1880 indexer = self.get_indexer([key], method=method, tolerance=tolerance)
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:4027)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3891)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12408)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12359)()
KeyError: <class 'object'>
|
KeyError
|
def _is_offset(self, arr_or_obj):
"""check if obj or all elements of list-like is DateOffset"""
if isinstance(arr_or_obj, pd.DateOffset):
return True
elif is_list_like(arr_or_obj) and len(arr_or_obj):
return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
return False
|
def _is_offset(self, arr_or_obj):
"""check if obj or all elements of list-like is DateOffset"""
if isinstance(arr_or_obj, pd.DateOffset):
return True
elif is_list_like(arr_or_obj):
return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
else:
return False
|
https://github.com/pandas-dev/pandas/issues/13844
|
Date_Time Item Relative_Time
0 2/8/2015 6:00:30 1 20
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/core/ops.py:477: PerformanceWarning: Adding/subtracting array of DateOffsets to Series not vectorized
"Series not vectorized", PerformanceWarning)
Traceback (most recent call last):
File "bug.py", line 12, in <module>
dtf.Date_Time = dtf.Date_Time + dtf.Relative_Time
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/core/ops.py", line 641, in wrapper
arr = na_op(lvalues, rvalues)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/core/ops.py", line 481, in <lambda>
self.na_op = lambda x, y: getattr(x, self.name)(y)
TypeError: ufunc add cannot use operands with types dtype('<M8[ns]') and dtype('O')
|
TypeError
|
def _validate_usecols_arg(usecols):
"""
Check whether or not the 'usecols' parameter
contains all integers (column selection by index)
or strings (column by name). Raises a ValueError
if that is not the case.
"""
msg = (
"The elements of 'usecols' must "
"either be all strings, all unicode, or all integers"
)
if usecols is not None:
usecols_dtype = lib.infer_dtype(usecols)
if usecols_dtype not in ("empty", "integer", "string", "unicode"):
raise ValueError(msg)
return set(usecols)
return usecols
|
def _validate_usecols_arg(usecols):
"""
Check whether or not the 'usecols' parameter
contains all integers (column selection by index)
or strings (column by name). Raises a ValueError
if that is not the case.
"""
msg = (
"The elements of 'usecols' must "
"either be all strings, all unicode, or all integers"
)
if usecols is not None:
usecols_dtype = lib.infer_dtype(usecols)
if usecols_dtype not in ("empty", "integer", "string", "unicode"):
raise ValueError(msg)
return usecols
|
https://github.com/pandas-dev/pandas/issues/12546
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-e39439e21b50> in <module>()
17 iterator = True, chunksize = chunksize , engine = "python" )
18
---> 19 print(list(ite)[0])
/home/.usr/py3/lib/pandas-0.18.0rc1+80.g820e110.dirty-py3.3-linux-x86_64.egg/pandas/io/parsers.py in __next__(self)
739
740 def __next__(self):
--> 741 return self.get_chunk()
742
743 def _make_engine(self, engine='c'):
/home/.usr/py3/lib/pandas-0.18.0rc1+80.g820e110.dirty-py3.3-linux-x86_64.egg/pandas/io/parsers.py in get_chunk(self, size)
780 if size is None:
781 size = self.chunksize
--> 782 return self.read(nrows=size)
783
784
/home/.usr/py3/lib/pandas-0.18.0rc1+80.g820e110.dirty-py3.3-linux-x86_64.egg/pandas/io/parsers.py in read(self, nrows)
759 raise ValueError('skip_footer not supported for iteration')
760
--> 761 ret = self._engine.read(nrows)
762
763 if self.options.get('as_recarray'):
/home/.usr/py3/lib/pandas-0.18.0rc1+80.g820e110.dirty-py3.3-linux-x86_64.egg/pandas/io/parsers.py in read(self, rows)
1617 content = content[1:]
1618
-> 1619 alldata = self._rows_to_cols(content)
1620 data = self._exclude_implicit_index(alldata)
1621
/home/.usr/py3/lib/pandas-0.18.0rc1+80.g820e110.dirty-py3.3-linux-x86_64.egg/pandas/io/parsers.py in _rows_to_cols(self, content)
1997 raise ValueError(msg)
1998
-> 1999 if self.usecols:
2000 if self._implicit_index:
2001 zipped_content = [
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, list) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
https://github.com/pandas-dev/pandas/issues/13646
|
======================================================================
FAIL: test_alignment_non_pandas (pandas.tests.frame.test_operators.TestDataFrameOperators)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\conda\Documents\pandas3.5\pandas\tests\frame\test_operators.py", line 1203, in test_alignment_non_pandas
Series([1, 2, 3], index=df.index))
File "C:\Users\conda\Documents\pandas3.5\pandas\util\testing.py", line 1157, in assert_series_equal
assert_attr_equal('dtype', left, right)
File "C:\Users\conda\Documents\pandas3.5\pandas\util\testing.py", line 882, in assert_attr_equal
left_attr, right_attr)
File "C:\Users\conda\Documents\pandas3.5\pandas\util\testing.py", line 1021, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: int32
[right]: int64None
|
AssertionError
|
def _possibly_convert_platform(values):
"""try to do platform conversion, allow ndarray or list here"""
if isinstance(values, (list, tuple)):
values = lib.list_to_object_array(list(values))
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
|
def _possibly_convert_platform(values):
"""try to do platform conversion, allow ndarray or list here"""
if isinstance(values, (list, tuple)):
values = lib.list_to_object_array(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
|
https://github.com/pandas-dev/pandas/issues/13646
|
======================================================================
FAIL: test_alignment_non_pandas (pandas.tests.frame.test_operators.TestDataFrameOperators)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\conda\Documents\pandas3.5\pandas\tests\frame\test_operators.py", line 1203, in test_alignment_non_pandas
Series([1, 2, 3], index=df.index))
File "C:\Users\conda\Documents\pandas3.5\pandas\util\testing.py", line 1157, in assert_series_equal
assert_attr_equal('dtype', left, right)
File "C:\Users\conda\Documents\pandas3.5\pandas\util\testing.py", line 882, in assert_attr_equal
left_attr, right_attr)
File "C:\Users\conda\Documents\pandas3.5\pandas\util\testing.py", line 1021, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: int32
[right]: int64None
|
AssertionError
|
def get_service(self):
import httplib2
try:
from googleapiclient.discovery import build
except:
from apiclient.discovery import build
http = httplib2.Http()
http = self.credentials.authorize(http)
bigquery_service = build("bigquery", "v2", http=http)
return bigquery_service
|
def get_service(self):
import httplib2
from apiclient.discovery import build
http = httplib2.Http()
http = self.credentials.authorize(http)
bigquery_service = build("bigquery", "v2", http=http)
return bigquery_service
|
https://github.com/pandas-dev/pandas/issues/13454
|
import apiclient
apiclient.__version__
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-90-662a0f56e34b> in <module>()
1 import apiclient
----> 2 apiclient.__version__
AttributeError: 'module' object has no attribute '__version__'
|
AttributeError
|
def run_query(self, query):
try:
from googleapiclient.errors import HttpError
except:
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
_check_google_client_version()
job_collection = self.service.jobs()
job_data = {
"configuration": {
"query": {
"query": query
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
}
self._start_timer()
try:
self._print("Requesting query... ", end="")
query_reply = job_collection.insert(
projectId=self.project_id, body=job_data
).execute()
self._print("ok.\nQuery running...")
except (AccessTokenRefreshError, ValueError):
if self.private_key:
raise AccessDenied("The service account credentials are not valid")
else:
raise AccessDenied(
"The credentials have been revoked or expired, "
"please re-run the application to re-authorize"
)
except HttpError as ex:
self.process_http_error(ex)
job_reference = query_reply["jobReference"]
while not query_reply.get("jobComplete", False):
self.print_elapsed_seconds(" Elapsed", "s. Waiting...")
try:
query_reply = job_collection.getQueryResults(
projectId=job_reference["projectId"], jobId=job_reference["jobId"]
).execute()
except HttpError as ex:
self.process_http_error(ex)
if self.verbose:
if query_reply["cacheHit"]:
self._print("Query done.\nCache hit.\n")
else:
bytes_processed = int(query_reply.get("totalBytesProcessed", "0"))
self._print(
"Query done.\nProcessed: {}\n".format(self.sizeof_fmt(bytes_processed))
)
self._print("Retrieving results...")
total_rows = int(query_reply["totalRows"])
result_pages = list()
seen_page_tokens = list()
current_row = 0
# Only read schema on first page
schema = query_reply["schema"]
# Loop through each page of data
while "rows" in query_reply and current_row < total_rows:
page = query_reply["rows"]
result_pages.append(page)
current_row += len(page)
self.print_elapsed_seconds(
" Got page: {}; {}% done. Elapsed".format(
len(result_pages), round(100.0 * current_row / total_rows)
)
)
if current_row == total_rows:
break
page_token = query_reply.get("pageToken", None)
if not page_token and current_row < total_rows:
raise InvalidPageToken(
"Required pageToken was missing. Received {0} of {1} rows".format(
current_row, total_rows
)
)
elif page_token in seen_page_tokens:
raise InvalidPageToken("A duplicate pageToken was returned")
seen_page_tokens.append(page_token)
try:
query_reply = job_collection.getQueryResults(
projectId=job_reference["projectId"],
jobId=job_reference["jobId"],
pageToken=page_token,
).execute()
except HttpError as ex:
self.process_http_error(ex)
if current_row < total_rows:
raise InvalidPageToken()
# print basic query stats
self._print("Got {} rows.\n".format(total_rows))
return schema, result_pages
|
def run_query(self, query):
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
_check_google_client_version()
job_collection = self.service.jobs()
job_data = {
"configuration": {
"query": {
"query": query
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
}
self._start_timer()
try:
self._print("Requesting query... ", end="")
query_reply = job_collection.insert(
projectId=self.project_id, body=job_data
).execute()
self._print("ok.\nQuery running...")
except (AccessTokenRefreshError, ValueError):
if self.private_key:
raise AccessDenied("The service account credentials are not valid")
else:
raise AccessDenied(
"The credentials have been revoked or expired, "
"please re-run the application to re-authorize"
)
except HttpError as ex:
self.process_http_error(ex)
job_reference = query_reply["jobReference"]
while not query_reply.get("jobComplete", False):
self.print_elapsed_seconds(" Elapsed", "s. Waiting...")
try:
query_reply = job_collection.getQueryResults(
projectId=job_reference["projectId"], jobId=job_reference["jobId"]
).execute()
except HttpError as ex:
self.process_http_error(ex)
if self.verbose:
if query_reply["cacheHit"]:
self._print("Query done.\nCache hit.\n")
else:
bytes_processed = int(query_reply.get("totalBytesProcessed", "0"))
self._print(
"Query done.\nProcessed: {}\n".format(self.sizeof_fmt(bytes_processed))
)
self._print("Retrieving results...")
total_rows = int(query_reply["totalRows"])
result_pages = list()
seen_page_tokens = list()
current_row = 0
# Only read schema on first page
schema = query_reply["schema"]
# Loop through each page of data
while "rows" in query_reply and current_row < total_rows:
page = query_reply["rows"]
result_pages.append(page)
current_row += len(page)
self.print_elapsed_seconds(
" Got page: {}; {}% done. Elapsed".format(
len(result_pages), round(100.0 * current_row / total_rows)
)
)
if current_row == total_rows:
break
page_token = query_reply.get("pageToken", None)
if not page_token and current_row < total_rows:
raise InvalidPageToken(
"Required pageToken was missing. Received {0} of {1} rows".format(
current_row, total_rows
)
)
elif page_token in seen_page_tokens:
raise InvalidPageToken("A duplicate pageToken was returned")
seen_page_tokens.append(page_token)
try:
query_reply = job_collection.getQueryResults(
projectId=job_reference["projectId"],
jobId=job_reference["jobId"],
pageToken=page_token,
).execute()
except HttpError as ex:
self.process_http_error(ex)
if current_row < total_rows:
raise InvalidPageToken()
# print basic query stats
self._print("Got {} rows.\n".format(total_rows))
return schema, result_pages
|
https://github.com/pandas-dev/pandas/issues/13454
|
import apiclient
apiclient.__version__
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-90-662a0f56e34b> in <module>()
1 import apiclient
----> 2 apiclient.__version__
AttributeError: 'module' object has no attribute '__version__'
|
AttributeError
|
def load_data(self, dataframe, dataset_id, table_id, chunksize):
try:
from googleapiclient.errors import HttpError
except:
from apiclient.errors import HttpError
job_id = uuid.uuid4().hex
rows = []
remaining_rows = len(dataframe)
if self.verbose:
total_rows = remaining_rows
self._print("\n\n")
for index, row in dataframe.reset_index(drop=True).iterrows():
row_dict = dict()
row_dict["json"] = json.loads(
row.to_json(force_ascii=False, date_unit="s", date_format="iso")
)
row_dict["insertId"] = job_id + str(index)
rows.append(row_dict)
remaining_rows -= 1
if (len(rows) % chunksize == 0) or (remaining_rows == 0):
self._print(
"\rStreaming Insert is {0}% Complete".format(
((total_rows - remaining_rows) * 100) / total_rows
)
)
body = {"rows": rows}
try:
response = (
self.service.tabledata()
.insertAll(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
body=body,
)
.execute()
)
except HttpError as ex:
self.process_http_error(ex)
# For streaming inserts, even if you receive a success HTTP
# response code, you'll need to check the insertErrors property
# of the response to determine if the row insertions were
# successful, because it's possible that BigQuery was only
# partially successful at inserting the rows. See the `Success
# HTTP Response Codes
# <https://cloud.google.com/bigquery/
# streaming-data-into-bigquery#troubleshooting>`__
# section
insert_errors = response.get("insertErrors", None)
if insert_errors:
self.process_insert_errors(insert_errors)
sleep(1) # Maintains the inserts "per second" rate per API
rows = []
self._print("\n")
|
def load_data(self, dataframe, dataset_id, table_id, chunksize):
from apiclient.errors import HttpError
job_id = uuid.uuid4().hex
rows = []
remaining_rows = len(dataframe)
if self.verbose:
total_rows = remaining_rows
self._print("\n\n")
for index, row in dataframe.reset_index(drop=True).iterrows():
row_dict = dict()
row_dict["json"] = json.loads(
row.to_json(force_ascii=False, date_unit="s", date_format="iso")
)
row_dict["insertId"] = job_id + str(index)
rows.append(row_dict)
remaining_rows -= 1
if (len(rows) % chunksize == 0) or (remaining_rows == 0):
self._print(
"\rStreaming Insert is {0}% Complete".format(
((total_rows - remaining_rows) * 100) / total_rows
)
)
body = {"rows": rows}
try:
response = (
self.service.tabledata()
.insertAll(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
body=body,
)
.execute()
)
except HttpError as ex:
self.process_http_error(ex)
# For streaming inserts, even if you receive a success HTTP
# response code, you'll need to check the insertErrors property
# of the response to determine if the row insertions were
# successful, because it's possible that BigQuery was only
# partially successful at inserting the rows. See the `Success
# HTTP Response Codes
# <https://cloud.google.com/bigquery/
# streaming-data-into-bigquery#troubleshooting>`__
# section
insert_errors = response.get("insertErrors", None)
if insert_errors:
self.process_insert_errors(insert_errors)
sleep(1) # Maintains the inserts "per second" rate per API
rows = []
self._print("\n")
|
https://github.com/pandas-dev/pandas/issues/13454
|
import apiclient
apiclient.__version__
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-90-662a0f56e34b> in <module>()
1 import apiclient
----> 2 apiclient.__version__
AttributeError: 'module' object has no attribute '__version__'
|
AttributeError
|
def verify_schema(self, dataset_id, table_id, schema):
try:
from googleapiclient.errors import HttpError
except:
from apiclient.errors import HttpError
try:
return (
(
self.service.tables()
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id)
.execute()["schema"]
)
== schema
)
except HttpError as ex:
self.process_http_error(ex)
|
def verify_schema(self, dataset_id, table_id, schema):
from apiclient.errors import HttpError
try:
return (
(
self.service.tables()
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id)
.execute()["schema"]
)
== schema
)
except HttpError as ex:
self.process_http_error(ex)
|
https://github.com/pandas-dev/pandas/issues/13454
|
import apiclient
apiclient.__version__
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-90-662a0f56e34b> in <module>()
1 import apiclient
----> 2 apiclient.__version__
AttributeError: 'module' object has no attribute '__version__'
|
AttributeError
|
def __init__(
self, project_id, dataset_id, reauth=False, verbose=False, private_key=None
):
try:
from googleapiclient.errors import HttpError
except:
from apiclient.errors import HttpError
self.http_error = HttpError
self.dataset_id = dataset_id
super(_Table, self).__init__(project_id, reauth, verbose, private_key)
|
def __init__(
self, project_id, dataset_id, reauth=False, verbose=False, private_key=None
):
from apiclient.errors import HttpError
self.http_error = HttpError
self.dataset_id = dataset_id
super(_Table, self).__init__(project_id, reauth, verbose, private_key)
|
https://github.com/pandas-dev/pandas/issues/13454
|
import apiclient
apiclient.__version__
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-90-662a0f56e34b> in <module>()
1 import apiclient
----> 2 apiclient.__version__
AttributeError: 'module' object has no attribute '__version__'
|
AttributeError
|
def __init__(self, project_id, reauth=False, verbose=False, private_key=None):
try:
from googleapiclient.errors import HttpError
except:
from apiclient.errors import HttpError
self.http_error = HttpError
super(_Dataset, self).__init__(project_id, reauth, verbose, private_key)
|
def __init__(self, project_id, reauth=False, verbose=False, private_key=None):
from apiclient.errors import HttpError
self.http_error = HttpError
super(_Dataset, self).__init__(project_id, reauth, verbose, private_key)
|
https://github.com/pandas-dev/pandas/issues/13454
|
import apiclient
apiclient.__version__
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-90-662a0f56e34b> in <module>()
1 import apiclient
----> 2 apiclient.__version__
AttributeError: 'module' object has no attribute '__version__'
|
AttributeError
|
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif com.is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError("Please install scipy to generate window weight")
if not isinstance(self.win_type, compat.string_types):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
|
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif com.is_integer(window):
try:
import scipy.signal as sig
except ImportError:
raise ImportError("Please install scipy to generate window weight")
if not isinstance(self.win_type, compat.string_types):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
|
https://github.com/pandas-dev/pandas/issues/13383
|
In [171]: s = pd.Series(range(3))
In [172]: s.rolling(-1) # doesn't raise
Out[172]: Rolling [window=-1,center=False,axis=0]
In [173]: s.rolling(-1).mean() # Odd, indirect error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-173-cda5b8dd0812> in <module>()
----> 1 s.rolling(-1).mean()
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in mean(self, **kwargs)
885 @Appender(_shared_docs['mean'])
886 def mean(self, **kwargs):
--> 887 return super(Rolling, self).mean(**kwargs)
888
889 @Substitution(name='rolling')
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in mean(self, **kwargs)
651
652 def mean(self, **kwargs):
--> 653 return self._apply('roll_mean', 'mean', **kwargs)
654
655 _shared_docs['median'] = dedent("""
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in _apply(self, func, name, window, center, check_minp, how, **kwargs)
558 result = np.apply_along_axis(calc, self.axis, values)
559 else:
--> 560 result = calc(values)
561
562 if center:
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in calc(x)
553
554 def calc(x):
--> 555 return func(x, window, min_periods=self.min_periods)
556
557 if values.ndim > 1:
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in func(arg, window, min_periods)
540 # GH #12373: rolling functions error on float32 data
541 return cfunc(com._ensure_float64(arg),
--> 542 window, minp, **kwargs)
543
544 # calculation function
pandas/algos.pyx in pandas.algos.roll_mean (pandas/algos.c:28921)()
pandas/algos.pyx in pandas.algos._check_minp (pandas/algos.c:19103)()
ValueError: min_periods must be >= 0
In [174]: pd.Series([]).rolling(-1).mean() # Never raises
Out[174]: Series([], dtype: float64)
In [175]: pd.show_versions()
INSTALLED VERSIONS
------------------
commit: None
python: 2.7.6.final.0
python-bits: 64
OS: Linux
OS-release: 3.13.0-71-generic
machine: x86_64
processor: x86_64
byteorder: little
LC_ALL: None
LANG: en_US.UTF-8
pandas: 0.18.1
nose: 1.3.7
pip: 8.0.2
setuptools: 20.1.1
Cython: 0.24
numpy: 1.11.0
scipy: 0.17.1
statsmodels: 0.6.1
xarray: None
IPython: 4.1.1
sphinx: None
patsy: 0.4.1
dateutil: 2.5.3
pytz: 2016.4
blosc: 1.3.2
bottleneck: None
tables: 3.2.2
numexpr: 2.6.0
matplotlib: 1.5.1
openpyxl: None
xlrd: None
xlwt: None
xlsxwriter: None
lxml: None
bs4: None
html5lib: None
httplib2: None
apiclient: None
sqlalchemy: None
pymysql: None
psycopg2: None
jinja2: 2.8
boto: 2.40.0
pandas_datareader: None
|
ValueError
|
def validate(self):
super(Rolling, self).validate()
if not com.is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
|
def validate(self):
super(Rolling, self).validate()
if not com.is_integer(self.window):
raise ValueError("window must be an integer")
|
https://github.com/pandas-dev/pandas/issues/13383
|
In [171]: s = pd.Series(range(3))
In [172]: s.rolling(-1) # doesn't raise
Out[172]: Rolling [window=-1,center=False,axis=0]
In [173]: s.rolling(-1).mean() # Odd, indirect error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-173-cda5b8dd0812> in <module>()
----> 1 s.rolling(-1).mean()
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in mean(self, **kwargs)
885 @Appender(_shared_docs['mean'])
886 def mean(self, **kwargs):
--> 887 return super(Rolling, self).mean(**kwargs)
888
889 @Substitution(name='rolling')
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in mean(self, **kwargs)
651
652 def mean(self, **kwargs):
--> 653 return self._apply('roll_mean', 'mean', **kwargs)
654
655 _shared_docs['median'] = dedent("""
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in _apply(self, func, name, window, center, check_minp, how, **kwargs)
558 result = np.apply_along_axis(calc, self.axis, values)
559 else:
--> 560 result = calc(values)
561
562 if center:
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in calc(x)
553
554 def calc(x):
--> 555 return func(x, window, min_periods=self.min_periods)
556
557 if values.ndim > 1:
/home/mike/modernpandas/local/lib/python2.7/site-packages/pandas/core/window.pyc in func(arg, window, min_periods)
540 # GH #12373: rolling functions error on float32 data
541 return cfunc(com._ensure_float64(arg),
--> 542 window, minp, **kwargs)
543
544 # calculation function
pandas/algos.pyx in pandas.algos.roll_mean (pandas/algos.c:28921)()
pandas/algos.pyx in pandas.algos._check_minp (pandas/algos.c:19103)()
ValueError: min_periods must be >= 0
In [174]: pd.Series([]).rolling(-1).mean() # Never raises
Out[174]: Series([], dtype: float64)
In [175]: pd.show_versions()
INSTALLED VERSIONS
------------------
commit: None
python: 2.7.6.final.0
python-bits: 64
OS: Linux
OS-release: 3.13.0-71-generic
machine: x86_64
processor: x86_64
byteorder: little
LC_ALL: None
LANG: en_US.UTF-8
pandas: 0.18.1
nose: 1.3.7
pip: 8.0.2
setuptools: 20.1.1
Cython: 0.24
numpy: 1.11.0
scipy: 0.17.1
statsmodels: 0.6.1
xarray: None
IPython: 4.1.1
sphinx: None
patsy: 0.4.1
dateutil: 2.5.3
pytz: 2016.4
blosc: 1.3.2
bottleneck: None
tables: 3.2.2
numexpr: 2.6.0
matplotlib: 1.5.1
openpyxl: None
xlrd: None
xlwt: None
xlsxwriter: None
lxml: None
bs4: None
html5lib: None
httplib2: None
apiclient: None
sqlalchemy: None
pymysql: None
psycopg2: None
jinja2: 2.8
boto: 2.40.0
pandas_datareader: None
|
ValueError
|
def _comp_method_SERIES(op, name, str_rep, masker=False):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
if is_categorical_dtype(x):
return op(x, y)
elif is_categorical_dtype(y) and not isscalar(y):
return op(y, x)
if is_object_dtype(x.dtype):
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, ABCSeries)):
if not is_object_dtype(y.dtype):
result = lib.vec_compare(x, y.astype(np.object_), op)
else:
result = lib.vec_compare(x, y, op)
else:
result = lib.scalar_compare(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
if is_datetimelike_v_numeric(x, y):
raise TypeError("invalid type comparison")
# numpy does not like comparisons vs None
if isscalar(y) and isnull(y):
if name == "__ne__":
return np.ones(len(x), dtype=bool)
else:
return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
if needs_i8_conversion(x) or (not isscalar(y) and needs_i8_conversion(y)):
if isscalar(y):
y = _index.convert_scalar(x, _values_from_object(y))
else:
y = y.view("i8")
mask = isnull(x)
x = x.view("i8")
try:
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
except AttributeError:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
if len(self) != len(other):
raise ValueError("Series lengths must match to compare")
return self._constructor(
na_op(self.values, other.values), index=self.index, name=name
)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if not lib.isscalar(lib.item_from_zerodim(other)) and len(self) != len(
other
):
raise ValueError("Lengths must match to compare")
return self._constructor(
na_op(self.values, np.asarray(other)), index=self.index
).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = (
"Cannot compare a Categorical for op {op} with Series "
"of dtype {typ}.\nIf you want to compare values, use "
"'series <op> np.asarray(other)'."
)
raise TypeError(msg.format(op=op, typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray,
# which would then not take categories ordering into account
# we can go directly to op, as the na_op would just test again and
# dispatch to it.
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
if isscalar(res):
raise TypeError("Could not compare %s type with Series" % type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name, dtype="bool")
return res
return wrapper
|
def _comp_method_SERIES(op, name, str_rep, masker=False):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
if is_categorical_dtype(x):
return op(x, y)
elif is_categorical_dtype(y) and not isscalar(y):
return op(y, x)
if is_object_dtype(x.dtype):
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, ABCSeries)):
if not is_object_dtype(y.dtype):
result = lib.vec_compare(x, y.astype(np.object_), op)
else:
result = lib.vec_compare(x, y, op)
else:
result = lib.scalar_compare(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
if is_datetimelike_v_numeric(x, y):
raise TypeError("invalid type comparison")
# numpy does not like comparisons vs None
if isscalar(y) and isnull(y):
if name == "__ne__":
return np.ones(len(x), dtype=bool)
else:
return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
if needs_i8_conversion(x) or (not isscalar(y) and needs_i8_conversion(y)):
if isscalar(y):
y = _index.convert_scalar(x, _values_from_object(y))
else:
y = y.view("i8")
mask = isnull(x)
x = x.view("i8")
try:
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
except AttributeError:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
if len(self) != len(other):
raise ValueError("Series lengths must match to compare")
return self._constructor(
na_op(self.values, other.values), index=self.index, name=name
)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
return self._constructor(
na_op(self.values, np.asarray(other)), index=self.index
).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = (
"Cannot compare a Categorical for op {op} with Series "
"of dtype {typ}.\nIf you want to compare values, use "
"'series <op> np.asarray(other)'."
)
raise TypeError(msg.format(op=op, typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray,
# which would then not take categories ordering into account
# we can go directly to op, as the na_op would just test again and
# dispatch to it.
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
if isscalar(res):
raise TypeError("Could not compare %s type with Series" % type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name, dtype="bool")
return res
return wrapper
|
https://github.com/pandas-dev/pandas/issues/13006
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-0803a1472416> in <module>()
1 import numpy as np
2 import pandas as pd
----> 3 np.float64(0) < pd.Series([1,2,3],dtype=np.float64)
global np.float64 = <type 'numpy.float64'>
global pd.Series = <class 'pandas.core.series.Series'>
global dtype = undefined
/usr/local/lib/python2.7/dist-packages/pandas/core/ops.pyc in wrapper(self=0 1.0
1 2.0
2 3.0
dtype: float64, other=array(0.0), axis=None)
737 return NotImplemented
738 elif isinstance(other, (np.ndarray, pd.Index)):
--> 739 if len(self) != len(other):
global len = undefined
self = 0 1.0
1 2.0
2 3.0
dtype: float64
other = array(0.0)
740 raise ValueError('Lengths must match to compare')
741 return self._constructor(na_op(self.values, np.asarray(other)),
TypeError: len() of unsized object
|
TypeError
|
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
if len(self) != len(other):
raise ValueError("Series lengths must match to compare")
return self._constructor(
na_op(self.values, other.values), index=self.index, name=name
)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if not lib.isscalar(lib.item_from_zerodim(other)) and len(self) != len(other):
raise ValueError("Lengths must match to compare")
return self._constructor(
na_op(self.values, np.asarray(other)), index=self.index
).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = (
"Cannot compare a Categorical for op {op} with Series "
"of dtype {typ}.\nIf you want to compare values, use "
"'series <op> np.asarray(other)'."
)
raise TypeError(msg.format(op=op, typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray,
# which would then not take categories ordering into account
# we can go directly to op, as the na_op would just test again and
# dispatch to it.
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
if isscalar(res):
raise TypeError("Could not compare %s type with Series" % type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name, dtype="bool")
return res
|
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
if len(self) != len(other):
raise ValueError("Series lengths must match to compare")
return self._constructor(
na_op(self.values, other.values), index=self.index, name=name
)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
return self._constructor(
na_op(self.values, np.asarray(other)), index=self.index
).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = (
"Cannot compare a Categorical for op {op} with Series "
"of dtype {typ}.\nIf you want to compare values, use "
"'series <op> np.asarray(other)'."
)
raise TypeError(msg.format(op=op, typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray,
# which would then not take categories ordering into account
# we can go directly to op, as the na_op would just test again and
# dispatch to it.
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
if isscalar(res):
raise TypeError("Could not compare %s type with Series" % type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name, dtype="bool")
return res
|
https://github.com/pandas-dev/pandas/issues/13006
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-0803a1472416> in <module>()
1 import numpy as np
2 import pandas as pd
----> 3 np.float64(0) < pd.Series([1,2,3],dtype=np.float64)
global np.float64 = <type 'numpy.float64'>
global pd.Series = <class 'pandas.core.series.Series'>
global dtype = undefined
/usr/local/lib/python2.7/dist-packages/pandas/core/ops.pyc in wrapper(self=0 1.0
1 2.0
2 3.0
dtype: float64, other=array(0.0), axis=None)
737 return NotImplemented
738 elif isinstance(other, (np.ndarray, pd.Index)):
--> 739 if len(self) != len(other):
global len = undefined
self = 0 1.0
1 2.0
2 3.0
dtype: float64
other = array(0.0)
740 raise ValueError('Lengths must match to compare')
741 return self._constructor(na_op(self.values, np.asarray(other)),
TypeError: len() of unsized object
|
TypeError
|
def get_resampler_for_grouping(
groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs
):
"""return our appropriate resampler when grouping as well"""
tg = TimeGrouper(freq=rule, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
r = resampler._get_resampler_for_grouping(groupby=groupby)
return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
|
def get_resampler_for_grouping(
groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs
):
"""return our appropriate resampler when grouping as well"""
tg = TimeGrouper(freq=rule, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
r = resampler._get_resampler_for_grouping(groupby=groupby)
return _maybe_process_deprecations(
r, how=how, fill_method=fill_method, limit=limit, **kwargs
)
|
https://github.com/pandas-dev/pandas/issues/13235
|
TypeError Traceback (most recent call last)
<ipython-input-53-6e7ac0fde8b3> in <module>()
----> 1 df.groupby('col1').resample('1W', label='left').sum()
/Users/roycoding/venv-lib-upgrade/lib/python2.7/site-packages/pandas/core/groupby.pyc in resample(self, rule, *args, **kwargs)
1080 """
1081 from pandas.tseries.resample import get_resampler_for_grouping
-> 1082 return get_resampler_for_grouping(self, rule, *args, **kwargs)
1083
1084 @Substitution(name='groupby')
/Users/roycoding/venv-lib-upgrade/lib/python2.7/site-packages/pandas/tseries/resample.pyc in get_resampler_for_grouping(groupby, rule, how, fill_method, limit, kind, **kwargs)
910 fill_method=fill_method,
911 limit=limit,
--> 912 **kwargs)
913
914
TypeError: _maybe_process_deprecations() got an unexpected keyword argument 'label'
|
TypeError
|
def nested_to_record(ds, prefix="", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
level: the number of levels in the jason string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, compat.string_types):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + "." + k
# only dicts gets recurse-flattend
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
|
def nested_to_record(ds, prefix="", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
level: the number of levels in the jason string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if level == 0:
newkey = str(k)
else:
newkey = prefix + "." + str(k)
# only dicts gets recurse-flattend
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
|
https://github.com/pandas-dev/pandas/issues/13213
|
Traceback (most recent call last):
File "...lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2885, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-12-f866f9c7ec7c>", line 5, in <module>
pd.io.json.json_normalize(json.loads(testjson))
File ".../lib/python2.7/site-packages/pandas/io/json.py", line 715, in json_normalize
data = nested_to_record(data)
File ".../lib/python2.7/site-packages/pandas/io/json.py", line 617, in nested_to_record
newkey = str(k)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xdc' in position 0: ordinal not in range(128)
|
UnicodeEncodeError
|
def _transform_fast(self, func):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = (self.size().fillna(0) > 0).any()
out = algos.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
|
def _transform_fast(self, func):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
mask = ids != -1
out = func().values[ids]
if not mask.all():
out = np.where(mask, out, np.nan)
obs = np.zeros(ngroup, dtype="bool")
obs[ids[mask]] = True
if not obs.all():
out = self._try_cast(out, self._selected_obj)
return Series(out, index=self.obj.index)
|
https://github.com/pandas-dev/pandas/issues/13191
|
In [20]: df = pd.DataFrame({'grouping':[np.nan,1,1,3],
'v':[1.1, 2.1, 3.1, 4.5],
'd':pd.date_range('2014-1-1','2014-1-4')})
In [21]: df.groupby('grouping')['d'].transform('first')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-21-cb1ed73aabc3> in <module>()
----> 1 df.groupby('grouping')['d'].transform('first')
C:\Users\Chris\Anaconda\lib\site-packages\pandas\core\groupby.pyc in transform(self, func, *args, **kwargs)
2738 # cythonized aggregation and merge
2739 return self._transform_fast(
-> 2740 lambda: getattr(self, func)(*args, **kwargs))
2741
2742 # reg transform
C:\Users\Chris\Anaconda\lib\site-packages\pandas\core\groupby.pyc in _transform_fast(self, func)
2781 out = func().values[ids]
2782 if not mask.all():
-> 2783 out = np.where(mask, out, np.nan)
2784
2785 obs = np.zeros(ngroup, dtype='bool')
TypeError: invalid type promotion
|
TypeError
|
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# optimized transforms
func = self._is_cython_func(func) or func
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
# nuiscance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
return self._transform_fast(result, obj)
|
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# optimized transforms
func = self._is_cython_func(func) or func
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
# nuiscance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
results = np.empty_like(obj.values, result.values.dtype)
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
if len(indexer) > 0:
results[indexer] = np.tile(row.values, len(indexer)).reshape(
len(indexer), -1
)
counts = self.size().fillna(0).values
if any(counts == 0):
results = self._try_cast(results, obj[result.columns])
return DataFrame(results, columns=result.columns, index=obj.index)._convert(
datetime=True
)
|
https://github.com/pandas-dev/pandas/issues/13191
|
In [20]: df = pd.DataFrame({'grouping':[np.nan,1,1,3],
'v':[1.1, 2.1, 3.1, 4.5],
'd':pd.date_range('2014-1-1','2014-1-4')})
In [21]: df.groupby('grouping')['d'].transform('first')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-21-cb1ed73aabc3> in <module>()
----> 1 df.groupby('grouping')['d'].transform('first')
C:\Users\Chris\Anaconda\lib\site-packages\pandas\core\groupby.pyc in transform(self, func, *args, **kwargs)
2738 # cythonized aggregation and merge
2739 return self._transform_fast(
-> 2740 lambda: getattr(self, func)(*args, **kwargs))
2741
2742 # reg transform
C:\Users\Chris\Anaconda\lib\site-packages\pandas\core\groupby.pyc in _transform_fast(self, func)
2781 out = func().values[ids]
2782 if not mask.all():
-> 2783 out = np.where(mask, out, np.nan)
2784
2785 obs = np.zeros(ngroup, dtype='bool')
TypeError: invalid type promotion
|
TypeError
|
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None)
groupby = kwargs.pop("groupby", None)
if parent is None:
parent = obj
# initialize our GroupByMixin object with
# the resampler attributes
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
super(_GroupByMixin, self).__init__(None)
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
self.groupby = copy.copy(parent.groupby)
|
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None)
groupby = kwargs.pop("groupby", None)
if parent is None:
parent = obj
# initialize our GroupByMixin object with
# the resampler attributes
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
super(_GroupByMixin, self).__init__(None)
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
self.groupby = parent.groupby
|
https://github.com/pandas-dev/pandas/issues/13174
|
/usr/local/bin/ipython:1: FutureWarning: .resample() is now a deferred operation
use .resample(...).mean() instead of .resample(...)
#!/usr/local/opt/python/bin/python2.7
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-30-5714547cf98c> in <module>()
----> 1 resampler['buyer'].count()
/usr/local/lib/python2.7/site-packages/pandas/tseries/resample.pyc in __getitem__(self, key)
179 # compat for deprecated
180 if isinstance(self.obj, com.ABCSeries):
--> 181 return self._deprecated()[key]
182
183 raise
/usr/local/lib/python2.7/site-packages/pandas/core/frame.pyc in __getitem__(self, key)
1995 return self._getitem_multilevel(key)
1996 else:
-> 1997 return self._getitem_column(key)
1998
1999 def _getitem_column(self, key):
/usr/local/lib/python2.7/site-packages/pandas/core/frame.pyc in _getitem_column(self, key)
2002 # get column
2003 if self.columns.is_unique:
-> 2004 return self._get_item_cache(key)
2005
2006 # duplicate columns & possible reduce dimensionality
/usr/local/lib/python2.7/site-packages/pandas/core/generic.pyc in _get_item_cache(self, item)
1348 res = cache.get(item)
1349 if res is None:
-> 1350 values = self._data.get(item)
1351 res = self._box_item_values(item, values)
1352 cache[item] = res
/usr/local/lib/python2.7/site-packages/pandas/core/internals.pyc in get(self, item, fastpath)
3288
3289 if not isnull(item):
-> 3290 loc = self.items.get_loc(item)
3291 else:
3292 indexer = np.arange(len(self.items))[isnull(self.items)]
/usr/local/lib/python2.7/site-packages/pandas/indexes/base.pyc in get_loc(self, key, method, tolerance)
1945 return self._engine.get_loc(key)
1946 except KeyError:
-> 1947 return self._engine.get_loc(self._maybe_cast_indexer(key))
1948
1949 indexer = self.get_indexer([key], method=method, tolerance=tolerance)
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:4154)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:4018)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12368)()
pandas/hashtable.pyx in pandas.hashtable.PyObjectHashTable.get_item (pandas/hashtable.c:12322)()
KeyError: 'buyer'
|
KeyError
|
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(
new_values, index=new_index, name=series.name
).__finalize__(self)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return _index.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if isinstance(key, (datetime.datetime, np.datetime64)) or (
compat.PY3 and isinstance(key, compat.string_types)
):
try:
return _try_mi(key)
except KeyError:
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
|
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
from pandas.core.series import Series
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return _index.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if isinstance(key, (datetime.datetime, np.datetime64)) or (
compat.PY3 and isinstance(key, compat.string_types)
):
try:
return _try_mi(key)
except KeyError:
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
|
https://github.com/pandas-dev/pandas/issues/13144
|
Traceback (most recent call last):
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 420, in _get_values
fastpath=True).__finalize__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 222, in __init__
self.index = index
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/generic.py", line 2685, in __setattr__
return object.__setattr__(self, name, value)
File "pandas/src/properties.pyx", line 65, in pandas.lib.AxisProperty.__set__ (pandas/lib.c:44748)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 274, in _set_axis
labels = _ensure_index(labels)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 3409, in _ensure_index
return Index(index_like)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 268, in __new__
cls._scalar_data_error(data)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 483, in _scalar_data_error
repr(data)))
TypeError: Index(...) must be called with a collection of some kind, None was passed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/base.py", line 46, in __str__
return self.__unicode__()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 306, in __unicode__
series_rep = Series.__unicode__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 984, in __unicode__
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1025, in to_string
dtype=dtype, name=name, max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1052, in _get_repr
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 145, in __init__
self._chk_truncate()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 158, in _chk_truncate
series = concat((series.iloc[:row_num],
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1296, in __getitem__
return self._getitem_axis(key, axis=0)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1587, in _getitem_axis
return self._get_slice_axis(key, axis=axis)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1579, in _get_slice_axis
return self._slice(slice_obj, axis=axis, kind='iloc')
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 99, in _slice
return self.obj._slice(obj, axis=axis, kind=kind)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 578, in _slice
return self._get_values(slobj)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 422, in _get_values
return self[indexer]
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 396, in __getitem__
return self._get_val_at(self.index.get_loc(key))
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 392, in _get_val_at
return self.block.values._get_val_at(loc)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/array.py", line 308, in _get_val_at
if loc < 0:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
TypeError
|
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(
new_values, index=new_index, name=series.name
).__finalize__(self)
|
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
|
https://github.com/pandas-dev/pandas/issues/13144
|
Traceback (most recent call last):
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 420, in _get_values
fastpath=True).__finalize__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 222, in __init__
self.index = index
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/generic.py", line 2685, in __setattr__
return object.__setattr__(self, name, value)
File "pandas/src/properties.pyx", line 65, in pandas.lib.AxisProperty.__set__ (pandas/lib.c:44748)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 274, in _set_axis
labels = _ensure_index(labels)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 3409, in _ensure_index
return Index(index_like)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 268, in __new__
cls._scalar_data_error(data)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 483, in _scalar_data_error
repr(data)))
TypeError: Index(...) must be called with a collection of some kind, None was passed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/base.py", line 46, in __str__
return self.__unicode__()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 306, in __unicode__
series_rep = Series.__unicode__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 984, in __unicode__
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1025, in to_string
dtype=dtype, name=name, max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1052, in _get_repr
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 145, in __init__
self._chk_truncate()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 158, in _chk_truncate
series = concat((series.iloc[:row_num],
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1296, in __getitem__
return self._getitem_axis(key, axis=0)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1587, in _getitem_axis
return self._get_slice_axis(key, axis=axis)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1579, in _get_slice_axis
return self._slice(slice_obj, axis=axis, kind='iloc')
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 99, in _slice
return self.obj._slice(obj, axis=axis, kind=kind)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 578, in _slice
return self._get_values(slobj)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 422, in _get_values
return self[indexer]
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 396, in __getitem__
return self._get_val_at(self.index.get_loc(key))
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 392, in _get_val_at
return self.block.values._get_val_at(loc)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/array.py", line 308, in _get_val_at
if loc < 0:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
TypeError
|
def __init__(
self,
data=None,
index=None,
sparse_index=None,
kind="block",
fill_value=None,
name=None,
dtype=None,
copy=False,
fastpath=False,
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = np.nan
if is_sparse_array:
if isinstance(data, SparseSeries) and index is None:
index = data.index.view()
elif index is not None:
assert len(index) == len(data)
sparse_index = data.sp_index
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
if index is None:
index = data.index.view()
data = Series(data)
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
else:
assert len(data) == sparse_index.npoints
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isnull(data) and isnull(fill_value)):
if kind == "block":
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == "block":
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(
data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype,
copy=copy,
)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
|
def __init__(
self,
data=None,
index=None,
sparse_index=None,
kind="block",
fill_value=None,
name=None,
dtype=None,
copy=False,
fastpath=False,
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = nan
if is_sparse_array:
if isinstance(data, SparseSeries) and index is None:
index = data.index.view()
elif index is not None:
assert len(index) == len(data)
sparse_index = data.sp_index
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
if index is None:
index = data.index.view()
data = Series(data)
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
else:
assert len(data) == sparse_index.npoints
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isnull(data) and isnull(fill_value)):
if kind == "block":
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == "block":
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(
data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype,
copy=copy,
)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
|
https://github.com/pandas-dev/pandas/issues/13144
|
Traceback (most recent call last):
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 420, in _get_values
fastpath=True).__finalize__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 222, in __init__
self.index = index
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/generic.py", line 2685, in __setattr__
return object.__setattr__(self, name, value)
File "pandas/src/properties.pyx", line 65, in pandas.lib.AxisProperty.__set__ (pandas/lib.c:44748)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 274, in _set_axis
labels = _ensure_index(labels)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 3409, in _ensure_index
return Index(index_like)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 268, in __new__
cls._scalar_data_error(data)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 483, in _scalar_data_error
repr(data)))
TypeError: Index(...) must be called with a collection of some kind, None was passed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/base.py", line 46, in __str__
return self.__unicode__()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 306, in __unicode__
series_rep = Series.__unicode__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 984, in __unicode__
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1025, in to_string
dtype=dtype, name=name, max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1052, in _get_repr
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 145, in __init__
self._chk_truncate()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 158, in _chk_truncate
series = concat((series.iloc[:row_num],
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1296, in __getitem__
return self._getitem_axis(key, axis=0)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1587, in _getitem_axis
return self._get_slice_axis(key, axis=axis)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1579, in _get_slice_axis
return self._slice(slice_obj, axis=axis, kind='iloc')
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 99, in _slice
return self.obj._slice(obj, axis=axis, kind=kind)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 578, in _slice
return self._get_values(slobj)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 422, in _get_values
return self[indexer]
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 396, in __getitem__
return self._get_val_at(self.index.get_loc(key))
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 392, in _get_val_at
return self.block.values._get_val_at(loc)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/array.py", line 308, in _get_val_at
if loc < 0:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
TypeError
|
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception("Requested index not in this series!")
except TypeError:
# Could not hash item, must be array-like?
pass
key = _values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
return self._constructor(self.values[key], index=self.index[key]).__finalize__(self)
|
def __getitem__(self, key):
try:
return self._get_val_at(self.index.get_loc(key))
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception("Requested index not in this series!")
except TypeError:
# Could not hash item, must be array-like?
pass
# is there a case where this would NOT be an ndarray?
# need to find an example, I took out the case for now
key = _values_from_object(key)
dataSlice = self.values[key]
new_index = Index(self.index.view(ndarray)[key])
return self._constructor(dataSlice, index=new_index).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/13144
|
Traceback (most recent call last):
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 420, in _get_values
fastpath=True).__finalize__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 222, in __init__
self.index = index
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/generic.py", line 2685, in __setattr__
return object.__setattr__(self, name, value)
File "pandas/src/properties.pyx", line 65, in pandas.lib.AxisProperty.__set__ (pandas/lib.c:44748)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 274, in _set_axis
labels = _ensure_index(labels)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 3409, in _ensure_index
return Index(index_like)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 268, in __new__
cls._scalar_data_error(data)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/indexes/base.py", line 483, in _scalar_data_error
repr(data)))
TypeError: Index(...) must be called with a collection of some kind, None was passed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/base.py", line 46, in __str__
return self.__unicode__()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 306, in __unicode__
series_rep = Series.__unicode__(self)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 984, in __unicode__
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1025, in to_string
dtype=dtype, name=name, max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 1052, in _get_repr
max_rows=max_rows)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 145, in __init__
self._chk_truncate()
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/formats/format.py", line 158, in _chk_truncate
series = concat((series.iloc[:row_num],
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1296, in __getitem__
return self._getitem_axis(key, axis=0)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1587, in _getitem_axis
return self._get_slice_axis(key, axis=axis)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 1579, in _get_slice_axis
return self._slice(slice_obj, axis=axis, kind='iloc')
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/indexing.py", line 99, in _slice
return self.obj._slice(obj, axis=axis, kind=kind)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/core/series.py", line 578, in _slice
return self._get_values(slobj)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 422, in _get_values
return self[indexer]
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 396, in __getitem__
return self._get_val_at(self.index.get_loc(key))
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/series.py", line 392, in _get_val_at
return self.block.values._get_val_at(loc)
File "/Users/bryan/anaconda3/lib/python3.5/site-packages/pandas/sparse/array.py", line 308, in _get_val_at
if loc < 0:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
TypeError
|
def _parse_raw_thead(self, table):
thead = self._parse_thead(table)
res = []
if thead:
res = lmap(self._text_getter, self._parse_th(thead[0]))
return np.atleast_1d(np.array(res).squeeze()) if res and len(res) == 1 else res
|
def _parse_raw_thead(self, table):
thead = self._parse_thead(table)
res = []
if thead:
res = lmap(self._text_getter, self._parse_th(thead[0]))
return np.array(res).squeeze() if res and len(res) == 1 else res
|
https://github.com/pandas-dev/pandas/issues/9178
|
Traceback (most recent call last):
File "./posti.py", line 69, in <module>
dfs = read_html(html)
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 851, in read_html
parse_dates, tupleize_cols, thousands, attrs, encoding)
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 721, in _parse
infer_types, parse_dates, tupleize_cols, thousands))
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 609, in _data_to_frame
_expand_elements(body)
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 586, in _expand_elements
lens = Series(lmap(len, body))
File "/usr/lib/python3.4/site-packages/pandas/compat/__init__.py", line 87, in lmap
return list(map(*args, **kwargs))
TypeError: len() of unsized object
|
TypeError
|
def _parse_raw_tfoot(self, table):
tfoot = self._parse_tfoot(table)
res = []
if tfoot:
res = lmap(self._text_getter, self._parse_td(tfoot[0]))
return np.atleast_1d(np.array(res).squeeze()) if res and len(res) == 1 else res
|
def _parse_raw_tfoot(self, table):
tfoot = self._parse_tfoot(table)
res = []
if tfoot:
res = lmap(self._text_getter, self._parse_td(tfoot[0]))
return np.array(res).squeeze() if res and len(res) == 1 else res
|
https://github.com/pandas-dev/pandas/issues/9178
|
Traceback (most recent call last):
File "./posti.py", line 69, in <module>
dfs = read_html(html)
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 851, in read_html
parse_dates, tupleize_cols, thousands, attrs, encoding)
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 721, in _parse
infer_types, parse_dates, tupleize_cols, thousands))
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 609, in _data_to_frame
_expand_elements(body)
File "/usr/lib/python3.4/site-packages/pandas/io/html.py", line 586, in _expand_elements
lens = Series(lmap(len, body))
File "/usr/lib/python3.4/site-packages/pandas/compat/__init__.py", line 87, in lmap
return list(map(*args, **kwargs))
TypeError: len() of unsized object
|
TypeError
|
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check but a small fraction of values to see if
# a block is NOT null, chunks should help in such cases. 1000 value
# was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
elif self.block.is_sparse:
# fill_value is not NaN and have holes
if not values._null_fill_value and values.sp_index.ngaps > 0:
return False
values_flat = values.ravel(order="K")
else:
values_flat = values.ravel(order="K")
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i : i + chunk_len]).all():
return False
return True
|
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check but a small fraction of values to see if
# a block is NOT null, chunks should help in such cases. 1000 value
# was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
else:
values_flat = values.ravel(order="K")
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i : i + chunk_len]).all():
return False
return True
|
https://github.com/pandas-dev/pandas/issues/9765
|
.
Expected:
A B C
0 1 0 0
1 0 0 0
2 1 0 1
3 0 0 0
Got:
A B C
0 1 NaN 0
1 0 NaN 0
2 1 NaN 1
3 0 NaN 0
F
======================================================================
FAIL: test_concat_sparse_to_df (__main__.TestSparseConcat)
----------------------------------------------------------------------
Traceback (most recent call last):
File "example.py", line 28, in test_concat_sparse_to_df
assert_frame_equal(C, self.expected)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 748, in assert_frame_equal
check_exact=check_exact)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 692, in assert_series_equal
assert_almost_equal(left.values, right.values, check_less_precise)
File "das/src/testing.pyx", line 58, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2758)
File "das/src/testing.pyx", line 93, in pandas._testing.assert_almost_equal (pandas/src/testing.c:1843)
File "das/src/testing.pyx", line 102, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2010)
AssertionError: First object is null, second isn't: nan != 0.0
----------------------------------------------------------------------
Ran 2 tests in 0.024s
FAILED (failures=1)
|
AssertionError
|
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_null:
if getattr(self.block, "is_object", False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, "is_datetimetz", False):
pass
elif getattr(self.block, "is_categorical", False):
pass
elif getattr(self.block, "is_sparse", False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax, fill_value=fill_value)
return values
|
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_null:
if getattr(self.block, "is_object", False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, "is_datetimetz", False):
pass
elif getattr(self.block, "is_categorical", False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax, fill_value=fill_value)
return values
|
https://github.com/pandas-dev/pandas/issues/9765
|
.
Expected:
A B C
0 1 0 0
1 0 0 0
2 1 0 1
3 0 0 0
Got:
A B C
0 1 NaN 0
1 0 NaN 0
2 1 NaN 1
3 0 NaN 0
F
======================================================================
FAIL: test_concat_sparse_to_df (__main__.TestSparseConcat)
----------------------------------------------------------------------
Traceback (most recent call last):
File "example.py", line 28, in test_concat_sparse_to_df
assert_frame_equal(C, self.expected)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 748, in assert_frame_equal
check_exact=check_exact)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 692, in assert_series_equal
assert_almost_equal(left.values, right.values, check_less_precise)
File "das/src/testing.pyx", line 58, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2758)
File "das/src/testing.pyx", line 93, in pandas._testing.assert_almost_equal (pandas/src/testing.c:1843)
File "das/src/testing.pyx", line 102, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2010)
AssertionError: First object is null, second isn't: nan != 0.0
----------------------------------------------------------------------
Ran 2 tests in 0.024s
FAILED (failures=1)
|
AssertionError
|
def __getitem__(self, key):
""" """
if com.is_integer(key):
return self._get_val_at(key)
elif isinstance(key, tuple):
data_slice = self.values[key]
else:
if isinstance(key, SparseArray):
key = np.asarray(key)
if hasattr(key, "__len__") and len(self) != len(key):
return self.take(key)
else:
data_slice = self.values[key]
return self._constructor(data_slice)
|
def __getitem__(self, key):
""" """
if com.is_integer(key):
return self._get_val_at(key)
else:
if isinstance(key, SparseArray):
key = np.asarray(key)
if hasattr(key, "__len__") and len(self) != len(key):
return self.take(key)
else:
data_slice = self.values[key]
return self._constructor(data_slice)
|
https://github.com/pandas-dev/pandas/issues/9765
|
.
Expected:
A B C
0 1 0 0
1 0 0 0
2 1 0 1
3 0 0 0
Got:
A B C
0 1 NaN 0
1 0 NaN 0
2 1 NaN 1
3 0 NaN 0
F
======================================================================
FAIL: test_concat_sparse_to_df (__main__.TestSparseConcat)
----------------------------------------------------------------------
Traceback (most recent call last):
File "example.py", line 28, in test_concat_sparse_to_df
assert_frame_equal(C, self.expected)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 748, in assert_frame_equal
check_exact=check_exact)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 692, in assert_series_equal
assert_almost_equal(left.values, right.values, check_less_precise)
File "das/src/testing.pyx", line 58, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2758)
File "das/src/testing.pyx", line 93, in pandas._testing.assert_almost_equal (pandas/src/testing.c:1843)
File "das/src/testing.pyx", line 102, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2010)
AssertionError: First object is null, second isn't: nan != 0.0
----------------------------------------------------------------------
Ran 2 tests in 0.024s
FAILED (failures=1)
|
AssertionError
|
def __init__(
self,
data=None,
index=None,
sparse_index=None,
kind="block",
fill_value=None,
name=None,
dtype=None,
copy=False,
fastpath=False,
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = nan
if is_sparse_array:
if isinstance(data, SparseSeries) and index is None:
index = data.index.view()
elif index is not None:
assert len(index) == len(data)
sparse_index = data.sp_index
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
if index is None:
index = data.index.view()
data = Series(data)
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
else:
assert len(data) == sparse_index.npoints
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isnull(data) and isnull(fill_value)):
if kind == "block":
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == "block":
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(
data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype,
copy=copy,
)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
|
def __init__(
self,
data=None,
index=None,
sparse_index=None,
kind="block",
fill_value=None,
name=None,
dtype=None,
copy=False,
fastpath=False,
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = nan
if is_sparse_array:
if isinstance(data, SparseSeries) and index is None:
index = data.index.view()
elif index is not None:
assert len(index) == len(data)
sparse_index = data.sp_index
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
if index is None:
index = data.index.view()
data = Series(data)
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
data, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value)
else:
assert len(data) == sparse_index.npoints
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isnull(data) and isnull(fill_value)):
if kind == "block":
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == "block":
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(
data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype,
copy=copy,
)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
|
https://github.com/pandas-dev/pandas/issues/9765
|
.
Expected:
A B C
0 1 0 0
1 0 0 0
2 1 0 1
3 0 0 0
Got:
A B C
0 1 NaN 0
1 0 NaN 0
2 1 NaN 1
3 0 NaN 0
F
======================================================================
FAIL: test_concat_sparse_to_df (__main__.TestSparseConcat)
----------------------------------------------------------------------
Traceback (most recent call last):
File "example.py", line 28, in test_concat_sparse_to_df
assert_frame_equal(C, self.expected)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 748, in assert_frame_equal
check_exact=check_exact)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 692, in assert_series_equal
assert_almost_equal(left.values, right.values, check_less_precise)
File "das/src/testing.pyx", line 58, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2758)
File "das/src/testing.pyx", line 93, in pandas._testing.assert_almost_equal (pandas/src/testing.c:1843)
File "das/src/testing.pyx", line 102, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2010)
AssertionError: First object is null, second isn't: nan != 0.0
----------------------------------------------------------------------
Ran 2 tests in 0.024s
FAILED (failures=1)
|
AssertionError
|
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
# concat Series with length to keep dtype as much
non_empties = [x for x in self.objs if len(x) > 0]
if len(non_empties) > 0:
values = [x._values for x in non_empties]
else:
values = [x._values for x in self.objs]
new_data = _concat._concat_compat(values)
name = com._consensus_name_attr(self.objs)
cons = _concat._get_series_result_type(new_data)
return cons(
new_data, index=self.new_axes[0], name=name, dtype=new_data.dtype
).__finalize__(self, method="concat")
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method="concat")
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy
)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return cons._from_axes(new_data, self.new_axes).__finalize__(
self, method="concat"
)
|
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
# concat Series with length to keep dtype as much
non_empties = [x for x in self.objs if len(x) > 0]
if len(non_empties) > 0:
values = [x._values for x in non_empties]
else:
values = [x._values for x in self.objs]
new_data = _concat._concat_compat(values)
name = com._consensus_name_attr(self.objs)
cons = _concat._get_series_result_type(new_data)
return cons(
new_data, index=self.new_axes[0], name=name, dtype=new_data.dtype
).__finalize__(self, method="concat")
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method="concat")
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy
)
if not self.copy:
new_data._consolidate_inplace()
return (
self.objs[0]
._from_axes(new_data, self.new_axes)
.__finalize__(self, method="concat")
)
|
https://github.com/pandas-dev/pandas/issues/9765
|
.
Expected:
A B C
0 1 0 0
1 0 0 0
2 1 0 1
3 0 0 0
Got:
A B C
0 1 NaN 0
1 0 NaN 0
2 1 NaN 1
3 0 NaN 0
F
======================================================================
FAIL: test_concat_sparse_to_df (__main__.TestSparseConcat)
----------------------------------------------------------------------
Traceback (most recent call last):
File "example.py", line 28, in test_concat_sparse_to_df
assert_frame_equal(C, self.expected)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 748, in assert_frame_equal
check_exact=check_exact)
File "/home/vagrant/.virtualenvs/ai-modeling/lib/python2.7/site-packages/pandas/util/testing.py", line 692, in assert_series_equal
assert_almost_equal(left.values, right.values, check_less_precise)
File "das/src/testing.pyx", line 58, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2758)
File "das/src/testing.pyx", line 93, in pandas._testing.assert_almost_equal (pandas/src/testing.c:1843)
File "das/src/testing.pyx", line 102, in pandas._testing.assert_almost_equal (pandas/src/testing.c:2010)
AssertionError: First object is null, second isn't: nan != 0.0
----------------------------------------------------------------------
Ran 2 tests in 0.024s
FAILED (failures=1)
|
AssertionError
|
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
greater than one. The size of each label must divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel("F") for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
|
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
greater than one. The size of each label must divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(1) for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
|
https://github.com/pandas-dev/pandas/issues/12527
|
isinstance(n, int)
True
df.loc[(n, 0), 'dest']
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.5/site-packages/pandas/core/indexing.py", line 1196, in __getitem__
return self._getitem_tuple(key)
File "/usr/local/lib/python3.5/site-packages/pandas/core/indexing.py", line 709, in _getitem_tuple
return self._getitem_lowerdim(tup)
File "/usr/local/lib/python3.5/site-packages/pandas/core/indexing.py", line 817, in _getitem_lowerdim
return self._getitem_nested_tuple(tup)
File "/usr/local/lib/python3.5/site-packages/pandas/core/indexing.py", line 889, in _getitem_nested_tuple
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
File "/usr/local/lib/python3.5/site-packages/pandas/core/indexing.py", line 1343, in _getitem_axis
return self._get_label(key, axis=axis)
File "/usr/local/lib/python3.5/site-packages/pandas/core/indexing.py", line 86, in _get_label
return self.obj._xs(label, axis=axis)
File "/usr/local/lib/python3.5/site-packages/pandas/core/generic.py", line 1483, in xs
drop_level=drop_level)
File "/usr/local/lib/python3.5/site-packages/pandas/core/index.py", line 5432, in get_loc_level
return (self._engine.get_loc(_values_from_object(key)),
File "pandas/index.pyx", line 137, in pandas.index.IndexEngine.get_loc (pandas/index.c:3979)
File "pandas/index.pyx", line 146, in pandas.index.IndexEngine.get_loc (pandas/index.c:3693)
File "pandas/src/util.pxd", line 41, in util.get_value_at (pandas/index.c:13199)
IndexError: index out of bounds
|
IndexError
|
def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
"""provide explict type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = _maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
|
def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
"""provide explict type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_internal_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = _maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def _sanitize_column(self, key, value):
# Need to make sure new columns (which go into the BlockManager as new
# blocks) are always copied
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index).values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError(
"incompatible index of inserted column with frame index"
)
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = com._possibly_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = _possibly_infer_to_datetimelike(value)
else:
# upcast the scalar
dtype, value = _infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = com._possibly_cast_to_datetime(value, dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
|
def _sanitize_column(self, key, value):
# Need to make sure new columns (which go into the BlockManager as new
# blocks) are always copied
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index).values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError(
"incompatible index of inserted column with frame index"
)
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = com._possibly_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = _possibly_infer_to_datetimelike(value)
else:
# upcast the scalar
dtype, value = _infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = com._possibly_cast_to_datetime(value, dtype)
# return internal types directly
if is_internal_type(value):
return value
# broadcast across multiple columns if necessary
if key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if reduce and axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis), dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy, labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1) for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (
Series.from_array(arr, index=res_columns, name=name, dtype=dtype)
for i, (arr, name) in enumerate(zip(values, res_index))
)
else: # pragma : no cover
raise AssertionError("Axis must be 0 or 1, got %s" % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, "args"):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ("occurred at index %s" % pprint_thing(k),)
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
|
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if reduce and axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_internal_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis), dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy, labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1) for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (
Series.from_array(arr, index=res_columns, name=name, dtype=dtype)
for i, (arr, name) in enumerate(zip(values, res_index))
)
else: # pragma : no cover
raise AssertionError("Axis must be 0 or 1, got %s" % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, "args"):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ("occurred at index %s" % pprint_thing(k),)
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def should_store(self, value):
return not (
issubclass(
value.dtype.type,
(np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_),
)
or is_extension_type(value)
)
|
def should_store(self, value):
return not (
issubclass(
value.dtype.type,
(np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_),
)
or is_internal_type(value)
)
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_extension_type = is_extension_type(value)
# categorical/spares/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError(
"Shape of new values must be compatible with manager shape"
)
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, allow_fill=False)
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(
values=value.copy(),
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
for mgr_loc in unfit_mgr_locs
)
self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(
values=value_getitem(unfit_val_items),
ndim=self.ndim,
placement=unfit_mgr_locs,
)
)
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
|
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_internal_type = is_internal_type(value)
# categorical/spares/datetimetz
if value_is_internal_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError(
"Shape of new values must be compatible with manager shape"
)
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, allow_fill=False)
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_internal_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(
values=value.copy(),
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
for mgr_loc in unfit_mgr_locs
)
self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(
values=value_getitem(unfit_val_items),
ndim=self.ndim,
placement=unfit_mgr_locs,
)
)
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def map(self, arg, na_action=None):
"""
Map values of Series using input correspondence (which can be
a dict, Series, or function)
Parameters
----------
arg : function, dict, or Series
na_action : {None, 'ignore'}
If 'ignore', propagate NA values
Examples
--------
>>> x
one 1
two 2
three 3
>>> y
1 foo
2 bar
3 baz
>>> x.map(y)
one foo
two bar
three baz
Returns
-------
y : Series
same index as caller
"""
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.asobject
if na_action == "ignore":
def map_f(values, f):
return lib.map_infer_mask(values, f, isnull(values).view(np.uint8))
else:
map_f = lib.map_infer
if isinstance(arg, (dict, Series)):
if isinstance(arg, dict):
arg = self._constructor(arg, index=arg.keys())
indexer = arg.index.get_indexer(values)
new_values = algos.take_1d(arg._values, indexer)
else:
new_values = map_f(values, arg)
return self._constructor(new_values, index=self.index).__finalize__(self)
|
def map(self, arg, na_action=None):
"""
Map values of Series using input correspondence (which can be
a dict, Series, or function)
Parameters
----------
arg : function, dict, or Series
na_action : {None, 'ignore'}
If 'ignore', propagate NA values
Examples
--------
>>> x
one 1
two 2
three 3
>>> y
1 foo
2 bar
3 baz
>>> x.map(y)
one foo
two bar
three baz
Returns
-------
y : Series
same index as caller
"""
values = self.asobject
if na_action == "ignore":
mask = isnull(values)
def map_f(values, f):
return lib.map_infer_mask(values, f, mask.view(np.uint8))
else:
map_f = lib.map_infer
if isinstance(arg, (dict, Series)):
if isinstance(arg, dict):
arg = self._constructor(arg, index=arg.keys())
indexer = arg.index.get_indexer(values)
new_values = algos.take_1d(arg._values, indexer)
return self._constructor(new_values, index=self.index).__finalize__(self)
else:
mapped = map_f(values, arg)
return self._constructor(mapped, index=self.index).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def map_f(values, f):
return lib.map_infer_mask(values, f, isnull(values).view(np.uint8))
|
def map_f(values, f):
return lib.map_infer_mask(values, f, mask.view(np.uint8))
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series. Can be ufunc (a NumPy function
that applies to the entire Series) or a Python function that only works
on single values
Parameters
----------
func : function
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
args : tuple
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
Returns
-------
y : Series or DataFrame if func returns a Series
See also
--------
Series.map: For element-wise operations
Examples
--------
Create a series with typical summer temperatures for each city.
>>> import pandas as pd
>>> import numpy as np
>>> series = pd.Series([20, 21, 12], index=['London',
... 'New York','Helsinki'])
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x**2
>>> series.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> series.apply(lambda x: x**2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x-custom_value
>>> series.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x+=kwargs[month]
... return x
>>> series.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> series.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype, index=self.index).__finalize__(self)
if kwds or args and not isinstance(func, np.ufunc):
f = lambda x: func(x, *args, **kwds)
else:
f = func
if isinstance(f, np.ufunc):
return f(self)
if is_extension_type(self.dtype):
mapped = self._values.map(f)
else:
values = self.asobject
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
from pandas.core.frame import DataFrame
return DataFrame(mapped.tolist(), index=self.index)
else:
return self._constructor(mapped, index=self.index).__finalize__(self)
|
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series. Can be ufunc (a NumPy function
that applies to the entire Series) or a Python function that only works
on single values
Parameters
----------
func : function
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
args : tuple
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
Returns
-------
y : Series or DataFrame if func returns a Series
See also
--------
Series.map: For element-wise operations
Examples
--------
Create a series with typical summer temperatures for each city.
>>> import pandas as pd
>>> import numpy as np
>>> series = pd.Series([20, 21, 12], index=['London',
... 'New York','Helsinki'])
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x**2
>>> series.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> series.apply(lambda x: x**2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x-custom_value
>>> series.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x+=kwargs[month]
... return x
>>> series.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> series.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype, index=self.index).__finalize__(self)
if kwds or args and not isinstance(func, np.ufunc):
f = lambda x: func(x, *args, **kwds)
else:
f = func
if isinstance(f, np.ufunc):
return f(self)
mapped = lib.map_infer(self.asobject, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
from pandas.core.frame import DataFrame
return DataFrame(mapped.tolist(), index=self.index)
else:
return self._constructor(mapped, index=self.index).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, list) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_internal_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, list) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
|
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_internal_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def map(self, mapper):
"""
Apply mapper function to its values.
Parameters
----------
mapper : callable
Function to be applied.
Returns
-------
applied : array
"""
return self._arrmap(self.values, mapper)
|
def map(self, mapper):
return self._arrmap(self.values, mapper)
|
https://github.com/pandas-dev/pandas/issues/12473
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-ba941613799a> in <module>()
----> 1 df.publication_timestamp.map(lambda x: x)
/Users/johria/.pyenv/versions/3.5.1/lib/python3.5/site-packages/pandas/core/series.py in map(self, arg, na_action)
2052 index=self.index).__finalize__(self)
2053 else:
-> 2054 mapped = map_f(values, arg)
2055 return self._constructor(mapped,
2056 index=self.index).__finalize__(self)
TypeError: Argument 'arr' has incorrect type (expected numpy.ndarray, got DatetimeIndex)
|
TypeError
|
def _convert_obj(self, obj):
obj = super(PeriodIndexResampler, self)._convert_obj(obj)
offset = to_offset(self.freq)
if offset.n > 1:
if self.kind == "period": # pragma: no cover
print("Warning: multiple of frequency -> timestamps")
# Cannot have multiple of periods, convert to timestamp
self.kind = "timestamp"
# convert to timestamp
if not (self.kind is None or self.kind == "period"):
obj = obj.to_timestamp(how=self.convention)
return obj
|
def _convert_obj(self, obj):
obj = super(PeriodIndexResampler, self)._convert_obj(obj)
offset = to_offset(self.freq)
if offset.n > 1:
if self.kind == "period": # pragma: no cover
print("Warning: multiple of frequency -> timestamps")
# Cannot have multiple of periods, convert to timestamp
self.kind = "timestamp"
if not len(obj):
self.kind = "timestamp"
# convert to timestamp
if not (self.kind is None or self.kind == "period"):
obj = obj.to_timestamp(how=self.convention)
return obj
|
https://github.com/pandas-dev/pandas/issues/12774
|
In [38]: pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-38-295afa97781f> in <module>()
----> 1 pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in f(self, _method)
473
474 def f(self, _method=method):
--> 475 return self._groupby_and_aggregate(None, _method)
476 f.__doc__ = getattr(GroupBy, method).__doc__
477 setattr(Resampler, method, f)
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _groupby_and_aggregate(self, grouper, how, *args, **kwargs)
353
354 if grouper is None:
--> 355 self._set_binner()
356 grouper = self.grouper
357
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _set_binner(self)
202
203 if self.binner is None:
--> 204 self.binner, self.grouper = self._get_binner()
205
206 def _get_binner(self):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner(self)
210 """
211
--> 212 binner, bins, binlabels = self._get_binner_for_time()
213 bin_grouper = BinGrouper(bins, binlabels)
214 return binner, bin_grouper
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner_for_time(self)
538 if self.kind == 'period':
539 return self.groupby._get_time_period_bins(self.ax)
--> 540 return self.groupby._get_time_bins(self.ax)
541
542 def _downsample(self, how, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_time_bins(self, ax)
905 if not isinstance(ax, DatetimeIndex):
906 raise TypeError('axis must be a DatetimeIndex, but got '
--> 907 'an instance of %r' % type(ax).__name__)
908
909 if len(ax) == 0:
TypeError: axis must be a DatetimeIndex, but got an instance of 'PeriodIndex'
|
TypeError
|
def _get_new_index(self):
"""return our new index"""
ax = self.ax
if len(ax) == 0:
values = []
else:
start = ax[0].asfreq(self.freq, how=self.convention)
end = ax[-1].asfreq(self.freq, how="end")
values = period_range(start, end, freq=self.freq).values
return ax._shallow_copy(values, freq=self.freq)
|
def _get_new_index(self):
"""return our new index"""
ax = self.ax
ax_attrs = ax._get_attributes_dict()
ax_attrs["freq"] = self.freq
obj = self._selected_obj
if len(ax) == 0:
new_index = PeriodIndex(data=[], **ax_attrs)
return obj.reindex(new_index)
start = ax[0].asfreq(self.freq, how=self.convention)
end = ax[-1].asfreq(self.freq, how="end")
return period_range(start, end, **ax_attrs)
|
https://github.com/pandas-dev/pandas/issues/12774
|
In [38]: pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-38-295afa97781f> in <module>()
----> 1 pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in f(self, _method)
473
474 def f(self, _method=method):
--> 475 return self._groupby_and_aggregate(None, _method)
476 f.__doc__ = getattr(GroupBy, method).__doc__
477 setattr(Resampler, method, f)
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _groupby_and_aggregate(self, grouper, how, *args, **kwargs)
353
354 if grouper is None:
--> 355 self._set_binner()
356 grouper = self.grouper
357
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _set_binner(self)
202
203 if self.binner is None:
--> 204 self.binner, self.grouper = self._get_binner()
205
206 def _get_binner(self):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner(self)
210 """
211
--> 212 binner, bins, binlabels = self._get_binner_for_time()
213 bin_grouper = BinGrouper(bins, binlabels)
214 return binner, bin_grouper
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner_for_time(self)
538 if self.kind == 'period':
539 return self.groupby._get_time_period_bins(self.ax)
--> 540 return self.groupby._get_time_bins(self.ax)
541
542 def _downsample(self, how, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_time_bins(self, ax)
905 if not isinstance(ax, DatetimeIndex):
906 raise TypeError('axis must be a DatetimeIndex, but got '
--> 907 'an instance of %r' % type(ax).__name__)
908
909 if len(ax) == 0:
TypeError: axis must be a DatetimeIndex, but got an instance of 'PeriodIndex'
|
TypeError
|
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
# we may need to actually resample as if we are timestamps
if self.kind == "timestamp":
return super(PeriodIndexResampler, self)._downsample(how, **kwargs)
how = self._is_cython_func(how) or how
ax = self.ax
new_index = self._get_new_index()
if len(new_index) == 0:
return self._wrap_result(self._selected_obj.reindex(new_index))
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
if is_subperiod(ax.freq, self.freq):
# Downsampling
rng = np.arange(memb.values[0], memb.values[-1] + 1)
bins = memb.searchsorted(rng, side="right")
grouper = BinGrouper(bins, new_index)
return self._groupby_and_aggregate(grouper, how)
elif is_superperiod(ax.freq, self.freq):
return self.asfreq()
elif ax.freq == self.freq:
return self.asfreq()
raise ValueError(
"Frequency {axfreq} cannot be resampled to {freq}".format(
axfreq=ax.freq, freq=self.freq
)
)
|
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
# we may need to actually resample as if we are timestamps
if self.kind == "timestamp":
return super(PeriodIndexResampler, self)._downsample(how, **kwargs)
how = self._is_cython_func(how) or how
ax = self.ax
new_index = self._get_new_index()
if len(new_index) == 0:
return self._wrap_result(new_index)
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
if is_subperiod(ax.freq, self.freq):
# Downsampling
rng = np.arange(memb.values[0], memb.values[-1] + 1)
bins = memb.searchsorted(rng, side="right")
grouper = BinGrouper(bins, new_index)
return self._groupby_and_aggregate(grouper, how)
elif is_superperiod(ax.freq, self.freq):
return self.asfreq()
raise ValueError(
"Frequency {axfreq} cannot be resampled to {freq}".format(
axfreq=ax.freq, freq=self.freq
)
)
|
https://github.com/pandas-dev/pandas/issues/12774
|
In [38]: pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-38-295afa97781f> in <module>()
----> 1 pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in f(self, _method)
473
474 def f(self, _method=method):
--> 475 return self._groupby_and_aggregate(None, _method)
476 f.__doc__ = getattr(GroupBy, method).__doc__
477 setattr(Resampler, method, f)
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _groupby_and_aggregate(self, grouper, how, *args, **kwargs)
353
354 if grouper is None:
--> 355 self._set_binner()
356 grouper = self.grouper
357
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _set_binner(self)
202
203 if self.binner is None:
--> 204 self.binner, self.grouper = self._get_binner()
205
206 def _get_binner(self):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner(self)
210 """
211
--> 212 binner, bins, binlabels = self._get_binner_for_time()
213 bin_grouper = BinGrouper(bins, binlabels)
214 return binner, bin_grouper
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner_for_time(self)
538 if self.kind == 'period':
539 return self.groupby._get_time_period_bins(self.ax)
--> 540 return self.groupby._get_time_bins(self.ax)
541
542 def _downsample(self, how, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_time_bins(self, ax)
905 if not isinstance(ax, DatetimeIndex):
906 raise TypeError('axis must be a DatetimeIndex, but got '
--> 907 'an instance of %r' % type(ax).__name__)
908
909 if len(ax) == 0:
TypeError: axis must be a DatetimeIndex, but got an instance of 'PeriodIndex'
|
TypeError
|
def _upsample(self, method, limit=None):
"""
method : string {'backfill', 'bfill', 'pad', 'ffill'}
method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
See also
--------
.fillna
"""
# we may need to actually resample as if we are timestamps
if self.kind == "timestamp":
return super(PeriodIndexResampler, self)._upsample(method, limit=limit)
ax = self.ax
obj = self.obj
new_index = self._get_new_index()
if len(new_index) == 0:
return self._wrap_result(self._selected_obj.reindex(new_index))
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=method, limit=limit)
return self._wrap_result(_take_new_index(obj, indexer, new_index, axis=self.axis))
|
def _upsample(self, method, limit=None):
"""
method : string {'backfill', 'bfill', 'pad', 'ffill'}
method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
See also
--------
.fillna
"""
# we may need to actually resample as if we are timestamps
if self.kind == "timestamp":
return super(PeriodIndexResampler, self)._upsample(method, limit=limit)
ax = self.ax
obj = self.obj
new_index = self._get_new_index()
if len(new_index) == 0:
return self._wrap_result(new_index)
if not is_superperiod(ax.freq, self.freq):
return self.asfreq()
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=method, limit=limit)
return self._wrap_result(_take_new_index(obj, indexer, new_index, axis=self.axis))
|
https://github.com/pandas-dev/pandas/issues/12774
|
In [38]: pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-38-295afa97781f> in <module>()
----> 1 pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in f(self, _method)
473
474 def f(self, _method=method):
--> 475 return self._groupby_and_aggregate(None, _method)
476 f.__doc__ = getattr(GroupBy, method).__doc__
477 setattr(Resampler, method, f)
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _groupby_and_aggregate(self, grouper, how, *args, **kwargs)
353
354 if grouper is None:
--> 355 self._set_binner()
356 grouper = self.grouper
357
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _set_binner(self)
202
203 if self.binner is None:
--> 204 self.binner, self.grouper = self._get_binner()
205
206 def _get_binner(self):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner(self)
210 """
211
--> 212 binner, bins, binlabels = self._get_binner_for_time()
213 bin_grouper = BinGrouper(bins, binlabels)
214 return binner, bin_grouper
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner_for_time(self)
538 if self.kind == 'period':
539 return self.groupby._get_time_period_bins(self.ax)
--> 540 return self.groupby._get_time_bins(self.ax)
541
542 def _downsample(self, how, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_time_bins(self, ax)
905 if not isinstance(ax, DatetimeIndex):
906 raise TypeError('axis must be a DatetimeIndex, but got '
--> 907 'an instance of %r' % type(ax).__name__)
908
909 if len(ax) == 0:
TypeError: axis must be a DatetimeIndex, but got an instance of 'PeriodIndex'
|
TypeError
|
def _groupby_and_aggregate(self, grouper, how, *args, **kwargs):
if grouper is None:
return self._downsample(how, **kwargs)
return super(PeriodIndexResampler, self)._groupby_and_aggregate(
grouper, how, *args, **kwargs
)
|
def _groupby_and_aggregate(self, grouper, how, *args, **kwargs):
"""revaluate the obj with a groupby aggregation"""
if grouper is None:
self._set_binner()
grouper = self.grouper
obj = self._selected_obj
try:
grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
except TypeError:
# panel grouper
grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis)
try:
result = grouped.aggregate(how, *args, **kwargs)
except Exception:
# we have a non-reducing function
# try to evaluate
result = grouped.apply(how, *args, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
|
https://github.com/pandas-dev/pandas/issues/12774
|
In [38]: pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-38-295afa97781f> in <module>()
----> 1 pd.Series(1, index=pd.period_range(start='2000', periods=100)).resample('M').count()
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in f(self, _method)
473
474 def f(self, _method=method):
--> 475 return self._groupby_and_aggregate(None, _method)
476 f.__doc__ = getattr(GroupBy, method).__doc__
477 setattr(Resampler, method, f)
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _groupby_and_aggregate(self, grouper, how, *args, **kwargs)
353
354 if grouper is None:
--> 355 self._set_binner()
356 grouper = self.grouper
357
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _set_binner(self)
202
203 if self.binner is None:
--> 204 self.binner, self.grouper = self._get_binner()
205
206 def _get_binner(self):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner(self)
210 """
211
--> 212 binner, bins, binlabels = self._get_binner_for_time()
213 bin_grouper = BinGrouper(bins, binlabels)
214 return binner, bin_grouper
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_binner_for_time(self)
538 if self.kind == 'period':
539 return self.groupby._get_time_period_bins(self.ax)
--> 540 return self.groupby._get_time_bins(self.ax)
541
542 def _downsample(self, how, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/pandas/tseries/resample.py in _get_time_bins(self, ax)
905 if not isinstance(ax, DatetimeIndex):
906 raise TypeError('axis must be a DatetimeIndex, but got '
--> 907 'an instance of %r' % type(ax).__name__)
908
909 if len(ax) == 0:
TypeError: axis must be a DatetimeIndex, but got an instance of 'PeriodIndex'
|
TypeError
|
def __getitem__(self, key):
try:
return self._get_val_at(self.index.get_loc(key))
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception("Requested index not in this series!")
except TypeError:
# Could not hash item, must be array-like?
pass
# is there a case where this would NOT be an ndarray?
# need to find an example, I took out the case for now
key = _values_from_object(key)
dataSlice = self.values[key]
new_index = Index(self.index.view(ndarray)[key])
return self._constructor(dataSlice, index=new_index).__finalize__(self)
|
def __getitem__(self, key):
""" """
try:
return self._get_val_at(self.index.get_loc(key))
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
raise Exception("Requested index not in this series!")
except TypeError:
# Could not hash item, must be array-like?
pass
# is there a case where this would NOT be an ndarray?
# need to find an example, I took out the case for now
key = _values_from_object(key)
dataSlice = self.values[key]
new_index = Index(self.index.view(ndarray)[key])
return self._constructor(dataSlice, index=new_index).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/9467
|
In [2]: import pandas as pd
In [3]: ss = pd.Series(range(10)).to_sparse()
In [4]: ss[...]
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-4-f0ccf654123b> in <module>()
----> 1 ss[...]
/Users/shoyer/dev/pandas/pandas/sparse/series.pyc in __getitem__(self, key)
369 if isinstance(key, (int, np.integer)):
370 return self._get_val_at(key)
--> 371 raise Exception('Requested index not in this series!')
372
373 except TypeError:
Exception: Requested index not in this series!
|
Exception
|
def _wrap_result(self, result, use_codes=True, name=None, expand=None):
from pandas.core.index import Index, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = False if result.ndim == 1 else True
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when infered
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
return MultiIndex.from_tuples(result, names=name)
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, index=index)
else:
# Must a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
|
def _wrap_result(self, result, use_codes=True, name=None, expand=None):
from pandas.core.index import Index, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = False if result.ndim == 1 else True
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when infered
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
return MultiIndex.from_tuples(result, names=name)
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, index=index)
else:
# Must a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
|
https://github.com/pandas-dev/pandas/issues/12617
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/pwaller/.local/src/pandas/pandas/core/strings.py", line 1432, in partition
return self._wrap_result(result, expand=expand)
File "/home/pwaller/.local/src/pandas/pandas/core/strings.py", line 1348, in _wrap_result
return MultiIndex.from_tuples(result, names=name)
File "/home/pwaller/.local/src/pandas/pandas/indexes/multi.py", line 889, in from_tuples
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
File "/home/pwaller/.local/src/pandas/pandas/indexes/multi.py", line 844, in from_arrays
names=names, verify_integrity=False)
File "/home/pwaller/.local/src/pandas/pandas/indexes/multi.py", line 92, in __new__
result._set_names(names)
File "/home/pwaller/.local/src/pandas/pandas/indexes/multi.py", line 446, in _set_names
raise ValueError('Length of names must match number of levels in '
ValueError: Length of names must match number of levels in MultiIndex.
|
ValueError
|
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
|
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
|
https://github.com/pandas-dev/pandas/issues/12768
|
AttributeError Traceback (most recent call last)
<ipython-input-11-ffb9adbc134a> in <module>()
----> 1 pd.DataFrame({'a': [1,1,2], 'b':[1,2,0]}).groupby('a').filter(lambda x: x['b'].sum() > 5, dropna=False)
....../local/lib/python2.7/site-packages/pandas/core/groupby.py in filter(self, func, dropna, *args, **kwargs)
3570 type(res).__name__)
3571
-> 3572 return self._apply_filter(indices, dropna)
3573
3574
....../local/lib/python2.7/site-packages/pandas/core/groupby.py in _apply_filter(self, indices, dropna)
831 mask = np.empty(len(self._selected_obj.index), dtype=bool)
832 mask.fill(False)
--> 833 mask[indices.astype(int)] = True
834 # mask fails to broadcast when passed to where; broadcast manually.
835 mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
AttributeError: 'list' object has no attribute 'astype'
|
AttributeError
|
def __getitem__(self, key):
""" """
if com.is_integer(key):
return self._get_val_at(key)
else:
if isinstance(key, SparseArray):
key = np.asarray(key)
if hasattr(key, "__len__") and len(self) != len(key):
return self.take(key)
else:
data_slice = self.values[key]
return self._constructor(data_slice)
|
def __getitem__(self, key):
""" """
if com.is_integer(key):
return self._get_val_at(key)
else:
if isinstance(key, SparseArray):
key = np.asarray(key)
if hasattr(key, "__len__") and len(self) != len(key):
indices = self.sp_index
if hasattr(indices, "to_int_index"):
indices = indices.to_int_index()
data_slice = self.values.take(indices.indices)[key]
else:
data_slice = self.values[key]
return self._constructor(data_slice)
|
https://github.com/pandas-dev/pandas/issues/10560
|
In [2]: pd.__version__
Out[2]: '0.16.2-123-gdf1f5cf'
In [3]: pd.options.display.max_rows = 3
In [4]: pd.Series(randn(3)).to_sparse()
Out[4]:
0 1.100684
1 -0.924482
2 -0.106069
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
In [5]: pd.Series(randn(4)).to_sparse()
Out[5]: ---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
...
TypeError: cannot concatenate a non-NDFrame object
|
TypeError
|
def take(self, indices, axis=0):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
if axis:
raise ValueError("axis must be 0, input was {0}".format(axis))
if com.is_integer(indices):
# return scalar
return self[indices]
indices = np.atleast_1d(np.asarray(indices, dtype=int))
# allow -1 to indicate missing values
n = len(self)
if ((indices >= n) | (indices < -1)).any():
raise IndexError("out of bounds access")
if self.sp_index.npoints > 0:
locs = np.array(
[self.sp_index.lookup(loc) if loc > -1 else -1 for loc in indices]
)
result = self.sp_values.take(locs)
mask = locs == -1
if mask.any():
try:
result[mask] = self.fill_value
except ValueError:
# wrong dtype
result = result.astype("float64")
result[mask] = self.fill_value
else:
result = np.empty(len(indices))
result.fill(self.fill_value)
return self._constructor(result)
|
def take(self, indices, axis=0):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
if axis:
raise ValueError("axis must be 0, input was {0}".format(axis))
indices = np.atleast_1d(np.asarray(indices, dtype=int))
# allow -1 to indicate missing values
n = len(self)
if ((indices >= n) | (indices < -1)).any():
raise IndexError("out of bounds access")
if self.sp_index.npoints > 0:
locs = np.array(
[self.sp_index.lookup(loc) if loc > -1 else -1 for loc in indices]
)
result = self.sp_values.take(locs)
mask = locs == -1
if mask.any():
try:
result[mask] = self.fill_value
except ValueError:
# wrong dtype
result = result.astype("float64")
result[mask] = self.fill_value
else:
result = np.empty(len(indices))
result.fill(self.fill_value)
return result
|
https://github.com/pandas-dev/pandas/issues/10560
|
In [2]: pd.__version__
Out[2]: '0.16.2-123-gdf1f5cf'
In [3]: pd.options.display.max_rows = 3
In [4]: pd.Series(randn(3)).to_sparse()
Out[4]:
0 1.100684
1 -0.924482
2 -0.106069
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
In [5]: pd.Series(randn(4)).to_sparse()
Out[5]: ---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
...
TypeError: cannot concatenate a non-NDFrame object
|
TypeError
|
def _reindex_index(
self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False
):
if level is not None:
raise TypeError("Reindex by level not supported for sparse")
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return SparseDataFrame(index=index, columns=self.columns)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = com._ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
np.putmask(new, mask, fill_value)
new_series[col] = new
return SparseDataFrame(
new_series,
index=index,
columns=self.columns,
default_fill_value=self._default_fill_value,
)
|
def _reindex_index(
self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False
):
if level is not None:
raise TypeError("Reindex by level not supported for sparse")
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return SparseDataFrame(index=index, columns=self.columns)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = com._ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
new = values.take(indexer)
if need_mask:
np.putmask(new, mask, fill_value)
new_series[col] = new
return SparseDataFrame(
new_series,
index=index,
columns=self.columns,
default_fill_value=self._default_fill_value,
)
|
https://github.com/pandas-dev/pandas/issues/10560
|
In [2]: pd.__version__
Out[2]: '0.16.2-123-gdf1f5cf'
In [3]: pd.options.display.max_rows = 3
In [4]: pd.Series(randn(3)).to_sparse()
Out[4]:
0 1.100684
1 -0.924482
2 -0.106069
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
In [5]: pd.Series(randn(4)).to_sparse()
Out[5]: ---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
...
TypeError: cannot concatenate a non-NDFrame object
|
TypeError
|
def reindex(self, index=None, method=None, copy=True, limit=None, **kwargs):
"""
Conform SparseSeries to new Index
See Series.reindex docstring for general behavior
Returns
-------
reindexed : SparseSeries
"""
new_index = _ensure_index(index)
if self.index.equals(new_index):
if copy:
return self.copy()
else:
return self
return self._constructor(
self._data.reindex(new_index, method=method, limit=limit, copy=copy),
index=new_index,
).__finalize__(self)
|
def reindex(self, index=None, method=None, copy=True, limit=None):
"""
Conform SparseSeries to new Index
See Series.reindex docstring for general behavior
Returns
-------
reindexed : SparseSeries
"""
new_index = _ensure_index(index)
if self.index.equals(new_index):
if copy:
return self.copy()
else:
return self
return self._constructor(
self._data.reindex(new_index, method=method, limit=limit, copy=copy),
index=new_index,
).__finalize__(self)
|
https://github.com/pandas-dev/pandas/issues/10560
|
In [2]: pd.__version__
Out[2]: '0.16.2-123-gdf1f5cf'
In [3]: pd.options.display.max_rows = 3
In [4]: pd.Series(randn(3)).to_sparse()
Out[4]:
0 1.100684
1 -0.924482
2 -0.106069
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
In [5]: pd.Series(randn(4)).to_sparse()
Out[5]: ---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
...
TypeError: cannot concatenate a non-NDFrame object
|
TypeError
|
def describe(self, percentiles=None, include=None, exclude=None):
if self.ndim >= 3:
msg = "describe is not implemented on on Panel or PanelND objects."
raise NotImplementedError(msg)
if percentiles is not None:
# get them all to be in [0, 1]
self._check_percentile(percentiles)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# median should always be included
if (percentiles != 0.5).all(): # median isn't included
lh = percentiles[percentiles < 0.5]
uh = percentiles[percentiles > 0.5]
percentiles = np.hstack([lh, 0.5, uh])
def pretty_name(x):
x *= 100
if x == int(x):
return "%.0f%%" % x
else:
return "%.1f%%" % x
def describe_numeric_1d(series, percentiles):
stat_index = (
["count", "mean", "std", "min"]
+ [pretty_name(x) for x in percentiles]
+ ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ [series.quantile(x) for x in percentiles]
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if com.is_datetime64_dtype(data):
asint = data.dropna().values.view("i8")
names += ["top", "freq", "first", "last"]
result += [
lib.Timestamp(top),
freq,
lib.Timestamp(asint.min()),
lib.Timestamp(asint.max()),
]
else:
names += ["top", "freq"]
result += [top, freq]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data, percentiles):
if com.is_bool_dtype(data):
return describe_categorical_1d(data)
elif com.is_numeric_dtype(data):
return describe_numeric_1d(data, percentiles)
elif com.is_timedelta64_dtype(data):
return describe_numeric_1d(data, percentiles)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self, percentiles)
elif (include is None) and (exclude is None):
if len(self._get_numeric_data()._info_axis) > 0:
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
else:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s, percentiles) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted([x.index for x in ldesc], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
d.columns = self.columns._shallow_copy(values=d.columns.values)
d.columns.names = data.columns.names
return d
|
def describe(self, percentiles=None, include=None, exclude=None):
if self.ndim >= 3:
msg = "describe is not implemented on on Panel or PanelND objects."
raise NotImplementedError(msg)
if percentiles is not None:
# get them all to be in [0, 1]
self._check_percentile(percentiles)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# median should always be included
if (percentiles != 0.5).all(): # median isn't included
lh = percentiles[percentiles < 0.5]
uh = percentiles[percentiles > 0.5]
percentiles = np.hstack([lh, 0.5, uh])
def pretty_name(x):
x *= 100
if x == int(x):
return "%.0f%%" % x
else:
return "%.1f%%" % x
def describe_numeric_1d(series, percentiles):
stat_index = (
["count", "mean", "std", "min"]
+ [pretty_name(x) for x in percentiles]
+ ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ [series.quantile(x) for x in percentiles]
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if com.is_datetime64_dtype(data):
asint = data.dropna().values.view("i8")
names += ["top", "freq", "first", "last"]
result += [
lib.Timestamp(top),
freq,
lib.Timestamp(asint.min()),
lib.Timestamp(asint.max()),
]
else:
names += ["top", "freq"]
result += [top, freq]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data, percentiles):
if com.is_bool_dtype(data):
return describe_categorical_1d(data)
elif com.is_numeric_dtype(data):
return describe_numeric_1d(data, percentiles)
elif com.is_timedelta64_dtype(data):
return describe_numeric_1d(data, percentiles)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self, percentiles)
elif (include is None) and (exclude is None):
if len(self._get_numeric_data()._info_axis) > 0:
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
else:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s, percentiles) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted([x.index for x in ldesc], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
d.columns.names = data.columns.names
return d
|
https://github.com/pandas-dev/pandas/issues/11558
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import random\n",
"import pandas as pd\n",
"\n",
"df = pd.DataFrame(\n",
" {'a': range(10),\n",
" 'medium': [random.choice(['painting', 'sculpture']) for _ in range(10)],\n",
" 'artist': [random.choice(['picasso', 'davinci']) for _ in range(10)]}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>a</th>\n",
" <th>artist</th>\n",
" <th>medium</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>davinci</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>5</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>6</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>7</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>8</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>9</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" a artist medium\n",
"0 0 davinci painting\n",
"1 1 picasso sculpture\n",
"2 2 davinci sculpture\n",
"3 3 picasso sculpture\n",
"4 4 picasso painting\n",
"5 5 picasso sculpture\n",
"6 6 davinci painting\n",
"7 7 davinci painting\n",
"8 8 picasso painting\n",
"9 9 picasso painting"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/index.py:4281: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n",
" return np.sum(name == np.asarray(self.names)) > 1\n"
]
}
],
"source": [
"g = df.groupby(['artist', 'medium'])['a'].count().unstack()\n",
"\n",
"df['medium'] = df['medium'].astype('category')\n",
"gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 3\n",
"picasso 3\n",
"Name: painting, dtype: int64"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting']"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th>medium</th>\n",
" <th>painting</th>\n",
" </tr>\n",
" <tr>\n",
" <th>artist</th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>davinci</th>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>picasso</th>\n",
" <td>3</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"medium painting\n",
"artist \n",
"davinci 3\n",
"picasso 3"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gcat['painting']"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>painting</th>\n",
" <th>sculpture</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>count</th>\n",
" <td>2</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>mean</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>std</th>\n",
" <td>0</td>\n",
" <td>1.414214</td>\n",
" </tr>\n",
" <tr>\n",
" <th>min</th>\n",
" <td>3</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>25%</th>\n",
" <td>3</td>\n",
" <td>1.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>50%</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>75%</th>\n",
" <td>3</td>\n",
" <td>2.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>max</th>\n",
" <td>3</td>\n",
" <td>3.000000</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" painting sculpture\n",
"count 2 2.000000\n",
"mean 3 2.000000\n",
"std 0 1.414214\n",
"min 3 1.000000\n",
"25% 3 1.500000\n",
"50% 3 2.000000\n",
"75% 3 2.500000\n",
"max 3 3.000000"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g.describe()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "AttributeError",
"evalue": "'DataFrame' object has no attribute 'value_counts'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-7-f15e32e0e8e0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdescribe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe\u001b[0;34m(self, percentiles, include, exclude)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_1d\u001b[0;34m(data, percentiles)\u001b[0m\n\u001b[1;32m 4378\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_numeric_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4379\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4380\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4381\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4382\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_categorical_1d\u001b[0;34m(data)\u001b[0m\n\u001b[1;32m 4354\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4355\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'unique'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4356\u001b[0;31m \u001b[0mobjcounts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue_counts\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4357\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m!=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4358\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 2244\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2245\u001b[0m raise AttributeError(\"'%s' object has no attribute '%s'\" %\n\u001b[0;32m-> 2246\u001b[0;31m (type(self).__name__, name))\n\u001b[0m\u001b[1;32m 2247\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2248\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__setattr__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mAttributeError\u001b[0m: 'DataFrame' object has no attribute 'value_counts'"
]
}
],
"source": [
"gcat.describe()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 4\n",
"picasso 6\n",
"dtype: int64"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting'] + g['sculpture']"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "Exception",
"evalue": "Data must be 1-dimensional",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mException\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-9-e261fa312c08>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'painting'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'sculpture'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/ops.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(self, other, axis, level, fill_value)\u001b[0m\n\u001b[1;32m 991\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdefault_axis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 992\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Another DataFrame\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 993\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_frame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 994\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSeries\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 995\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_series\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m_combine_frame\u001b[0;34m(self, other, func, fill_value, level)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(col)\u001b[0m\n\u001b[1;32m 3352\u001b[0m \u001b[0mr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_arith_op\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mthis\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3353\u001b[0m return self._constructor_sliced(r, index=new_index,\n\u001b[0;32m-> 3354\u001b[0;31m dtype=r.dtype)\n\u001b[0m\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3356\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, data, index, dtype, name, copy, fastpath)\u001b[0m\n\u001b[1;32m 217\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 218\u001b[0m data = _sanitize_array(data, index, dtype, copy,\n\u001b[0;32m--> 219\u001b[0;31m raise_cast_failure=True)\n\u001b[0m\u001b[1;32m 220\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 221\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSingleBlockManager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfastpath\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m_sanitize_array\u001b[0;34m(data, index, dtype, copy, raise_cast_failure)\u001b[0m\n\u001b[1;32m 2838\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0msubarr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2839\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndarray\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2840\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Data must be 1-dimensional'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2841\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2842\u001b[0m \u001b[0msubarr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_asarray_tuplesafe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mException\u001b[0m: Data must be 1-dimensional"
]
}
],
"source": [
"gcat['painting'] + gcat['sculpture']"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
|
AttributeError
|
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(values, **attributes)
|
def _shallow_copy(self, values=None, **kwargs):
"""
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(values, **attributes)
|
https://github.com/pandas-dev/pandas/issues/11558
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import random\n",
"import pandas as pd\n",
"\n",
"df = pd.DataFrame(\n",
" {'a': range(10),\n",
" 'medium': [random.choice(['painting', 'sculpture']) for _ in range(10)],\n",
" 'artist': [random.choice(['picasso', 'davinci']) for _ in range(10)]}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>a</th>\n",
" <th>artist</th>\n",
" <th>medium</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>davinci</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>5</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>6</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>7</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>8</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>9</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" a artist medium\n",
"0 0 davinci painting\n",
"1 1 picasso sculpture\n",
"2 2 davinci sculpture\n",
"3 3 picasso sculpture\n",
"4 4 picasso painting\n",
"5 5 picasso sculpture\n",
"6 6 davinci painting\n",
"7 7 davinci painting\n",
"8 8 picasso painting\n",
"9 9 picasso painting"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/index.py:4281: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n",
" return np.sum(name == np.asarray(self.names)) > 1\n"
]
}
],
"source": [
"g = df.groupby(['artist', 'medium'])['a'].count().unstack()\n",
"\n",
"df['medium'] = df['medium'].astype('category')\n",
"gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 3\n",
"picasso 3\n",
"Name: painting, dtype: int64"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting']"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th>medium</th>\n",
" <th>painting</th>\n",
" </tr>\n",
" <tr>\n",
" <th>artist</th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>davinci</th>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>picasso</th>\n",
" <td>3</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"medium painting\n",
"artist \n",
"davinci 3\n",
"picasso 3"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gcat['painting']"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>painting</th>\n",
" <th>sculpture</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>count</th>\n",
" <td>2</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>mean</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>std</th>\n",
" <td>0</td>\n",
" <td>1.414214</td>\n",
" </tr>\n",
" <tr>\n",
" <th>min</th>\n",
" <td>3</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>25%</th>\n",
" <td>3</td>\n",
" <td>1.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>50%</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>75%</th>\n",
" <td>3</td>\n",
" <td>2.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>max</th>\n",
" <td>3</td>\n",
" <td>3.000000</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" painting sculpture\n",
"count 2 2.000000\n",
"mean 3 2.000000\n",
"std 0 1.414214\n",
"min 3 1.000000\n",
"25% 3 1.500000\n",
"50% 3 2.000000\n",
"75% 3 2.500000\n",
"max 3 3.000000"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g.describe()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "AttributeError",
"evalue": "'DataFrame' object has no attribute 'value_counts'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-7-f15e32e0e8e0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdescribe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe\u001b[0;34m(self, percentiles, include, exclude)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_1d\u001b[0;34m(data, percentiles)\u001b[0m\n\u001b[1;32m 4378\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_numeric_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4379\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4380\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4381\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4382\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_categorical_1d\u001b[0;34m(data)\u001b[0m\n\u001b[1;32m 4354\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4355\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'unique'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4356\u001b[0;31m \u001b[0mobjcounts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue_counts\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4357\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m!=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4358\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 2244\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2245\u001b[0m raise AttributeError(\"'%s' object has no attribute '%s'\" %\n\u001b[0;32m-> 2246\u001b[0;31m (type(self).__name__, name))\n\u001b[0m\u001b[1;32m 2247\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2248\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__setattr__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mAttributeError\u001b[0m: 'DataFrame' object has no attribute 'value_counts'"
]
}
],
"source": [
"gcat.describe()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 4\n",
"picasso 6\n",
"dtype: int64"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting'] + g['sculpture']"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "Exception",
"evalue": "Data must be 1-dimensional",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mException\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-9-e261fa312c08>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'painting'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'sculpture'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/ops.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(self, other, axis, level, fill_value)\u001b[0m\n\u001b[1;32m 991\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdefault_axis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 992\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Another DataFrame\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 993\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_frame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 994\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSeries\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 995\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_series\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m_combine_frame\u001b[0;34m(self, other, func, fill_value, level)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(col)\u001b[0m\n\u001b[1;32m 3352\u001b[0m \u001b[0mr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_arith_op\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mthis\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3353\u001b[0m return self._constructor_sliced(r, index=new_index,\n\u001b[0;32m-> 3354\u001b[0;31m dtype=r.dtype)\n\u001b[0m\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3356\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, data, index, dtype, name, copy, fastpath)\u001b[0m\n\u001b[1;32m 217\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 218\u001b[0m data = _sanitize_array(data, index, dtype, copy,\n\u001b[0;32m--> 219\u001b[0;31m raise_cast_failure=True)\n\u001b[0m\u001b[1;32m 220\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 221\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSingleBlockManager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfastpath\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m_sanitize_array\u001b[0;34m(data, index, dtype, copy, raise_cast_failure)\u001b[0m\n\u001b[1;32m 2838\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0msubarr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2839\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndarray\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2840\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Data must be 1-dimensional'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2841\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2842\u001b[0m \u001b[0msubarr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_asarray_tuplesafe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mException\u001b[0m: Data must be 1-dimensional"
]
}
],
"source": [
"gcat['painting'] + gcat['sculpture']"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
|
AttributeError
|
def _shallow_copy(self, values=None, **kwargs):
if values is None:
return RangeIndex(
name=self.name, fastpath=True, **dict(self._get_data_as_items())
)
else:
kwargs.setdefault("name", self.name)
return self._int64index._shallow_copy(values, **kwargs)
|
def _shallow_copy(self, values=None, **kwargs):
"""create a new Index, don't copy the data, use the same object attributes
with passed in attributes taking precedence"""
if values is None:
return RangeIndex(
name=self.name, fastpath=True, **dict(self._get_data_as_items())
)
else:
kwargs.setdefault("name", self.name)
return self._int64index._shallow_copy(values, **kwargs)
|
https://github.com/pandas-dev/pandas/issues/11558
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import random\n",
"import pandas as pd\n",
"\n",
"df = pd.DataFrame(\n",
" {'a': range(10),\n",
" 'medium': [random.choice(['painting', 'sculpture']) for _ in range(10)],\n",
" 'artist': [random.choice(['picasso', 'davinci']) for _ in range(10)]}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>a</th>\n",
" <th>artist</th>\n",
" <th>medium</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>davinci</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>5</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>6</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>7</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>8</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>9</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" a artist medium\n",
"0 0 davinci painting\n",
"1 1 picasso sculpture\n",
"2 2 davinci sculpture\n",
"3 3 picasso sculpture\n",
"4 4 picasso painting\n",
"5 5 picasso sculpture\n",
"6 6 davinci painting\n",
"7 7 davinci painting\n",
"8 8 picasso painting\n",
"9 9 picasso painting"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/index.py:4281: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n",
" return np.sum(name == np.asarray(self.names)) > 1\n"
]
}
],
"source": [
"g = df.groupby(['artist', 'medium'])['a'].count().unstack()\n",
"\n",
"df['medium'] = df['medium'].astype('category')\n",
"gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 3\n",
"picasso 3\n",
"Name: painting, dtype: int64"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting']"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th>medium</th>\n",
" <th>painting</th>\n",
" </tr>\n",
" <tr>\n",
" <th>artist</th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>davinci</th>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>picasso</th>\n",
" <td>3</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"medium painting\n",
"artist \n",
"davinci 3\n",
"picasso 3"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gcat['painting']"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>painting</th>\n",
" <th>sculpture</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>count</th>\n",
" <td>2</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>mean</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>std</th>\n",
" <td>0</td>\n",
" <td>1.414214</td>\n",
" </tr>\n",
" <tr>\n",
" <th>min</th>\n",
" <td>3</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>25%</th>\n",
" <td>3</td>\n",
" <td>1.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>50%</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>75%</th>\n",
" <td>3</td>\n",
" <td>2.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>max</th>\n",
" <td>3</td>\n",
" <td>3.000000</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" painting sculpture\n",
"count 2 2.000000\n",
"mean 3 2.000000\n",
"std 0 1.414214\n",
"min 3 1.000000\n",
"25% 3 1.500000\n",
"50% 3 2.000000\n",
"75% 3 2.500000\n",
"max 3 3.000000"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g.describe()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "AttributeError",
"evalue": "'DataFrame' object has no attribute 'value_counts'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-7-f15e32e0e8e0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdescribe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe\u001b[0;34m(self, percentiles, include, exclude)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_1d\u001b[0;34m(data, percentiles)\u001b[0m\n\u001b[1;32m 4378\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_numeric_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4379\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4380\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4381\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4382\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_categorical_1d\u001b[0;34m(data)\u001b[0m\n\u001b[1;32m 4354\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4355\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'unique'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4356\u001b[0;31m \u001b[0mobjcounts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue_counts\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4357\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m!=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4358\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 2244\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2245\u001b[0m raise AttributeError(\"'%s' object has no attribute '%s'\" %\n\u001b[0;32m-> 2246\u001b[0;31m (type(self).__name__, name))\n\u001b[0m\u001b[1;32m 2247\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2248\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__setattr__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mAttributeError\u001b[0m: 'DataFrame' object has no attribute 'value_counts'"
]
}
],
"source": [
"gcat.describe()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 4\n",
"picasso 6\n",
"dtype: int64"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting'] + g['sculpture']"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "Exception",
"evalue": "Data must be 1-dimensional",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mException\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-9-e261fa312c08>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'painting'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'sculpture'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/ops.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(self, other, axis, level, fill_value)\u001b[0m\n\u001b[1;32m 991\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdefault_axis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 992\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Another DataFrame\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 993\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_frame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 994\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSeries\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 995\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_series\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m_combine_frame\u001b[0;34m(self, other, func, fill_value, level)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(col)\u001b[0m\n\u001b[1;32m 3352\u001b[0m \u001b[0mr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_arith_op\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mthis\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3353\u001b[0m return self._constructor_sliced(r, index=new_index,\n\u001b[0;32m-> 3354\u001b[0;31m dtype=r.dtype)\n\u001b[0m\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3356\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, data, index, dtype, name, copy, fastpath)\u001b[0m\n\u001b[1;32m 217\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 218\u001b[0m data = _sanitize_array(data, index, dtype, copy,\n\u001b[0;32m--> 219\u001b[0;31m raise_cast_failure=True)\n\u001b[0m\u001b[1;32m 220\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 221\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSingleBlockManager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfastpath\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m_sanitize_array\u001b[0;34m(data, index, dtype, copy, raise_cast_failure)\u001b[0m\n\u001b[1;32m 2838\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0msubarr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2839\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndarray\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2840\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Data must be 1-dimensional'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2841\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2842\u001b[0m \u001b[0msubarr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_asarray_tuplesafe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mException\u001b[0m: Data must be 1-dimensional"
]
}
],
"source": [
"gcat['painting'] + gcat['sculpture']"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
|
AttributeError
|
def get_loc(self, key, method=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
codes = self.categories.get_loc(key)
if codes == -1:
raise KeyError(key)
return self._engine.get_loc(codes)
|
def get_loc(self, key, method=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
codes = self.categories.get_loc(key)
if codes == -1:
raise KeyError(key)
indexer, _ = self._engine.get_indexer_non_unique(np.array([codes]))
if (indexer == -1).any():
raise KeyError(key)
return indexer
|
https://github.com/pandas-dev/pandas/issues/11558
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import random\n",
"import pandas as pd\n",
"\n",
"df = pd.DataFrame(\n",
" {'a': range(10),\n",
" 'medium': [random.choice(['painting', 'sculpture']) for _ in range(10)],\n",
" 'artist': [random.choice(['picasso', 'davinci']) for _ in range(10)]}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>a</th>\n",
" <th>artist</th>\n",
" <th>medium</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>davinci</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>5</td>\n",
" <td>picasso</td>\n",
" <td>sculpture</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>6</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>7</td>\n",
" <td>davinci</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>8</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>9</td>\n",
" <td>picasso</td>\n",
" <td>painting</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" a artist medium\n",
"0 0 davinci painting\n",
"1 1 picasso sculpture\n",
"2 2 davinci sculpture\n",
"3 3 picasso sculpture\n",
"4 4 picasso painting\n",
"5 5 picasso sculpture\n",
"6 6 davinci painting\n",
"7 7 davinci painting\n",
"8 8 picasso painting\n",
"9 9 picasso painting"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/index.py:4281: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n",
" return np.sum(name == np.asarray(self.names)) > 1\n"
]
}
],
"source": [
"g = df.groupby(['artist', 'medium'])['a'].count().unstack()\n",
"\n",
"df['medium'] = df['medium'].astype('category')\n",
"gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 3\n",
"picasso 3\n",
"Name: painting, dtype: int64"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting']"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th>medium</th>\n",
" <th>painting</th>\n",
" </tr>\n",
" <tr>\n",
" <th>artist</th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>davinci</th>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>picasso</th>\n",
" <td>3</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"medium painting\n",
"artist \n",
"davinci 3\n",
"picasso 3"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gcat['painting']"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>painting</th>\n",
" <th>sculpture</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>count</th>\n",
" <td>2</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>mean</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>std</th>\n",
" <td>0</td>\n",
" <td>1.414214</td>\n",
" </tr>\n",
" <tr>\n",
" <th>min</th>\n",
" <td>3</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>25%</th>\n",
" <td>3</td>\n",
" <td>1.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>50%</th>\n",
" <td>3</td>\n",
" <td>2.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>75%</th>\n",
" <td>3</td>\n",
" <td>2.500000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>max</th>\n",
" <td>3</td>\n",
" <td>3.000000</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" painting sculpture\n",
"count 2 2.000000\n",
"mean 3 2.000000\n",
"std 0 1.414214\n",
"min 3 1.000000\n",
"25% 3 1.500000\n",
"50% 3 2.000000\n",
"75% 3 2.500000\n",
"max 3 3.000000"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g.describe()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "AttributeError",
"evalue": "'DataFrame' object has no attribute 'value_counts'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-7-f15e32e0e8e0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdescribe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe\u001b[0;34m(self, percentiles, include, exclude)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 4396\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect_dtypes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minclude\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexclude\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexclude\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4398\u001b[0;31m \u001b[0mldesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdescribe_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4399\u001b[0m \u001b[0;31m# set a convenient order for rows\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4400\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_1d\u001b[0;34m(data, percentiles)\u001b[0m\n\u001b[1;32m 4378\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_numeric_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpercentiles\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4379\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4380\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4381\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4382\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36mdescribe_categorical_1d\u001b[0;34m(data)\u001b[0m\n\u001b[1;32m 4354\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdescribe_categorical_1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4355\u001b[0m \u001b[0mnames\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'unique'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4356\u001b[0;31m \u001b[0mobjcounts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue_counts\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4357\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mobjcounts\u001b[0m\u001b[0;34m!=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4358\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/generic.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 2244\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2245\u001b[0m raise AttributeError(\"'%s' object has no attribute '%s'\" %\n\u001b[0;32m-> 2246\u001b[0;31m (type(self).__name__, name))\n\u001b[0m\u001b[1;32m 2247\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2248\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__setattr__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mAttributeError\u001b[0m: 'DataFrame' object has no attribute 'value_counts'"
]
}
],
"source": [
"gcat.describe()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"artist\n",
"davinci 4\n",
"picasso 6\n",
"dtype: int64"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g['painting'] + g['sculpture']"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "Exception",
"evalue": "Data must be 1-dimensional",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mException\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-9-e261fa312c08>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'painting'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mgcat\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'sculpture'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/ops.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(self, other, axis, level, fill_value)\u001b[0m\n\u001b[1;32m 991\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdefault_axis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 992\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Another DataFrame\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 993\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_frame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 994\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSeries\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 995\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_combine_series\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mother\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mna_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m_combine_frame\u001b[0;34m(self, other, func, fill_value, level)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 3354\u001b[0m dtype=r.dtype)\n\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3356\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3358\u001b[0m \u001b[0;31m# non-unique\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/frame.py\u001b[0m in \u001b[0;36mf\u001b[0;34m(col)\u001b[0m\n\u001b[1;32m 3352\u001b[0m \u001b[0mr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_arith_op\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mthis\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mother\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3353\u001b[0m return self._constructor_sliced(r, index=new_index,\n\u001b[0;32m-> 3354\u001b[0;31m dtype=r.dtype)\n\u001b[0m\u001b[1;32m 3355\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3356\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mcol\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mthis\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, data, index, dtype, name, copy, fastpath)\u001b[0m\n\u001b[1;32m 217\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 218\u001b[0m data = _sanitize_array(data, index, dtype, copy,\n\u001b[0;32m--> 219\u001b[0;31m raise_cast_failure=True)\n\u001b[0m\u001b[1;32m 220\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 221\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSingleBlockManager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfastpath\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/Users/mike/.virtualenvs/ds3/lib/python3.5/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m_sanitize_array\u001b[0;34m(data, index, dtype, copy, raise_cast_failure)\u001b[0m\n\u001b[1;32m 2838\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0msubarr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2839\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndarray\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2840\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Data must be 1-dimensional'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2841\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2842\u001b[0m \u001b[0msubarr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_asarray_tuplesafe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mException\u001b[0m: Data must be 1-dimensional"
]
}
],
"source": [
"gcat['painting'] + gcat['sculpture']"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
|
AttributeError
|
def __init__(
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if index is not None:
index = _ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError(
"initializing a Series from a MultiIndex is not supported"
)
elif isinstance(data, Index):
# need to copy to avoid aliasing issues
if name is None:
name = data.name
data = data._to_embed(keep_tz=True)
copy = True
elif isinstance(data, np.ndarray):
pass
elif isinstance(data, Series):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
if index is None:
if isinstance(data, OrderedDict):
index = Index(data)
else:
index = Index(_try_sort(data))
try:
if isinstance(index, DatetimeIndex):
if len(data):
# coerce back to datetime objects for lookup
data = _dict_compat(data)
data = lib.fast_multiget(
data, index.astype("O"), default=np.nan
)
else:
data = np.nan
# GH #12169
elif isinstance(index, (PeriodIndex, TimedeltaIndex)):
data = [data.get(i, nan) for i in index] if data else np.nan
else:
data = lib.fast_multiget(data, index.values, default=np.nan)
except TypeError:
data = [data.get(i, nan) for i in index] if data else np.nan
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
elif isinstance(data, Categorical):
# GH12574: Allow dtype=category only, otherwise error
if (dtype is not None) and not is_categorical_dtype(dtype):
raise ValueError(
"cannot specify a dtype with a Categorical unless dtype='category'"
)
elif isinstance(data, types.GeneratorType) or (
compat.PY3 and isinstance(data, map)
):
data = list(data)
elif isinstance(data, (set, frozenset)):
raise TypeError("{0!r} type is unordered".format(data.__class__.__name__))
else:
# handle sparse passed here (and force conversion)
if isinstance(data, ABCSparseArray):
data = data.to_dense()
if index is None:
if not is_list_like(data):
data = [data]
index = _default_index(len(data))
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, raise_on_error=False)
elif copy:
data = data.copy()
else:
data = _sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
object.__setattr__(self, "name", name)
self._set_axis(0, index, fastpath=True)
|
def __init__(
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if index is not None:
index = _ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError(
"initializing a Series from a MultiIndex is not supported"
)
elif isinstance(data, Index):
# need to copy to avoid aliasing issues
if name is None:
name = data.name
data = data._to_embed(keep_tz=True)
copy = True
elif isinstance(data, np.ndarray):
pass
elif isinstance(data, Series):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
if index is None:
if isinstance(data, OrderedDict):
index = Index(data)
else:
index = Index(_try_sort(data))
try:
if isinstance(index, DatetimeIndex):
if len(data):
# coerce back to datetime objects for lookup
data = _dict_compat(data)
data = lib.fast_multiget(
data, index.astype("O"), default=np.nan
)
else:
data = np.nan
# GH #12169
elif isinstance(index, (PeriodIndex, TimedeltaIndex)):
data = [data.get(i, nan) for i in index] if data else np.nan
else:
data = lib.fast_multiget(data, index.values, default=np.nan)
except TypeError:
data = [data.get(i, nan) for i in index] if data else np.nan
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
elif isinstance(data, Categorical):
if dtype is not None:
raise ValueError("cannot specify a dtype with a Categorical")
elif isinstance(data, types.GeneratorType) or (
compat.PY3 and isinstance(data, map)
):
data = list(data)
elif isinstance(data, (set, frozenset)):
raise TypeError("{0!r} type is unordered".format(data.__class__.__name__))
else:
# handle sparse passed here (and force conversion)
if isinstance(data, ABCSparseArray):
data = data.to_dense()
if index is None:
if not is_list_like(data):
data = [data]
index = _default_index(len(data))
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, raise_on_error=False)
elif copy:
data = data.copy()
else:
data = _sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
object.__setattr__(self, "name", name)
self._set_axis(0, index, fastpath=True)
|
https://github.com/pandas-dev/pandas/issues/12574
|
In [37]: pd.Series(pd.Categorical([1,2,3]), dtype='category')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-87a1a3228158> in <module>()
----> 1 pd.Series(pd.Categorical([1,2,3]), dtype='category')
ValueError: cannot specify a dtype with a Categorical
|
ValueError
|
def _period_alias_dictionary():
"""
Build freq alias dictionary to support freqs from original c_dates.c file
of the scikits.timeseries library.
"""
alias_dict = {}
M_aliases = ["M", "MTH", "MONTH", "MONTHLY"]
B_aliases = ["B", "BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY"]
D_aliases = ["D", "DAY", "DLY", "DAILY"]
H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"]
T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"]
S_aliases = ["S", "SEC", "SECOND", "SECONDLY"]
L_aliases = ["L", "ms", "MILLISECOND", "MILLISECONDLY"]
U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"]
N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"]
for k in M_aliases:
alias_dict[k] = "M"
for k in B_aliases:
alias_dict[k] = "B"
for k in D_aliases:
alias_dict[k] = "D"
for k in H_aliases:
alias_dict[k] = "H"
for k in T_aliases:
alias_dict[k] = "T"
for k in S_aliases:
alias_dict[k] = "S"
for k in L_aliases:
alias_dict[k] = "L"
for k in U_aliases:
alias_dict[k] = "U"
for k in N_aliases:
alias_dict[k] = "N"
A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR", "YEARLY"]
Q_prefixes = [
"Q",
"QTR",
"QUARTER",
"QUARTERLY",
"Q-E",
"QTR-E",
"QUARTER-E",
"QUARTERLY-E",
]
month_names = [
["DEC", "DECEMBER"],
["JAN", "JANUARY"],
["FEB", "FEBRUARY"],
["MAR", "MARCH"],
["APR", "APRIL"],
["MAY", "MAY"],
["JUN", "JUNE"],
["JUL", "JULY"],
["AUG", "AUGUST"],
["SEP", "SEPTEMBER"],
["OCT", "OCTOBER"],
["NOV", "NOVEMBER"],
]
seps = ["@", "-"]
for k in A_prefixes:
alias_dict[k] = "A"
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = "A-" + m1
alias_dict[k + sep + m2] = "A-" + m1
for k in Q_prefixes:
alias_dict[k] = "Q"
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = "Q-" + m1
alias_dict[k + sep + m2] = "Q-" + m1
W_prefixes = ["W", "WK", "WEEK", "WEEKLY"]
day_names = [
["SUN", "SUNDAY"],
["MON", "MONDAY"],
["TUE", "TUESDAY"],
["WED", "WEDNESDAY"],
["THU", "THURSDAY"],
["FRI", "FRIDAY"],
["SAT", "SATURDAY"],
]
for k in W_prefixes:
alias_dict[k] = "W"
for d_tup in day_names:
for sep in ["@", "-"]:
d1, d2 = d_tup
alias_dict[k + sep + d1] = "W-" + d1
alias_dict[k + sep + d2] = "W-" + d1
return alias_dict
|
def _period_alias_dictionary():
"""
Build freq alias dictionary to support freqs from original c_dates.c file
of the scikits.timeseries library.
"""
alias_dict = {}
M_aliases = ["M", "MTH", "MONTH", "MONTHLY"]
B_aliases = ["B", "BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY"]
D_aliases = ["D", "DAY", "DLY", "DAILY"]
H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"]
T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"]
S_aliases = ["S", "SEC", "SECOND", "SECONDLY"]
L_aliases = ["L", "ms", "MILLISECOND", "MILLISECONDLY"]
U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"]
N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"]
for k in M_aliases:
alias_dict[k] = "M"
for k in B_aliases:
alias_dict[k] = "B"
for k in D_aliases:
alias_dict[k] = "D"
for k in H_aliases:
alias_dict[k] = "H"
for k in T_aliases:
alias_dict[k] = "Min"
for k in S_aliases:
alias_dict[k] = "S"
for k in L_aliases:
alias_dict[k] = "L"
for k in U_aliases:
alias_dict[k] = "U"
for k in N_aliases:
alias_dict[k] = "N"
A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR", "YEARLY"]
Q_prefixes = [
"Q",
"QTR",
"QUARTER",
"QUARTERLY",
"Q-E",
"QTR-E",
"QUARTER-E",
"QUARTERLY-E",
]
month_names = [
["DEC", "DECEMBER"],
["JAN", "JANUARY"],
["FEB", "FEBRUARY"],
["MAR", "MARCH"],
["APR", "APRIL"],
["MAY", "MAY"],
["JUN", "JUNE"],
["JUL", "JULY"],
["AUG", "AUGUST"],
["SEP", "SEPTEMBER"],
["OCT", "OCTOBER"],
["NOV", "NOVEMBER"],
]
seps = ["@", "-"]
for k in A_prefixes:
alias_dict[k] = "A"
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = "A-" + m1
alias_dict[k + sep + m2] = "A-" + m1
for k in Q_prefixes:
alias_dict[k] = "Q"
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = "Q-" + m1
alias_dict[k + sep + m2] = "Q-" + m1
W_prefixes = ["W", "WK", "WEEK", "WEEKLY"]
day_names = [
["SUN", "SUNDAY"],
["MON", "MONDAY"],
["TUE", "TUESDAY"],
["WED", "WEDNESDAY"],
["THU", "THURSDAY"],
["FRI", "FRIDAY"],
["SAT", "SATURDAY"],
]
for k in W_prefixes:
alias_dict[k] = "W"
for d_tup in day_names:
for sep in ["@", "-"]:
d1, d2 = d_tup
alias_dict[k + sep + d1] = "W-" + d1
alias_dict[k + sep + d2] = "W-" + d1
return alias_dict
|
https://github.com/pandas-dev/pandas/issues/11854
|
from pandas.tseries.frequencies import _period_str_to_code
_period_str_to_code('Min')
8000
_period_str_to_code('T')
8000
_period_str_to_code('minute')
sys:1: FutureWarning: Freq "MINUTE" is deprecated, use "Min" as alternative.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "***/site-packages/pandas/tseries/frequencies.py", line 813, in _period_str_to_code
return _period_code_map[alias]
KeyError: 'Min'
|
KeyError
|
def _parse_excel(
self,
sheetname=0,
header=0,
skiprows=None,
skip_footer=0,
index_col=None,
has_index_names=None,
parse_cols=None,
parse_dates=False,
date_parser=None,
na_values=None,
thousands=None,
convert_float=True,
verbose=False,
squeeze=False,
**kwds,
):
skipfooter = kwds.pop("skipfooter", None)
if skipfooter is not None:
skip_footer = skipfooter
_validate_header_arg(header)
if has_index_names is not None:
warn(
"\nThe has_index_names argument is deprecated; index names "
"will be automatically inferred based on index_col.\n"
"This argmument is still necessary if reading Excel output "
"from 0.16.2 or prior with index names.",
FutureWarning,
stacklevel=3,
)
if "chunksize" in kwds:
raise NotImplementedError("chunksize keyword of read_excel is not implemented")
if parse_dates:
raise NotImplementedError(
"parse_dates keyword of read_excel is not implemented"
)
if date_parser is not None:
raise NotImplementedError(
"date_parser keyword of read_excel is not implemented"
)
import xlrd
from xlrd import (
xldate,
XL_CELL_DATE,
XL_CELL_ERROR,
XL_CELL_BOOLEAN,
XL_CELL_NUMBER,
)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if (not epoch1904 and year == (1899, 12, 31)) or (
epoch1904 and year == (1904, 1, 1)
):
cell_contents = time(
cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond,
)
else:
# Use the xlrd <= 0.9.2 date handling.
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
if dt[0] < MINYEAR:
cell_contents = time(*dt[3:])
else:
cell_contents = datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less suprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
# handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(
zip(sheet.row_values(i), sheet.row_types(i))
):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value, typ))
data.append(row)
if sheet.nrows == 0:
output[asheetname] = DataFrame()
continue
if com.is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None:
if com.is_list_like(header):
header_names = []
for row in header:
if com.is_integer(skiprows):
row += skiprows
data[row] = _fill_mi_header(data[row])
header_name, data[row] = _pop_header_name(data[row], index_col)
header_names.append(header_name)
else:
data[header] = _trim_excel_header(data[header])
if com.is_list_like(index_col):
# forward fill values for MultiIndex index
if not com.is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == "" or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
if com.is_list_like(header) and len(header) > 1:
has_index_names = True
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(
data,
header=header,
index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skip_footer=skip_footer,
squeeze=squeeze,
**kwds,
)
output[asheetname] = parser.read()
if not squeeze or isinstance(output[asheetname], DataFrame):
output[asheetname].columns = output[asheetname].columns.set_names(
header_names
)
except StopIteration:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
|
def _parse_excel(
self,
sheetname=0,
header=0,
skiprows=None,
skip_footer=0,
index_col=None,
has_index_names=None,
parse_cols=None,
parse_dates=False,
date_parser=None,
na_values=None,
thousands=None,
convert_float=True,
verbose=False,
squeeze=False,
**kwds,
):
skipfooter = kwds.pop("skipfooter", None)
if skipfooter is not None:
skip_footer = skipfooter
_validate_header_arg(header)
if has_index_names is not None:
warn(
"\nThe has_index_names argument is deprecated; index names "
"will be automatically inferred based on index_col.\n"
"This argmument is still necessary if reading Excel output "
"from 0.16.2 or prior with index names.",
FutureWarning,
stacklevel=3,
)
if "chunksize" in kwds:
raise NotImplementedError("chunksize keyword of read_excel is not implemented")
if parse_dates:
raise NotImplementedError(
"parse_dates keyword of read_excel is not implemented"
)
if date_parser is not None:
raise NotImplementedError(
"date_parser keyword of read_excel is not implemented"
)
import xlrd
from xlrd import (
xldate,
XL_CELL_DATE,
XL_CELL_ERROR,
XL_CELL_BOOLEAN,
XL_CELL_NUMBER,
)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if (not epoch1904 and year == (1899, 12, 31)) or (
epoch1904 and year == (1904, 1, 1)
):
cell_contents = time(
cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond,
)
else:
# Use the xlrd <= 0.9.2 date handling.
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
if dt[0] < MINYEAR:
cell_contents = time(*dt[3:])
else:
cell_contents = datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less suprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
# handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(
zip(sheet.row_values(i), sheet.row_types(i))
):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value, typ))
data.append(row)
if sheet.nrows == 0:
output[asheetname] = DataFrame()
continue
if com.is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None:
if com.is_list_like(header):
header_names = []
for row in header:
if com.is_integer(skiprows):
row += skiprows
data[row] = _fill_mi_header(data[row])
header_name, data[row] = _pop_header_name(data[row], index_col)
header_names.append(header_name)
else:
data[header] = _trim_excel_header(data[header])
if com.is_list_like(index_col):
# forward fill values for MultiIndex index
if not com.is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == "" or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
if com.is_list_like(header) and len(header) > 1:
has_index_names = True
parser = TextParser(
data,
header=header,
index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skip_footer=skip_footer,
squeeze=squeeze,
**kwds,
)
output[asheetname] = parser.read()
if not squeeze or isinstance(output[asheetname], DataFrame):
output[asheetname].columns = output[asheetname].columns.set_names(
header_names
)
if ret_dict:
return output
else:
return output[asheetname]
|
https://github.com/pandas-dev/pandas/issues/9002
|
df = pd.read_excel(r'\\path_to_Excel_file',
'Sheet1', parse_cols=['MODEL_YEAR'])
returns:
---------------------------------------------------------------------------
StopIteration Traceback (most recent call last)
<ipython-input-29-336e4fd9eea1> in <module>()
4
5 df = pd.read_excel(r'\\path_to_Excel_file',
----> 6 'Sheet1', parse_cols=["MODEL_YEAR"])
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\excel.py in read_excel(io, sheetname, **kwds)
125 engine = kwds.pop('engine', None)
126
--> 127 return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds)
128
129
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\excel.py in parse(self, sheetname, header, skiprows, skip_footer, index_col, parse_cols, parse_dates, date_parser, na_values, thousands, chunksize, convert_float, has_index_names, **kwds)
236 skip_footer=skip_footer,
237 convert_float=convert_float,
--> 238 **kwds)
239
240 def _should_parse(self, i, parse_cols):
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\excel.py in _parse_excel(self, sheetname, header, skiprows, skip_footer, index_col, has_index_names, parse_cols, parse_dates, date_parser, na_values, thousands, chunksize, convert_float, **kwds)
355 skip_footer=skip_footer,
356 chunksize=chunksize,
--> 357 **kwds)
358
359 return parser.read()
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\parsers.py in TextParser(*args, **kwds)
1285 """
1286 kwds['engine'] = 'python'
-> 1287 return TextFileReader(*args, **kwds)
1288
1289
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\parsers.py in __init__(self, f, engine, **kwds)
555 self.options['has_index_names'] = kwds['has_index_names']
556
--> 557 self._make_engine(self.engine)
558
559 def _get_options_with_defaults(self, engine):
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\parsers.py in _make_engine(self, engine)
698 elif engine == 'python-fwf':
699 klass = FixedWidthFieldParser
--> 700 self._engine = klass(self.f, **self.options)
701
702 def _failover_to_python(self):
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\parsers.py in __init__(self, f, **kwds)
1392 # infer column indices from self.usecols if is is specified.
1393 self._col_indices = None
-> 1394 self.columns, self.num_original_columns = self._infer_columns()
1395
1396 # Now self.columns has the set of columns that we will process.
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\parsers.py in _infer_columns(self)
1607 columns = []
1608 for level, hr in enumerate(header):
-> 1609 line = self._buffered_line()
1610
1611 while self.line_pos <= hr:
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\parsers.py in _buffered_line(self)
1734 return self.buf[0]
1735 else:
-> 1736 return self._next_line()
1737
1738 def _empty(self, line):
D:\Miniconda3\envs\notebook\lib\site-packages\pandas\io\parsers.py in _next_line(self)
1758 break
1759 except IndexError:
-> 1760 raise StopIteration
1761 else:
1762 while self.pos in self.skiprows:
StopIteration:
|
IndexError
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_internal_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, list) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = _coerce_to_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if _possibly_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = _possibly_cast_to_datetime(arr, dtype)
if not is_internal_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):
if not isnull(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, list) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = _possibly_convert_platform(data)
subarr = _possibly_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = _infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = _possibly_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index, subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
|
https://github.com/pandas-dev/pandas/issues/12336
|
In [2]: pd.Series([0,0,0], dtype="category")
Out[2]:
0 0
1 0
2 0
dtype: category
Categories (1, int64): [0]
In [3]: pd.Series(0, index=range(3), dtype="category")
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-2c1d7a241071> in <module>()
----> 1 pd.Series(0, index=range(3), dtype="category")
/home/nobackup/repo/pandas/pandas/core/series.pyc in __init__(self, data, index, dtype, name, copy, fastpath)
224 else:
225 data = _sanitize_array(data, index, dtype, copy,
--> 226 raise_cast_failure=True)
227
228 data = SingleBlockManager(data, index, fastpath=True)
/home/nobackup/repo/pandas/pandas/core/series.pyc in _sanitize_array(data, index, dtype, copy, raise_cast_failure)
2961 if len(subarr) != len(index) and len(subarr) == 1:
2962 subarr = create_from_value(subarr[0], index,
-> 2963 subarr.dtype)
2964
2965 elif subarr.ndim > 1:
/home/nobackup/repo/pandas/pandas/core/series.pyc in create_from_value(value, index, dtype)
2929 else:
2930 if not isinstance(dtype, (np.dtype, type(np.dtype))):
-> 2931 dtype = dtype.dtype
2932 subarr = np.empty(len(index), dtype=dtype)
2933 subarr.fill(value)
AttributeError: 'CategoricalDtype' object has no attribute 'dtype'
|
AttributeError
|
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
|
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
|
https://github.com/pandas-dev/pandas/issues/12336
|
In [2]: pd.Series([0,0,0], dtype="category")
Out[2]:
0 0
1 0
2 0
dtype: category
Categories (1, int64): [0]
In [3]: pd.Series(0, index=range(3), dtype="category")
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-2c1d7a241071> in <module>()
----> 1 pd.Series(0, index=range(3), dtype="category")
/home/nobackup/repo/pandas/pandas/core/series.pyc in __init__(self, data, index, dtype, name, copy, fastpath)
224 else:
225 data = _sanitize_array(data, index, dtype, copy,
--> 226 raise_cast_failure=True)
227
228 data = SingleBlockManager(data, index, fastpath=True)
/home/nobackup/repo/pandas/pandas/core/series.pyc in _sanitize_array(data, index, dtype, copy, raise_cast_failure)
2961 if len(subarr) != len(index) and len(subarr) == 1:
2962 subarr = create_from_value(subarr[0], index,
-> 2963 subarr.dtype)
2964
2965 elif subarr.ndim > 1:
/home/nobackup/repo/pandas/pandas/core/series.pyc in create_from_value(value, index, dtype)
2929 else:
2930 if not isinstance(dtype, (np.dtype, type(np.dtype))):
-> 2931 dtype = dtype.dtype
2932 subarr = np.empty(len(index), dtype=dtype)
2933 subarr.fill(value)
AttributeError: 'CategoricalDtype' object has no attribute 'dtype'
|
AttributeError
|
def nunique(self, dropna=True):
"""Returns number of unique elements in the group"""
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, "val.dtype must be object, got %s" % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
isnull = lambda a: a == -1
else:
isnull = com.isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
res = out if ids[0] != -1 else out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids] = out
return Series(res, index=ri, name=self.name)
|
def nunique(self, dropna=True):
"""Returns number of unique elements in the group"""
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, "val.dtype must be object, got %s" % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
isnull = lambda a: a == -1
else:
isnull = com.isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
return Series(
out if ids[0] != -1 else out[1:],
index=self.grouper.result_index,
name=self.name,
)
|
https://github.com/pandas-dev/pandas/issues/12352
|
Traceback (most recent call last):
File "test.py", line 6, in <module>
tmp.groupby(pd.TimeGrouper('D')).ID.nunique()
File "/usr/lib/python3/dist-packages/pandas/core/groupby.py", line 2697, in nunique
name=self.name)
File "/usr/lib/python3/dist-packages/pandas/core/series.py", line 227, in __init__
data = SingleBlockManager(data, index, fastpath=True)
File "/usr/lib/python3/dist-packages/pandas/core/internals.py", line 3736, in __init__
ndim=1, fastpath=True)
File "/usr/lib/python3/dist-packages/pandas/core/internals.py", line 2454, in make_block
placement=placement)
File "/usr/lib/python3/dist-packages/pandas/core/internals.py", line 87, in __init__
len(self.values), len(self.mgr_locs)))
ValueError: Wrong number of items passed 2, placement implies 4
|
ValueError
|
def __init__(
self,
data,
precision=None,
table_styles=None,
uuid=None,
caption=None,
table_attributes=None,
):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indicies.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = pd.options.display.precision
self.precision = precision
self.table_attributes = table_attributes
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if com.is_float(x):
return "{:>.{precision}g}".format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
|
def __init__(
self,
data,
precision=None,
table_styles=None,
uuid=None,
caption=None,
table_attributes=None,
):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indicies.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = pd.options.display.precision
self.precision = precision
self.table_attributes = table_attributes
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
idx_values = self.data.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
row_es = [
{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}
] * n_rlvls
for c in range(len(clabels[0])):
cs = [COL_HEADING_CLASS, "level%s" % r, "col%s" % c]
cs.extend(cell_context.get("col_headings", {}).get(r, {}).get(c, []))
value = clabels[r][c]
row_es.append(
{
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
}
)
head.append(row_es)
if self.data.index.names:
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [COL_HEADING_CLASS, "level%s" % (n_clvls + 1), "col%s" % c]
index_header_row.append(
{"type": "th", "value": name, "class": " ".join(cs)}
)
index_header_row.extend(
[{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}]
* len(clabels[0])
)
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
cs = [ROW_HEADING_CLASS, "level%s" % c, "row%s" % r]
cs.extend(cell_context.get("row_headings", {}).get(r, {}).get(c, []))
row_es = [
{
"type": "th",
"value": rlabels[r][c],
"class": " ".join(cs),
"display_value": rlabels[r][c],
}
for c in range(len(rlabels[r]))
]
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_es.append(
{
"type": "td",
"value": value,
"class": " ".join(cs),
"id": "_".join(cs[1:]),
"display_value": formatter(value),
}
)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(["", ""])
cellstyle.append({"props": props, "selector": "row%s_col%s" % (r, c)})
body.append(row_es)
return dict(
head=head,
cellstyle=cellstyle,
body=body,
uuid=uuid,
precision=precision,
table_styles=table_styles,
caption=caption,
table_attributes=self.table_attributes,
)
|
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
idx_values = self.data.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
row_es = [
{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}
] * n_rlvls
for c in range(len(clabels[0])):
cs = [COL_HEADING_CLASS, "level%s" % r, "col%s" % c]
cs.extend(cell_context.get("col_headings", {}).get(r, {}).get(c, []))
row_es.append({"type": "th", "value": clabels[r][c], "class": " ".join(cs)})
head.append(row_es)
if self.data.index.names:
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [COL_HEADING_CLASS, "level%s" % (n_clvls + 1), "col%s" % c]
index_header_row.append(
{"type": "th", "value": name, "class": " ".join(cs)}
)
index_header_row.extend(
[{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}]
* len(clabels[0])
)
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
cs = [ROW_HEADING_CLASS, "level%s" % c, "row%s" % r]
cs.extend(cell_context.get("row_headings", {}).get(r, {}).get(c, []))
row_es = [
{"type": "th", "value": rlabels[r][c], "class": " ".join(cs)}
for c in range(len(rlabels[r]))
]
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
row_es.append(
{
"type": "td",
"value": self.data.iloc[r][c],
"class": " ".join(cs),
"id": "_".join(cs[1:]),
}
)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(["", ""])
cellstyle.append({"props": props, "selector": "row%s_col%s" % (r, c)})
body.append(row_es)
return dict(
head=head,
cellstyle=cellstyle,
body=body,
uuid=uuid,
precision=precision,
table_styles=table_styles,
caption=caption,
table_attributes=self.table_attributes,
)
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wase,
updating the HTML representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func: function
axis: int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``.
subset: IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs: dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
"""
self._todo.append(
(lambda instance: getattr(instance, "_apply"), (func, axis, subset), kwargs)
)
return self
|
def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wase,
updating the HTML representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func: function
axis: int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``.
subset: IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs: dict
pass along to ``func``
Returns
-------
self
Notes
-----
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
"""
self._todo.append(
(lambda instance: getattr(instance, "_apply"), (func, axis, subset), kwargs)
)
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func : function
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
"""
self._todo.append(
(lambda instance: getattr(instance, "_applymap"), (func, subset), kwargs)
)
return self
|
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func : function
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self
"""
self._todo.append(
(lambda instance: getattr(instance, "_applymap"), (func, subset), kwargs)
)
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def set_precision(self, precision):
"""
Set the precision used to render.
.. versionadded:: 0.17.1
Parameters
----------
precision: int
Returns
-------
self : Styler
"""
self.precision = precision
return self
|
def set_precision(self, precision):
"""
Set the precision used to render.
.. versionadded:: 0.17.1
Parameters
----------
precision: int
Returns
-------
self
"""
self.precision = precision
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def set_table_attributes(self, attributes):
"""
Set the table attributes. These are the items
that show up in the opening ``<table>`` tag in addition
to to automatic (by default) id.
.. versionadded:: 0.17.1
Parameters
----------
precision: int
Returns
-------
self : Styler
"""
self.table_attributes = attributes
return self
|
def set_table_attributes(self, attributes):
"""
Set the table attributes. These are the items
that show up in the opening ``<table>`` tag in addition
to to automatic (by default) id.
.. versionadded:: 0.17.1
Parameters
----------
precision: int
Returns
-------
self
"""
self.table_attributes = attributes
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
.. versionadded:: 0.17.1
Parameters
----------
styles: list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
|
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
.. versionadded:: 0.17.1
Parameters
----------
styles: list
list of style functions
Returns
-------
self
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
.. versionadded:: 0.17.1
Parameters
----------
uuid: str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
|
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
.. versionadded:: 0.17.1
Parameters
----------
uuid: str
Returns
-------
self
"""
self.uuid = uuid
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def set_caption(self, caption):
"""
Se the caption on a Styler
.. versionadded:: 0.17.1
Parameters
----------
caption: str
Returns
-------
self : Styler
"""
self.caption = caption
return self
|
def set_caption(self, caption):
"""
Se the caption on a Styler
.. versionadded:: 0.17.1
Parameters
----------
caption: str
Returns
-------
self
"""
self.caption = caption
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler. These are placed in a
``<style>`` tag before the generated HTML table.
.. versionadded:: 0.17.1
Parameters
----------
table_styles: list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
|
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler. These are placed in a
``<style>`` tag before the generated HTML table.
.. versionadded:: 0.17.1
Parameters
----------
table_styles: list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def highlight_null(self, null_color="red"):
"""
Shade the background ``null_color`` for missing values.
.. versionadded:: 0.17.1
Parameters
----------
null_color: str
Returns
-------
self : Styler
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
|
def highlight_null(self, null_color="red"):
"""
Shade the background ``null_color`` for missing values.
.. versionadded:: 0.17.1
Parameters
----------
null_color: str
Returns
-------
self
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def background_gradient(self, cmap="PuBu", low=0, high=0, axis=0, subset=None):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
.. versionadded:: 0.17.1
Parameters
----------
cmap: str or colormap
matplotlib colormap
low, high: float
compress the range by these values.
axis: int or str
1 or 'columns' for colunwise, 0 or 'index' for rowwise
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
Returns
-------
self : Styler
Notes
-----
Tune ``low`` and ``high`` to keep the text legible by
not using the entire range of the color map. These extend
the range of the data by ``low * (x.max() - x.min())``
and ``high * (x.max() - x.min())`` before normalizing.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(
self._background_gradient,
cmap=cmap,
subset=subset,
axis=axis,
low=low,
high=high,
)
return self
|
def background_gradient(self, cmap="PuBu", low=0, high=0, axis=0, subset=None):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
.. versionadded:: 0.17.1
Parameters
----------
cmap: str or colormap
matplotlib colormap
low, high: float
compress the range by these values.
axis: int or str
1 or 'columns' for colunwise, 0 or 'index' for rowwise
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
Returns
-------
self
Notes
-----
Tune ``low`` and ``high`` to keep the text legible by
not using the entire range of the color map. These extend
the range of the data by ``low * (x.max() - x.min())``
and ``high * (x.max() - x.min())`` before normalizing.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(
self._background_gradient,
cmap=cmap,
subset=subset,
axis=axis,
low=low,
high=high,
)
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def bar(self, subset=None, axis=0, color="#d65f5f", width=100):
"""
Color the background ``color`` proptional to the values in each column.
Excludes non-numeric data by default.
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
axis: int
color: str
width: float
A number between 0 or 100. The largest value will cover ``width``
percent of the cell's width
Returns
-------
self : Styler
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis, color=color, width=width)
return self
|
def bar(self, subset=None, axis=0, color="#d65f5f", width=100):
"""
Color the background ``color`` proptional to the values in each column.
Excludes non-numeric data by default.
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
axis: int
color: str
width: float
A number between 0 or 100. The largest value will cover ``width``
percent of the cell's width
Returns
-------
self
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis, color=color, width=width)
return self
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def highlight_max(self, subset=None, color="yellow", axis=0):
"""
Highlight the maximum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default None
0 or 'index' for columnwise, 1 or 'columns' for rowwise
or ``None`` for tablewise (the default)
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
|
def highlight_max(self, subset=None, color="yellow", axis=0):
"""
Highlight the maximum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default None
0 or 'index' for columnwise, 1 or 'columns' for rowwise
or ``None`` for tablewise (the default)
Returns
-------
self
"""
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def highlight_min(self, subset=None, color="yellow", axis=0):
"""
Highlight the minimum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default None
0 or 'index' for columnwise, 1 or 'columns' for rowwise
or ``None`` for tablewise (the default)
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=False)
|
def highlight_min(self, subset=None, color="yellow", axis=0):
"""
Highlight the minimum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default None
0 or 'index' for columnwise, 1 or 'columns' for rowwise
or ``None`` for tablewise (the default)
Returns
-------
self
"""
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=False)
|
https://github.com/pandas-dev/pandas/issues/12125
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 method = _safe_get_formatter_method(obj, self.print_method)
340 if method is not None:
--> 341 return method()
342 return None
343 else:
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _repr_html_(self)
158 Hooks into Jupyter notebook rich display system.
159 '''
--> 160 return self.render()
161
162 def _translate(self):
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in render(self)
259 """
260 self._compute()
--> 261 d = self._translate()
262 # filter out empty styles, every cell will have a class
263 # but the list of props may just be [['', '']].
/usr/local/lib/python3.5/site-packages/pandas/core/style.py in _translate(self)
220 cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
221 cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
--> 222 row_es.append({"type": "td", "value": self.data.iloc[r][c],
223 "class": " ".join(cs), "id": "_".join(cs[1:])})
224 props = []
/usr/local/lib/python3.5/site-packages/pandas/core/series.py in __getitem__(self, key)
555 def __getitem__(self, key):
556 try:
--> 557 result = self.index.get_value(self, key)
558
559 if not np.isscalar(result):
/usr/local/lib/python3.5/site-packages/pandas/core/index.py in get_value(self, series, key)
1788
1789 try:
-> 1790 return self._engine.get_value(s, k)
1791 except KeyError as e1:
1792 if len(self) > 0 and self.inferred_type in ['integer','boolean']:
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:3204)()
pandas/index.pyx in pandas.index.IndexEngine.get_value (pandas/index.c:2903)()
pandas/index.pyx in pandas.index.IndexEngine.get_loc (pandas/index.c:3843)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6525)()
pandas/hashtable.pyx in pandas.hashtable.Int64HashTable.get_item (pandas/hashtable.c:6463)()
KeyError: 0
|
KeyError
|
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if reduce and axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_internal_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis), dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy, labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1) for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (
Series.from_array(arr, index=res_columns, name=name, dtype=dtype)
for i, (arr, name) in enumerate(zip(values, res_index))
)
else: # pragma : no cover
raise AssertionError("Axis must be 0 or 1, got %s" % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, "args"):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ("occurred at index %s" % com.pprint_thing(k),)
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
|
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if reduce and axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis), dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy, labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1) for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (
Series.from_array(arr, index=res_columns, name=name, dtype=dtype)
for i, (arr, name) in enumerate(zip(values, res_index))
)
else: # pragma : no cover
raise AssertionError("Axis must be 0 or 1, got %s" % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, "args"):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ("occurred at index %s" % com.pprint_thing(k),)
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
|
https://github.com/pandas-dev/pandas/issues/12244
|
In [29]: df = pd.DataFrame({'dt': pd.date_range("2015-01-01", periods=3, tz='Europe/Brussels')})
In [30]: df
Out[30]:
dt
0 2015-01-01 00:00:00+01:00
1 2015-01-02 00:00:00+01:00
2 2015-01-03 00:00:00+01:00
In [31]: df.apply(lambda x: x)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-31-9cd68f0fd3ff> in <module>()
----> 1 df.apply(lambda x: x)
c:\users\vdbosscj\scipy\pandas-joris\pandas\core\frame.py in apply(self, func, a
xis, broadcast, raw, reduce, args, **kwds)
4029 if reduce is None:
4030 reduce = True
-> 4031 return self._apply_standard(f, axis, reduce=reduce)
4032 else:
4033 return self._apply_broadcast(f, axis)
c:\users\vdbosscj\scipy\pandas-joris\pandas\core\frame.py in _apply_standard(sel
f, func, axis, ignore_failures, reduce)
4076 # Create a dummy Series from an empty array
4077 index = self._get_axis(axis)
-> 4078 empty_arr = np.empty(len(index), dtype=values.dtype)
4079 dummy = Series(empty_arr, index=self._get_axis(axis),
4080 dtype=values.dtype)
TypeError: data type not understood
|
TypeError
|
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
attribs = self._get_attributes_dict()
freq = attribs["freq"]
if freq is not None and not isinstance(self, com.ABCPeriodIndex):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs["freq"] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
|
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
attribs = self._get_attributes_dict()
freq = attribs["freq"]
from pandas.tseries.period import PeriodIndex
if freq is not None and not isinstance(self, PeriodIndex):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs["freq"] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
|
https://github.com/pandas-dev/pandas/issues/11718
|
In [1]: import pandas as pd
In [2]: pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
Traceback (most recent call last):
File "<ipython-input-2-5e0738cec5fa>", line 1, in <module>
pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
File "pandas\tslib.pyx", line 1099, in pandas.tslib._NaT.__sub__ (pandas\tslib.c:21618)
File "pandas\tslib.pyx", line 1026, in pandas.tslib._Timestamp.__sub__ (pandas\tslib.c:20036)
TypeError: Timestamp subtraction must have the same timezones or no timezones
|
TypeError
|
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
other = Timestamp(other)
if other is tslib.NaT:
result = self._nat_new(box=False)
# require tz compat
elif not self._has_same_tz(other):
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
else:
i8 = self.asi8
result = i8 - other.value
result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
return TimedeltaIndex(result, name=self.name, copy=False)
|
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
other = Timestamp(other)
# require tz compat
if not self._has_same_tz(other):
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = i8 - other.value
result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
return TimedeltaIndex(result, name=self.name, copy=False)
|
https://github.com/pandas-dev/pandas/issues/11718
|
In [1]: import pandas as pd
In [2]: pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
Traceback (most recent call last):
File "<ipython-input-2-5e0738cec5fa>", line 1, in <module>
pd.Timestamp(None, tz='utc') - pd.Timestamp('now', tz='utc')
File "pandas\tslib.pyx", line 1099, in pandas.tslib._NaT.__sub__ (pandas\tslib.c:21618)
File "pandas\tslib.pyx", line 1026, in pandas.tslib._Timestamp.__sub__ (pandas\tslib.c:20036)
TypeError: Timestamp subtraction must have the same timezones or no timezones
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.