after_merge
stringlengths
28
79.6k
before_merge
stringlengths
20
79.6k
url
stringlengths
38
71
full_traceback
stringlengths
43
922k
traceback_type
stringclasses
555 values
def _reindex_non_unique(self, target): """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index. """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer)) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values cur_indexer = ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # a unique indexer if target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer)) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer))) new_indexer[~check] = -1 if isinstance(self, ABCMultiIndex): new_index = type(self).from_tuples(new_labels, names=self.names) else: new_index = Index(new_labels, name=self.name) return new_index, indexer, new_indexer
def _reindex_non_unique(self, target): """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray or None Indices of output values in original index. """ target = ensure_index(target) indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer)) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values cur_indexer = ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # a unique indexer if target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer)) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer))) new_indexer[~check] = -1 if isinstance(self, ABCMultiIndex): new_index = type(self).from_tuples(new_labels, names=self.names) else: new_index = Index(new_labels, name=self.name) return new_index, indexer, new_indexer
https://github.com/pandas-dev/pandas/issues/13691
In [2]: s = pd.Series(0, index=pd.MultiIndex.from_product([[0], [1,2]])) In [3]: s.loc[[]] Out[3]: Series([], dtype: int64) In [4]: s = pd.Series(0, index=pd.MultiIndex.from_product([[0], [1,1]])) In [5]: s.loc[[]] --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-f9b6211189ca> in <module>() ----> 1 s.loc[[]] /home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key) 1304 return self._getitem_tuple(key) 1305 else: -> 1306 return self._getitem_axis(key, axis=0) 1307 1308 def _getitem_axis(self, key, axis=0): /home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis) 1464 raise ValueError('Cannot index with multidimensional key') 1465 -> 1466 return self._getitem_iterable(key, axis=axis) 1467 1468 # nested tuple slicing /home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_iterable(self, key, axis) 1095 1096 new_target, indexer, new_indexer = labels._reindex_non_unique( -> 1097 keyarr) 1098 1099 if new_indexer is not None: /home/pietro/nobackup/repo/pandas/pandas/indexes/base.py in _reindex_non_unique(self, target) 2497 new_indexer[~check] = -1 2498 -> 2499 new_index = self._shallow_copy_with_infer(new_labels, freq=None) 2500 return new_index, indexer, new_indexer 2501 /home/pietro/nobackup/repo/pandas/pandas/indexes/multi.py in _shallow_copy_with_infer(self, values, **kwargs) 393 394 def _shallow_copy_with_infer(self, values=None, **kwargs): --> 395 return self._shallow_copy(values, **kwargs) 396 397 @Appender(_index_shared_docs['_shallow_copy']) /home/pietro/nobackup/repo/pandas/pandas/indexes/multi.py in _shallow_copy(self, values, **kwargs) 402 # discards freq 403 kwargs.pop('freq', None) --> 404 return MultiIndex.from_tuples(values, **kwargs) 405 return self.view() 406 /home/pietro/nobackup/repo/pandas/pandas/indexes/multi.py in from_tuples(cls, tuples, sortorder, names) 889 if len(tuples) == 0: 890 # I think this is right? Not quite sure... --> 891 raise TypeError('Cannot infer number of levels from empty list') 892 893 if isinstance(tuples, (np.ndarray, Index)): TypeError: Cannot infer number of levels from empty list
TypeError
def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None): # TODO: we only have one test that gets here, the only EA # that gets passed is DatetimeArray, and we never have # both self._filters and EA assert isinstance(value, (np.ndarray, ABCExtensionArray)), type(value) if key in self.group: self._handle.remove_node(self.group, key) # Transform needed to interface with pytables row/col notation empty_array = value.size == 0 transposed = False if is_categorical_dtype(value.dtype): raise NotImplementedError( "Cannot store a category dtype in a HDF5 dataset that uses format=" '"fixed". Use format="table".' ) if not empty_array: if hasattr(value, "T"): # ExtensionArrays (1d) may not have transpose. value = value.T transposed = True atom = None if self._filters is not None: with suppress(ValueError): # get the atom for this datatype atom = _tables().Atom.from_dtype(value.dtype) if atom is not None: # We only get here if self._filters is non-None and # the Atom.from_dtype call succeeded # create an empty chunked array and fill it from value if not empty_array: ca = self._handle.create_carray( self.group, key, atom, value.shape, filters=self._filters ) ca[:] = value else: self.write_array_empty(key, value) elif value.dtype.type == np.object_: # infer the type, warn if we have a non-string type here (for # performance) inferred_type = lib.infer_dtype(value, skipna=False) if empty_array: pass elif inferred_type == "string": pass else: ws = performance_doc % (inferred_type, key, items) warnings.warn(ws, PerformanceWarning, stacklevel=7) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) elif is_datetime64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "datetime64" elif is_datetime64tz_dtype(value.dtype): # store as UTC # with a zone self._handle.create_array(self.group, key, value.asi8) node = getattr(self.group, key) node._v_attrs.tz = _get_tz(value.tz) node._v_attrs.value_type = "datetime64" elif is_timedelta64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "timedelta64" elif empty_array: self.write_array_empty(key, value) else: self._handle.create_array(self.group, key, value) getattr(self.group, key)._v_attrs.transposed = transposed
def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None): # TODO: we only have one test that gets here, the only EA # that gets passed is DatetimeArray, and we never have # both self._filters and EA assert isinstance(value, (np.ndarray, ABCExtensionArray)), type(value) if key in self.group: self._handle.remove_node(self.group, key) # Transform needed to interface with pytables row/col notation empty_array = value.size == 0 transposed = False if is_categorical_dtype(value.dtype): raise NotImplementedError( "Cannot store a category dtype in a HDF5 dataset that uses format=" '"fixed". Use format="table".' ) if not empty_array: if hasattr(value, "T"): # ExtensionArrays (1d) may not have transpose. value = value.T transposed = True atom = None if self._filters is not None: with suppress(ValueError): # get the atom for this datatype atom = _tables().Atom.from_dtype(value.dtype) if atom is not None: # We only get here if self._filters is non-None and # the Atom.from_dtype call succeeded # create an empty chunked array and fill it from value if not empty_array: ca = self._handle.create_carray( self.group, key, atom, value.shape, filters=self._filters ) ca[:] = value else: self.write_array_empty(key, value) elif value.dtype.type == np.object_: # infer the type, warn if we have a non-string type here (for # performance) inferred_type = lib.infer_dtype(value, skipna=False) if empty_array: pass elif inferred_type == "string": pass else: ws = performance_doc % (inferred_type, key, items) warnings.warn(ws, PerformanceWarning, stacklevel=7) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) elif empty_array: self.write_array_empty(key, value) elif is_datetime64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "datetime64" elif is_datetime64tz_dtype(value.dtype): # store as UTC # with a zone self._handle.create_array(self.group, key, value.asi8) node = getattr(self.group, key) node._v_attrs.tz = _get_tz(value.tz) node._v_attrs.value_type = "datetime64" elif is_timedelta64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "timedelta64" else: self._handle.create_array(self.group, key, value) getattr(self.group, key)._v_attrs.transposed = transposed
https://github.com/pandas-dev/pandas/issues/20594
In [1]: import pandas as pd ...: ...: def check_roundtrip(obj): ...: with pd.HDFStore('test.h5', 'w') as store: ...: store['obj'] = obj ...: retrieved = store['obj'] ...: return obj.equals(retrieved) ...: ...: s = pd.Series([], dtype='datetime64[ns, UTC]') ...: t = pd.Series([0], dtype='datetime64[ns, UTC]') ...: df = pd.DataFrame({'A': s}) In [2]: print(check_roundtrip(s)) False In [3]: print(check_roundtrip(t)) False In [4]: print(check_roundtrip(df)) ------------------------------------------------------------------------------------------------------------------------------------ TypeError Traceback (most recent call last) <ipython-input-4-57f1e9853bf6> in <module>() ----> 1 print(check_roundtrip(df)) <ipython-input-1-1d7fb84453e9> in check_roundtrip(obj) 4 with pd.HDFStore('test.h5', 'w') as store: 5 store['obj'] = obj ----> 6 retrieved = store['obj'] 7 return obj.equals(retrieved) 8 /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in __getitem__(self, key) 481 482 def __getitem__(self, key): --> 483 return self.get(key) 484 485 def __setitem__(self, key, value): /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in get(self, key) 669 if group is None: 670 raise KeyError('No object named %s in the file' % key) --> 671 return self._read_group(group) 672 673 def select(self, key, where=None, start=None, stop=None, columns=None, /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in _read_group(self, group, **kwargs) 1347 s = self._create_storer(group) 1348 s.infer_axes() -> 1349 return s.read(**kwargs) 1350 1351 /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in read(self, start, stop, **kwargs) 2902 blk_items = self.read_index('block%d_items' % i) 2903 values = self.read_array('block%d_values' % i, -> 2904 start=_start, stop=_stop) 2905 blk = make_block(values, 2906 placement=items.get_indexer(blk_items)) /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in read_array(self, key, start, stop) 2464 if shape is not None: 2465 # length 0 axis -> 2466 ret = np.empty(shape, dtype=dtype) 2467 else: 2468 ret = node[start:stop] TypeError: Invalid datetime unit in metadata string "[ns, UTC]"
TypeError
def write_array(self, key: str, obj: FrameOrSeries, items: Optional[Index] = None): # TODO: we only have a few tests that get here, the only EA # that gets passed is DatetimeArray, and we never have # both self._filters and EA value = extract_array(obj, extract_numpy=True) if key in self.group: self._handle.remove_node(self.group, key) # Transform needed to interface with pytables row/col notation empty_array = value.size == 0 transposed = False if is_categorical_dtype(value.dtype): raise NotImplementedError( "Cannot store a category dtype in a HDF5 dataset that uses format=" '"fixed". Use format="table".' ) if not empty_array: if hasattr(value, "T"): # ExtensionArrays (1d) may not have transpose. value = value.T transposed = True atom = None if self._filters is not None: with suppress(ValueError): # get the atom for this datatype atom = _tables().Atom.from_dtype(value.dtype) if atom is not None: # We only get here if self._filters is non-None and # the Atom.from_dtype call succeeded # create an empty chunked array and fill it from value if not empty_array: ca = self._handle.create_carray( self.group, key, atom, value.shape, filters=self._filters ) ca[:] = value else: self.write_array_empty(key, value) elif value.dtype.type == np.object_: # infer the type, warn if we have a non-string type here (for # performance) inferred_type = lib.infer_dtype(value, skipna=False) if empty_array: pass elif inferred_type == "string": pass else: ws = performance_doc % (inferred_type, key, items) warnings.warn(ws, PerformanceWarning, stacklevel=7) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) elif is_datetime64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "datetime64" elif is_datetime64tz_dtype(value.dtype): # store as UTC # with a zone self._handle.create_array(self.group, key, value.asi8) node = getattr(self.group, key) node._v_attrs.tz = _get_tz(value.tz) node._v_attrs.value_type = "datetime64" elif is_timedelta64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "timedelta64" elif empty_array: self.write_array_empty(key, value) else: self._handle.create_array(self.group, key, value) getattr(self.group, key)._v_attrs.transposed = transposed
def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None): # TODO: we only have one test that gets here, the only EA # that gets passed is DatetimeArray, and we never have # both self._filters and EA assert isinstance(value, (np.ndarray, ABCExtensionArray)), type(value) if key in self.group: self._handle.remove_node(self.group, key) # Transform needed to interface with pytables row/col notation empty_array = value.size == 0 transposed = False if is_categorical_dtype(value.dtype): raise NotImplementedError( "Cannot store a category dtype in a HDF5 dataset that uses format=" '"fixed". Use format="table".' ) if not empty_array: if hasattr(value, "T"): # ExtensionArrays (1d) may not have transpose. value = value.T transposed = True atom = None if self._filters is not None: with suppress(ValueError): # get the atom for this datatype atom = _tables().Atom.from_dtype(value.dtype) if atom is not None: # We only get here if self._filters is non-None and # the Atom.from_dtype call succeeded # create an empty chunked array and fill it from value if not empty_array: ca = self._handle.create_carray( self.group, key, atom, value.shape, filters=self._filters ) ca[:] = value else: self.write_array_empty(key, value) elif value.dtype.type == np.object_: # infer the type, warn if we have a non-string type here (for # performance) inferred_type = lib.infer_dtype(value, skipna=False) if empty_array: pass elif inferred_type == "string": pass else: ws = performance_doc % (inferred_type, key, items) warnings.warn(ws, PerformanceWarning, stacklevel=7) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) elif is_datetime64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "datetime64" elif is_datetime64tz_dtype(value.dtype): # store as UTC # with a zone self._handle.create_array(self.group, key, value.asi8) node = getattr(self.group, key) node._v_attrs.tz = _get_tz(value.tz) node._v_attrs.value_type = "datetime64" elif is_timedelta64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "timedelta64" elif empty_array: self.write_array_empty(key, value) else: self._handle.create_array(self.group, key, value) getattr(self.group, key)._v_attrs.transposed = transposed
https://github.com/pandas-dev/pandas/issues/20594
In [1]: import pandas as pd ...: ...: def check_roundtrip(obj): ...: with pd.HDFStore('test.h5', 'w') as store: ...: store['obj'] = obj ...: retrieved = store['obj'] ...: return obj.equals(retrieved) ...: ...: s = pd.Series([], dtype='datetime64[ns, UTC]') ...: t = pd.Series([0], dtype='datetime64[ns, UTC]') ...: df = pd.DataFrame({'A': s}) In [2]: print(check_roundtrip(s)) False In [3]: print(check_roundtrip(t)) False In [4]: print(check_roundtrip(df)) ------------------------------------------------------------------------------------------------------------------------------------ TypeError Traceback (most recent call last) <ipython-input-4-57f1e9853bf6> in <module>() ----> 1 print(check_roundtrip(df)) <ipython-input-1-1d7fb84453e9> in check_roundtrip(obj) 4 with pd.HDFStore('test.h5', 'w') as store: 5 store['obj'] = obj ----> 6 retrieved = store['obj'] 7 return obj.equals(retrieved) 8 /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in __getitem__(self, key) 481 482 def __getitem__(self, key): --> 483 return self.get(key) 484 485 def __setitem__(self, key, value): /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in get(self, key) 669 if group is None: 670 raise KeyError('No object named %s in the file' % key) --> 671 return self._read_group(group) 672 673 def select(self, key, where=None, start=None, stop=None, columns=None, /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in _read_group(self, group, **kwargs) 1347 s = self._create_storer(group) 1348 s.infer_axes() -> 1349 return s.read(**kwargs) 1350 1351 /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in read(self, start, stop, **kwargs) 2902 blk_items = self.read_index('block%d_items' % i) 2903 values = self.read_array('block%d_values' % i, -> 2904 start=_start, stop=_stop) 2905 blk = make_block(values, 2906 placement=items.get_indexer(blk_items)) /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in read_array(self, key, start, stop) 2464 if shape is not None: 2465 # length 0 axis -> 2466 ret = np.empty(shape, dtype=dtype) 2467 else: 2468 ret = node[start:stop] TypeError: Invalid datetime unit in metadata string "[ns, UTC]"
TypeError
def write(self, obj, **kwargs): super().write(obj, **kwargs) self.write_index("index", obj.index) self.write_array("values", obj) self.attrs.name = obj.name
def write(self, obj, **kwargs): super().write(obj, **kwargs) self.write_index("index", obj.index) self.write_array("values", obj.values) self.attrs.name = obj.name
https://github.com/pandas-dev/pandas/issues/20594
In [1]: import pandas as pd ...: ...: def check_roundtrip(obj): ...: with pd.HDFStore('test.h5', 'w') as store: ...: store['obj'] = obj ...: retrieved = store['obj'] ...: return obj.equals(retrieved) ...: ...: s = pd.Series([], dtype='datetime64[ns, UTC]') ...: t = pd.Series([0], dtype='datetime64[ns, UTC]') ...: df = pd.DataFrame({'A': s}) In [2]: print(check_roundtrip(s)) False In [3]: print(check_roundtrip(t)) False In [4]: print(check_roundtrip(df)) ------------------------------------------------------------------------------------------------------------------------------------ TypeError Traceback (most recent call last) <ipython-input-4-57f1e9853bf6> in <module>() ----> 1 print(check_roundtrip(df)) <ipython-input-1-1d7fb84453e9> in check_roundtrip(obj) 4 with pd.HDFStore('test.h5', 'w') as store: 5 store['obj'] = obj ----> 6 retrieved = store['obj'] 7 return obj.equals(retrieved) 8 /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in __getitem__(self, key) 481 482 def __getitem__(self, key): --> 483 return self.get(key) 484 485 def __setitem__(self, key, value): /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in get(self, key) 669 if group is None: 670 raise KeyError('No object named %s in the file' % key) --> 671 return self._read_group(group) 672 673 def select(self, key, where=None, start=None, stop=None, columns=None, /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in _read_group(self, group, **kwargs) 1347 s = self._create_storer(group) 1348 s.infer_axes() -> 1349 return s.read(**kwargs) 1350 1351 /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in read(self, start, stop, **kwargs) 2902 blk_items = self.read_index('block%d_items' % i) 2903 values = self.read_array('block%d_values' % i, -> 2904 start=_start, stop=_stop) 2905 blk = make_block(values, 2906 placement=items.get_indexer(blk_items)) /home/ashieh/.local/lib/python2.7/site-packages/pandas/io/pytables.pyc in read_array(self, key, start, stop) 2464 if shape is not None: 2465 # length 0 axis -> 2466 ret = np.empty(shape, dtype=dtype) 2467 else: 2468 ret = node[start:stop] TypeError: Invalid datetime unit in metadata string "[ns, UTC]"
TypeError
def maybe_casted_values(index, codes=None): """ Convert an index, given directly or as a pair (level, code), to a 1D array. Parameters ---------- index : Index codes : sequence of integers (optional) Returns ------- ExtensionArray or ndarray If codes is `None`, the values of `index`. If codes is passed, an array obtained by taking from `index` the indices contained in `codes`. """ values = index._values if not isinstance(index, (ABCPeriodIndex, ABCDatetimeIndex)): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the codes, extract the values with a mask if codes is not None: mask = codes == -1 # we can have situations where the whole mask is -1, # meaning there is nothing found in codes, so make all nan's if mask.size > 0 and mask.all(): dtype = index.dtype fill_value = na_value_for_dtype(dtype) values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype) else: values = values.take(codes) # TODO(https://github.com/pandas-dev/pandas/issues/24206) # Push this into maybe_upcast_putmask? # We can't pass EAs there right now. Looks a bit # complicated. # So we unbox the ndarray_values, op, re-box. values_type = type(values) values_dtype = values.dtype from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin if isinstance(values, DatetimeLikeArrayMixin): values = values._data # TODO: can we de-kludge yet? if mask.any(): if isinstance(values, np.ndarray): values, _ = maybe_upcast_putmask(values, mask, np.nan) else: values[mask] = np.nan if issubclass(values_type, DatetimeLikeArrayMixin): values = values_type(values, dtype=values_dtype) return values
def maybe_casted_values(index, codes=None): """ Convert an index, given directly or as a pair (level, code), to a 1D array. Parameters ---------- index : Index codes : sequence of integers (optional) Returns ------- ExtensionArray or ndarray If codes is `None`, the values of `index`. If codes is passed, an array obtained by taking from `index` the indices contained in `codes`. """ values = index._values if not isinstance(index, (ABCPeriodIndex, ABCDatetimeIndex)): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the codes, extract the values with a mask if codes is not None: mask = codes == -1 # we can have situations where the whole mask is -1, # meaning there is nothing found in codes, so make all nan's if mask.size > 0 and mask.all(): dtype = index.dtype fill_value = na_value_for_dtype(dtype) values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype) else: values = values.take(codes) # TODO(https://github.com/pandas-dev/pandas/issues/24206) # Push this into maybe_upcast_putmask? # We can't pass EAs there right now. Looks a bit # complicated. # So we unbox the ndarray_values, op, re-box. values_type = type(values) values_dtype = values.dtype from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin if isinstance(values, DatetimeLikeArrayMixin): values = values._data # TODO: can we de-kludge yet? if mask.any(): values, _ = maybe_upcast_putmask(values, mask, np.nan) if issubclass(values_type, DatetimeLikeArrayMixin): values = values_type(values, dtype=values_dtype) return values
https://github.com/pandas-dev/pandas/issues/24206
In [29]: idx = pd.MultiIndex([pd.CategoricalIndex(['A', 'B']), pd.CategoricalIndex(['a', 'b'])], [[0, 0, 1, 1], [0, 1, 0, -1]]) In [30]: df = pd.DataFrame({'col': range(len(idx))}, index=idx) In [31]: df Out[31]: col A a 0 b 1 B a 2 NaN 3 In [32]: df.reset_index() /home/joris/miniconda3/lib/python3.5/site-packages/pandas/core/frame.py:4091: FutureWarning: Interpreting negative values in 'indexer' as missing values. In the future, this will change to meaning positional indicies from the right. Use 'allow_fill=True' to retain the previous behavior and silence this warning. Use 'allow_fill=False' to accept the new behavior. values = values.take(labels) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/miniconda3/lib/python3.5/site-packages/pandas/core/dtypes/cast.py in maybe_upcast_putmask(result, mask, other) 249 try: --> 250 np.place(result, mask, other) 251 except Exception: ~/miniconda3/lib/python3.5/site-packages/numpy/lib/function_base.py in place(arr, mask, vals) 2371 raise TypeError("argument 1 must be numpy.ndarray, " -> 2372 "not {name}".format(name=type(arr).__name__)) 2373 TypeError: argument 1 must be numpy.ndarray, not Categorical During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-32-6983677cc901> in <module>() ----> 1 df.reset_index() ~/miniconda3/lib/python3.5/site-packages/pandas/core/frame.py in reset_index(self, level, drop, inplace, col_level, col_fill) 4136 name = tuple(name_lst) 4137 # to ndarray and maybe infer different dtype -> 4138 level_values = _maybe_casted_values(lev, lab) 4139 new_obj.insert(0, name, level_values) 4140 ~/miniconda3/lib/python3.5/site-packages/pandas/core/frame.py in _maybe_casted_values(index, labels) 4092 if mask.any(): 4093 values, changed = maybe_upcast_putmask( -> 4094 values, mask, np.nan) 4095 return values 4096 ~/miniconda3/lib/python3.5/site-packages/pandas/core/dtypes/cast.py in maybe_upcast_putmask(result, mask, other) 250 np.place(result, mask, other) 251 except Exception: --> 252 return changeit() 253 254 return result, False ~/miniconda3/lib/python3.5/site-packages/pandas/core/dtypes/cast.py in changeit() 222 # isn't compatible 223 r, _ = maybe_upcast(result, fill_value=other, copy=True) --> 224 np.place(r, mask, other) 225 226 return r, True ~/miniconda3/lib/python3.5/site-packages/numpy/lib/function_base.py in place(arr, mask, vals) 2370 if not isinstance(arr, np.ndarray): 2371 raise TypeError("argument 1 must be numpy.ndarray, " -> 2372 "not {name}".format(name=type(arr).__name__)) 2373 2374 return _insert(arr, mask, vals) TypeError: argument 1 must be numpy.ndarray, not Categorical
TypeError
def construct_1d_arraylike_from_scalar( value, length: int, dtype: DtypeObj ) -> ArrayLike: """ create a np.ndarray / pandas type of specified shape and dtype filled with values Parameters ---------- value : scalar value length : int dtype : pandas_dtype or np.dtype Returns ------- np.ndarray / pandas type of length, filled with value """ if is_extension_array_dtype(dtype): cls = dtype.construct_array_type() subarr = cls._from_sequence([value] * length, dtype=dtype) else: if length and is_integer_dtype(dtype) and isna(value): # coerce if we have nan for an integer dtype dtype = np.dtype("float64") elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"): # we need to coerce to object dtype to avoid # to allow numpy to take our string as a scalar value dtype = np.dtype("object") if not isna(value): value = ensure_str(value) elif dtype.kind in ["M", "m"] and is_valid_nat_for_dtype(value, dtype): # GH36541: can't fill array directly with pd.NaT # > np.empty(10, dtype="datetime64[64]").fill(pd.NaT) # ValueError: cannot convert float NaN to integer value = np.datetime64("NaT") subarr = np.empty(length, dtype=dtype) subarr.fill(value) return subarr
def construct_1d_arraylike_from_scalar( value, length: int, dtype: DtypeObj ) -> ArrayLike: """ create a np.ndarray / pandas type of specified shape and dtype filled with values Parameters ---------- value : scalar value length : int dtype : pandas_dtype or np.dtype Returns ------- np.ndarray / pandas type of length, filled with value """ if is_extension_array_dtype(dtype): cls = dtype.construct_array_type() subarr = cls._from_sequence([value] * length, dtype=dtype) else: if length and is_integer_dtype(dtype) and isna(value): # coerce if we have nan for an integer dtype dtype = np.dtype("float64") elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"): # we need to coerce to object dtype to avoid # to allow numpy to take our string as a scalar value dtype = np.dtype("object") if not isna(value): value = ensure_str(value) subarr = np.empty(length, dtype=dtype) subarr.fill(value) return subarr
https://github.com/pandas-dev/pandas/issues/36541
In [18]: ix = pd.MultiIndex.from_tuples([(pd.NaT, 1), (pd.NaT, 2)], names=['a', 'b']) In [19]: ix Out[19]: MultiIndex([('NaT', 1), ('NaT', 2)], names=['a', 'b']) In [20]: d = pd.DataFrame({'x': [11, 12]}, index=ix) In [21]: d Out[21]: x a b NaT 1 11 2 12 In [22]: d.reset_index() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-22-4653618060e8> in <module> ----> 1 d.reset_index() ~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/frame.py in reset_index(self, level, drop, inplace, col_level, col_fill) 4851 name = tuple(name_lst) 4852 # to ndarray and maybe infer different dtype -> 4853 level_values = _maybe_casted_values(lev, lab) 4854 new_obj.insert(0, name, level_values) 4855 ~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/frame.py in _maybe_casted_values(index, labels) 4784 dtype = index.dtype 4785 fill_value = na_value_for_dtype(dtype) -> 4786 values = construct_1d_arraylike_from_scalar( 4787 fill_value, len(mask), dtype 4788 ) ~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/dtypes/cast.py in construct_1d_arraylike_from_scalar(value, length, dtype) 1556 1557 subarr = np.empty(length, dtype=dtype) -> 1558 subarr.fill(value) 1559 1560 return subarr ValueError: cannot convert float NaN to integer
ValueError
def _cat_compare_op(op): opname = f"__{op.__name__}__" fill_value = True if op is operator.ne else False @unpack_zerodim_and_defer(opname) def func(self, other): hashable = is_hashable(other) if is_list_like(other) and len(other) != len(self) and not hashable: # in hashable case we may have a tuple that is itself a category raise ValueError("Lengths must match.") if not self.ordered: if opname in ["__lt__", "__gt__", "__le__", "__ge__"]: raise TypeError( "Unordered Categoricals can only compare equality or not" ) if isinstance(other, Categorical): # Two Categoricals can only be be compared if the categories are # the same (maybe up to ordering, depending on ordered) msg = "Categoricals can only be compared if 'categories' are the same." if not self.is_dtype_equal(other): raise TypeError(msg) if not self.ordered and not self.categories.equals(other.categories): # both unordered and different order other_codes = _get_codes_for_values(other, self.categories) else: other_codes = other._codes ret = op(self._codes, other_codes) mask = (self._codes == -1) | (other_codes == -1) if mask.any(): ret[mask] = fill_value return ret if hashable: if other in self.categories: i = self._unbox_scalar(other) ret = op(self._codes, i) if opname not in {"__eq__", "__ge__", "__gt__"}: # GH#29820 performance trick; get_loc will always give i>=0, # so in the cases (__ne__, __le__, __lt__) the setting # here is a no-op, so can be skipped. mask = self._codes == -1 ret[mask] = fill_value return ret else: return ops.invalid_comparison(self, other, op) else: # allow categorical vs object dtype array comparisons for equality # these are only positional comparisons if opname not in ["__eq__", "__ne__"]: raise TypeError( f"Cannot compare a Categorical for op {opname} with " f"type {type(other)}.\nIf you want to compare values, " "use 'np.asarray(cat) <op> other'." ) if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype): # We would return NotImplemented here, but that messes up # ExtensionIndex's wrapped methods return op(other, self) return getattr(np.array(self), opname)(np.array(other)) func.__name__ = opname return func
def _cat_compare_op(op): opname = f"__{op.__name__}__" fill_value = True if op is operator.ne else False @unpack_zerodim_and_defer(opname) def func(self, other): if is_list_like(other) and len(other) != len(self): # TODO: Could this fail if the categories are listlike objects? raise ValueError("Lengths must match.") if not self.ordered: if opname in ["__lt__", "__gt__", "__le__", "__ge__"]: raise TypeError( "Unordered Categoricals can only compare equality or not" ) if isinstance(other, Categorical): # Two Categoricals can only be be compared if the categories are # the same (maybe up to ordering, depending on ordered) msg = "Categoricals can only be compared if 'categories' are the same." if not self.is_dtype_equal(other): raise TypeError(msg) if not self.ordered and not self.categories.equals(other.categories): # both unordered and different order other_codes = _get_codes_for_values(other, self.categories) else: other_codes = other._codes ret = op(self._codes, other_codes) mask = (self._codes == -1) | (other_codes == -1) if mask.any(): ret[mask] = fill_value return ret if is_scalar(other): if other in self.categories: i = self._unbox_scalar(other) ret = op(self._codes, i) if opname not in {"__eq__", "__ge__", "__gt__"}: # GH#29820 performance trick; get_loc will always give i>=0, # so in the cases (__ne__, __le__, __lt__) the setting # here is a no-op, so can be skipped. mask = self._codes == -1 ret[mask] = fill_value return ret else: return ops.invalid_comparison(self, other, op) else: # allow categorical vs object dtype array comparisons for equality # these are only positional comparisons if opname not in ["__eq__", "__ne__"]: raise TypeError( f"Cannot compare a Categorical for op {opname} with " f"type {type(other)}.\nIf you want to compare values, " "use 'np.asarray(cat) <op> other'." ) if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype): # We would return NotImplemented here, but that messes up # ExtensionIndex's wrapped methods return op(other, self) return getattr(np.array(self), opname)(np.array(other)) func.__name__ = opname return func
https://github.com/pandas-dev/pandas/issues/20439
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-4d3f2e8a3cfd> in <module>() ----> 1 c[0] = (1, 2) /Users/taugspurger/miniconda3/envs/pandas-0.19.2/lib/python3.5/site-packages/pandas/core/categorical.py in __setitem__(self, key, value) 1652 # something to np.nan 1653 if len(to_add) and not isnull(to_add).all(): -> 1654 raise ValueError("Cannot setitem on a Categorical with a new " 1655 "category, set the categories first") 1656 ValueError: Cannot setitem on a Categorical with a new category, set the categories first
ValueError
def func(self, other): hashable = is_hashable(other) if is_list_like(other) and len(other) != len(self) and not hashable: # in hashable case we may have a tuple that is itself a category raise ValueError("Lengths must match.") if not self.ordered: if opname in ["__lt__", "__gt__", "__le__", "__ge__"]: raise TypeError("Unordered Categoricals can only compare equality or not") if isinstance(other, Categorical): # Two Categoricals can only be be compared if the categories are # the same (maybe up to ordering, depending on ordered) msg = "Categoricals can only be compared if 'categories' are the same." if not self.is_dtype_equal(other): raise TypeError(msg) if not self.ordered and not self.categories.equals(other.categories): # both unordered and different order other_codes = _get_codes_for_values(other, self.categories) else: other_codes = other._codes ret = op(self._codes, other_codes) mask = (self._codes == -1) | (other_codes == -1) if mask.any(): ret[mask] = fill_value return ret if hashable: if other in self.categories: i = self._unbox_scalar(other) ret = op(self._codes, i) if opname not in {"__eq__", "__ge__", "__gt__"}: # GH#29820 performance trick; get_loc will always give i>=0, # so in the cases (__ne__, __le__, __lt__) the setting # here is a no-op, so can be skipped. mask = self._codes == -1 ret[mask] = fill_value return ret else: return ops.invalid_comparison(self, other, op) else: # allow categorical vs object dtype array comparisons for equality # these are only positional comparisons if opname not in ["__eq__", "__ne__"]: raise TypeError( f"Cannot compare a Categorical for op {opname} with " f"type {type(other)}.\nIf you want to compare values, " "use 'np.asarray(cat) <op> other'." ) if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype): # We would return NotImplemented here, but that messes up # ExtensionIndex's wrapped methods return op(other, self) return getattr(np.array(self), opname)(np.array(other))
def func(self, other): if is_list_like(other) and len(other) != len(self): # TODO: Could this fail if the categories are listlike objects? raise ValueError("Lengths must match.") if not self.ordered: if opname in ["__lt__", "__gt__", "__le__", "__ge__"]: raise TypeError("Unordered Categoricals can only compare equality or not") if isinstance(other, Categorical): # Two Categoricals can only be be compared if the categories are # the same (maybe up to ordering, depending on ordered) msg = "Categoricals can only be compared if 'categories' are the same." if not self.is_dtype_equal(other): raise TypeError(msg) if not self.ordered and not self.categories.equals(other.categories): # both unordered and different order other_codes = _get_codes_for_values(other, self.categories) else: other_codes = other._codes ret = op(self._codes, other_codes) mask = (self._codes == -1) | (other_codes == -1) if mask.any(): ret[mask] = fill_value return ret if is_scalar(other): if other in self.categories: i = self._unbox_scalar(other) ret = op(self._codes, i) if opname not in {"__eq__", "__ge__", "__gt__"}: # GH#29820 performance trick; get_loc will always give i>=0, # so in the cases (__ne__, __le__, __lt__) the setting # here is a no-op, so can be skipped. mask = self._codes == -1 ret[mask] = fill_value return ret else: return ops.invalid_comparison(self, other, op) else: # allow categorical vs object dtype array comparisons for equality # these are only positional comparisons if opname not in ["__eq__", "__ne__"]: raise TypeError( f"Cannot compare a Categorical for op {opname} with " f"type {type(other)}.\nIf you want to compare values, " "use 'np.asarray(cat) <op> other'." ) if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype): # We would return NotImplemented here, but that messes up # ExtensionIndex's wrapped methods return op(other, self) return getattr(np.array(self), opname)(np.array(other))
https://github.com/pandas-dev/pandas/issues/20439
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-4d3f2e8a3cfd> in <module>() ----> 1 c[0] = (1, 2) /Users/taugspurger/miniconda3/envs/pandas-0.19.2/lib/python3.5/site-packages/pandas/core/categorical.py in __setitem__(self, key, value) 1652 # something to np.nan 1653 if len(to_add) and not isnull(to_add).all(): -> 1654 raise ValueError("Cannot setitem on a Categorical with a new " 1655 "category, set the categories first") 1656 ValueError: Cannot setitem on a Categorical with a new category, set the categories first
ValueError
def _validate_setitem_value(self, value): value = extract_array(value, extract_numpy=True) # require identical categories set if isinstance(value, Categorical): if not is_dtype_equal(self, value): raise ValueError( "Cannot set a Categorical with another, without identical categories" ) new_codes = self._validate_listlike(value) value = Categorical.from_codes(new_codes, dtype=self.dtype) # wrap scalars and hashable-listlikes in list rvalue = value if not is_hashable(value) else [value] from pandas import Index to_add = Index(rvalue).difference(self.categories) # no assignments of values not in categories, but it's always ok to set # something to np.nan if len(to_add) and not isna(to_add).all(): raise ValueError( "Cannot setitem on a Categorical with a new " "category, set the categories first" ) return self._unbox_listlike(rvalue)
def _validate_setitem_value(self, value): value = extract_array(value, extract_numpy=True) # require identical categories set if isinstance(value, Categorical): if not is_dtype_equal(self, value): raise ValueError( "Cannot set a Categorical with another, without identical categories" ) new_codes = self._validate_listlike(value) value = Categorical.from_codes(new_codes, dtype=self.dtype) rvalue = value if is_list_like(value) else [value] from pandas import Index to_add = Index(rvalue).difference(self.categories) # no assignments of values not in categories, but it's always ok to set # something to np.nan if len(to_add) and not isna(to_add).all(): raise ValueError( "Cannot setitem on a Categorical with a new " "category, set the categories first" ) return self._unbox_listlike(rvalue)
https://github.com/pandas-dev/pandas/issues/20439
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-4d3f2e8a3cfd> in <module>() ----> 1 c[0] = (1, 2) /Users/taugspurger/miniconda3/envs/pandas-0.19.2/lib/python3.5/site-packages/pandas/core/categorical.py in __setitem__(self, key, value) 1652 # something to np.nan 1653 if len(to_add) and not isnull(to_add).all(): -> 1654 raise ValueError("Cannot setitem on a Categorical with a new " 1655 "category, set the categories first") 1656 ValueError: Cannot setitem on a Categorical with a new category, set the categories first
ValueError
def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err
def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.T.dot(np.transpose(other)).T
https://github.com/pandas-dev/pandas/issues/21581
# a.__matmul__ is called ... a @ df Traceback (most recent call last): File "<stdin>", line 2, in <module> ValueError: shapes (10,4) and (5,3) not aligned: 4 (dim 1) != 5 (dim 0) # df.__rmatmul__ is called ... a.tolist() @ df Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/Users/mingli/GitHub/open/pandas/pandas/core/frame.py", line 901, in __rmatmul__ return self.T.dot(np.transpose(other)).T File "/Users/mingli/GitHub/open/pandas/pandas/core/frame.py", line 879, in dot r=rvals.shape)) ValueError: Dot product shape mismatch, (3, 5) vs (4, 10)
ValueError
def get_grouper( obj: FrameOrSeries, key=None, axis: int = 0, level=None, sort: bool = True, observed: bool = False, mutated: bool = False, validate: bool = True, dropna: bool = True, ) -> Tuple["ops.BaseGrouper", Set[Label], FrameOrSeries]: """ Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values. If validate, then check for key/level overlaps. """ group_axis = obj._get_axis(axis) # validate that the passed single level is compatible with the passed # axis of the object if level is not None: # TODO: These if-block and else-block are almost same. # MultiIndex instance check is removable, but it seems that there are # some processes only for non-MultiIndex in else-block, # eg. `obj.index.name != level`. We have to consider carefully whether # these are applicable for MultiIndex. Even if these are applicable, # we need to check if it makes no side effect to subsequent processes # on the outside of this condition. # (GH 17621) if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): # Get the level values from group_axis key = group_axis.get_level_values(level) level = None else: # allow level to be a length-one list-like object # (e.g., level=[0]) # GH 13901 if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError("No group keys passed!") else: raise ValueError("multiple levels only valid with MultiIndex") if isinstance(level, str): if obj._get_axis(axis).name != level: raise ValueError( f"level name {level} is not the name " f"of the {obj._get_axis_name(axis)}" ) elif level > 0 or level < -1: raise ValueError("level > 0 or level < -1 only valid with MultiIndex") # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. level = None key = group_axis # a passed-in Grouper, directly convert if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, set(), obj else: return grouper, {key.key}, obj # already have a BaseGrouper, just return it elif isinstance(key, ops.BaseGrouper): return key, set(), obj if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) # what are we after, exactly? any_callable = any(callable(g) or isinstance(g, dict) for g in keys) any_groupers = any(isinstance(g, Grouper) for g in keys) any_arraylike = any( isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys ) # is this an index replacement? if ( not any_callable and not any_arraylike and not any_groupers and match_axis_length and level is None ): if isinstance(obj, DataFrame): all_in_columns_index = all( g in obj.columns or g in obj.index.names for g in keys ) else: assert isinstance(obj, Series) all_in_columns_index = all(g in obj.index.names for g in keys) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings: List[Grouping] = [] exclusions: Set[Label] = set() # if the actual grouper should be obj[key] def is_in_axis(key) -> bool: if not _is_label_like(key): # items -> .columns for DataFrame, .index for Series items = obj.axes[-1] try: items.get_loc(key) except (KeyError, TypeError, InvalidIndexError): # TypeError shows up here if we pass e.g. Int64Index return False return True # if the grouper is obj[name] def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False try: return gpr is obj[gpr.name] except (KeyError, IndexError): # IndexError reached in e.g. test_skip_group_keys when we pass # lambda here return False for i, (gpr, level) in enumerate(zip(keys, levels)): if is_in_obj(gpr): # df.groupby(df['name']) in_axis, name = True, gpr.name exclusions.add(name) elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr, axis=axis) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.add(name) elif obj._is_level_reference(gpr, axis=axis): in_axis, name, level, gpr = False, None, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.add(gpr.key) in_axis, name = False, None else: in_axis, name = False, None if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: raise ValueError( f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) " "must be same length" ) # create the Grouping # allow us to passing the actual Grouping as the gpr ping = ( Grouping( group_axis, gpr, obj=obj, name=name, level=level, sort=sort, observed=observed, in_axis=in_axis, dropna=dropna, ) if not isinstance(gpr, Grouping) else gpr ) groupings.append(ping) if len(groupings) == 0 and len(obj): raise ValueError("No group keys passed!") elif len(groupings) == 0: groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) # create the internals grouper grouper = ops.BaseGrouper( group_axis, groupings, sort=sort, mutated=mutated, dropna=dropna ) return grouper, exclusions, obj
def get_grouper( obj: FrameOrSeries, key=None, axis: int = 0, level=None, sort: bool = True, observed: bool = False, mutated: bool = False, validate: bool = True, dropna: bool = True, ) -> Tuple["ops.BaseGrouper", Set[Label], FrameOrSeries]: """ Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values. If validate, then check for key/level overlaps. """ group_axis = obj._get_axis(axis) # validate that the passed single level is compatible with the passed # axis of the object if level is not None: # TODO: These if-block and else-block are almost same. # MultiIndex instance check is removable, but it seems that there are # some processes only for non-MultiIndex in else-block, # eg. `obj.index.name != level`. We have to consider carefully whether # these are applicable for MultiIndex. Even if these are applicable, # we need to check if it makes no side effect to subsequent processes # on the outside of this condition. # (GH 17621) if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): # Get the level values from group_axis key = group_axis.get_level_values(level) level = None else: # allow level to be a length-one list-like object # (e.g., level=[0]) # GH 13901 if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError("No group keys passed!") else: raise ValueError("multiple levels only valid with MultiIndex") if isinstance(level, str): if obj._get_axis(axis).name != level: raise ValueError( f"level name {level} is not the name " f"of the {obj._get_axis_name(axis)}" ) elif level > 0 or level < -1: raise ValueError("level > 0 or level < -1 only valid with MultiIndex") # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. level = None key = group_axis # a passed-in Grouper, directly convert if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, set(), obj else: return grouper, {key.key}, obj # already have a BaseGrouper, just return it elif isinstance(key, ops.BaseGrouper): return key, set(), obj if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) # what are we after, exactly? any_callable = any(callable(g) or isinstance(g, dict) for g in keys) any_groupers = any(isinstance(g, Grouper) for g in keys) any_arraylike = any( isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys ) # is this an index replacement? if ( not any_callable and not any_arraylike and not any_groupers and match_axis_length and level is None ): if isinstance(obj, DataFrame): all_in_columns_index = all( g in obj.columns or g in obj.index.names for g in keys ) else: assert isinstance(obj, Series) all_in_columns_index = all(g in obj.index.names for g in keys) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings: List[Grouping] = [] exclusions: Set[Label] = set() # if the actual grouper should be obj[key] def is_in_axis(key) -> bool: if not _is_label_like(key): # items -> .columns for DataFrame, .index for Series items = obj.axes[-1] try: items.get_loc(key) except (KeyError, TypeError, InvalidIndexError): # TypeError shows up here if we pass e.g. Int64Index return False return True # if the grouper is obj[name] def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False try: return gpr is obj[gpr.name] except (KeyError, IndexError): # IndexError reached in e.g. test_skip_group_keys when we pass # lambda here return False for i, (gpr, level) in enumerate(zip(keys, levels)): if is_in_obj(gpr): # df.groupby(df['name']) in_axis, name = True, gpr.name exclusions.add(name) elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr, axis=axis) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.add(name) elif obj._is_level_reference(gpr, axis=axis): in_axis, name, level, gpr = False, None, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.add(gpr.key) in_axis, name = False, None else: in_axis, name = False, None if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: raise ValueError( f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) " "must be same length" ) # create the Grouping # allow us to passing the actual Grouping as the gpr ping = ( Grouping( group_axis, gpr, obj=obj, name=name, level=level, sort=sort, observed=observed, in_axis=in_axis, dropna=dropna, ) if not isinstance(gpr, Grouping) else gpr ) groupings.append(ping) if len(groupings) == 0 and len(obj): raise ValueError("No group keys passed!") elif len(groupings) == 0: groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) # create the internals grouper grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated) return grouper, exclusions, obj
https://github.com/pandas-dev/pandas/issues/36620
In [1]: import pandas as pd ...: df = pd.DataFrame({"A": [0, 0, 1, None], "B": [10, 2, 10, None]}) ...: gb = df.groupby('A', dropna=False) In [2]: gb.dropna Out[2]: False In [3]: gb.grouper.dropna --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-3-4d8081f9c78e> in <module> ----> 1 gb.grouper.dropna AttributeError: 'BaseGrouper' object has no attribute 'dropna'
AttributeError
def __init__( self, axis: Index, groupings: Sequence["grouper.Grouping"], sort: bool = True, group_keys: bool = True, mutated: bool = False, indexer: Optional[np.ndarray] = None, dropna: bool = True, ): assert isinstance(axis, Index), axis self._filter_empty_groups = self.compressed = len(groupings) != 1 self.axis = axis self._groupings: List[grouper.Grouping] = list(groupings) self.sort = sort self.group_keys = group_keys self.mutated = mutated self.indexer = indexer self.dropna = dropna
def __init__( self, axis: Index, groupings: Sequence["grouper.Grouping"], sort: bool = True, group_keys: bool = True, mutated: bool = False, indexer: Optional[np.ndarray] = None, ): assert isinstance(axis, Index), axis self._filter_empty_groups = self.compressed = len(groupings) != 1 self.axis = axis self._groupings: List[grouper.Grouping] = list(groupings) self.sort = sort self.group_keys = group_keys self.mutated = mutated self.indexer = indexer
https://github.com/pandas-dev/pandas/issues/36620
In [1]: import pandas as pd ...: df = pd.DataFrame({"A": [0, 0, 1, None], "B": [10, 2, 10, None]}) ...: gb = df.groupby('A', dropna=False) In [2]: gb.dropna Out[2]: False In [3]: gb.grouper.dropna --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-3-4d8081f9c78e> in <module> ----> 1 gb.grouper.dropna AttributeError: 'BaseGrouper' object has no attribute 'dropna'
AttributeError
def _bins_to_cuts( x, bins, right: bool = True, labels=None, precision: int = 3, include_lowest: bool = False, dtype=None, duplicates: str = "raise", ordered: bool = True, ): if not ordered and labels is None: raise ValueError("'labels' must be provided if 'ordered = False'") if duplicates not in ["raise", "drop"]: raise ValueError( "invalid value for 'duplicates' parameter, valid options are: raise, drop" ) if isinstance(bins, IntervalIndex): # we have a fast-path here ids = bins.get_indexer(x) result = Categorical.from_codes(ids, categories=bins, ordered=True) return result, bins unique_bins = algos.unique(bins) if len(unique_bins) < len(bins) and len(bins) != 2: if duplicates == "raise": raise ValueError( f"Bin edges must be unique: {repr(bins)}.\n" f"You can drop duplicate edges by setting the 'duplicates' kwarg" ) else: bins = unique_bins side = "left" if right else "right" ids = ensure_int64(bins.searchsorted(x, side=side)) if include_lowest: ids[x == bins[0]] = 1 na_mask = isna(x) | (ids == len(bins)) | (ids == 0) has_nas = na_mask.any() if labels is not False: if not (labels is None or is_list_like(labels)): raise ValueError( "Bin labels must either be False, None or passed in as a " "list-like argument" ) elif labels is None: labels = _format_labels( bins, precision, right=right, include_lowest=include_lowest, dtype=dtype ) elif ordered and len(set(labels)) != len(labels): raise ValueError( "labels must be unique if ordered=True; pass ordered=False for duplicate labels" # noqa ) else: if len(labels) != len(bins) - 1: raise ValueError( "Bin labels must be one fewer than the number of bin edges" ) if not is_categorical_dtype(labels): labels = Categorical( labels, categories=labels if len(set(labels)) == len(labels) else None, ordered=ordered, ) # TODO: handle mismatch between categorical label order and pandas.cut order. np.putmask(ids, na_mask, 0) result = algos.take_nd(labels, ids - 1) else: result = ids - 1 if has_nas: result = result.astype(np.float64) np.putmask(result, na_mask, np.nan) return result, bins
def _bins_to_cuts( x, bins, right: bool = True, labels=None, precision: int = 3, include_lowest: bool = False, dtype=None, duplicates: str = "raise", ordered: bool = True, ): if not ordered and not labels: raise ValueError("'labels' must be provided if 'ordered = False'") if duplicates not in ["raise", "drop"]: raise ValueError( "invalid value for 'duplicates' parameter, valid options are: raise, drop" ) if isinstance(bins, IntervalIndex): # we have a fast-path here ids = bins.get_indexer(x) result = Categorical.from_codes(ids, categories=bins, ordered=True) return result, bins unique_bins = algos.unique(bins) if len(unique_bins) < len(bins) and len(bins) != 2: if duplicates == "raise": raise ValueError( f"Bin edges must be unique: {repr(bins)}.\n" f"You can drop duplicate edges by setting the 'duplicates' kwarg" ) else: bins = unique_bins side = "left" if right else "right" ids = ensure_int64(bins.searchsorted(x, side=side)) if include_lowest: ids[x == bins[0]] = 1 na_mask = isna(x) | (ids == len(bins)) | (ids == 0) has_nas = na_mask.any() if labels is not False: if not (labels is None or is_list_like(labels)): raise ValueError( "Bin labels must either be False, None or passed in as a " "list-like argument" ) elif labels is None: labels = _format_labels( bins, precision, right=right, include_lowest=include_lowest, dtype=dtype ) elif ordered and len(set(labels)) != len(labels): raise ValueError( "labels must be unique if ordered=True; pass ordered=False for duplicate labels" # noqa ) else: if len(labels) != len(bins) - 1: raise ValueError( "Bin labels must be one fewer than the number of bin edges" ) if not is_categorical_dtype(labels): labels = Categorical( labels, categories=labels if len(set(labels)) == len(labels) else None, ordered=ordered, ) # TODO: handle mismatch between categorical label order and pandas.cut order. np.putmask(ids, na_mask, 0) result = algos.take_nd(labels, ids - 1) else: result = ids - 1 if has_nas: result = result.astype(np.float64) np.putmask(result, na_mask, np.nan) return result, bins
https://github.com/pandas-dev/pandas/issues/36603
Traceback (most recent call last): File "/Users/mark/PycharmProjects/temp/bug1/cut_test.py", line 8, in <module> print(pd.cut(test.row_value, cuts.cut_value, labels=cuts.cut_label[:-1], ordered=False)) File "/Users/mark/PycharmProjects/temp/bug1/venv/lib/python3.7/site-packages/pandas/core/reshape/tile.py", line 284, in cut ordered=ordered, File "/Users/mark/PycharmProjects/temp/bug1/venv/lib/python3.7/site-packages/pandas/core/reshape/tile.py", line 384, in _bins_to_cuts if not ordered and not labels: File "/Users/mark/PycharmProjects/temp/bug1/venv/lib/python3.7/site-packages/pandas/core/generic.py", line 1327, in __nonzero__ f"The truth value of a {type(self).__name__} is ambiguous. " ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all(). Process finished with exit code 1
ValueError
def _get_cell_string_value(self, cell) -> str: """ Find and decode OpenDocument text:s tags that represent a run length encoded sequence of space characters. """ from odf.element import Element from odf.namespaces import TEXTNS from odf.text import S text_s = S().qname value = [] for fragment in cell.childNodes: if isinstance(fragment, Element): if fragment.qname == text_s: spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) value.append(" " * spaces) else: # recursive impl needed in case of nested fragments # with multiple spaces # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704 value.append(self._get_cell_string_value(fragment)) else: value.append(str(fragment)) return "".join(value)
def _get_cell_string_value(self, cell) -> str: """ Find and decode OpenDocument text:s tags that represent a run length encoded sequence of space characters. """ from odf.element import Element, Text from odf.namespaces import TEXTNS from odf.text import P, S text_p = P().qname text_s = S().qname p = cell.childNodes[0] value = [] if p.qname == text_p: for k, fragment in enumerate(p.childNodes): if isinstance(fragment, Text): value.append(fragment.data) elif isinstance(fragment, Element): if fragment.qname == text_s: spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) value.append(" " * spaces) return "".join(value)
https://github.com/pandas-dev/pandas/issues/35802
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/michael/.local/share/virtualenvs/merge-csv-NFbvYFrS/lib/python3.8/site-packages/pandas/util/_decorators.py", line 296, in wrapper return func(*args, **kwargs) File "/home/michael/.local/share/virtualenvs/merge-csv-NFbvYFrS/lib/python3.8/site-packages/pandas/io/excel/_base.py", line 311, in read_excel return io.parse( File "/home/michael/.local/share/virtualenvs/merge-csv-NFbvYFrS/lib/python3.8/site-packages/pandas/io/excel/_base.py", line 906, in parse return self._reader.parse( File "/home/michael/.local/share/virtualenvs/merge-csv-NFbvYFrS/lib/python3.8/site-packages/pandas/io/excel/_base.py", line 443, in parse data = self.get_sheet_data(sheet, convert_float) File "/home/michael/.local/share/virtualenvs/merge-csv-NFbvYFrS/lib/python3.8/site-packages/pandas/io/excel/_odfreader.py", line 91, in get_sheet_data value = self._get_cell_value(sheet_cell, convert_float) File "/home/michael/.local/share/virtualenvs/merge-csv-NFbvYFrS/lib/python3.8/site-packages/pandas/io/excel/_odfreader.py", line 175, in _get_cell_value return self._get_cell_string_value(cell) File "/home/michael/.local/share/virtualenvs/merge-csv-NFbvYFrS/lib/python3.8/site-packages/pandas/io/excel/_odfreader.py", line 211, in _get_cell_string_value value.append(" " * spaces) UnboundLocalError: local variable 'spaces' referenced before assignment
UnboundLocalError
def _aggregate(self, arg, *args, **kwargs): """ provide an implementation for the aggregators Parameters ---------- arg : string, dict, function *args : args to pass on to the function **kwargs : kwargs to pass on to the function Returns ------- tuple of result, how Notes ----- how can be a string describe the required post-processing, or None if not required """ is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) _axis = kwargs.pop("_axis", None) if _axis is None: _axis = getattr(self, "axis", 0) if isinstance(arg, str): return self._try_aggregate_string_function(arg, *args, **kwargs), None if isinstance(arg, dict): # aggregate based on the passed dict if _axis != 0: # pragma: no cover raise ValueError("Can only pass dict with axis=0") obj = self._selected_obj # if we have a dict of any non-scalars # eg. {'A' : ['mean']}, normalize all to # be list-likes if any(is_aggregator(x) for x in arg.values()): new_arg = {} for k, v in arg.items(): if not isinstance(v, (tuple, list, dict)): new_arg[k] = [v] else: new_arg[k] = v # the keys must be in the columns # for ndim=2, or renamers for ndim=1 # ok for now, but deprecated # {'A': { 'ra': 'mean' }} # {'A': { 'ra': ['mean'] }} # {'ra': ['mean']} # not ok # {'ra' : { 'A' : 'mean' }} if isinstance(v, dict): raise SpecificationError("nested renamer is not supported") elif isinstance(obj, ABCSeries): raise SpecificationError("nested renamer is not supported") elif isinstance(obj, ABCDataFrame) and k not in obj.columns: raise KeyError(f"Column '{k}' does not exist!") arg = new_arg else: # deprecation of renaming keys # GH 15931 keys = list(arg.keys()) if isinstance(obj, ABCDataFrame) and len( obj.columns.intersection(keys) ) != len(keys): cols = sorted(set(keys) - set(obj.columns.intersection(keys))) raise SpecificationError(f"Column(s) {cols} do not exist") from pandas.core.reshape.concat import concat def _agg_1dim(name, how, subset=None): """ aggregate a 1-dim with how """ colg = self._gotitem(name, ndim=1, subset=subset) if colg.ndim != 1: raise SpecificationError( "nested dictionary is ambiguous in aggregation" ) return colg.aggregate(how) def _agg_2dim(how): """ aggregate a 2-dim with how """ colg = self._gotitem(self._selection, ndim=2, subset=obj) return colg.aggregate(how) def _agg(arg, func): """ run the aggregations over the arg with func return a dict """ result = {} for fname, agg_how in arg.items(): result[fname] = func(fname, agg_how) return result # set the final keys keys = list(arg.keys()) result = {} if self._selection is not None: sl = set(self._selection_list) # we are a Series like object, # but may have multiple aggregations if len(sl) == 1: result = _agg( arg, lambda fname, agg_how: _agg_1dim(self._selection, agg_how) ) # we are selecting the same set as we are aggregating elif not len(sl - set(keys)): result = _agg(arg, _agg_1dim) # we are a DataFrame, with possibly multiple aggregations else: result = _agg(arg, _agg_2dim) # no selection else: try: result = _agg(arg, _agg_1dim) except SpecificationError: # we are aggregating expecting all 1d-returns # but we have 2d result = _agg(arg, _agg_2dim) # combine results def is_any_series() -> bool: # return a boolean if we have *any* nested series return any(isinstance(r, ABCSeries) for r in result.values()) def is_any_frame() -> bool: # return a boolean if we have *any* nested series return any(isinstance(r, ABCDataFrame) for r in result.values()) if isinstance(result, list): return concat(result, keys=keys, axis=1, sort=True), True elif is_any_frame(): # we have a dict of DataFrames # return a MI DataFrame keys_to_use = [k for k in keys if not result[k].empty] # Have to check, if at least one DataFrame is not empty. keys_to_use = keys_to_use if keys_to_use != [] else keys return ( concat([result[k] for k in keys_to_use], keys=keys_to_use, axis=1), True, ) elif isinstance(self, ABCSeries) and is_any_series(): # we have a dict of Series # return a MI Series try: result = concat(result) except TypeError as err: # we want to give a nice error here if # we have non-same sized objects, so # we don't automatically broadcast raise ValueError( "cannot perform both aggregation " "and transformation operations " "simultaneously" ) from err return result, True # fall thru from pandas import DataFrame, Series try: result = DataFrame(result) except ValueError: # we have a dict of scalars # GH 36212 use name only if self is a series name = self.name if (self.ndim == 1) else None result = Series(result, name=name) return result, True elif is_list_like(arg): # we require a list, but not an 'str' return self._aggregate_multiple_funcs(arg, _axis=_axis), None else: result = None f = self._get_cython_func(arg) if f and not args and not kwargs: return getattr(self, f)(), None # caller can react return result, True
def _aggregate(self, arg, *args, **kwargs): """ provide an implementation for the aggregators Parameters ---------- arg : string, dict, function *args : args to pass on to the function **kwargs : kwargs to pass on to the function Returns ------- tuple of result, how Notes ----- how can be a string describe the required post-processing, or None if not required """ is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) _axis = kwargs.pop("_axis", None) if _axis is None: _axis = getattr(self, "axis", 0) if isinstance(arg, str): return self._try_aggregate_string_function(arg, *args, **kwargs), None if isinstance(arg, dict): # aggregate based on the passed dict if _axis != 0: # pragma: no cover raise ValueError("Can only pass dict with axis=0") obj = self._selected_obj # if we have a dict of any non-scalars # eg. {'A' : ['mean']}, normalize all to # be list-likes if any(is_aggregator(x) for x in arg.values()): new_arg = {} for k, v in arg.items(): if not isinstance(v, (tuple, list, dict)): new_arg[k] = [v] else: new_arg[k] = v # the keys must be in the columns # for ndim=2, or renamers for ndim=1 # ok for now, but deprecated # {'A': { 'ra': 'mean' }} # {'A': { 'ra': ['mean'] }} # {'ra': ['mean']} # not ok # {'ra' : { 'A' : 'mean' }} if isinstance(v, dict): raise SpecificationError("nested renamer is not supported") elif isinstance(obj, ABCSeries): raise SpecificationError("nested renamer is not supported") elif isinstance(obj, ABCDataFrame) and k not in obj.columns: raise KeyError(f"Column '{k}' does not exist!") arg = new_arg else: # deprecation of renaming keys # GH 15931 keys = list(arg.keys()) if isinstance(obj, ABCDataFrame) and len( obj.columns.intersection(keys) ) != len(keys): cols = sorted(set(keys) - set(obj.columns.intersection(keys))) raise SpecificationError(f"Column(s) {cols} do not exist") from pandas.core.reshape.concat import concat def _agg_1dim(name, how, subset=None): """ aggregate a 1-dim with how """ colg = self._gotitem(name, ndim=1, subset=subset) if colg.ndim != 1: raise SpecificationError( "nested dictionary is ambiguous in aggregation" ) return colg.aggregate(how) def _agg_2dim(how): """ aggregate a 2-dim with how """ colg = self._gotitem(self._selection, ndim=2, subset=obj) return colg.aggregate(how) def _agg(arg, func): """ run the aggregations over the arg with func return a dict """ result = {} for fname, agg_how in arg.items(): result[fname] = func(fname, agg_how) return result # set the final keys keys = list(arg.keys()) result = {} if self._selection is not None: sl = set(self._selection_list) # we are a Series like object, # but may have multiple aggregations if len(sl) == 1: result = _agg( arg, lambda fname, agg_how: _agg_1dim(self._selection, agg_how) ) # we are selecting the same set as we are aggregating elif not len(sl - set(keys)): result = _agg(arg, _agg_1dim) # we are a DataFrame, with possibly multiple aggregations else: result = _agg(arg, _agg_2dim) # no selection else: try: result = _agg(arg, _agg_1dim) except SpecificationError: # we are aggregating expecting all 1d-returns # but we have 2d result = _agg(arg, _agg_2dim) # combine results def is_any_series() -> bool: # return a boolean if we have *any* nested series return any(isinstance(r, ABCSeries) for r in result.values()) def is_any_frame() -> bool: # return a boolean if we have *any* nested series return any(isinstance(r, ABCDataFrame) for r in result.values()) if isinstance(result, list): return concat(result, keys=keys, axis=1, sort=True), True elif is_any_frame(): # we have a dict of DataFrames # return a MI DataFrame keys_to_use = [k for k in keys if not result[k].empty] # Have to check, if at least one DataFrame is not empty. keys_to_use = keys_to_use if keys_to_use != [] else keys return ( concat([result[k] for k in keys_to_use], keys=keys_to_use, axis=1), True, ) elif isinstance(self, ABCSeries) and is_any_series(): # we have a dict of Series # return a MI Series try: result = concat(result) except TypeError as err: # we want to give a nice error here if # we have non-same sized objects, so # we don't automatically broadcast raise ValueError( "cannot perform both aggregation " "and transformation operations " "simultaneously" ) from err return result, True # fall thru from pandas import DataFrame, Series try: result = DataFrame(result) except ValueError: # we have a dict of scalars result = Series(result, name=getattr(self, "name", None)) return result, True elif is_list_like(arg): # we require a list, but not an 'str' return self._aggregate_multiple_funcs(arg, _axis=_axis), None else: result = None f = self._get_cython_func(arg) if f and not args and not kwargs: return getattr(self, f)(), None # caller can react return result, True
https://github.com/pandas-dev/pandas/issues/36212
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/base.py in _aggregate(self, arg, *args, **kwargs) 470 try: --> 471 result = DataFrame(result) 472 except ValueError: ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/frame.py in __init__(self, data, index, columns, dtype, copy) 467 elif isinstance(data, dict): --> 468 mgr = init_dict(data, index, columns, dtype=dtype) 469 elif isinstance(data, ma.MaskedArray): ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/internals/construction.py in init_dict(data, index, columns, dtype) 282 ] --> 283 return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype) 284 ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/internals/construction.py in arrays_to_mgr(arrays, arr_names, index, columns, dtype, verify_integrity) 77 if index is None: ---> 78 index = extract_index(arrays) 79 else: ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/internals/construction.py in extract_index(data) 386 if not indexes and not raw_lengths: --> 387 raise ValueError("If using all scalar values, you must pass an index") 388 ValueError: If using all scalar values, you must pass an index During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/frame.py in aggregate(self, func, axis, *args, **kwargs) 7358 try: -> 7359 result, how = self._aggregate(func, axis=axis, *args, **kwargs) 7360 except TypeError as err: ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/frame.py in _aggregate(self, arg, axis, *args, **kwargs) 7383 return result, how -> 7384 return super()._aggregate(arg, *args, **kwargs) 7385 ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/base.py in _aggregate(self, arg, *args, **kwargs) 474 # we have a dict of scalars --> 475 result = Series(result, name=getattr(self, "name", None)) 476 ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 228 --> 229 name = ibase.maybe_extract_name(name, data, type(self)) 230 ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/indexes/base.py in maybe_extract_name(name, obj, cls) 5658 if not is_hashable(name): -> 5659 raise TypeError(f"{cls.__name__}.name must be a hashable type") 5660 TypeError: Series.name must be a hashable type The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) <ipython-input-6-efe06ed8dce0> in <module> 2 data = {"name": ["abc", "xyz"]} 3 df = pd.DataFrame(data) ----> 4 print(df.agg({'name': 'count'})) ~/.virtualenvs/dimensions-connectors/lib/python3.7/site-packages/pandas/core/frame.py in aggregate(self, func, axis, *args, **kwargs) 7363 f"incompatible data and dtype: {err}" 7364 ) -> 7365 raise exc from err 7366 if result is None: 7367 return self.apply(func, axis=axis, args=args, **kwargs) TypeError: DataFrame constructor called with incompatible data and dtype: Series.name must be a hashable type
ValueError
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]: """ Interpret the dtype from a scalar. Parameters ---------- pandas_dtype : bool, default False whether to infer dtype including pandas extension types. If False, scalar belongs to pandas extension types is inferred as object """ dtype: DtypeObj = np.dtype(object) # a 1-element ndarray if isinstance(val, np.ndarray): msg = "invalid ndarray passed to infer_dtype_from_scalar" if val.ndim != 0: raise ValueError(msg) dtype = val.dtype val = val.item() elif isinstance(val, str): # If we create an empty array using a string to infer # the dtype, NumPy will only allocate one character per entry # so this is kind of bad. Alternately we could use np.repeat # instead of np.empty (but then you still don't want things # coming out as np.str_! dtype = np.dtype(object) elif isinstance(val, (np.datetime64, datetime)): val = tslibs.Timestamp(val) if val is tslibs.NaT or val.tz is None: dtype = np.dtype("M8[ns]") else: if pandas_dtype: dtype = DatetimeTZDtype(unit="ns", tz=val.tz) else: # return datetimetz as object return np.dtype(object), val val = val.value elif isinstance(val, (np.timedelta64, timedelta)): val = tslibs.Timedelta(val).value dtype = np.dtype("m8[ns]") elif is_bool(val): dtype = np.dtype(np.bool_) elif is_integer(val): if isinstance(val, np.integer): dtype = np.dtype(type(val)) else: dtype = np.dtype(np.int64) try: np.array(val, dtype=dtype) except OverflowError: dtype = np.array(val).dtype elif is_float(val): if isinstance(val, np.floating): dtype = np.dtype(type(val)) else: dtype = np.dtype(np.float64) elif is_complex(val): dtype = np.dtype(np.complex_) elif pandas_dtype: if lib.is_period(val): dtype = PeriodDtype(freq=val.freq) elif lib.is_interval(val): subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0] dtype = IntervalDtype(subtype=subtype) return dtype, val
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]: """ Interpret the dtype from a scalar. Parameters ---------- pandas_dtype : bool, default False whether to infer dtype including pandas extension types. If False, scalar belongs to pandas extension types is inferred as object """ dtype: DtypeObj = np.dtype(object) # a 1-element ndarray if isinstance(val, np.ndarray): msg = "invalid ndarray passed to infer_dtype_from_scalar" if val.ndim != 0: raise ValueError(msg) dtype = val.dtype val = val.item() elif isinstance(val, str): # If we create an empty array using a string to infer # the dtype, NumPy will only allocate one character per entry # so this is kind of bad. Alternately we could use np.repeat # instead of np.empty (but then you still don't want things # coming out as np.str_! dtype = np.dtype(object) elif isinstance(val, (np.datetime64, datetime)): val = tslibs.Timestamp(val) if val is tslibs.NaT or val.tz is None: dtype = np.dtype("M8[ns]") else: if pandas_dtype: dtype = DatetimeTZDtype(unit="ns", tz=val.tz) else: # return datetimetz as object return np.dtype(object), val val = val.value elif isinstance(val, (np.timedelta64, timedelta)): val = tslibs.Timedelta(val).value dtype = np.dtype("m8[ns]") elif is_bool(val): dtype = np.dtype(np.bool_) elif is_integer(val): if isinstance(val, np.integer): dtype = np.dtype(type(val)) else: dtype = np.dtype(np.int64) elif is_float(val): if isinstance(val, np.floating): dtype = np.dtype(type(val)) else: dtype = np.dtype(np.float64) elif is_complex(val): dtype = np.dtype(np.complex_) elif pandas_dtype: if lib.is_period(val): dtype = PeriodDtype(freq=val.freq) elif lib.is_interval(val): subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0] dtype = IntervalDtype(subtype=subtype) return dtype, val
https://github.com/pandas-dev/pandas/issues/36291
import pandas as pd pd.Series(1000000000000000000000) 0 1000000000000000000000 dtype: object pd.Series(1000000000000000000000, index = pd.date_range(pd.Timestamp.now().floor("1D"), pd.Timestamp.now(), freq='T')) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/matt/opt/anaconda3/lib/python3.7/site-packages/pandas/core/series.py", line 327, in __init__ data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True) File "/Users/matt/opt/anaconda3/lib/python3.7/site-packages/pandas/core/construction.py", line 475, in sanitize_array subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype) File "/Users/matt/opt/anaconda3/lib/python3.7/site-packages/pandas/core/dtypes/cast.py", line 1555, in construct_1d_arraylike_from_scalar subarr.fill(value) OverflowError: int too big to convert pd.Series(1000000000000000000000.0, index = pd.date_range(pd.Timestamp.now().floor("1D"), pd.Timestamp.now(), freq='T')) 2020-09-11 00:00:00 1.000000e+21 2020-09-11 00:01:00 1.000000e+21 2020-09-11 00:02:00 1.000000e+21 2020-09-11 00:03:00 1.000000e+21 2020-09-11 00:04:00 1.000000e+21 ... 2020-09-11 11:24:00 1.000000e+21 2020-09-11 11:25:00 1.000000e+21 2020-09-11 11:26:00 1.000000e+21 2020-09-11 11:27:00 1.000000e+21 2020-09-11 11:28:00 1.000000e+21 Freq: T, Length: 689, dtype: float64
OverflowError
def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna """ self._set_binner() if self.axis: raise AssertionError("axis must be 0") if self._from_selection: raise ValueError( "Upsampling from level= or on= selection " "is not supported, use .set_index(...) " "to explicitly set index to datetime-like" ) ax = self.ax obj = self._selected_obj binner = self.binner res_index = self._adjust_binner_for_upsample(binner) # if we have the same frequency as our axis, then we are equal sampling if ( limit is None and to_offset(ax.inferred_freq) == self.freq and len(obj) == len(res_index) ): result = obj.copy() result.index = res_index else: result = obj.reindex( res_index, method=method, limit=limit, fill_value=fill_value ) result = self._apply_loffset(result) return self._wrap_result(result)
def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna """ self._set_binner() if self.axis: raise AssertionError("axis must be 0") if self._from_selection: raise ValueError( "Upsampling from level= or on= selection " "is not supported, use .set_index(...) " "to explicitly set index to datetime-like" ) ax = self.ax obj = self._selected_obj binner = self.binner res_index = self._adjust_binner_for_upsample(binner) # if we have the same frequency as our axis, then we are equal sampling if limit is None and to_offset(ax.inferred_freq) == self.freq: result = obj.copy() result.index = res_index else: result = obj.reindex( res_index, method=method, limit=limit, fill_value=fill_value ) result = self._apply_loffset(result) return self._wrap_result(result)
https://github.com/pandas-dev/pandas/issues/35219
import pandas as pd # works as expected (note the daylight savings transitions in the head and tail) pd.Series(1., pd.date_range('2020-03-28','2020-10-27', freq='D', tz="Europe/Amsterdam")).resample('24H').pad() 2020-03-28 00:00:00+01:00 1.0 2020-03-29 00:00:00+01:00 1.0 2020-03-30 01:00:00+02:00 1.0 2020-03-31 01:00:00+02:00 1.0 2020-04-01 01:00:00+02:00 1.0 ... 2020-10-23 01:00:00+02:00 1.0 2020-10-24 01:00:00+02:00 1.0 2020-10-25 01:00:00+02:00 1.0 2020-10-26 00:00:00+01:00 1.0 2020-10-27 00:00:00+01:00 1.0 Freq: 24H, Length: 214, dtype: float64 # fails unexpectedly pd.Series(1., pd.date_range('2020-03-28','2020-03-31', freq='D', tz="Europe/Amsterdam")).resample('24H').pad() Traceback (most recent call last): File "<input>", line 1, in <module> File "/home/felix/anaconda3/envs/bvp-venv/lib/python3.6/site-packages/pandas/core/resample.py", line 453, in pad return self._upsample("pad", limit=limit) File "/home/felix/anaconda3/envs/bvp-venv/lib/python3.6/site-packages/pandas/core/resample.py", line 1092, in _upsample result.index = res_index File "/home/felix/anaconda3/envs/bvp-venv/lib/python3.6/site-packages/pandas/core/generic.py", line 5287, in __setattr__ return object.__setattr__(self, name, value) File "pandas/_libs/properties.pyx", line 67, in pandas._libs.properties.AxisProperty.__set__ File "/home/felix/anaconda3/envs/bvp-venv/lib/python3.6/site-packages/pandas/core/series.py", line 401, in _set_axis self._data.set_axis(axis, labels) File "/home/felix/anaconda3/envs/bvp-venv/lib/python3.6/site-packages/pandas/core/internals/managers.py", line 178, in set_axis f"Length mismatch: Expected axis has {old_len} elements, new " ValueError: Length mismatch: Expected axis has 4 elements, new values have 3 elements
ValueError
def _create_blocks(self, obj: FrameOrSeriesUnion): """ Split data into blocks & return conformed data. """ # Ensure the object we're rolling over is monotonically sorted relative # to the groups # GH 36197 if not obj.empty: groupby_order = np.concatenate( list(self._groupby.grouper.indices.values()) ).astype(np.int64) obj = obj.take(groupby_order) return super()._create_blocks(obj)
def _create_blocks(self, obj: FrameOrSeriesUnion): """ Split data into blocks & return conformed data. """ # Ensure the object we're rolling over is monotonically sorted relative # to the groups groupby_order = np.concatenate(list(self._groupby.grouper.indices.values())).astype( np.int64 ) obj = obj.take(groupby_order) return super()._create_blocks(obj)
https://github.com/pandas-dev/pandas/issues/36197
pd.DataFrame({"s1": []}).groupby("s1").rolling(window=1).sum()' Traceback (most recent call last): File "<string>", line 1, in <module> File "site-packages/pandas/core/window/rolling.py", line 2072, in sum return super().sum(*args, **kwargs) File "site-packages/pandas/core/window/rolling.py", line 1424, in sum window_func, center=self.center, floor=0, name="sum", **kwargs File "site-packages/pandas/core/window/rolling.py", line 2194, in _apply **kwargs, File "site-packages/pandas/core/window/rolling.py", line 528, in _apply blocks, obj = self._create_blocks(self._selected_obj) File "site-packages/pandas/core/window/rolling.py", line 2230, in _create_blocks list(self._groupby.grouper.indices.values()) File "<__array_function__ internals>", line 6, in concatenate ValueError: need at least one array to concatenate
ValueError
def sort_values( self, return_indexer=False, ascending=True, na_position: str_t = "last", key: Optional[Callable] = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Int64Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Int64Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MutiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort(items=idx, ascending=ascending, na_position=na_position, key=key) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index
def sort_values( self, return_indexer=False, ascending=True, key: Optional[Callable] = None ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Int64Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Int64Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index
https://github.com/pandas-dev/pandas/issues/35584
import pandas as pd pd.__version__ '1.1.0' sr = pd.Series(['a',None,'c',None,'e']) sr.sort_values() 0 a 2 c 4 e 1 None 3 None dtype: object idx = pd.Index(['a',None,'c',None,'e']) idx Index(['a', None, 'c', None, 'e'], dtype='object') idx.sort_values() Traceback (most recent call last): File "<input>", line 1, in <module> File "/Users/pgali/PycharmProjects/del/venv1/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 4448, in sort_values _as = idx.argsort() File "/Users/pgali/PycharmProjects/del/venv1/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 4563, in argsort return result.argsort(*args, **kwargs) TypeError: '<' not supported between instances of 'NoneType' and 'str'
TypeError
def replace( self, to_replace=None, value=None, inplace: bool_t = False, limit: Optional[int] = None, regex=False, method="pad", ): """ Replace values given in `to_replace` with `value`. Values of the {klass} are replaced with other values dynamically. This differs from updating with ``.loc`` or ``.iloc``, which require you to specify a location to update with some value. Parameters ---------- to_replace : str, regex, list, dict, Series, int, float, or None How to find the values that will be replaced. * numeric, str or regex: - numeric: numeric values equal to `to_replace` will be replaced with `value` - str: string exactly matching `to_replace` will be replaced with `value` - regex: regexs matching `to_replace` will be replaced with `value` * list of str, regex, or numeric: - First, if `to_replace` and `value` are both lists, they **must** be the same length. - Second, if ``regex=True`` then all of the strings in **both** lists will be interpreted as regexs otherwise they will match directly. This doesn't matter much for `value` since there are only a few possible substitution regexes you can use. - str, regex and numeric rules apply as above. * dict: - Dicts can be used to specify different replacement values for different existing values. For example, ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and 'y' with 'z'. To use a dict in this way the `value` parameter should be `None`. - For a DataFrame a dict can specify that different values should be replaced in different columns. For example, ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a' and the value 'z' in column 'b' and replaces these values with whatever is specified in `value`. The `value` parameter should not be ``None`` in this case. You can treat this as a special case of passing two lists except that you are specifying the column to search in. - For a DataFrame nested dictionaries, e.g., ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column 'a' for the value 'b' and replace it with NaN. The `value` parameter should be ``None`` to use a nested dict in this way. You can nest regular expressions as well. Note that column names (the top-level dictionary keys in a nested dictionary) **cannot** be regular expressions. * None: - This means that the `regex` argument must be a string, compiled regular expression, or list, dict, ndarray or Series of such elements. If `value` is also ``None`` then this **must** be a nested dictionary or Series. See the examples section for examples of each of these. value : scalar, dict, list, str, regex, default None Value to replace any values matching `to_replace` with. For a DataFrame a dict of values can be used to specify which value to use for each column (columns not in the dict will not be filled). Regular expressions, strings and lists or dicts of such objects are also allowed. inplace : bool, default False If True, in place. Note: this will modify any other views on this object (e.g. a column from a DataFrame). Returns the caller if this is True. limit : int or None, default None Maximum size gap to forward or backward fill. regex : bool or same types as `to_replace`, default False Whether to interpret `to_replace` and/or `value` as regular expressions. If this is ``True`` then `to_replace` *must* be a string. Alternatively, this could be a regular expression or a list, dict, or array of regular expressions in which case `to_replace` must be ``None``. method : {{'pad', 'ffill', 'bfill', `None`}} The method to use when for replacement, when `to_replace` is a scalar, list or tuple and `value` is ``None``. .. versionchanged:: 0.23.0 Added to DataFrame. Returns ------- {klass} Object after replacement. Raises ------ AssertionError * If `regex` is not a ``bool`` and `to_replace` is not ``None``. TypeError * If `to_replace` is not a scalar, array-like, ``dict``, or ``None`` * If `to_replace` is a ``dict`` and `value` is not a ``list``, ``dict``, ``ndarray``, or ``Series`` * If `to_replace` is ``None`` and `regex` is not compilable into a regular expression or is a list, dict, ndarray, or Series. * When replacing multiple ``bool`` or ``datetime64`` objects and the arguments to `to_replace` does not match the type of the value being replaced ValueError * If a ``list`` or an ``ndarray`` is passed to `to_replace` and `value` but they are not the same length. See Also -------- {klass}.fillna : Fill NA values. {klass}.where : Replace values based on boolean condition. Series.str.replace : Simple string replacement. Notes ----- * Regex substitution is performed under the hood with ``re.sub``. The rules for substitution for ``re.sub`` are the same. * Regular expressions will only substitute on strings, meaning you cannot provide, for example, a regular expression matching floating point numbers and expect the columns in your frame that have a numeric dtype to be matched. However, if those floating point numbers *are* strings, then you can do this. * This method has *a lot* of options. You are encouraged to experiment and play with this method to gain intuition about how it works. * When dict is used as the `to_replace` value, it is like key(s) in the dict are the to_replace part and value(s) in the dict are the value parameter. Examples -------- **Scalar `to_replace` and `value`** >>> s = pd.Series([0, 1, 2, 3, 4]) >>> s.replace(0, 5) 0 5 1 1 2 2 3 3 4 4 dtype: int64 >>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4], ... 'B': [5, 6, 7, 8, 9], ... 'C': ['a', 'b', 'c', 'd', 'e']}}) >>> df.replace(0, 5) A B C 0 5 5 a 1 1 6 b 2 2 7 c 3 3 8 d 4 4 9 e **List-like `to_replace`** >>> df.replace([0, 1, 2, 3], 4) A B C 0 4 5 a 1 4 6 b 2 4 7 c 3 4 8 d 4 4 9 e >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1]) A B C 0 4 5 a 1 3 6 b 2 2 7 c 3 1 8 d 4 4 9 e >>> s.replace([1, 2], method='bfill') 0 0 1 3 2 3 3 3 4 4 dtype: int64 **dict-like `to_replace`** >>> df.replace({{0: 10, 1: 100}}) A B C 0 10 5 a 1 100 6 b 2 2 7 c 3 3 8 d 4 4 9 e >>> df.replace({{'A': 0, 'B': 5}}, 100) A B C 0 100 100 a 1 1 6 b 2 2 7 c 3 3 8 d 4 4 9 e >>> df.replace({{'A': {{0: 100, 4: 400}}}}) A B C 0 100 5 a 1 1 6 b 2 2 7 c 3 3 8 d 4 400 9 e **Regular expression `to_replace`** >>> df = pd.DataFrame({{'A': ['bat', 'foo', 'bait'], ... 'B': ['abc', 'bar', 'xyz']}}) >>> df.replace(to_replace=r'^ba.$', value='new', regex=True) A B 0 new abc 1 foo new 2 bait xyz >>> df.replace({{'A': r'^ba.$'}}, {{'A': 'new'}}, regex=True) A B 0 new abc 1 foo bar 2 bait xyz >>> df.replace(regex=r'^ba.$', value='new') A B 0 new abc 1 foo new 2 bait xyz >>> df.replace(regex={{r'^ba.$': 'new', 'foo': 'xyz'}}) A B 0 new abc 1 xyz new 2 bait xyz >>> df.replace(regex=[r'^ba.$', 'foo'], value='new') A B 0 new abc 1 new new 2 bait xyz Compare the behavior of ``s.replace({{'a': None}})`` and ``s.replace('a', None)`` to understand the peculiarities of the `to_replace` parameter: >>> s = pd.Series([10, 'a', 'a', 'b', 'a']) When one uses a dict as the `to_replace` value, it is like the value(s) in the dict are equal to the `value` parameter. ``s.replace({{'a': None}})`` is equivalent to ``s.replace(to_replace={{'a': None}}, value=None, method=None)``: >>> s.replace({{'a': None}}) 0 10 1 None 2 None 3 b 4 None dtype: object When ``value=None`` and `to_replace` is a scalar, list or tuple, `replace` uses the method parameter (default 'pad') to do the replacement. So this is why the 'a' values are being replaced by 10 in rows 1 and 2 and 'b' in row 4 in this case. The command ``s.replace('a', None)`` is actually equivalent to ``s.replace(to_replace='a', value=None, method='pad')``: >>> s.replace('a', None) 0 10 1 10 2 10 3 b 4 b dtype: object """ if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is None: # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): if isinstance(self, ABCDataFrame): return self.apply( _single_replace, args=(to_replace, method, inplace, limit) ) return _single_replace(self, to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return return self.copy() if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = {col: (to_rep, value) for col, to_rep in to_replace.items()} return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing'] if is_list_like(value): if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. " f"Expecting {len(to_replace)} got {len(value)} " ) self._consolidate_inplace() new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) else: # [NA, ''] -> 0 new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace, regex=regex ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace(regex, value, inplace=inplace, limit=limit, regex=True) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace, regex=regex ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace")
def replace( self, to_replace=None, value=None, inplace: bool_t = False, limit: Optional[int] = None, regex=False, method="pad", ): """ Replace values given in `to_replace` with `value`. Values of the {klass} are replaced with other values dynamically. This differs from updating with ``.loc`` or ``.iloc``, which require you to specify a location to update with some value. Parameters ---------- to_replace : str, regex, list, dict, Series, int, float, or None How to find the values that will be replaced. * numeric, str or regex: - numeric: numeric values equal to `to_replace` will be replaced with `value` - str: string exactly matching `to_replace` will be replaced with `value` - regex: regexs matching `to_replace` will be replaced with `value` * list of str, regex, or numeric: - First, if `to_replace` and `value` are both lists, they **must** be the same length. - Second, if ``regex=True`` then all of the strings in **both** lists will be interpreted as regexs otherwise they will match directly. This doesn't matter much for `value` since there are only a few possible substitution regexes you can use. - str, regex and numeric rules apply as above. * dict: - Dicts can be used to specify different replacement values for different existing values. For example, ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and 'y' with 'z'. To use a dict in this way the `value` parameter should be `None`. - For a DataFrame a dict can specify that different values should be replaced in different columns. For example, ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a' and the value 'z' in column 'b' and replaces these values with whatever is specified in `value`. The `value` parameter should not be ``None`` in this case. You can treat this as a special case of passing two lists except that you are specifying the column to search in. - For a DataFrame nested dictionaries, e.g., ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column 'a' for the value 'b' and replace it with NaN. The `value` parameter should be ``None`` to use a nested dict in this way. You can nest regular expressions as well. Note that column names (the top-level dictionary keys in a nested dictionary) **cannot** be regular expressions. * None: - This means that the `regex` argument must be a string, compiled regular expression, or list, dict, ndarray or Series of such elements. If `value` is also ``None`` then this **must** be a nested dictionary or Series. See the examples section for examples of each of these. value : scalar, dict, list, str, regex, default None Value to replace any values matching `to_replace` with. For a DataFrame a dict of values can be used to specify which value to use for each column (columns not in the dict will not be filled). Regular expressions, strings and lists or dicts of such objects are also allowed. inplace : bool, default False If True, in place. Note: this will modify any other views on this object (e.g. a column from a DataFrame). Returns the caller if this is True. limit : int or None, default None Maximum size gap to forward or backward fill. regex : bool or same types as `to_replace`, default False Whether to interpret `to_replace` and/or `value` as regular expressions. If this is ``True`` then `to_replace` *must* be a string. Alternatively, this could be a regular expression or a list, dict, or array of regular expressions in which case `to_replace` must be ``None``. method : {{'pad', 'ffill', 'bfill', `None`}} The method to use when for replacement, when `to_replace` is a scalar, list or tuple and `value` is ``None``. .. versionchanged:: 0.23.0 Added to DataFrame. Returns ------- {klass} Object after replacement. Raises ------ AssertionError * If `regex` is not a ``bool`` and `to_replace` is not ``None``. TypeError * If `to_replace` is not a scalar, array-like, ``dict``, or ``None`` * If `to_replace` is a ``dict`` and `value` is not a ``list``, ``dict``, ``ndarray``, or ``Series`` * If `to_replace` is ``None`` and `regex` is not compilable into a regular expression or is a list, dict, ndarray, or Series. * When replacing multiple ``bool`` or ``datetime64`` objects and the arguments to `to_replace` does not match the type of the value being replaced ValueError * If a ``list`` or an ``ndarray`` is passed to `to_replace` and `value` but they are not the same length. See Also -------- {klass}.fillna : Fill NA values. {klass}.where : Replace values based on boolean condition. Series.str.replace : Simple string replacement. Notes ----- * Regex substitution is performed under the hood with ``re.sub``. The rules for substitution for ``re.sub`` are the same. * Regular expressions will only substitute on strings, meaning you cannot provide, for example, a regular expression matching floating point numbers and expect the columns in your frame that have a numeric dtype to be matched. However, if those floating point numbers *are* strings, then you can do this. * This method has *a lot* of options. You are encouraged to experiment and play with this method to gain intuition about how it works. * When dict is used as the `to_replace` value, it is like key(s) in the dict are the to_replace part and value(s) in the dict are the value parameter. Examples -------- **Scalar `to_replace` and `value`** >>> s = pd.Series([0, 1, 2, 3, 4]) >>> s.replace(0, 5) 0 5 1 1 2 2 3 3 4 4 dtype: int64 >>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4], ... 'B': [5, 6, 7, 8, 9], ... 'C': ['a', 'b', 'c', 'd', 'e']}}) >>> df.replace(0, 5) A B C 0 5 5 a 1 1 6 b 2 2 7 c 3 3 8 d 4 4 9 e **List-like `to_replace`** >>> df.replace([0, 1, 2, 3], 4) A B C 0 4 5 a 1 4 6 b 2 4 7 c 3 4 8 d 4 4 9 e >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1]) A B C 0 4 5 a 1 3 6 b 2 2 7 c 3 1 8 d 4 4 9 e >>> s.replace([1, 2], method='bfill') 0 0 1 3 2 3 3 3 4 4 dtype: int64 **dict-like `to_replace`** >>> df.replace({{0: 10, 1: 100}}) A B C 0 10 5 a 1 100 6 b 2 2 7 c 3 3 8 d 4 4 9 e >>> df.replace({{'A': 0, 'B': 5}}, 100) A B C 0 100 100 a 1 1 6 b 2 2 7 c 3 3 8 d 4 4 9 e >>> df.replace({{'A': {{0: 100, 4: 400}}}}) A B C 0 100 5 a 1 1 6 b 2 2 7 c 3 3 8 d 4 400 9 e **Regular expression `to_replace`** >>> df = pd.DataFrame({{'A': ['bat', 'foo', 'bait'], ... 'B': ['abc', 'bar', 'xyz']}}) >>> df.replace(to_replace=r'^ba.$', value='new', regex=True) A B 0 new abc 1 foo new 2 bait xyz >>> df.replace({{'A': r'^ba.$'}}, {{'A': 'new'}}, regex=True) A B 0 new abc 1 foo bar 2 bait xyz >>> df.replace(regex=r'^ba.$', value='new') A B 0 new abc 1 foo new 2 bait xyz >>> df.replace(regex={{r'^ba.$': 'new', 'foo': 'xyz'}}) A B 0 new abc 1 xyz new 2 bait xyz >>> df.replace(regex=[r'^ba.$', 'foo'], value='new') A B 0 new abc 1 new new 2 bait xyz Note that when replacing multiple ``bool`` or ``datetime64`` objects, the data types in the `to_replace` parameter must match the data type of the value being replaced: >>> df = pd.DataFrame({{'A': [True, False, True], ... 'B': [False, True, False]}}) >>> df.replace({{'a string': 'new value', True: False}}) # raises Traceback (most recent call last): ... TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' This raises a ``TypeError`` because one of the ``dict`` keys is not of the correct type for replacement. Compare the behavior of ``s.replace({{'a': None}})`` and ``s.replace('a', None)`` to understand the peculiarities of the `to_replace` parameter: >>> s = pd.Series([10, 'a', 'a', 'b', 'a']) When one uses a dict as the `to_replace` value, it is like the value(s) in the dict are equal to the `value` parameter. ``s.replace({{'a': None}})`` is equivalent to ``s.replace(to_replace={{'a': None}}, value=None, method=None)``: >>> s.replace({{'a': None}}) 0 10 1 None 2 None 3 b 4 None dtype: object When ``value=None`` and `to_replace` is a scalar, list or tuple, `replace` uses the method parameter (default 'pad') to do the replacement. So this is why the 'a' values are being replaced by 10 in rows 1 and 2 and 'b' in row 4 in this case. The command ``s.replace('a', None)`` is actually equivalent to ``s.replace(to_replace='a', value=None, method='pad')``: >>> s.replace('a', None) 0 10 1 10 2 10 3 b 4 b dtype: object """ if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is None: # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): if isinstance(self, ABCDataFrame): return self.apply( _single_replace, args=(to_replace, method, inplace, limit) ) return _single_replace(self, to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return return self.copy() if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = {col: (to_rep, value) for col, to_rep in to_replace.items()} return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing'] if is_list_like(value): if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. " f"Expecting {len(to_replace)} got {len(value)} " ) self._consolidate_inplace() new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) else: # [NA, ''] -> 0 new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace, regex=regex ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace(regex, value, inplace=inplace, limit=limit, regex=True) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace, regex=regex ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace")
https://github.com/pandas-dev/pandas/issues/34789
Traceback (most recent call last): File "/home/developer/.config/JetBrains/PyCharmCE2020.1/scratches/scratch_1.py", line 7, in <module> df = df.replace({"replace_string": "test"}) File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6598, in replace return self.replace( File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6641, in replace new_data = self._mgr.replace_list( File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in replace_list masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in <listcomp> masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 614, in comp return _compare_or_regex_search(values, s, regex) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1946, in _compare_or_regex_search _check_comparison_types(False, a, b) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1925, in _check_comparison_types raise TypeError( TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str' Process finished with exit code 1
TypeError
def _replace_list( self, src_list: List[Any], dest_list: List[Any], inplace: bool = False, regex: bool = False, ) -> List["Block"]: """ See BlockManager._replace_list docstring. """ src_len = len(src_list) - 1 def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray: """ Generate a bool array by perform an equality check, or perform an element-wise regular expression matching """ if isna(s): return ~mask s = com.maybe_box_datetimelike(s) return compare_or_regex_search(self.values, s, regex, mask) # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations mask = ~isna(self.values) masks = [comp(s, mask, regex) for s in src_list] rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(zip(src_list, dest_list)): new_rb: List["Block"] = [] for blk in rb: m = masks[i] convert = i == src_len # only convert once at the end result = blk._replace_coerce( mask=m, to_replace=src, value=dest, inplace=inplace, convert=convert, regex=regex, ) if m.any() or convert: if isinstance(result, list): new_rb.extend(result) else: new_rb.append(result) else: new_rb.append(blk) rb = new_rb return rb
def _replace_list( self, src_list: List[Any], dest_list: List[Any], masks: List[np.ndarray], inplace: bool = False, regex: bool = False, ) -> List["Block"]: """ See BlockManager._replace_list docstring. """ src_len = len(src_list) - 1 rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(zip(src_list, dest_list)): new_rb: List["Block"] = [] for blk in rb: m = masks[i][blk.mgr_locs.indexer] convert = i == src_len # only convert once at the end result = blk._replace_coerce( mask=m, to_replace=src, value=dest, inplace=inplace, convert=convert, regex=regex, ) if m.any() or convert: if isinstance(result, list): new_rb.extend(result) else: new_rb.append(result) else: new_rb.append(blk) rb = new_rb return rb
https://github.com/pandas-dev/pandas/issues/34789
Traceback (most recent call last): File "/home/developer/.config/JetBrains/PyCharmCE2020.1/scratches/scratch_1.py", line 7, in <module> df = df.replace({"replace_string": "test"}) File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6598, in replace return self.replace( File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6641, in replace new_data = self._mgr.replace_list( File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in replace_list masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in <listcomp> masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 614, in comp return _compare_or_regex_search(values, s, regex) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1946, in _compare_or_regex_search _check_comparison_types(False, a, b) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1925, in _check_comparison_types raise TypeError( TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str' Process finished with exit code 1
TypeError
def _extract_bool_array(mask: ArrayLike) -> np.ndarray: """ If we have a SparseArray or BooleanArray, convert it to ndarray[bool]. """ if isinstance(mask, ExtensionArray): # We could have BooleanArray, Sparse[bool], ... # Except for BooleanArray, this is equivalent to just # np.asarray(mask, dtype=bool) mask = mask.to_numpy(dtype=bool, na_value=False) assert isinstance(mask, np.ndarray), type(mask) assert mask.dtype == bool, mask.dtype return mask
def _extract_bool_array(mask: ArrayLike) -> np.ndarray: """ If we have a SparseArray or BooleanArray, convert it to ndarray[bool]. """ if isinstance(mask, ExtensionArray): # We could have BooleanArray, Sparse[bool], ... mask = np.asarray(mask, dtype=np.bool_) assert isinstance(mask, np.ndarray), type(mask) assert mask.dtype == bool, mask.dtype return mask
https://github.com/pandas-dev/pandas/issues/34789
Traceback (most recent call last): File "/home/developer/.config/JetBrains/PyCharmCE2020.1/scratches/scratch_1.py", line 7, in <module> df = df.replace({"replace_string": "test"}) File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6598, in replace return self.replace( File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6641, in replace new_data = self._mgr.replace_list( File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in replace_list masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in <listcomp> masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 614, in comp return _compare_or_regex_search(values, s, regex) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1946, in _compare_or_regex_search _check_comparison_types(False, a, b) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1925, in _check_comparison_types raise TypeError( TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str' Process finished with exit code 1
TypeError
def replace_list( self: T, src_list: List[Any], dest_list: List[Any], inplace: bool = False, regex: bool = False, ) -> T: """do a list replace""" inplace = validate_bool_kwarg(inplace, "inplace") bm = self.apply( "_replace_list", src_list=src_list, dest_list=dest_list, inplace=inplace, regex=regex, ) bm._consolidate_inplace() return bm
def replace_list( self: T, src_list: List[Any], dest_list: List[Any], inplace: bool = False, regex: bool = False, ) -> T: """do a list replace""" inplace = validate_bool_kwarg(inplace, "inplace") # figure out our mask apriori to avoid repeated replacements values = self.as_array() def comp(s: Scalar, mask: np.ndarray, regex: bool = False): """ Generate a bool array by perform an equality check, or perform an element-wise regular expression matching """ if isna(s): return ~mask s = com.maybe_box_datetimelike(s) return _compare_or_regex_search(values, s, regex, mask) # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations mask = ~isna(values) masks = [comp(s, mask, regex) for s in src_list] bm = self.apply( "_replace_list", src_list=src_list, dest_list=dest_list, masks=masks, inplace=inplace, regex=regex, ) bm._consolidate_inplace() return bm
https://github.com/pandas-dev/pandas/issues/34789
Traceback (most recent call last): File "/home/developer/.config/JetBrains/PyCharmCE2020.1/scratches/scratch_1.py", line 7, in <module> df = df.replace({"replace_string": "test"}) File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6598, in replace return self.replace( File "/home/developer/PycharmProjects/pandas/pandas/core/frame.py", line 4277, in replace return super().replace( File "/home/developer/PycharmProjects/pandas/pandas/core/generic.py", line 6641, in replace new_data = self._mgr.replace_list( File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in replace_list masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 616, in <listcomp> masks = [comp(s, regex) for s in src_list] File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 614, in comp return _compare_or_regex_search(values, s, regex) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1946, in _compare_or_regex_search _check_comparison_types(False, a, b) File "/home/developer/PycharmProjects/pandas/pandas/core/internals/managers.py", line 1925, in _check_comparison_types raise TypeError( TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str' Process finished with exit code 1
TypeError
def _maybe_cast_for_get_loc(self, key) -> Timestamp: # needed to localize naive datetimes or dates (GH 35690) key = Timestamp(key) if key.tzinfo is None: key = key.tz_localize(self.tz) else: key = key.tz_convert(self.tz) return key
def _maybe_cast_for_get_loc(self, key) -> Timestamp: # needed to localize naive datetimes key = Timestamp(key) if key.tzinfo is None: key = key.tz_localize(self.tz) else: key = key.tz_convert(self.tz) return key
https://github.com/pandas-dev/pandas/issues/34077
In [1]: import pandas as pd, datetime as dt In [2]: pd.__version__ Out[2]: '1.0.3' In [3]: a = pd.DatetimeIndex(['2010-01-01', '2010-01-03']) In [4]: a.slice_locs(dt.datetime(2010, 1, 1), dt.datetime(2010, 1, 3)) Out[4]: (0, 2) In [5]: a.slice_locs(dt.datetime(2010, 1, 1), dt.datetime(2010, 1, 2)) Out[5]: (0, 1) In [6]: a.slice_locs(dt.date(2010, 1, 1), dt.date(2010, 1, 3)) Out[6]: (0, 2) In [7]: a.slice_locs(dt.date(2010, 1, 1), dt.date(2010, 1, 2)) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() TypeError: an integer is required During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2645 try: -> 2646 return self._engine.get_loc(key) 2647 except KeyError: pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine._date_check_type() KeyError: datetime.date(2010, 1, 2) During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() TypeError: an integer is required During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in get_loc(self, key, method, tolerance) 714 try: --> 715 return Index.get_loc(self, key, method, tolerance) 716 except (KeyError, ValueError, TypeError): ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2647 except KeyError: -> 2648 return self._engine.get_loc(self._maybe_cast_indexer(key)) 2649 indexer = self.get_indexer([key], method=method, tolerance=tolerance) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine._date_check_type() KeyError: datetime.date(2010, 1, 2) During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() KeyError: 1262390400000000000 During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2645 try: -> 2646 return self._engine.get_loc(key) 2647 except KeyError: pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() KeyError: Timestamp('2010-01-02 00:00:00') During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() KeyError: 1262390400000000000 During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in get_loc(self, key, method, tolerance) 727 stamp = stamp.tz_localize(self.tz) --> 728 return Index.get_loc(self, stamp, method, tolerance) 729 except KeyError: ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2647 except KeyError: -> 2648 return self._engine.get_loc(self._maybe_cast_indexer(key)) 2649 indexer = self.get_indexer([key], method=method, tolerance=tolerance) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() KeyError: Timestamp('2010-01-02 00:00:00') During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_slice_bound(self, label, side, kind) 4840 try: -> 4841 slc = self.get_loc(label) 4842 except KeyError as err: ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in get_loc(self, key, method, tolerance) 729 except KeyError: --> 730 raise KeyError(key) 731 except ValueError as e: KeyError: datetime.date(2010, 1, 2) During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-7-f3a1babc1d65> in <module> ----> 1 a.slice_locs(dt.date(2010, 1, 1), dt.date(2010, 1, 2)) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in slice_locs(self, start, end, step, kind) 4929 end_slice = None 4930 if end is not None: -> 4931 end_slice = self.get_slice_bound(end, "right", kind) 4932 if end_slice is None: 4933 end_slice = len(self) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_slice_bound(self, label, side, kind) 4842 except KeyError as err: 4843 try: -> 4844 return self._searchsorted_monotonic(label, side) 4845 except ValueError: 4846 # raise the original KeyError ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in _searchsorted_monotonic(self, label, side) 4793 def _searchsorted_monotonic(self, label, side="left"): 4794 if self.is_monotonic_increasing: -> 4795 return self.searchsorted(label, side=side) 4796 elif self.is_monotonic_decreasing: 4797 # np.searchsorted expects ascending sort order, have to reverse ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in searchsorted(self, value, side, sorter) 858 elif not isinstance(value, DatetimeArray): 859 raise TypeError( --> 860 "searchsorted requires compatible dtype or scalar, " 861 f"not {type(value).__name__}" 862 ) TypeError: searchsorted requires compatible dtype or scalar, not date
TypeError
def _maybe_cast_slice_bound(self, label, side: str, kind): """ If label is a string, cast it to datetime according to resolution. Parameters ---------- label : object side : {'left', 'right'} kind : {'loc', 'getitem'} or None Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ assert kind in ["loc", "getitem", None] if is_float(label) or isinstance(label, time) or is_integer(label): self._invalid_indexer("slice", label) if isinstance(label, str): freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) parsed, reso = parsing.parse_time_string(label, freq) reso = Resolution.from_attrname(reso) lower, upper = self._parsed_string_to_bounds(reso, parsed) # lower, upper form the half-open interval: # [parsed, parsed + 1 freq) # because label may be passed to searchsorted # the bounds need swapped if index is reverse sorted and has a # length > 1 (is_monotonic_decreasing gives True for empty # and length 1 index) if self._is_strictly_monotonic_decreasing and len(self) > 1: return upper if side == "left" else lower return lower if side == "left" else upper return self._maybe_cast_for_get_loc(label)
def _maybe_cast_slice_bound(self, label, side: str, kind): """ If label is a string, cast it to datetime according to resolution. Parameters ---------- label : object side : {'left', 'right'} kind : {'loc', 'getitem'} or None Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ assert kind in ["loc", "getitem", None] if is_float(label) or isinstance(label, time) or is_integer(label): self._invalid_indexer("slice", label) if isinstance(label, str): freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) parsed, reso = parsing.parse_time_string(label, freq) reso = Resolution.from_attrname(reso) lower, upper = self._parsed_string_to_bounds(reso, parsed) # lower, upper form the half-open interval: # [parsed, parsed + 1 freq) # because label may be passed to searchsorted # the bounds need swapped if index is reverse sorted and has a # length > 1 (is_monotonic_decreasing gives True for empty # and length 1 index) if self._is_strictly_monotonic_decreasing and len(self) > 1: return upper if side == "left" else lower return lower if side == "left" else upper else: return label
https://github.com/pandas-dev/pandas/issues/34077
In [1]: import pandas as pd, datetime as dt In [2]: pd.__version__ Out[2]: '1.0.3' In [3]: a = pd.DatetimeIndex(['2010-01-01', '2010-01-03']) In [4]: a.slice_locs(dt.datetime(2010, 1, 1), dt.datetime(2010, 1, 3)) Out[4]: (0, 2) In [5]: a.slice_locs(dt.datetime(2010, 1, 1), dt.datetime(2010, 1, 2)) Out[5]: (0, 1) In [6]: a.slice_locs(dt.date(2010, 1, 1), dt.date(2010, 1, 3)) Out[6]: (0, 2) In [7]: a.slice_locs(dt.date(2010, 1, 1), dt.date(2010, 1, 2)) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() TypeError: an integer is required During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2645 try: -> 2646 return self._engine.get_loc(key) 2647 except KeyError: pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine._date_check_type() KeyError: datetime.date(2010, 1, 2) During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() TypeError: an integer is required During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in get_loc(self, key, method, tolerance) 714 try: --> 715 return Index.get_loc(self, key, method, tolerance) 716 except (KeyError, ValueError, TypeError): ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2647 except KeyError: -> 2648 return self._engine.get_loc(self._maybe_cast_indexer(key)) 2649 indexer = self.get_indexer([key], method=method, tolerance=tolerance) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine._date_check_type() KeyError: datetime.date(2010, 1, 2) During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() KeyError: 1262390400000000000 During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2645 try: -> 2646 return self._engine.get_loc(key) 2647 except KeyError: pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() KeyError: Timestamp('2010-01-02 00:00:00') During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.Int64HashTable.get_item() KeyError: 1262390400000000000 During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in get_loc(self, key, method, tolerance) 727 stamp = stamp.tz_localize(self.tz) --> 728 return Index.get_loc(self, stamp, method, tolerance) 729 except KeyError: ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2647 except KeyError: -> 2648 return self._engine.get_loc(self._maybe_cast_indexer(key)) 2649 indexer = self.get_indexer([key], method=method, tolerance=tolerance) pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.DatetimeEngine.get_loc() KeyError: Timestamp('2010-01-02 00:00:00') During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_slice_bound(self, label, side, kind) 4840 try: -> 4841 slc = self.get_loc(label) 4842 except KeyError as err: ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in get_loc(self, key, method, tolerance) 729 except KeyError: --> 730 raise KeyError(key) 731 except ValueError as e: KeyError: datetime.date(2010, 1, 2) During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-7-f3a1babc1d65> in <module> ----> 1 a.slice_locs(dt.date(2010, 1, 1), dt.date(2010, 1, 2)) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in slice_locs(self, start, end, step, kind) 4929 end_slice = None 4930 if end is not None: -> 4931 end_slice = self.get_slice_bound(end, "right", kind) 4932 if end_slice is None: 4933 end_slice = len(self) ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_slice_bound(self, label, side, kind) 4842 except KeyError as err: 4843 try: -> 4844 return self._searchsorted_monotonic(label, side) 4845 except ValueError: 4846 # raise the original KeyError ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/base.py in _searchsorted_monotonic(self, label, side) 4793 def _searchsorted_monotonic(self, label, side="left"): 4794 if self.is_monotonic_increasing: -> 4795 return self.searchsorted(label, side=side) 4796 elif self.is_monotonic_decreasing: 4797 # np.searchsorted expects ascending sort order, have to reverse ~/.conda/envs/build/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in searchsorted(self, value, side, sorter) 858 elif not isinstance(value, DatetimeArray): 859 raise TypeError( --> 860 "searchsorted requires compatible dtype or scalar, " 861 f"not {type(value).__name__}" 862 ) TypeError: searchsorted requires compatible dtype or scalar, not date
TypeError
def get_grouper( obj: FrameOrSeries, key=None, axis: int = 0, level=None, sort: bool = True, observed: bool = False, mutated: bool = False, validate: bool = True, dropna: bool = True, ) -> Tuple["ops.BaseGrouper", List[Hashable], FrameOrSeries]: """ Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values. If validate, then check for key/level overlaps. """ group_axis = obj._get_axis(axis) # validate that the passed single level is compatible with the passed # axis of the object if level is not None: # TODO: These if-block and else-block are almost same. # MultiIndex instance check is removable, but it seems that there are # some processes only for non-MultiIndex in else-block, # eg. `obj.index.name != level`. We have to consider carefully whether # these are applicable for MultiIndex. Even if these are applicable, # we need to check if it makes no side effect to subsequent processes # on the outside of this condition. # (GH 17621) if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): # Get the level values from group_axis key = group_axis.get_level_values(level) level = None else: # allow level to be a length-one list-like object # (e.g., level=[0]) # GH 13901 if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError("No group keys passed!") else: raise ValueError("multiple levels only valid with MultiIndex") if isinstance(level, str): if obj._get_axis(axis).name != level: raise ValueError( f"level name {level} is not the name " f"of the {obj._get_axis_name(axis)}" ) elif level > 0 or level < -1: raise ValueError("level > 0 or level < -1 only valid with MultiIndex") # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. level = None key = group_axis # a passed-in Grouper, directly convert if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, [], obj else: return grouper, [key.key], obj # already have a BaseGrouper, just return it elif isinstance(key, ops.BaseGrouper): return key, [], obj if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) # what are we after, exactly? any_callable = any(callable(g) or isinstance(g, dict) for g in keys) any_groupers = any(isinstance(g, Grouper) for g in keys) any_arraylike = any( isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys ) # is this an index replacement? if ( not any_callable and not any_arraylike and not any_groupers and match_axis_length and level is None ): if isinstance(obj, DataFrame): all_in_columns_index = all( g in obj.columns or g in obj.index.names for g in keys ) else: assert isinstance(obj, Series) all_in_columns_index = all(g in obj.index.names for g in keys) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings: List[Grouping] = [] exclusions: List[Hashable] = [] # if the actual grouper should be obj[key] def is_in_axis(key) -> bool: if not _is_label_like(key): # items -> .columns for DataFrame, .index for Series items = obj.axes[-1] try: items.get_loc(key) except (KeyError, TypeError, InvalidIndexError): # TypeError shows up here if we pass e.g. Int64Index return False return True # if the grouper is obj[name] def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False try: return gpr is obj[gpr.name] except (KeyError, IndexError): # IndexError reached in e.g. test_skip_group_keys when we pass # lambda here return False for i, (gpr, level) in enumerate(zip(keys, levels)): if is_in_obj(gpr): # df.groupby(df['name']) in_axis, name = True, gpr.name exclusions.append(name) elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr, axis=axis) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.append(name) elif obj._is_level_reference(gpr, axis=axis): in_axis, name, level, gpr = False, None, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.append(gpr.key) in_axis, name = False, None else: in_axis, name = False, None if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: raise ValueError( f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) " "must be same length" ) # create the Grouping # allow us to passing the actual Grouping as the gpr ping = ( Grouping( group_axis, gpr, obj=obj, name=name, level=level, sort=sort, observed=observed, in_axis=in_axis, dropna=dropna, ) if not isinstance(gpr, Grouping) else gpr ) groupings.append(ping) if len(groupings) == 0 and len(obj): raise ValueError("No group keys passed!") elif len(groupings) == 0: groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) # create the internals grouper grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated) return grouper, exclusions, obj
def get_grouper( obj: FrameOrSeries, key=None, axis: int = 0, level=None, sort: bool = True, observed: bool = False, mutated: bool = False, validate: bool = True, dropna: bool = True, ) -> Tuple["ops.BaseGrouper", List[Hashable], FrameOrSeries]: """ Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values. If validate, then check for key/level overlaps. """ group_axis = obj._get_axis(axis) # validate that the passed single level is compatible with the passed # axis of the object if level is not None: # TODO: These if-block and else-block are almost same. # MultiIndex instance check is removable, but it seems that there are # some processes only for non-MultiIndex in else-block, # eg. `obj.index.name != level`. We have to consider carefully whether # these are applicable for MultiIndex. Even if these are applicable, # we need to check if it makes no side effect to subsequent processes # on the outside of this condition. # (GH 17621) if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): # Get the level values from group_axis key = group_axis.get_level_values(level) level = None else: # allow level to be a length-one list-like object # (e.g., level=[0]) # GH 13901 if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError("No group keys passed!") else: raise ValueError("multiple levels only valid with MultiIndex") if isinstance(level, str): if obj._get_axis(axis).name != level: raise ValueError( f"level name {level} is not the name " f"of the {obj._get_axis_name(axis)}" ) elif level > 0 or level < -1: raise ValueError("level > 0 or level < -1 only valid with MultiIndex") # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. level = None key = group_axis # a passed-in Grouper, directly convert if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, [], obj else: return grouper, [key.key], obj # already have a BaseGrouper, just return it elif isinstance(key, ops.BaseGrouper): return key, [], obj if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) # what are we after, exactly? any_callable = any(callable(g) or isinstance(g, dict) for g in keys) any_groupers = any(isinstance(g, Grouper) for g in keys) any_arraylike = any( isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys ) # is this an index replacement? if ( not any_callable and not any_arraylike and not any_groupers and match_axis_length and level is None ): if isinstance(obj, DataFrame): all_in_columns_index = all( g in obj.columns or g in obj.index.names for g in keys ) else: assert isinstance(obj, Series) all_in_columns_index = all(g in obj.index.names for g in keys) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings: List[Grouping] = [] exclusions: List[Hashable] = [] # if the actual grouper should be obj[key] def is_in_axis(key) -> bool: if not _is_label_like(key): # items -> .columns for DataFrame, .index for Series items = obj.axes[-1] try: items.get_loc(key) except (KeyError, TypeError, InvalidIndexError): # TypeError shows up here if we pass e.g. Int64Index return False return True # if the grouper is obj[name] def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False try: return gpr is obj[gpr.name] except (KeyError, IndexError, ValueError): # TODO: ValueError: Given date string not likely a datetime. # should be KeyError? return False for i, (gpr, level) in enumerate(zip(keys, levels)): if is_in_obj(gpr): # df.groupby(df['name']) in_axis, name = True, gpr.name exclusions.append(name) elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr, axis=axis) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.append(name) elif obj._is_level_reference(gpr, axis=axis): in_axis, name, level, gpr = False, None, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.append(gpr.key) in_axis, name = False, None else: in_axis, name = False, None if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: raise ValueError( f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) " "must be same length" ) # create the Grouping # allow us to passing the actual Grouping as the gpr ping = ( Grouping( group_axis, gpr, obj=obj, name=name, level=level, sort=sort, observed=observed, in_axis=in_axis, dropna=dropna, ) if not isinstance(gpr, Grouping) else gpr ) groupings.append(ping) if len(groupings) == 0 and len(obj): raise ValueError("No group keys passed!") elif len(groupings) == 0: groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) # create the internals grouper grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated) return grouper, exclusions, obj
https://github.com/pandas-dev/pandas/issues/34240
In [11]: s = pd.Series([1, 2, 3], index=pd.period_range('2000', periods=3, name='A')) In [12]: s.index.get_loc('A') --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-12-3a472f211c46> in <module> ----> 1 s.index.get_loc('A') ~/sandbox/pandas/pandas/core/indexes/period.py in get_loc(self, key, method, tolerance) 501 502 try: --> 503 asdt, reso = parse_time_string(key, self.freq) 504 except DateParseError as err: 505 # A string with invalid format ~/sandbox/pandas/pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_time_string() ~/sandbox/pandas/pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_datetime_string_with_reso() ValueError: Given date string not likely a datetime.
ValueError
def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False try: return gpr is obj[gpr.name] except (KeyError, IndexError): # IndexError reached in e.g. test_skip_group_keys when we pass # lambda here return False
def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False try: return gpr is obj[gpr.name] except (KeyError, IndexError, ValueError): # TODO: ValueError: Given date string not likely a datetime. # should be KeyError? return False
https://github.com/pandas-dev/pandas/issues/34240
In [11]: s = pd.Series([1, 2, 3], index=pd.period_range('2000', periods=3, name='A')) In [12]: s.index.get_loc('A') --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-12-3a472f211c46> in <module> ----> 1 s.index.get_loc('A') ~/sandbox/pandas/pandas/core/indexes/period.py in get_loc(self, key, method, tolerance) 501 502 try: --> 503 asdt, reso = parse_time_string(key, self.freq) 504 except DateParseError as err: 505 # A string with invalid format ~/sandbox/pandas/pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_time_string() ~/sandbox/pandas/pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_datetime_string_with_reso() ValueError: Given date string not likely a datetime.
ValueError
def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label. Parameters ---------- key : Period, NaT, str, or datetime String or datetime key must be parsable as Period. Returns ------- loc : int or ndarray[int64] Raises ------ KeyError Key is not present in the index. TypeError If key is listlike or otherwise not hashable. """ orig_key = key if not is_scalar(key): raise InvalidIndexError(key) if isinstance(key, str): try: loc = self._get_string_slice(key) return loc except (TypeError, ValueError): pass try: asdt, reso = parse_time_string(key, self.freq) except (ValueError, DateParseError) as err: # A string with invalid format raise KeyError(f"Cannot interpret '{key}' as period") from err reso = Resolution.from_attrname(reso) grp = reso.freq_group freqn = self.dtype.freq_group # _get_string_slice will handle cases where grp < freqn assert grp >= freqn # BusinessDay is a bit strange. It has a *lower* code, but we never parse # a string as "BusinessDay" resolution, just Day. if grp == freqn or ( reso == Resolution.RESO_DAY and self.dtype.freq.name == "B" ): key = Period(asdt, freq=self.freq) loc = self.get_loc(key, method=method, tolerance=tolerance) return loc elif method is None: raise KeyError(key) else: key = asdt elif is_integer(key): # Period constructor will cast to string, which we dont want raise KeyError(key) try: key = Period(key, freq=self.freq) except ValueError as err: # we cannot construct the Period raise KeyError(orig_key) from err try: return Index.get_loc(self, key, method, tolerance) except KeyError as err: raise KeyError(orig_key) from err
def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label. Parameters ---------- key : Period, NaT, str, or datetime String or datetime key must be parsable as Period. Returns ------- loc : int or ndarray[int64] Raises ------ KeyError Key is not present in the index. TypeError If key is listlike or otherwise not hashable. """ orig_key = key if not is_scalar(key): raise InvalidIndexError(key) if isinstance(key, str): try: loc = self._get_string_slice(key) return loc except (TypeError, ValueError): pass try: asdt, reso = parse_time_string(key, self.freq) except DateParseError as err: # A string with invalid format raise KeyError(f"Cannot interpret '{key}' as period") from err reso = Resolution.from_attrname(reso) grp = reso.freq_group freqn = self.dtype.freq_group # _get_string_slice will handle cases where grp < freqn assert grp >= freqn # BusinessDay is a bit strange. It has a *lower* code, but we never parse # a string as "BusinessDay" resolution, just Day. if grp == freqn or ( reso == Resolution.RESO_DAY and self.dtype.freq.name == "B" ): key = Period(asdt, freq=self.freq) loc = self.get_loc(key, method=method, tolerance=tolerance) return loc elif method is None: raise KeyError(key) else: key = asdt elif is_integer(key): # Period constructor will cast to string, which we dont want raise KeyError(key) try: key = Period(key, freq=self.freq) except ValueError as err: # we cannot construct the Period raise KeyError(orig_key) from err try: return Index.get_loc(self, key, method, tolerance) except KeyError as err: raise KeyError(orig_key) from err
https://github.com/pandas-dev/pandas/issues/34240
In [11]: s = pd.Series([1, 2, 3], index=pd.period_range('2000', periods=3, name='A')) In [12]: s.index.get_loc('A') --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-12-3a472f211c46> in <module> ----> 1 s.index.get_loc('A') ~/sandbox/pandas/pandas/core/indexes/period.py in get_loc(self, key, method, tolerance) 501 502 try: --> 503 asdt, reso = parse_time_string(key, self.freq) 504 except DateParseError as err: 505 # A string with invalid format ~/sandbox/pandas/pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_time_string() ~/sandbox/pandas/pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.parse_datetime_string_with_reso() ValueError: Given date string not likely a datetime.
ValueError
def _disallow_scalar_only_bool_ops(self): rhs = self.rhs lhs = self.lhs # GH#24883 unwrap dtype if necessary to ensure we have a type object rhs_rt = rhs.return_type rhs_rt = getattr(rhs_rt, "type", rhs_rt) lhs_rt = lhs.return_type lhs_rt = getattr(lhs_rt, "type", lhs_rt) if ( (lhs.is_scalar or rhs.is_scalar) and self.op in _bool_ops_dict and ( not ( issubclass(rhs_rt, (bool, np.bool_)) and issubclass(lhs_rt, (bool, np.bool_)) ) ) ): raise NotImplementedError("cannot evaluate scalar only bool ops")
def _disallow_scalar_only_bool_ops(self): if ( (self.lhs.is_scalar or self.rhs.is_scalar) and self.op in _bool_ops_dict and ( not ( issubclass(self.rhs.return_type, (bool, np.bool_)) and issubclass(self.lhs.return_type, (bool, np.bool_)) ) ) ): raise NotImplementedError("cannot evaluate scalar only bool ops")
https://github.com/pandas-dev/pandas/issues/24883
# Your code here Scenario 1: df2=pd.DataFrame({'a1':[10,20]}) df2.eval("c=((a1>10) &amp; True )") Out[8]: a1 c 0 10 False 1 20 True Scenario 2: df2=pd.DataFrame({'a1':['Y','N']}) df2.eval("c=((a1 == 'Y') &amp; True )") Traceback (most recent call last): TypeError: issubclass() arg 1 must be a class
TypeError
def interpolate( self, method="linear", axis=0, limit=None, inplace=False, limit_direction="forward", limit_area=None, downcast=None, **kwargs, ): """ Interpolate values according to different methods. """ result = self._upsample("asfreq") return result.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs, )
def interpolate( self, method="linear", axis=0, limit=None, inplace=False, limit_direction="forward", limit_area=None, downcast=None, **kwargs, ): """ Interpolate values according to different methods. """ result = self._upsample(None) return result.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs, )
https://github.com/pandas-dev/pandas/issues/35325
TypeError Traceback (most recent call last) ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/groupby.py in apply(self, func, *args, **kwargs) 735 try: --> 736 result = self._python_apply_general(f) 737 except TypeError: ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/groupby.py in _python_apply_general(self, f) 751 def _python_apply_general(self, f): --> 752 keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis) 753 ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/ops.py in apply(self, f, data, axis) 205 group_axes = group.axes --> 206 res = f(group) 207 if not _is_indexed_like(res, group_axes): ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in func(x) 988 --> 989 return x.apply(f, *args, **kwargs) 990 ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in aggregate(self, func, *args, **kwargs) 284 grouper = None --> 285 result = self._groupby_and_aggregate(how, grouper, *args, **kwargs) 286 ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in _groupby_and_aggregate(self, how, grouper, *args, **kwargs) 360 else: --> 361 result = grouped.aggregate(how, *args, **kwargs) 362 except DataError: ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/generic.py in aggregate(self, func, *args, **kwargs) 923 # nicer error message --> 924 raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") 925 TypeError: Must provide 'func' or tuples of '(column, aggfunc). During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-15-5d08bff36e4d> in <module> ----> 1 df = df \ 2 .set_index("week_starting") \ 3 .groupby("volume") \ 4 .resample("1D") \ 5 .interpolate(method="linear") ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in interpolate(self, method, axis, limit, inplace, limit_direction, limit_area, downcast, **kwargs) 797 Interpolate values according to different methods. 798 """ --> 799 result = self._upsample(None) 800 return result.interpolate( 801 method=method, ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in _apply(self, f, grouper, *args, **kwargs) 989 return x.apply(f, *args, **kwargs) 990 --> 991 result = self._groupby.apply(func) 992 return self._wrap_result(result) 993 ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/groupby.py in apply(self, func, *args, **kwargs) 745 746 with _group_selection_context(self): --> 747 return self._python_apply_general(f) 748 749 return result ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/groupby.py in _python_apply_general(self, f) 750 751 def _python_apply_general(self, f): --> 752 keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis) 753 754 return self._wrap_applied_output( ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/ops.py in apply(self, f, data, axis) 204 # group might be modified 205 group_axes = group.axes --> 206 res = f(group) 207 if not _is_indexed_like(res, group_axes): 208 mutated = True ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in func(x) 987 return getattr(x, f)(**kwargs) 988 --> 989 return x.apply(f, *args, **kwargs) 990 991 result = self._groupby.apply(func) ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in aggregate(self, func, *args, **kwargs) 283 how = func 284 grouper = None --> 285 result = self._groupby_and_aggregate(how, grouper, *args, **kwargs) 286 287 result = self._apply_loffset(result) ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/resample.py in _groupby_and_aggregate(self, how, grouper, *args, **kwargs) 359 result = grouped._aggregate_item_by_item(how, *args, **kwargs) 360 else: --> 361 result = grouped.aggregate(how, *args, **kwargs) 362 except DataError: 363 # we have a non-reducing function; try to evaluate ~/.cache/pypoetry/virtualenvs/lagps-6BuXYM4Y-py3.8/lib/python3.8/site-packages/pandas/core/groupby/generic.py in aggregate(self, func, *args, **kwargs) 922 elif func is None: 923 # nicer error message --> 924 raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") 925 926 func = _maybe_mangle_lambdas(func) TypeError: Must provide 'func' or tuples of '(column, aggfunc).
TypeError
def _transform_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs ): """ Transform with a non-str `func`. """ if maybe_use_numba(engine): numba_func, cache_key = generate_numba_func( func, engine_kwargs, kwargs, "groupby_transform" ) klass = type(self._selected_obj) results = [] for name, group in self: object.__setattr__(group, "name", name) if maybe_use_numba(engine): values, index = split_for_numba(group) res = numba_func(values, index, *args) if cache_key not in NUMBA_FUNC_CACHE: NUMBA_FUNC_CACHE[cache_key] = numba_func else: res = func(group, *args, **kwargs) if isinstance(res, (ABCDataFrame, ABCSeries)): res = res._values results.append(klass(res, index=group.index)) # check for empty "results" to avoid concat ValueError if results: from pandas.core.reshape.concat import concat concatenated = concat(results) result = self._set_result_index_ordered(concatenated) else: result = self.obj._constructor(dtype=np.float64) # we will only try to coerce the result type if # we have a numeric dtype, as these are *always* user-defined funcs # the cython take a different path (and casting) if is_numeric_dtype(result.dtype): common_dtype = find_common_type([self._selected_obj.dtype, result.dtype]) if common_dtype is result.dtype: result = maybe_downcast_numeric(result, self._selected_obj.dtype) result.name = self._selected_obj.name result.index = self._selected_obj.index return result
def _transform_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs ): """ Transform with a non-str `func`. """ if maybe_use_numba(engine): numba_func, cache_key = generate_numba_func( func, engine_kwargs, kwargs, "groupby_transform" ) klass = type(self._selected_obj) results = [] for name, group in self: object.__setattr__(group, "name", name) if maybe_use_numba(engine): values, index = split_for_numba(group) res = numba_func(values, index, *args) if cache_key not in NUMBA_FUNC_CACHE: NUMBA_FUNC_CACHE[cache_key] = numba_func else: res = func(group, *args, **kwargs) if isinstance(res, (ABCDataFrame, ABCSeries)): res = res._values indexer = self._get_index(name) ser = klass(res, indexer) results.append(ser) # check for empty "results" to avoid concat ValueError if results: from pandas.core.reshape.concat import concat result = concat(results).sort_index() else: result = self.obj._constructor(dtype=np.float64) # we will only try to coerce the result type if # we have a numeric dtype, as these are *always* user-defined funcs # the cython take a different path (and casting) dtype = self._selected_obj.dtype if is_numeric_dtype(dtype): result = maybe_downcast_to_dtype(result, dtype) result.name = self._selected_obj.name result.index = self._selected_obj.index return result
https://github.com/pandas-dev/pandas/issues/35014
In [1]: import pandas as pd In [2]: df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}) In [3]: gb = df.groupby("A", dropna=False) In [6]: gb['B'].transform(len) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-6-3bae7d67a46f> in <module> ----> 1 gb['B'].transform(len) ~/sandbox/pandas/pandas/core/groupby/generic.py in transform(self, func, engine, engine_kwargs, *args, **kwargs) 471 if not isinstance(func, str): 472 return self._transform_general( --> 473 func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs 474 ) 475 ~/sandbox/pandas/pandas/core/groupby/generic.py in _transform_general(self, func, engine, engine_kwargs, *args, **kwargs) 537 538 result.name = self._selected_obj.name --> 539 result.index = self._selected_obj.index 540 return result 541 ~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value) 5141 try: 5142 object.__getattribute__(self, name) -> 5143 return object.__setattr__(self, name, value) 5144 except AttributeError: 5145 pass ~/sandbox/pandas/pandas/_libs/properties.pyx in pandas._libs.properties.AxisProperty.__set__() 64 65 def __set__(self, obj, value): ---> 66 obj._set_axis(self.axis, value) ~/sandbox/pandas/pandas/core/series.py in _set_axis(self, axis, labels, fastpath) 422 if not fastpath: 423 # The ensure_index call above ensures we have an Index object --> 424 self._mgr.set_axis(axis, labels) 425 426 # ndarray compatibility ~/sandbox/pandas/pandas/core/internals/managers.py in set_axis(self, axis, new_labels) 213 if new_len != old_len: 214 raise ValueError( --> 215 f"Length mismatch: Expected axis has {old_len} elements, new " 216 f"values have {new_len} elements" 217 ) ValueError: Length mismatch: Expected axis has 3 elements, new values have 4 elements
ValueError
def __floordiv__(self, other): if is_scalar(other): if isinstance(other, (timedelta, np.timedelta64, Tick)): other = Timedelta(other) if other is NaT: # treat this specifically as timedelta-NaT result = np.empty(self.shape, dtype=np.float64) result.fill(np.nan) return result # dispatch to Timedelta implementation result = other.__rfloordiv__(self._data) return result # at this point we should only have numeric scalars; anything # else will raise result = self.asi8 // other result[self._isnan] = iNaT freq = None if self.freq is not None: # Note: freq gets division, not floor-division freq = self.freq / other if freq.nanos == 0 and self.freq.nanos != 0: # e.g. if self.freq is Nano(1) then dividing by 2 # rounds down to zero freq = None return type(self)(result.view("m8[ns]"), freq=freq) if not hasattr(other, "dtype"): # list, tuple other = np.array(other) if len(other) != len(self): raise ValueError("Cannot divide with unequal lengths") elif is_timedelta64_dtype(other.dtype): other = type(self)(other) # numpy timedelta64 does not natively support floordiv, so operate # on the i8 values result = self.asi8 // other.asi8 mask = self._isnan | other._isnan if mask.any(): result = result.astype(np.float64) result[mask] = np.nan return result elif is_object_dtype(other.dtype): result = [self[n] // other[n] for n in range(len(self))] result = np.array(result) if lib.infer_dtype(result, skipna=False) == "timedelta": result, _ = sequence_to_td64ns(result) return type(self)(result) return result elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype): result = self._data // other return type(self)(result) else: dtype = getattr(other, "dtype", type(other).__name__) raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")
def __floordiv__(self, other): if is_scalar(other): if isinstance(other, (timedelta, np.timedelta64, Tick)): other = Timedelta(other) if other is NaT: # treat this specifically as timedelta-NaT result = np.empty(self.shape, dtype=np.float64) result.fill(np.nan) return result # dispatch to Timedelta implementation result = other.__rfloordiv__(self._data) return result # at this point we should only have numeric scalars; anything # else will raise result = self.asi8 // other result[self._isnan] = iNaT freq = None if self.freq is not None: # Note: freq gets division, not floor-division freq = self.freq / other if freq.nanos == 0 and self.freq.nanos != 0: # e.g. if self.freq is Nano(1) then dividing by 2 # rounds down to zero freq = None return type(self)(result.view("m8[ns]"), freq=freq) if not hasattr(other, "dtype"): # list, tuple other = np.array(other) if len(other) != len(self): raise ValueError("Cannot divide with unequal lengths") elif is_timedelta64_dtype(other.dtype): other = type(self)(other) # numpy timedelta64 does not natively support floordiv, so operate # on the i8 values result = self.asi8 // other.asi8 mask = self._isnan | other._isnan if mask.any(): result = result.astype(np.int64) result[mask] = np.nan return result elif is_object_dtype(other.dtype): result = [self[n] // other[n] for n in range(len(self))] result = np.array(result) if lib.infer_dtype(result, skipna=False) == "timedelta": result, _ = sequence_to_td64ns(result) return type(self)(result) return result elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype): result = self._data // other return type(self)(result) else: dtype = getattr(other, "dtype", type(other).__name__) raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")
https://github.com/pandas-dev/pandas/issues/35529
import pandas as pd sr = pd.Series([10, 20, 30], dtype='timedelta64[ns]') sr 0 00:00:00.000000 1 00:00:00.000000 2 00:00:00.000000 dtype: timedelta64[ns] sr = pd.Series([1000, 20, 30], dtype='timedelta64[ns]') sr 0 00:00:00.000001 1 00:00:00.000000 2 00:00:00.000000 dtype: timedelta64[ns] sr = pd.Series([1000, 222330, 30], dtype='timedelta64[ns]') sr 0 00:00:00.000001 1 00:00:00.000222 2 00:00:00.000000 dtype: timedelta64[ns] sr1 = pd.Series([1000, 222330, None], dtype='timedelta64[ns]') sr1 0 00:00:00.000001 1 00:00:00.000222 2 NaT dtype: timedelta64[ns] sr / sr1 0 1.0 1 1.0 2 NaN dtype: float64 sr // sr1 Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/nvme/0/pgali/envs/cudfdev1/lib/python3.7/site-packages/pandas/core/ops/common.py", line 64, in new_method return method(self, other) File "/nvme/0/pgali/envs/cudfdev1/lib/python3.7/site-packages/pandas/core/ops/__init__.py", line 503, in wrapper result = arithmetic_op(lvalues, rvalues, op, str_rep) File "/nvme/0/pgali/envs/cudfdev1/lib/python3.7/site-packages/pandas/core/ops/array_ops.py", line 193, in arithmetic_op res_values = dispatch_to_extension_op(op, lvalues, rvalues) File "/nvme/0/pgali/envs/cudfdev1/lib/python3.7/site-packages/pandas/core/ops/dispatch.py", line 125, in dispatch_to_extension_op res_values = op(left, right) File "/nvme/0/pgali/envs/cudfdev1/lib/python3.7/site-packages/pandas/core/arrays/timedeltas.py", line 637, in __floordiv__ result[mask] = np.nan ValueError: cannot convert float NaN to integer
ValueError
def _generate_range( cls, start, end, periods, freq, tz=None, normalize=False, ambiguous="raise", nonexistent="raise", closed=None, ): periods = dtl.validate_periods(periods) if freq is None and any(x is None for x in [periods, start, end]): raise ValueError("Must provide freq argument if no data is supplied") if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( "Of the four parameters: start, end, periods, " "and freq, exactly three must be specified" ) freq = to_offset(freq) if start is not None: start = Timestamp(start) if end is not None: end = Timestamp(end) if start is NaT or end is NaT: raise ValueError("Neither `start` nor `end` can be NaT") left_closed, right_closed = dtl.validate_endpoints(closed) start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize) tz = _infer_tz_from_endpoints(start, end, tz) if tz is not None: # Localize the start and end arguments start_tz = None if start is None else start.tz end_tz = None if end is None else end.tz start = _maybe_localize_point( start, start_tz, start, freq, tz, ambiguous, nonexistent ) end = _maybe_localize_point(end, end_tz, end, freq, tz, ambiguous, nonexistent) if freq is not None: # We break Day arithmetic (fixed 24 hour) here and opt for # Day to mean calendar day (23/24/25 hour). Therefore, strip # tz info from start and day to avoid DST arithmetic if isinstance(freq, Day): if start is not None: start = start.tz_localize(None) if end is not None: end = end.tz_localize(None) if isinstance(freq, Tick): values = generate_regular_range(start, end, periods, freq) else: xdr = generate_range(start=start, end=end, periods=periods, offset=freq) values = np.array([x.value for x in xdr], dtype=np.int64) _tz = start.tz if start is not None else end.tz index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz)) if tz is not None and index.tz is None: arr = tzconversion.tz_localize_to_utc( index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent ) index = cls(arr) # index is localized datetime64 array -> have to convert # start/end as well to compare if start is not None: start = start.tz_localize(tz, ambiguous, nonexistent).asm8 if end is not None: end = end.tz_localize(tz, ambiguous, nonexistent).asm8 else: # Create a linearly spaced date_range in local time # Nanosecond-granularity timestamps aren't always correctly # representable with doubles, so we limit the range that we # pass to np.linspace as much as possible arr = ( np.linspace(0, end.value - start.value, periods, dtype="int64") + start.value ) dtype = tz_to_dtype(tz) index = cls._simple_new( arr.astype("M8[ns]", copy=False), freq=None, dtype=dtype ) if not left_closed and len(index) and index[0] == start: index = index[1:] if not right_closed and len(index) and index[-1] == end: index = index[:-1] dtype = tz_to_dtype(tz) return cls._simple_new(index.asi8, freq=freq, dtype=dtype)
def _generate_range( cls, start, end, periods, freq, tz=None, normalize=False, ambiguous="raise", nonexistent="raise", closed=None, ): periods = dtl.validate_periods(periods) if freq is None and any(x is None for x in [periods, start, end]): raise ValueError("Must provide freq argument if no data is supplied") if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( "Of the four parameters: start, end, periods, " "and freq, exactly three must be specified" ) freq = to_offset(freq) if start is not None: start = Timestamp(start) if end is not None: end = Timestamp(end) if start is NaT or end is NaT: raise ValueError("Neither `start` nor `end` can be NaT") left_closed, right_closed = dtl.validate_endpoints(closed) start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize) tz = _infer_tz_from_endpoints(start, end, tz) if tz is not None: # Localize the start and end arguments start_tz = None if start is None else start.tz end_tz = None if end is None else end.tz start = _maybe_localize_point( start, start_tz, start, freq, tz, ambiguous, nonexistent ) end = _maybe_localize_point(end, end_tz, end, freq, tz, ambiguous, nonexistent) if freq is not None: # We break Day arithmetic (fixed 24 hour) here and opt for # Day to mean calendar day (23/24/25 hour). Therefore, strip # tz info from start and day to avoid DST arithmetic if isinstance(freq, Day): if start is not None: start = start.tz_localize(None) if end is not None: end = end.tz_localize(None) if isinstance(freq, Tick): values = generate_regular_range(start, end, periods, freq) else: xdr = generate_range(start=start, end=end, periods=periods, offset=freq) values = np.array([x.value for x in xdr], dtype=np.int64) _tz = start.tz if start is not None else end.tz index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz)) if tz is not None and index.tz is None: arr = tzconversion.tz_localize_to_utc( index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent ) index = cls(arr) # index is localized datetime64 array -> have to convert # start/end as well to compare if start is not None: start = start.tz_localize(tz).asm8 if end is not None: end = end.tz_localize(tz).asm8 else: # Create a linearly spaced date_range in local time # Nanosecond-granularity timestamps aren't always correctly # representable with doubles, so we limit the range that we # pass to np.linspace as much as possible arr = ( np.linspace(0, end.value - start.value, periods, dtype="int64") + start.value ) dtype = tz_to_dtype(tz) index = cls._simple_new( arr.astype("M8[ns]", copy=False), freq=None, dtype=dtype ) if not left_closed and len(index) and index[0] == start: index = index[1:] if not right_closed and len(index) and index[-1] == end: index = index[:-1] dtype = tz_to_dtype(tz) return cls._simple_new(index.asi8, freq=freq, dtype=dtype)
https://github.com/pandas-dev/pandas/issues/35297
import pandas as pd timezone = 'America/New_York' start = pd.Timestamp(year=2020, month=11, day=1, hour=1).tz_localize(timezone, ambiguous=False) pd.date_range(start, periods=2, ambiguous=False) --------------------------------------------------------------------------- AmbiguousTimeError Traceback (most recent call last) <ipython-input-5-d142fd70e406> in <module> 2 timezone = 'America/New_York' 3 start = pd.Timestamp(year=2020, month=11, day=1, hour=1).tz_localize(timezone, ambiguous=False) ----> 4 pd.date_range(start, periods=2, ambiguous=False) ~/anaconda3/envs/oids_tst/lib/python3.6/site-packages/pandas/core/indexes/datetimes.py in date_range(start, end, periods, freq, tz, normalize, name, closed, **kwargs) 1178 normalize=normalize, 1179 closed=closed, -> 1180 **kwargs, 1181 ) 1182 return DatetimeIndex._simple_new(dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name) ~/anaconda3/envs/oids_tst/lib/python3.6/site-packages/pandas/core/arrays/datetimes.py in _generate_range(cls, start, end, periods, freq, tz, normalize, ambiguous, nonexistent, closed) 419 # start/end as well to compare 420 if start is not None: --> 421 start = start.tz_localize(tz).asm8 422 if end is not None: 423 end = end.tz_localize(tz).asm8 pandas/_libs/tslibs/timestamps.pyx in pandas._libs.tslibs.timestamps.Timestamp.tz_localize() pandas/_libs/tslibs/tzconversion.pyx in pandas._libs.tslibs.tzconversion.tz_localize_to_utc() AmbiguousTimeError: Cannot infer dst time from 2020-11-01 01:00:00, try using the 'ambiguous' argument
AmbiguousTimeError
def _extract_multi_indexer_columns( self, header, index_names, col_names, passed_names=False ): """ extract and return the names, index_names, col_names header is a list-of-lists returned from the parsers """ if len(header) < 2: return header[0], index_names, col_names, passed_names # the names are the tuples of the header that are not the index cols # 0 is the name of the index, assuming index_col is a list of column # numbers ic = self.index_col if ic is None: ic = [] if not isinstance(ic, (list, tuple, np.ndarray)): ic = [ic] sic = set(ic) # clean the index_names index_names = header.pop(-1) index_names, names, index_col = _clean_index_names( index_names, self.index_col, self.unnamed_cols ) # extract the columns field_count = len(header[0]) def extract(r): return tuple(r[i] for i in range(field_count) if i not in sic) columns = list(zip(*(extract(r) for r in header))) names = ic + columns # If we find unnamed columns all in a single # level, then our header was too long. for n in range(len(columns[0])): if all(ensure_str(col[n]) in self.unnamed_cols for col in columns): header = ",".join(str(x) for x in self.header) raise ParserError( f"Passed header=[{header}] are too many rows " "for this multi_index of columns" ) # Clean the column names (if we have an index_col). if len(ic): col_names = [ r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None for r in header ] else: col_names = [None] * len(header) passed_names = True return names, index_names, col_names, passed_names
def _extract_multi_indexer_columns( self, header, index_names, col_names, passed_names=False ): """ extract and return the names, index_names, col_names header is a list-of-lists returned from the parsers """ if len(header) < 2: return header[0], index_names, col_names, passed_names # the names are the tuples of the header that are not the index cols # 0 is the name of the index, assuming index_col is a list of column # numbers ic = self.index_col if ic is None: ic = [] if not isinstance(ic, (list, tuple, np.ndarray)): ic = [ic] sic = set(ic) # clean the index_names index_names = header.pop(-1) index_names, names, index_col = _clean_index_names( index_names, self.index_col, self.unnamed_cols ) # extract the columns field_count = len(header[0]) def extract(r): return tuple(r[i] for i in range(field_count) if i not in sic) columns = list(zip(*(extract(r) for r in header))) names = ic + columns # If we find unnamed columns all in a single # level, then our header was too long. for n in range(len(columns[0])): if all(ensure_str(col[n]) in self.unnamed_cols for col in columns): header = ",".join(str(x) for x in self.header) raise ParserError( f"Passed header=[{header}] are too many rows " "for this multi_index of columns" ) # Clean the column names (if we have an index_col). if len(ic): col_names = [ r[0] if (len(r[0]) and r[0] not in self.unnamed_cols) else None for r in header ] else: col_names = [None] * len(header) passed_names = True return names, index_names, col_names, passed_names
https://github.com/pandas-dev/pandas/issues/34748
Traceback (most recent call last): File "test.py", line 3, in <module> pd.read_excel('test.xlsx', header=[0, 1], index_col=0, engine='xlrd') File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 334, in read_excel **kwds, File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 888, in parse **kwds, File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 512, in parse **kwds, File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 2201, in TextParser return TextFileReader(*args, **kwds) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 880, in __init__ self._make_engine(self.engine) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 1126, in _make_engine self._engine = klass(self.f, **self.options) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 2298, in __init__ self.columns, self.index_names, self.col_names File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 1508, in _extract_multi_indexer_columns for r in header File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/parsers.py", line 1508, in <listcomp> for r in header TypeError: object of type 'datetime.datetime' has no len()
TypeError
def _get_data_from_filepath(self, filepath_or_buffer): """ The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. """ data = filepath_or_buffer exists = False if isinstance(data, str): try: exists = os.path.exists(filepath_or_buffer) # gh-5874: if the filepath is too long will raise here except (TypeError, ValueError): pass if exists or self.compression is not None: data, _ = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, ) self.should_close = True self.open_stream = data if isinstance(data, BytesIO): data = data.getvalue().decode() return data
def _get_data_from_filepath(self, filepath_or_buffer): """ The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. """ data = filepath_or_buffer exists = False if isinstance(data, str): try: exists = os.path.exists(filepath_or_buffer) # gh-5874: if the filepath is too long will raise here except (TypeError, ValueError): pass if exists or self.compression is not None: data, _ = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, ) self.should_close = True self.open_stream = data return data
https://github.com/pandas-dev/pandas/issues/27135
# output when using fileurl: TypeError Traceback (most recent call last) <ipython-input-82-605ab8a466fd> in <module> ----> 1 for chunk in reader: 2 print(chunk) 3 ~/anaconda3/envs/test_latest_pandas_json/lib/python3.7/site-packages/pandas/io/json/json.py in __next__(self) 579 lines = list(islice(self.data, self.chunksize)) 580 if lines: --> 581 lines_json = self._combine_lines(lines) 582 obj = self._get_object_parser(lines_json) 583 ~/anaconda3/envs/test_latest_pandas_json/lib/python3.7/site-packages/pandas/io/json/json.py in _combine_lines(self, lines) 520 """ 521 lines = filter(None, map(lambda x: x.strip(), lines)) --> 522 return '[' + ','.join(lines) + ']' 523 524 def read(self): TypeError: sequence item 0: expected str instance, bytes found
TypeError
def append_to_multiple( self, d: Dict, value, selector, data_columns=None, axes=None, dropna=False, **kwargs, ): """ Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designed as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted """ if axes is not None: raise TypeError( "axes is currently not accepted as a parameter to append_to_multiple; " "you can create the tables independently instead" ) if not isinstance(d, dict): raise ValueError( "append_to_multiple must have a dictionary specified as the " "way to split the value" ) if selector not in d: raise ValueError( "append_to_multiple requires a selector that is in passed dict" ) # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] # figure out how to split the value remain_key = None remain_values: List = [] for k, v in d.items(): if v is None: if remain_key is not None: raise ValueError( "append_to_multiple can only have one value in d that is None" ) remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) # data_columns if data_columns is None: data_columns = d[selector] # ensure rows are synchronized across the tables if dropna: idxs = (value[cols].dropna(how="all").index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.loc[valid_index] min_itemsize = kwargs.pop("min_itemsize", None) # append for k, v in d.items(): dc = data_columns if k == selector else None # compute the val val = value.reindex(v, axis=axis) filtered = ( {key: value for (key, value) in min_itemsize.items() if key in v} if min_itemsize is not None else None ) self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def append_to_multiple( self, d: Dict, value, selector, data_columns=None, axes=None, dropna=False, **kwargs, ): """ Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designed as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted """ if axes is not None: raise TypeError( "axes is currently not accepted as a parameter to append_to_multiple; " "you can create the tables independently instead" ) if not isinstance(d, dict): raise ValueError( "append_to_multiple must have a dictionary specified as the " "way to split the value" ) if selector not in d: raise ValueError( "append_to_multiple requires a selector that is in passed dict" ) # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] # figure out how to split the value remain_key = None remain_values: List = [] for k, v in d.items(): if v is None: if remain_key is not None: raise ValueError( "append_to_multiple can only have one value in d that is None" ) remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) # data_columns if data_columns is None: data_columns = d[selector] # ensure rows are synchronized across the tables if dropna: idxs = (value[cols].dropna(how="all").index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.loc[valid_index] # append for k, v in d.items(): dc = data_columns if k == selector else None # compute the val val = value.reindex(v, axis=axis) self.append(k, val, data_columns=dc, **kwargs)
https://github.com/pandas-dev/pandas/issues/11238
store.append_to_multiple({ ... 'index': ["IX"], ... 'nums': ["Num", "BigNum", "RandNum"], ... "strs": ["Str", "LongStr"] ... }, d.iloc[[0]], 'index', min_itemsize={"Str": 10, "LongStr": 100}) Traceback (most recent call last): File "<pyshell#52>", line 5, in <module> }, d.iloc[[0]], 'index', min_itemsize={"Str": 10, "LongStr": 100}) File "c:\users\brenbarn\documents\python\extensions\pandas\pandas\io\pytables.py", line 1002, in append_to_multiple self.append(k, val, data_columns=dc, **kwargs) File "c:\users\brenbarn\documents\python\extensions\pandas\pandas\io\pytables.py", line 920, in append **kwargs) File "c:\users\brenbarn\documents\python\extensions\pandas\pandas\io\pytables.py", line 1265, in _write_to_group s.write(obj=value, append=append, complib=complib, **kwargs) File "c:\users\brenbarn\documents\python\extensions\pandas\pandas\io\pytables.py", line 3773, in write **kwargs) File "c:\users\brenbarn\documents\python\extensions\pandas\pandas\io\pytables.py", line 3460, in create_axes self.validate_min_itemsize(min_itemsize) File "c:\users\brenbarn\documents\python\extensions\pandas\pandas\io\pytables.py", line 3101, in validate_min_itemsize "data_column" % k) ValueError: min_itemsize has the key [LongStr] which is not an axis or data_column
ValueError
def setup(self, orient, frame): N = 10**5 ncols = 5 index = date_range("20000101", periods=N, freq="H") timedeltas = timedelta_range(start=1, periods=N, freq="s") datetimes = date_range(start=1, periods=N, freq="s") ints = np.random.randint(100000000, size=N) longints = sys.maxsize * np.random.randint(100000000, size=N) floats = np.random.randn(N) strings = tm.makeStringIndex(N) self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N)) self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index) self.df_td_int_ts = DataFrame( { "td_1": timedeltas, "td_2": timedeltas, "int_1": ints, "int_2": ints, "ts_1": datetimes, "ts_2": datetimes, }, index=index, ) self.df_int_floats = DataFrame( { "int_1": ints, "int_2": ints, "int_3": ints, "float_1": floats, "float_2": floats, "float_3": floats, }, index=index, ) self.df_int_float_str = DataFrame( { "int_1": ints, "int_2": ints, "float_1": floats, "float_2": floats, "str_1": strings, "str_2": strings, }, index=index, ) self.df_longint_float_str = DataFrame( { "longint_1": longints, "longint_2": longints, "float_1": floats, "float_2": floats, "str_1": strings, "str_2": strings, }, index=index, )
def setup(self, orient, frame): N = 10**5 ncols = 5 index = date_range("20000101", periods=N, freq="H") timedeltas = timedelta_range(start=1, periods=N, freq="s") datetimes = date_range(start=1, periods=N, freq="s") ints = np.random.randint(100000000, size=N) floats = np.random.randn(N) strings = tm.makeStringIndex(N) self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N)) self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index) self.df_td_int_ts = DataFrame( { "td_1": timedeltas, "td_2": timedeltas, "int_1": ints, "int_2": ints, "ts_1": datetimes, "ts_2": datetimes, }, index=index, ) self.df_int_floats = DataFrame( { "int_1": ints, "int_2": ints, "int_3": ints, "float_1": floats, "float_2": floats, "float_3": floats, }, index=index, ) self.df_int_float_str = DataFrame( { "int_1": ints, "int_2": ints, "float_1": floats, "float_2": floats, "str_1": strings, "str_2": strings, }, index=index, )
https://github.com/pandas-dev/pandas/issues/34395
dumps(sys.maxsize) '9223372036854775807' dumps(sys.maxsize + 1, default_handler=str) Traceback (most recent call last): File "<stdin>", line 1, in <module> OverflowError: int too big to convert
OverflowError
def setup(self): N = 10**5 ncols = 5 index = date_range("20000101", periods=N, freq="H") timedeltas = timedelta_range(start=1, periods=N, freq="s") datetimes = date_range(start=1, periods=N, freq="s") ints = np.random.randint(100000000, size=N) longints = sys.maxsize * np.random.randint(100000000, size=N) floats = np.random.randn(N) strings = tm.makeStringIndex(N) self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N)) self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index) self.df_td_int_ts = DataFrame( { "td_1": timedeltas, "td_2": timedeltas, "int_1": ints, "int_2": ints, "ts_1": datetimes, "ts_2": datetimes, }, index=index, ) self.df_int_floats = DataFrame( { "int_1": ints, "int_2": ints, "int_3": ints, "float_1": floats, "float_2": floats, "float_3": floats, }, index=index, ) self.df_int_float_str = DataFrame( { "int_1": ints, "int_2": ints, "float_1": floats, "float_2": floats, "str_1": strings, "str_2": strings, }, index=index, ) self.df_longint_float_str = DataFrame( { "longint_1": longints, "longint_2": longints, "float_1": floats, "float_2": floats, "str_1": strings, "str_2": strings, }, index=index, )
def setup(self): N = 10**5 ncols = 5 index = date_range("20000101", periods=N, freq="H") timedeltas = timedelta_range(start=1, periods=N, freq="s") datetimes = date_range(start=1, periods=N, freq="s") ints = np.random.randint(100000000, size=N) floats = np.random.randn(N) strings = tm.makeStringIndex(N) self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N)) self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index) self.df_td_int_ts = DataFrame( { "td_1": timedeltas, "td_2": timedeltas, "int_1": ints, "int_2": ints, "ts_1": datetimes, "ts_2": datetimes, }, index=index, ) self.df_int_floats = DataFrame( { "int_1": ints, "int_2": ints, "int_3": ints, "float_1": floats, "float_2": floats, "float_3": floats, }, index=index, ) self.df_int_float_str = DataFrame( { "int_1": ints, "int_2": ints, "float_1": floats, "float_2": floats, "str_1": strings, "str_2": strings, }, index=index, )
https://github.com/pandas-dev/pandas/issues/34395
dumps(sys.maxsize) '9223372036854775807' dumps(sys.maxsize + 1, default_handler=str) Traceback (most recent call last): File "<stdin>", line 1, in <module> OverflowError: int too big to convert
OverflowError
def to_csv( self, path_or_buf: Optional[FilePathOrBuffer] = None, sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, columns: Optional[Sequence[Label]] = None, header: Union[bool_t, List[str]] = True, index: bool_t = True, index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None, mode: str = "w", encoding: Optional[str] = None, compression: Optional[Union[str, Mapping[str, str]]] = "infer", quoting: Optional[int] = None, quotechar: str = '"', line_terminator: Optional[str] = None, chunksize: Optional[int] = None, date_format: Optional[str] = None, doublequote: bool_t = True, escapechar: Optional[str] = None, decimal: Optional[str] = ".", errors: str = "strict", ) -> Optional[str]: r""" Write object to a comma-separated values (csv) file. .. versionchanged:: 0.24.0 The order of arguments for Series was changed. Parameters ---------- path_or_buf : str or file handle, default None File path or object, if None is provided the result is returned as a string. If a file object is passed it should be opened with `newline=''`, disabling universal newlines. .. versionchanged:: 0.24.0 Was previously named "path" for Series. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, default None Format string for floating point numbers. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. .. versionchanged:: 0.24.0 Previously defaulted to False for Series. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str Python write mode, default 'w'. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. compression : str or dict, default 'infer' If str, represents compression mode. If dict, value at 'method' is the compression mode. Compression mode may be any of the following possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and `path_or_buf` is path-like, then detect compression mode from the following extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given and mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above, other entries passed as additional compression options. .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip' and 'bz2' as well as 'zip'. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. line_terminator : str, optional The newline character or character sequence to use in the output file. Defaults to `os.linesep`, which depends on the OS in which this method is called ('\n' for linux, '\r\n' for Windows, i.e.). .. versionchanged:: 0.24.0 chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.csvs import CSVFormatter formatter = CSVFormatter( df, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal, ) formatter.save() if path_or_buf is None: return formatter.path_or_buf.getvalue() return None
def to_csv( self, path_or_buf: Optional[FilePathOrBuffer] = None, sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, columns: Optional[Sequence[Label]] = None, header: Union[bool_t, List[str]] = True, index: bool_t = True, index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None, mode: str = "w", encoding: Optional[str] = None, compression: Optional[Union[str, Mapping[str, str]]] = "infer", quoting: Optional[int] = None, quotechar: str = '"', line_terminator: Optional[str] = None, chunksize: Optional[int] = None, date_format: Optional[str] = None, doublequote: bool_t = True, escapechar: Optional[str] = None, decimal: Optional[str] = ".", ) -> Optional[str]: r""" Write object to a comma-separated values (csv) file. .. versionchanged:: 0.24.0 The order of arguments for Series was changed. Parameters ---------- path_or_buf : str or file handle, default None File path or object, if None is provided the result is returned as a string. If a file object is passed it should be opened with `newline=''`, disabling universal newlines. .. versionchanged:: 0.24.0 Was previously named "path" for Series. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, default None Format string for floating point numbers. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. .. versionchanged:: 0.24.0 Previously defaulted to False for Series. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str Python write mode, default 'w'. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. compression : str or dict, default 'infer' If str, represents compression mode. If dict, value at 'method' is the compression mode. Compression mode may be any of the following possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and `path_or_buf` is path-like, then detect compression mode from the following extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given and mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above, other entries passed as additional compression options. .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip' and 'bz2' as well as 'zip'. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. line_terminator : str, optional The newline character or character sequence to use in the output file. Defaults to `os.linesep`, which depends on the OS in which this method is called ('\n' for linux, '\r\n' for Windows, i.e.). .. versionchanged:: 0.24.0 chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.csvs import CSVFormatter formatter = CSVFormatter( df, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, compression=compression, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal, ) formatter.save() if path_or_buf is None: return formatter.path_or_buf.getvalue() return None
https://github.com/pandas-dev/pandas/issues/22610
--------------------------------------------------------------------------- UnicodeEncodeError Traceback (most recent call last) <ipython-input-50-769583baba38> in <module>() 4 srs = pd.Series() 5 srs.loc[ 0 ] = s ----> 6 srs.to_csv('testcase.csv') /opt/conda/lib/python3.6/site-packages/pandas/core/series.py in to_csv(self, path, index, sep, na_rep, float_format, header, index_label, mode, encoding, compression, date_format, decimal) 3779 index_label=index_label, mode=mode, 3780 encoding=encoding, compression=compression, -> 3781 date_format=date_format, decimal=decimal) 3782 if path is None: 3783 return result /opt/conda/lib/python3.6/site-packages/pandas/core/frame.py in to_csv(self, path_or_buf, sep, na_rep, float_format, columns, header, index, index_label, mode, encoding, compression, quoting, quotechar, line_terminator, chunksize, tupleize_cols, date_format, doublequote, escapechar, decimal) 1743 doublequote=doublequote, 1744 escapechar=escapechar, decimal=decimal) -> 1745 formatter.save() 1746 1747 if path_or_buf is None: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in save(self) 169 self.writer = UnicodeWriter(f, **writer_kwargs) 170 --> 171 self._save() 172 173 finally: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save(self) 284 break 285 --> 286 self._save_chunk(start_i, end_i) 287 288 def _save_chunk(self, start_i, end_i): /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save_chunk(self, start_i, end_i) 311 312 libwriters.write_csv_rows(self.data, ix, self.nlevels, --> 313 self.cols, self.writer) pandas/_libs/writers.pyx in pandas._libs.writers.write_csv_rows() UnicodeEncodeError: 'utf-8' codec can't encode character '\ud800' in position 2: surrogates not allowed
UnicodeEncodeError
def get_handle( path_or_buf, mode: str, encoding=None, compression: Optional[Union[str, Mapping[str, Any]]] = None, memory_map: bool = False, is_text: bool = True, errors=None, ): """ Get file handle for given path/buffer and mode. Parameters ---------- path_or_buf : str or file handle File path or object. mode : str Mode to open path_or_buf with. encoding : str or None Encoding to use. compression : str or dict, default None If string, specifies compression mode. If dict, value at key 'method' specifies compression mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). If dict and compression mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above, other entries passed as additional compression options. .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other keys as compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is now supported for compression modes 'gzip' and 'bz2' as well as 'zip'. memory_map : boolean, default False See parsers._parser_params for more information. is_text : boolean, default True whether file/buffer is in text format (csv, json, etc.), or in binary mode (pickle, etc.). errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 Returns ------- f : file-like A file-like object. handles : list of file-like objects A list of file-like object that were opened in this function. """ need_text_wrapping: Tuple[Type["IOBase"], ...] try: from s3fs import S3File need_text_wrapping = (BufferedIOBase, RawIOBase, S3File) except ImportError: need_text_wrapping = (BufferedIOBase, RawIOBase) handles: List[IO] = list() f = path_or_buf # Convert pathlib.Path/py.path.local or string path_or_buf = stringify_path(path_or_buf) is_path = isinstance(path_or_buf, str) compression, compression_args = get_compression_method(compression) if is_path: compression = infer_compression(path_or_buf, compression) if compression: # GH33398 the type ignores here seem related to mypy issue #5382; # it may be possible to remove them once that is resolved. # GZ Compression if compression == "gzip": if is_path: f = gzip.open( path_or_buf, mode, **compression_args, # type: ignore ) else: f = gzip.GzipFile( fileobj=path_or_buf, **compression_args, # type: ignore ) # BZ Compression elif compression == "bz2": if is_path: f = bz2.BZ2File( path_or_buf, mode, **compression_args, # type: ignore ) else: f = bz2.BZ2File(path_or_buf, **compression_args) # type: ignore # ZIP Compression elif compression == "zip": zf = _BytesZipFile(path_or_buf, mode, **compression_args) # Ensure the container is closed as well. handles.append(zf) if zf.mode == "w": f = zf elif zf.mode == "r": zip_names = zf.namelist() if len(zip_names) == 1: f = zf.open(zip_names.pop()) elif len(zip_names) == 0: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. " f"Only one file per ZIP: {zip_names}" ) # XZ Compression elif compression == "xz": f = _get_lzma_file(lzma)(path_or_buf, mode) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) handles.append(f) elif is_path: if encoding: # Encoding f = open(path_or_buf, mode, encoding=encoding, errors=errors, newline="") elif is_text: # No explicit encoding f = open(path_or_buf, mode, errors="replace", newline="") else: # Binary mode f = open(path_or_buf, mode) handles.append(f) # Convert BytesIO or file objects passed with an encoding if is_text and (compression or isinstance(f, need_text_wrapping)): from io import TextIOWrapper g = TextIOWrapper(f, encoding=encoding, errors=errors, newline="") if not isinstance(f, (BufferedIOBase, RawIOBase)): handles.append(g) f = g if memory_map and hasattr(f, "fileno"): try: wrapped = _MMapWrapper(f) f.close() f = wrapped except Exception: # we catch any errors that may have occurred # because that is consistent with the lower-level # functionality of the C engine (pd.read_csv), so # leave the file handler as is then pass return f, handles
def get_handle( path_or_buf, mode: str, encoding=None, compression: Optional[Union[str, Mapping[str, Any]]] = None, memory_map: bool = False, is_text: bool = True, ): """ Get file handle for given path/buffer and mode. Parameters ---------- path_or_buf : str or file handle File path or object. mode : str Mode to open path_or_buf with. encoding : str or None Encoding to use. compression : str or dict, default None If string, specifies compression mode. If dict, value at key 'method' specifies compression mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). If dict and compression mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above, other entries passed as additional compression options. .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other keys as compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is now supported for compression modes 'gzip' and 'bz2' as well as 'zip'. memory_map : boolean, default False See parsers._parser_params for more information. is_text : boolean, default True whether file/buffer is in text format (csv, json, etc.), or in binary mode (pickle, etc.). Returns ------- f : file-like A file-like object. handles : list of file-like objects A list of file-like object that were opened in this function. """ need_text_wrapping: Tuple[Type["IOBase"], ...] try: from s3fs import S3File need_text_wrapping = (BufferedIOBase, RawIOBase, S3File) except ImportError: need_text_wrapping = (BufferedIOBase, RawIOBase) handles: List[IO] = list() f = path_or_buf # Convert pathlib.Path/py.path.local or string path_or_buf = stringify_path(path_or_buf) is_path = isinstance(path_or_buf, str) compression, compression_args = get_compression_method(compression) if is_path: compression = infer_compression(path_or_buf, compression) if compression: # GH33398 the type ignores here seem related to mypy issue #5382; # it may be possible to remove them once that is resolved. # GZ Compression if compression == "gzip": if is_path: f = gzip.open( path_or_buf, mode, **compression_args, # type: ignore ) else: f = gzip.GzipFile( fileobj=path_or_buf, **compression_args, # type: ignore ) # BZ Compression elif compression == "bz2": if is_path: f = bz2.BZ2File( path_or_buf, mode, **compression_args, # type: ignore ) else: f = bz2.BZ2File(path_or_buf, **compression_args) # type: ignore # ZIP Compression elif compression == "zip": zf = _BytesZipFile(path_or_buf, mode, **compression_args) # Ensure the container is closed as well. handles.append(zf) if zf.mode == "w": f = zf elif zf.mode == "r": zip_names = zf.namelist() if len(zip_names) == 1: f = zf.open(zip_names.pop()) elif len(zip_names) == 0: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. " f"Only one file per ZIP: {zip_names}" ) # XZ Compression elif compression == "xz": f = _get_lzma_file(lzma)(path_or_buf, mode) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) handles.append(f) elif is_path: if encoding: # Encoding f = open(path_or_buf, mode, encoding=encoding, newline="") elif is_text: # No explicit encoding f = open(path_or_buf, mode, errors="replace", newline="") else: # Binary mode f = open(path_or_buf, mode) handles.append(f) # Convert BytesIO or file objects passed with an encoding if is_text and (compression or isinstance(f, need_text_wrapping)): from io import TextIOWrapper g = TextIOWrapper(f, encoding=encoding, newline="") if not isinstance(f, (BufferedIOBase, RawIOBase)): handles.append(g) f = g if memory_map and hasattr(f, "fileno"): try: wrapped = _MMapWrapper(f) f.close() f = wrapped except Exception: # we catch any errors that may have occurred # because that is consistent with the lower-level # functionality of the C engine (pd.read_csv), so # leave the file handler as is then pass return f, handles
https://github.com/pandas-dev/pandas/issues/22610
--------------------------------------------------------------------------- UnicodeEncodeError Traceback (most recent call last) <ipython-input-50-769583baba38> in <module>() 4 srs = pd.Series() 5 srs.loc[ 0 ] = s ----> 6 srs.to_csv('testcase.csv') /opt/conda/lib/python3.6/site-packages/pandas/core/series.py in to_csv(self, path, index, sep, na_rep, float_format, header, index_label, mode, encoding, compression, date_format, decimal) 3779 index_label=index_label, mode=mode, 3780 encoding=encoding, compression=compression, -> 3781 date_format=date_format, decimal=decimal) 3782 if path is None: 3783 return result /opt/conda/lib/python3.6/site-packages/pandas/core/frame.py in to_csv(self, path_or_buf, sep, na_rep, float_format, columns, header, index, index_label, mode, encoding, compression, quoting, quotechar, line_terminator, chunksize, tupleize_cols, date_format, doublequote, escapechar, decimal) 1743 doublequote=doublequote, 1744 escapechar=escapechar, decimal=decimal) -> 1745 formatter.save() 1746 1747 if path_or_buf is None: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in save(self) 169 self.writer = UnicodeWriter(f, **writer_kwargs) 170 --> 171 self._save() 172 173 finally: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save(self) 284 break 285 --> 286 self._save_chunk(start_i, end_i) 287 288 def _save_chunk(self, start_i, end_i): /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save_chunk(self, start_i, end_i) 311 312 libwriters.write_csv_rows(self.data, ix, self.nlevels, --> 313 self.cols, self.writer) pandas/_libs/writers.pyx in pandas._libs.writers.write_csv_rows() UnicodeEncodeError: 'utf-8' codec can't encode character '\ud800' in position 2: surrogates not allowed
UnicodeEncodeError
def __init__( self, obj, path_or_buf: Optional[FilePathOrBuffer[str]] = None, sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, cols=None, header: Union[bool, Sequence[Hashable]] = True, index: bool = True, index_label: Optional[Union[bool, Hashable, Sequence[Hashable]]] = None, mode: str = "w", encoding: Optional[str] = None, errors: str = "strict", compression: Union[str, Mapping[str, str], None] = "infer", quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, quotechar='"', date_format: Optional[str] = None, doublequote: bool = True, escapechar: Optional[str] = None, decimal=".", ): self.obj = obj if path_or_buf is None: path_or_buf = StringIO() # Extract compression mode as given, if dict compression, self.compression_args = get_compression_method(compression) self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode ) self.sep = sep self.na_rep = na_rep self.float_format = float_format self.decimal = decimal self.header = header self.index = index self.index_label = index_label self.mode = mode if encoding is None: encoding = "utf-8" self.encoding = encoding self.errors = errors self.compression = infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL self.quoting = quoting if quoting == csvlib.QUOTE_NONE: # prevents crash in _csv quotechar = None self.quotechar = quotechar self.doublequote = doublequote self.escapechar = escapechar self.line_terminator = line_terminator or os.linesep self.date_format = date_format self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex) # validate mi options if self.has_mi_columns: if cols is not None: raise TypeError("cannot specify cols with a MultiIndex on the columns") if cols is not None: if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) self.obj = self.obj.loc[:, cols] # update columns to include possible multiplicity of dupes # and make sure sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) # save it self.cols = cols # preallocate data 2d list ncols = self.obj.shape[-1] self.data = [None] * ncols if chunksize is None: chunksize = (100000 // (len(self.cols) or 1)) or 1 self.chunksize = int(chunksize) self.data_index = obj.index if ( isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and date_format is not None ): from pandas import Index self.data_index = Index( [x.strftime(date_format) if notna(x) else "" for x in self.data_index] ) self.nlevels = getattr(self.data_index, "nlevels", 1) if not index: self.nlevels = 0
def __init__( self, obj, path_or_buf: Optional[FilePathOrBuffer[str]] = None, sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, cols=None, header: Union[bool, Sequence[Hashable]] = True, index: bool = True, index_label: Optional[Union[bool, Hashable, Sequence[Hashable]]] = None, mode: str = "w", encoding: Optional[str] = None, compression: Union[str, Mapping[str, str], None] = "infer", quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, quotechar='"', date_format: Optional[str] = None, doublequote: bool = True, escapechar: Optional[str] = None, decimal=".", ): self.obj = obj if path_or_buf is None: path_or_buf = StringIO() # Extract compression mode as given, if dict compression, self.compression_args = get_compression_method(compression) self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode ) self.sep = sep self.na_rep = na_rep self.float_format = float_format self.decimal = decimal self.header = header self.index = index self.index_label = index_label self.mode = mode if encoding is None: encoding = "utf-8" self.encoding = encoding self.compression = infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL self.quoting = quoting if quoting == csvlib.QUOTE_NONE: # prevents crash in _csv quotechar = None self.quotechar = quotechar self.doublequote = doublequote self.escapechar = escapechar self.line_terminator = line_terminator or os.linesep self.date_format = date_format self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex) # validate mi options if self.has_mi_columns: if cols is not None: raise TypeError("cannot specify cols with a MultiIndex on the columns") if cols is not None: if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) self.obj = self.obj.loc[:, cols] # update columns to include possible multiplicity of dupes # and make sure sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) # save it self.cols = cols # preallocate data 2d list ncols = self.obj.shape[-1] self.data = [None] * ncols if chunksize is None: chunksize = (100000 // (len(self.cols) or 1)) or 1 self.chunksize = int(chunksize) self.data_index = obj.index if ( isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and date_format is not None ): from pandas import Index self.data_index = Index( [x.strftime(date_format) if notna(x) else "" for x in self.data_index] ) self.nlevels = getattr(self.data_index, "nlevels", 1) if not index: self.nlevels = 0
https://github.com/pandas-dev/pandas/issues/22610
--------------------------------------------------------------------------- UnicodeEncodeError Traceback (most recent call last) <ipython-input-50-769583baba38> in <module>() 4 srs = pd.Series() 5 srs.loc[ 0 ] = s ----> 6 srs.to_csv('testcase.csv') /opt/conda/lib/python3.6/site-packages/pandas/core/series.py in to_csv(self, path, index, sep, na_rep, float_format, header, index_label, mode, encoding, compression, date_format, decimal) 3779 index_label=index_label, mode=mode, 3780 encoding=encoding, compression=compression, -> 3781 date_format=date_format, decimal=decimal) 3782 if path is None: 3783 return result /opt/conda/lib/python3.6/site-packages/pandas/core/frame.py in to_csv(self, path_or_buf, sep, na_rep, float_format, columns, header, index, index_label, mode, encoding, compression, quoting, quotechar, line_terminator, chunksize, tupleize_cols, date_format, doublequote, escapechar, decimal) 1743 doublequote=doublequote, 1744 escapechar=escapechar, decimal=decimal) -> 1745 formatter.save() 1746 1747 if path_or_buf is None: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in save(self) 169 self.writer = UnicodeWriter(f, **writer_kwargs) 170 --> 171 self._save() 172 173 finally: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save(self) 284 break 285 --> 286 self._save_chunk(start_i, end_i) 287 288 def _save_chunk(self, start_i, end_i): /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save_chunk(self, start_i, end_i) 311 312 libwriters.write_csv_rows(self.data, ix, self.nlevels, --> 313 self.cols, self.writer) pandas/_libs/writers.pyx in pandas._libs.writers.write_csv_rows() UnicodeEncodeError: 'utf-8' codec can't encode character '\ud800' in position 2: surrogates not allowed
UnicodeEncodeError
def save(self) -> None: """ Create the writer & save. """ # GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, "write"): warnings.warn( "compression has no effect when passing file-like object as input.", RuntimeWarning, stacklevel=2, ) # when zip compression is called. is_zip = isinstance(self.path_or_buf, ZipFile) or ( not hasattr(self.path_or_buf, "write") and self.compression == "zip" ) if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, "write"): f = self.path_or_buf close = False else: f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, errors=self.errors, compression=dict(self.compression_args, method=self.compression), ) close = True try: # Note: self.encoding is irrelevant here self.writer = csvlib.writer( f, lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar, ) self._save() finally: if is_zip: # GH17778 handles zip compression separately. buf = f.getvalue() if hasattr(self.path_or_buf, "write"): self.path_or_buf.write(buf) else: compression = dict(self.compression_args, method=self.compression) f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, errors=self.errors, compression=compression, ) f.write(buf) close = True if close: f.close() for _fh in handles: _fh.close() elif self.should_close: f.close()
def save(self) -> None: """ Create the writer & save. """ # GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, "write"): warnings.warn( "compression has no effect when passing file-like object as input.", RuntimeWarning, stacklevel=2, ) # when zip compression is called. is_zip = isinstance(self.path_or_buf, ZipFile) or ( not hasattr(self.path_or_buf, "write") and self.compression == "zip" ) if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, "write"): f = self.path_or_buf close = False else: f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, compression=dict(self.compression_args, method=self.compression), ) close = True try: # Note: self.encoding is irrelevant here self.writer = csvlib.writer( f, lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar, ) self._save() finally: if is_zip: # GH17778 handles zip compression separately. buf = f.getvalue() if hasattr(self.path_or_buf, "write"): self.path_or_buf.write(buf) else: compression = dict(self.compression_args, method=self.compression) f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, compression=compression, ) f.write(buf) close = True if close: f.close() for _fh in handles: _fh.close() elif self.should_close: f.close()
https://github.com/pandas-dev/pandas/issues/22610
--------------------------------------------------------------------------- UnicodeEncodeError Traceback (most recent call last) <ipython-input-50-769583baba38> in <module>() 4 srs = pd.Series() 5 srs.loc[ 0 ] = s ----> 6 srs.to_csv('testcase.csv') /opt/conda/lib/python3.6/site-packages/pandas/core/series.py in to_csv(self, path, index, sep, na_rep, float_format, header, index_label, mode, encoding, compression, date_format, decimal) 3779 index_label=index_label, mode=mode, 3780 encoding=encoding, compression=compression, -> 3781 date_format=date_format, decimal=decimal) 3782 if path is None: 3783 return result /opt/conda/lib/python3.6/site-packages/pandas/core/frame.py in to_csv(self, path_or_buf, sep, na_rep, float_format, columns, header, index, index_label, mode, encoding, compression, quoting, quotechar, line_terminator, chunksize, tupleize_cols, date_format, doublequote, escapechar, decimal) 1743 doublequote=doublequote, 1744 escapechar=escapechar, decimal=decimal) -> 1745 formatter.save() 1746 1747 if path_or_buf is None: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in save(self) 169 self.writer = UnicodeWriter(f, **writer_kwargs) 170 --> 171 self._save() 172 173 finally: /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save(self) 284 break 285 --> 286 self._save_chunk(start_i, end_i) 287 288 def _save_chunk(self, start_i, end_i): /opt/conda/lib/python3.6/site-packages/pandas/io/formats/csvs.py in _save_chunk(self, start_i, end_i) 311 312 libwriters.write_csv_rows(self.data, ix, self.nlevels, --> 313 self.cols, self.writer) pandas/_libs/writers.pyx in pandas._libs.writers.write_csv_rows() UnicodeEncodeError: 'utf-8' codec can't encode character '\ud800' in position 2: surrogates not allowed
UnicodeEncodeError
def _where( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=False, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. """ inplace = validate_bool_kwarg(inplace, "inplace") # align the cond to same shape as myself cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join="right", broadcast_axis=1) else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict()) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for dt in cond.dtypes: if not is_bool_dtype(dt): raise ValueError(msg.format(dtype=dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond # try to align with other try_quick = True if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: _, other = self.align( other, join="left", axis=axis, level=level, fill_value=np.nan ) # if we are NOT aligned, raise as we cannot where index if axis is None and not all( other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes) ): raise InvalidIndexError # slice me out of the other else: raise NotImplementedError("cannot align with a higher dimensional NDFrame") if isinstance(other, np.ndarray): if other.shape != self.shape: if self.ndim == 1: icond = cond._values # GH 2745 / GH 4192 # treat like a scalar if len(other) == 1: other = other[0] # GH 3235 # match True cond to other elif len(cond[icond]) == len(other): # try to not change dtype at first (if try_quick) if try_quick: new_other = np.asarray(self) new_other = new_other.copy() new_other[icond] = other other = new_other else: raise ValueError("Length of replacements must equal series length") else: raise ValueError("other must be the same shape as self when an ndarray") # we are the same shape, so create an actual object for alignment else: other = self._constructor(other, **self._construct_axes_dict()) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if align and isinstance(other, NDFrame): other = other.reindex(self._info_axis, axis=self._info_axis_number) if isinstance(cond, NDFrame): cond = cond.reindex(self._info_axis, axis=self._info_axis_number) block_axis = self._get_block_manager_axis(axis) if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask( mask=cond, new=other, align=align, axis=block_axis, ) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, errors=errors, try_cast=try_cast, axis=block_axis, ) result = self._constructor(new_data) return result.__finalize__(self)
def _where( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=False, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. """ inplace = validate_bool_kwarg(inplace, "inplace") # align the cond to same shape as myself cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join="right", broadcast_axis=1) else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict()) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) elif not cond.empty: for dt in cond.dtypes: if not is_bool_dtype(dt): raise ValueError(msg.format(dtype=dt)) else: # GH#21947 we have an empty DataFrame, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond # try to align with other try_quick = True if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: _, other = self.align( other, join="left", axis=axis, level=level, fill_value=np.nan ) # if we are NOT aligned, raise as we cannot where index if axis is None and not all( other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes) ): raise InvalidIndexError # slice me out of the other else: raise NotImplementedError("cannot align with a higher dimensional NDFrame") if isinstance(other, np.ndarray): if other.shape != self.shape: if self.ndim == 1: icond = cond._values # GH 2745 / GH 4192 # treat like a scalar if len(other) == 1: other = other[0] # GH 3235 # match True cond to other elif len(cond[icond]) == len(other): # try to not change dtype at first (if try_quick) if try_quick: new_other = np.asarray(self) new_other = new_other.copy() new_other[icond] = other other = new_other else: raise ValueError("Length of replacements must equal series length") else: raise ValueError("other must be the same shape as self when an ndarray") # we are the same shape, so create an actual object for alignment else: other = self._constructor(other, **self._construct_axes_dict()) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if align and isinstance(other, NDFrame): other = other.reindex(self._info_axis, axis=self._info_axis_number) if isinstance(cond, NDFrame): cond = cond.reindex(self._info_axis, axis=self._info_axis_number) block_axis = self._get_block_manager_axis(axis) if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask( mask=cond, new=other, align=align, axis=block_axis, ) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, errors=errors, try_cast=try_cast, axis=block_axis, ) result = self._constructor(new_data) return result.__finalize__(self)
https://github.com/pandas-dev/pandas/issues/34592
pd.Series([], dtype=float).where([]) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/nix/store/vqd2pmsja84h36wz6c878bgbpz46cy2h-python3.7-pandas-1.0.3/lib/python3.7/site-packages/pandas/core/generic.py", line 8919, in where cond, other, inplace, axis, level, errors=errors, try_cast=try_cast File "/nix/store/vqd2pmsja84h36wz6c878bgbpz46cy2h-python3.7-pandas-1.0.3/lib/python3.7/site-packages/pandas/core/generic.py", line 8673, in _where raise ValueError(msg.format(dtype=cond.dtype)) ValueError: Boolean array expected for the condition, not float64
ValueError
def _is_potential_multi_index( columns, index_col: Optional[Union[bool, Sequence[int]]] = None ): """ Check whether or not the `columns` parameter could be converted into a MultiIndex. Parameters ---------- columns : array-like Object which may or may not be convertible into a MultiIndex index_col : None, bool or list, optional Column or columns to use as the (possibly hierarchical) index Returns ------- boolean : Whether or not columns could become a MultiIndex """ if index_col is None or isinstance(index_col, bool): index_col = [] return ( len(columns) and not isinstance(columns, MultiIndex) and all(isinstance(c, tuple) for c in columns if c not in list(index_col)) )
def _is_potential_multi_index(columns): """ Check whether or not the `columns` parameter could be converted into a MultiIndex. Parameters ---------- columns : array-like Object which may or may not be convertible into a MultiIndex Returns ------- boolean : Whether or not columns could become a MultiIndex """ return ( len(columns) and not isinstance(columns, MultiIndex) and all(isinstance(c, tuple) for c in columns) )
https://github.com/pandas-dev/pandas/issues/33476
$ ~/miniconda3/bin/python read.py Traceback (most recent call last): File "read.py", line 4, in <module> df = pd.read_excel('df_empty.xlsx', engine='openpyxl', sheet_name='Sheet1', index_col=0, header=[0, 1]) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 334, in read_excel **kwds, File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 888, in parse **kwds, File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 521, in parse ].columns.set_names(header_names) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 1325, in set_names idx._set_names(names, level=level) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 1239, in _set_names raise ValueError(f"Length of new names must be 1, got {len(values)}") ValueError: Length of new names must be 1, got 2
ValueError
def _maybe_dedup_names(self, names): # see gh-7160 and gh-9424: this helps to provide # immediate alleviation of the duplicate names # issue and appears to be satisfactory to users, # but ultimately, not needing to butcher the names # would be nice! if self.mangle_dupe_cols: names = list(names) # so we can index counts = defaultdict(int) is_potential_mi = _is_potential_multi_index(names, self.index_col) for i, col in enumerate(names): cur_count = counts[col] while cur_count > 0: counts[col] = cur_count + 1 if is_potential_mi: col = col[:-1] + (f"{col[-1]}.{cur_count}",) else: col = f"{col}.{cur_count}" cur_count = counts[col] names[i] = col counts[col] = cur_count + 1 return names
def _maybe_dedup_names(self, names): # see gh-7160 and gh-9424: this helps to provide # immediate alleviation of the duplicate names # issue and appears to be satisfactory to users, # but ultimately, not needing to butcher the names # would be nice! if self.mangle_dupe_cols: names = list(names) # so we can index counts = defaultdict(int) is_potential_mi = _is_potential_multi_index(names) for i, col in enumerate(names): cur_count = counts[col] while cur_count > 0: counts[col] = cur_count + 1 if is_potential_mi: col = col[:-1] + (f"{col[-1]}.{cur_count}",) else: col = f"{col}.{cur_count}" cur_count = counts[col] names[i] = col counts[col] = cur_count + 1 return names
https://github.com/pandas-dev/pandas/issues/33476
$ ~/miniconda3/bin/python read.py Traceback (most recent call last): File "read.py", line 4, in <module> df = pd.read_excel('df_empty.xlsx', engine='openpyxl', sheet_name='Sheet1', index_col=0, header=[0, 1]) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 334, in read_excel **kwds, File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 888, in parse **kwds, File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/io/excel/_base.py", line 521, in parse ].columns.set_names(header_names) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 1325, in set_names idx._set_names(names, level=level) File "/home/sasha/miniconda3/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 1239, in _set_names raise ValueError(f"Length of new names must be 1, got {len(values)}") ValueError: Length of new names must be 1, got 2
ValueError
def melt( frame: DataFrame, id_vars=None, value_vars=None, var_name=None, value_name="value", col_level=None, ) -> DataFrame: # TODO: what about the existing index? # If multiindex, gather names of columns on all level for checking presence # of `id_vars` and `value_vars` if isinstance(frame.columns, MultiIndex): cols = [x for c in frame.columns for x in c] else: cols = list(frame.columns) if id_vars is not None: if not is_list_like(id_vars): id_vars = [id_vars] elif isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list): raise ValueError( "id_vars must be a list of tuples when columns are a MultiIndex" ) else: # Check that `id_vars` are in frame id_vars = list(id_vars) missing = Index(com.flatten(id_vars)).difference(cols) if not missing.empty: raise KeyError( "The following 'id_vars' are not present " f"in the DataFrame: {list(missing)}" ) else: id_vars = [] if value_vars is not None: if not is_list_like(value_vars): value_vars = [value_vars] elif isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list): raise ValueError( "value_vars must be a list of tuples when columns are a MultiIndex" ) else: value_vars = list(value_vars) # Check that `value_vars` are in frame missing = Index(com.flatten(value_vars)).difference(cols) if not missing.empty: raise KeyError( "The following 'value_vars' are not present in " f"the DataFrame: {list(missing)}" ) if col_level is not None: idx = frame.columns.get_level_values(col_level).get_indexer( id_vars + value_vars ) else: idx = frame.columns.get_indexer(id_vars + value_vars) frame = frame.iloc[:, idx] else: frame = frame.copy() if col_level is not None: # allow list or other? # frame is a copy frame.columns = frame.columns.get_level_values(col_level) if var_name is None: if isinstance(frame.columns, MultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: var_name = [f"variable_{i}" for i in range(len(frame.columns.names))] else: var_name = [ frame.columns.name if frame.columns.name is not None else "variable" ] if isinstance(var_name, str): var_name = [var_name] N, K = frame.shape K -= len(id_vars) mdata = {} for col in id_vars: id_data = frame.pop(col) if is_extension_array_dtype(id_data): id_data = concat([id_data] * K, ignore_index=True) else: id_data = np.tile(id_data._values, K) mdata[col] = id_data mcolumns = id_vars + var_name + [value_name] mdata[value_name] = frame._values.ravel("F") for i, col in enumerate(var_name): # asanyarray will keep the columns as an Index mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N) return frame._constructor(mdata, columns=mcolumns)
def melt( frame: DataFrame, id_vars=None, value_vars=None, var_name=None, value_name="value", col_level=None, ) -> DataFrame: # TODO: what about the existing index? # If multiindex, gather names of columns on all level for checking presence # of `id_vars` and `value_vars` if isinstance(frame.columns, MultiIndex): cols = [x for c in frame.columns for x in c] else: cols = list(frame.columns) if id_vars is not None: if not is_list_like(id_vars): id_vars = [id_vars] elif isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list): raise ValueError( "id_vars must be a list of tuples when columns are a MultiIndex" ) else: # Check that `id_vars` are in frame id_vars = list(id_vars) missing = Index(com.flatten(id_vars)).difference(cols) if not missing.empty: raise KeyError( "The following 'id_vars' are not present " f"in the DataFrame: {list(missing)}" ) else: id_vars = [] if value_vars is not None: if not is_list_like(value_vars): value_vars = [value_vars] elif isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list): raise ValueError( "value_vars must be a list of tuples when columns are a MultiIndex" ) else: value_vars = list(value_vars) # Check that `value_vars` are in frame missing = Index(com.flatten(value_vars)).difference(cols) if not missing.empty: raise KeyError( "The following 'value_vars' are not present in " f"the DataFrame: {list(missing)}" ) frame = frame.loc[:, id_vars + value_vars] else: frame = frame.copy() if col_level is not None: # allow list or other? # frame is a copy frame.columns = frame.columns.get_level_values(col_level) if var_name is None: if isinstance(frame.columns, MultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: var_name = [f"variable_{i}" for i in range(len(frame.columns.names))] else: var_name = [ frame.columns.name if frame.columns.name is not None else "variable" ] if isinstance(var_name, str): var_name = [var_name] N, K = frame.shape K -= len(id_vars) mdata = {} for col in id_vars: id_data = frame.pop(col) if is_extension_array_dtype(id_data): id_data = concat([id_data] * K, ignore_index=True) else: id_data = np.tile(id_data._values, K) mdata[col] = id_data mcolumns = id_vars + var_name + [value_name] mdata[value_name] = frame._values.ravel("F") for i, col in enumerate(var_name): # asanyarray will keep the columns as an Index mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N) return frame._constructor(mdata, columns=mcolumns)
https://github.com/pandas-dev/pandas/issues/34129
--------------------------------------------------------------------------- KeyError Traceback (most recent call last) ~/Repos/spec17/venv/lib/python3.8/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2896 try: -> 2897 return self._engine.get_loc(key) 2898 except KeyError: pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() KeyError: 'ind1' During handling of the above exception, another exception occurred: KeyError Traceback (most recent call last) in 17 print("\nL1 index insert:\n", df_l1) 18 # NOTE: THIS FAILS! ---> 19 df_l1 = pd.melt(df_l1, col_level=1, 20 id_vars=['ind1'], value_vars=['D','E']) 21 print("\nL1 melt:\n", df_l1) ~/Repos/spec17/venv/lib/python3.8/site-packages/pandas/core/reshape/melt.py in melt(frame, id_vars, value_vars, var_name, value_name, col_level) 102 mdata = {} 103 for col in id_vars: --> 104 id_data = frame.pop(col) 105 if is_extension_type(id_data): 106 id_data = concat([id_data] * K, ignore_index=True) ~/Repos/spec17/venv/lib/python3.8/site-packages/pandas/core/generic.py in pop(self, item) 860 3 monkey NaN 861 """ --> 862 result = self[item] 863 del self[item] 864 try: ~/Repos/spec17/venv/lib/python3.8/site-packages/pandas/core/frame.py in __getitem__(self, key) 2993 if self.columns.nlevels > 1: 2994 return self._getitem_multilevel(key) -> 2995 indexer = self.columns.get_loc(key) 2996 if is_integer(indexer): 2997 indexer = [indexer] ~/Repos/spec17/venv/lib/python3.8/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2897 return self._engine.get_loc(key) 2898 except KeyError: -> 2899 return self._engine.get_loc(self._maybe_cast_indexer(key)) 2900 indexer = self.get_indexer([key], method=method, tolerance=tolerance) 2901 if indexer.ndim > 1 or indexer.size > 1: pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() KeyError: 'ind1'
KeyError
def to_timestamp(self, freq=None, how="start", copy=True) -> "Series": """ Cast to DatetimeIndex of Timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. copy : bool, default True Whether or not to return a copy. Returns ------- Series with DatetimeIndex """ new_values = self._values if copy: new_values = new_values.copy() if not isinstance(self.index, PeriodIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") new_index = self.index.to_timestamp(freq=freq, how=how) # type: ignore return self._constructor(new_values, index=new_index).__finalize__( self, method="to_timestamp" )
def to_timestamp(self, freq=None, how="start", copy=True) -> "Series": """ Cast to DatetimeIndex of Timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. copy : bool, default True Whether or not to return a copy. Returns ------- Series with DatetimeIndex """ new_values = self._values if copy: new_values = new_values.copy() assert isinstance(self.index, PeriodIndex) new_index = self.index.to_timestamp(freq=freq, how=how) # type: ignore return self._constructor(new_values, index=new_index).__finalize__( self, method="to_timestamp" )
https://github.com/pandas-dev/pandas/issues/33327
In [5]: import pandas as pd In [6]: pd.Series([0]).to_timestamp() --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-6-1a9d08d7daba> in <module> ----> 1 pd.Series([0]).to_timestamp() ~/sandbox/pandas/pandas/core/series.py in to_timestamp(self, freq, how, copy) 4565 new_values = new_values.copy() 4566 -> 4567 assert isinstance(self.index, (ABCDatetimeIndex, ABCPeriodIndex)) 4568 new_index = self.index.to_timestamp(freq=freq, how=how) 4569 return self._constructor(new_values, index=new_index).__finalize__( AssertionError:
AssertionError
def to_period(self, freq=None, copy=True) -> "Series": """ Convert Series from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed). Parameters ---------- freq : str, default None Frequency associated with the PeriodIndex. copy : bool, default True Whether or not to return a copy. Returns ------- Series Series with index converted to PeriodIndex. """ new_values = self._values if copy: new_values = new_values.copy() if not isinstance(self.index, DatetimeIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") new_index = self.index.to_period(freq=freq) # type: ignore return self._constructor(new_values, index=new_index).__finalize__( self, method="to_period" )
def to_period(self, freq=None, copy=True) -> "Series": """ Convert Series from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed). Parameters ---------- freq : str, default None Frequency associated with the PeriodIndex. copy : bool, default True Whether or not to return a copy. Returns ------- Series Series with index converted to PeriodIndex. """ new_values = self._values if copy: new_values = new_values.copy() assert isinstance(self.index, DatetimeIndex) new_index = self.index.to_period(freq=freq) # type: ignore return self._constructor(new_values, index=new_index).__finalize__( self, method="to_period" )
https://github.com/pandas-dev/pandas/issues/33327
In [5]: import pandas as pd In [6]: pd.Series([0]).to_timestamp() --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-6-1a9d08d7daba> in <module> ----> 1 pd.Series([0]).to_timestamp() ~/sandbox/pandas/pandas/core/series.py in to_timestamp(self, freq, how, copy) 4565 new_values = new_values.copy() 4566 -> 4567 assert isinstance(self.index, (ABCDatetimeIndex, ABCPeriodIndex)) 4568 new_index = self.index.to_timestamp(freq=freq, how=how) 4569 return self._constructor(new_values, index=new_index).__finalize__( AssertionError:
AssertionError
def sum(self, axis: int = 0, min_count: int = 0, *args, **kwargs) -> Scalar: """ Sum of non-NA/null values Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. min_count : int, default 0 The required number of valid values to perform the summation. If fewer than ``min_count`` valid values are present, the result will be the missing value indicator for subarray type. *args, **kwargs Not Used. NumPy compatibility. Returns ------- scalar """ nv.validate_sum(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() if self._null_fill_value: if check_below_min_count(valid_vals.shape, None, min_count): return na_value_for_dtype(self.dtype.subtype, compat=False) return sp_sum else: nsparse = self.sp_index.ngaps if check_below_min_count(valid_vals.shape, None, min_count - nsparse): return na_value_for_dtype(self.dtype.subtype, compat=False) return sp_sum + self.fill_value * nsparse
def sum(self, axis=0, *args, **kwargs): """ Sum of non-NA/null values Returns ------- sum : float """ nv.validate_sum(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() if self._null_fill_value: return sp_sum else: nsparse = self.sp_index.ngaps return sp_sum + self.fill_value * nsparse
https://github.com/pandas-dev/pandas/issues/25777
In [202]: sparse_series.dtype Out[203]: Sparse[bool, False] In [208]: sparse_series.value_counts() Out[208]: False 51386 True 13 Name: C_3D Printing, dtype: int64 In [209]: sparse_series.sum() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-209-8f6162710840> in <module> ----> 1 sparse_series.sum() ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/generic.py in stat_func(self, axis, skipna, level, numeric_only, min_count, **kwargs) 10929 skipna=skipna, min_count=min_count) 10930 return self._reduce(f, name, axis=axis, skipna=skipna, 10931 numeric_only=numeric_only, min_count=min_count) 10932 10933 return set_function_name(stat_func, name, cls) ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/series.py in _reduce(self, op, name, axis, skipna, numeric_only, filter_type, **kwds) 3613 # dispatch to ExtensionArray interface 3614 if isinstance(delegate, ExtensionArray): -> 3615 return delegate._reduce(name, skipna=skipna, **kwds) 3616 elif is_datetime64_dtype(delegate): 3617 # use DatetimeIndex implementation to handle skipna correctly ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/arrays/sparse.py in _reduce(self, name, skipna, **kwargs) 1439 kwargs.pop('numeric_only', None) 1440 kwargs.pop('op', None) -> 1441 return getattr(arr, name)(**kwargs) 1442 1443 def all(self, axis=None, *args, **kwargs): ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/arrays/sparse.py in sum(self, axis, *args, **kwargs) 1491 sum : float 1492 """ -> 1493 nv.validate_sum(args, kwargs) 1494 valid_vals = self._valid_sp_values 1495 sp_sum = valid_vals.sum() ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/compat/numpy/function.py in __call__(self, args, kwargs, fname, max_fname_arg_count, method) 54 validate_args_and_kwargs(fname, args, kwargs, 55 max_fname_arg_count, ---> 56 self.defaults) 57 else: 58 raise ValueError("invalid validation method " ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args) 216 217 kwargs.update(args_dict) --> 218 validate_kwargs(fname, kwargs, compat_args) 219 220 ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in validate_kwargs(fname, kwargs, compat_args) 154 """ 155 kwds = kwargs.copy() --> 156 _check_for_invalid_keys(fname, kwargs, compat_args) 157 _check_for_default_values(fname, kwds, compat_args) 158 ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in _check_for_invalid_keys(fname, kwargs, compat_args) 125 raise TypeError(("{fname}() got an unexpected " 126 "keyword argument '{arg}'". --> 127 format(fname=fname, arg=bad_arg))) 128 129 TypeError: sum() got an unexpected keyword argument 'min_count' In [210]:
TypeError
def na_value_for_dtype(dtype, compat: bool = True): """ Return a dtype compat na value Parameters ---------- dtype : string / dtype compat : bool, default True Returns ------- np.dtype or a pandas dtype Examples -------- >>> na_value_for_dtype(np.dtype('int64')) 0 >>> na_value_for_dtype(np.dtype('int64'), compat=False) nan >>> na_value_for_dtype(np.dtype('float64')) nan >>> na_value_for_dtype(np.dtype('bool')) False >>> na_value_for_dtype(np.dtype('datetime64[ns]')) NaT """ dtype = pandas_dtype(dtype) if is_extension_array_dtype(dtype): return dtype.na_value if needs_i8_conversion(dtype): return NaT elif is_float_dtype(dtype): return np.nan elif is_integer_dtype(dtype): if compat: return 0 return np.nan elif is_bool_dtype(dtype): if compat: return False return np.nan return np.nan
def na_value_for_dtype(dtype, compat: bool = True): """ Return a dtype compat na value Parameters ---------- dtype : string / dtype compat : bool, default True Returns ------- np.dtype or a pandas dtype Examples -------- >>> na_value_for_dtype(np.dtype('int64')) 0 >>> na_value_for_dtype(np.dtype('int64'), compat=False) nan >>> na_value_for_dtype(np.dtype('float64')) nan >>> na_value_for_dtype(np.dtype('bool')) False >>> na_value_for_dtype(np.dtype('datetime64[ns]')) NaT """ dtype = pandas_dtype(dtype) if is_extension_array_dtype(dtype): return dtype.na_value if needs_i8_conversion(dtype): return NaT elif is_float_dtype(dtype): return np.nan elif is_integer_dtype(dtype): if compat: return 0 return np.nan elif is_bool_dtype(dtype): return False return np.nan
https://github.com/pandas-dev/pandas/issues/25777
In [202]: sparse_series.dtype Out[203]: Sparse[bool, False] In [208]: sparse_series.value_counts() Out[208]: False 51386 True 13 Name: C_3D Printing, dtype: int64 In [209]: sparse_series.sum() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-209-8f6162710840> in <module> ----> 1 sparse_series.sum() ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/generic.py in stat_func(self, axis, skipna, level, numeric_only, min_count, **kwargs) 10929 skipna=skipna, min_count=min_count) 10930 return self._reduce(f, name, axis=axis, skipna=skipna, 10931 numeric_only=numeric_only, min_count=min_count) 10932 10933 return set_function_name(stat_func, name, cls) ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/series.py in _reduce(self, op, name, axis, skipna, numeric_only, filter_type, **kwds) 3613 # dispatch to ExtensionArray interface 3614 if isinstance(delegate, ExtensionArray): -> 3615 return delegate._reduce(name, skipna=skipna, **kwds) 3616 elif is_datetime64_dtype(delegate): 3617 # use DatetimeIndex implementation to handle skipna correctly ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/arrays/sparse.py in _reduce(self, name, skipna, **kwargs) 1439 kwargs.pop('numeric_only', None) 1440 kwargs.pop('op', None) -> 1441 return getattr(arr, name)(**kwargs) 1442 1443 def all(self, axis=None, *args, **kwargs): ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/arrays/sparse.py in sum(self, axis, *args, **kwargs) 1491 sum : float 1492 """ -> 1493 nv.validate_sum(args, kwargs) 1494 valid_vals = self._valid_sp_values 1495 sp_sum = valid_vals.sum() ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/compat/numpy/function.py in __call__(self, args, kwargs, fname, max_fname_arg_count, method) 54 validate_args_and_kwargs(fname, args, kwargs, 55 max_fname_arg_count, ---> 56 self.defaults) 57 else: 58 raise ValueError("invalid validation method " ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args) 216 217 kwargs.update(args_dict) --> 218 validate_kwargs(fname, kwargs, compat_args) 219 220 ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in validate_kwargs(fname, kwargs, compat_args) 154 """ 155 kwds = kwargs.copy() --> 156 _check_for_invalid_keys(fname, kwargs, compat_args) 157 _check_for_default_values(fname, kwds, compat_args) 158 ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in _check_for_invalid_keys(fname, kwargs, compat_args) 125 raise TypeError(("{fname}() got an unexpected " 126 "keyword argument '{arg}'". --> 127 format(fname=fname, arg=bad_arg))) 128 129 TypeError: sum() got an unexpected keyword argument 'min_count' In [210]:
TypeError
def check_below_min_count( shape: Tuple[int, ...], mask: Optional[np.ndarray], min_count: int ) -> bool: """ Check for the `min_count` keyword. Returns True if below `min_count` (when missing value should be returned from the reduction). Parameters ---------- shape : tuple The shape of the values (`values.shape`). mask : ndarray or None Boolean numpy array (typically of same shape as `shape`) or None. min_count : int Keyword passed through from sum/prod call. Returns ------- bool """ if min_count > 0: if mask is None: # no missing values, only check size non_nulls = np.prod(shape) else: non_nulls = mask.size - mask.sum() if non_nulls < min_count: return True return False
def check_below_min_count( shape: Tuple[int, ...], mask: Optional[np.ndarray], min_count: int ): """ Check for the `min_count` keyword. Returns True if below `min_count` (when missing value should be returned from the reduction). Parameters ---------- shape : tuple The shape of the values (`values.shape`). mask : ndarray or None Boolean numpy array (typically of same shape as `shape`) or None. min_count : int Keyword passed through from sum/prod call. Returns ------- bool """ if min_count > 0: if mask is None: # no missing values, only check size non_nulls = np.prod(shape) else: non_nulls = mask.size - mask.sum() if non_nulls < min_count: return True return False
https://github.com/pandas-dev/pandas/issues/25777
In [202]: sparse_series.dtype Out[203]: Sparse[bool, False] In [208]: sparse_series.value_counts() Out[208]: False 51386 True 13 Name: C_3D Printing, dtype: int64 In [209]: sparse_series.sum() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-209-8f6162710840> in <module> ----> 1 sparse_series.sum() ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/generic.py in stat_func(self, axis, skipna, level, numeric_only, min_count, **kwargs) 10929 skipna=skipna, min_count=min_count) 10930 return self._reduce(f, name, axis=axis, skipna=skipna, 10931 numeric_only=numeric_only, min_count=min_count) 10932 10933 return set_function_name(stat_func, name, cls) ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/series.py in _reduce(self, op, name, axis, skipna, numeric_only, filter_type, **kwds) 3613 # dispatch to ExtensionArray interface 3614 if isinstance(delegate, ExtensionArray): -> 3615 return delegate._reduce(name, skipna=skipna, **kwds) 3616 elif is_datetime64_dtype(delegate): 3617 # use DatetimeIndex implementation to handle skipna correctly ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/arrays/sparse.py in _reduce(self, name, skipna, **kwargs) 1439 kwargs.pop('numeric_only', None) 1440 kwargs.pop('op', None) -> 1441 return getattr(arr, name)(**kwargs) 1442 1443 def all(self, axis=None, *args, **kwargs): ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/core/arrays/sparse.py in sum(self, axis, *args, **kwargs) 1491 sum : float 1492 """ -> 1493 nv.validate_sum(args, kwargs) 1494 valid_vals = self._valid_sp_values 1495 sp_sum = valid_vals.sum() ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/compat/numpy/function.py in __call__(self, args, kwargs, fname, max_fname_arg_count, method) 54 validate_args_and_kwargs(fname, args, kwargs, 55 max_fname_arg_count, ---> 56 self.defaults) 57 else: 58 raise ValueError("invalid validation method " ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args) 216 217 kwargs.update(args_dict) --> 218 validate_kwargs(fname, kwargs, compat_args) 219 220 ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in validate_kwargs(fname, kwargs, compat_args) 154 """ 155 kwds = kwargs.copy() --> 156 _check_for_invalid_keys(fname, kwargs, compat_args) 157 _check_for_default_values(fname, kwds, compat_args) 158 ~/anaconda3/envs/idp/lib/python3.6/site-packages/pandas/util/_validators.py in _check_for_invalid_keys(fname, kwargs, compat_args) 125 raise TypeError(("{fname}() got an unexpected " 126 "keyword argument '{arg}'". --> 127 format(fname=fname, arg=bad_arg))) 128 129 TypeError: sum() got an unexpected keyword argument 'min_count' In [210]:
TypeError
def _na_for_min_count( values: np.ndarray, axis: Optional[int] ) -> Union[Scalar, np.ndarray]: """ Return the missing value for `values`. Parameters ---------- values : ndarray axis : int or None axis for the reduction, required if values.ndim > 1. Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing. """ # we either return np.nan or pd.NaT if is_numeric_dtype(values): values = values.astype("float64") fill_value = na_value_for_dtype(values.dtype) if values.ndim == 1: return fill_value else: assert axis is not None # assertion to make mypy happy result_shape = values.shape[:axis] + values.shape[axis + 1 :] # calling np.full with dtype parameter throws an ValueError when called # with dtype=np.datetime64 and and fill_value=pd.NaT try: result = np.full(result_shape, fill_value, dtype=values.dtype) except ValueError: result = np.full(result_shape, fill_value) return result
def _na_for_min_count( values: np.ndarray, axis: Optional[int] ) -> Union[Scalar, np.ndarray]: """ Return the missing value for `values`. Parameters ---------- values : ndarray axis : int or None axis for the reduction, required if values.ndim > 1. Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing. """ # we either return np.nan or pd.NaT if is_numeric_dtype(values): values = values.astype("float64") fill_value = na_value_for_dtype(values.dtype) if values.ndim == 1: return fill_value else: assert axis is not None # assertion to make mypy happy result_shape = values.shape[:axis] + values.shape[axis + 1 :] result = np.empty(result_shape, dtype=values.dtype) result.fill(fill_value) return result
https://github.com/pandas-dev/pandas/issues/33704
Traceback (most recent call last): File "<ipython-input-17-be9940feb663>", line 1, in <module> df.max() File "pandas/core/generic.py", line 11215, in stat_func f, name, axis=axis, skipna=skipna, numeric_only=numeric_only File "pandas/core/frame.py", line 7907, in _reduce result = f(values) File "pandas/core/frame.py", line 7865, in f return op(x, axis=axis, skipna=skipna, **kwds) File "pandas/core/nanops.py", line 109, in f return _na_for_min_count(values, axis) File "pandas/core/nanops.py", line 392, in _na_for_min_count result.fill(fill_value) ValueError: cannot convert float NaN to integer
ValueError
def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ arrays: Union[Sequence[Any], "Series"] if columns is not None: from pandas.core.series import Series # noqa:F811 arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isna() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or ( not is_extension_array_dtype(dtype) and np.issubdtype(dtype, np.flexible) ): # GH#1783 nan_dtype = np.dtype(object) else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = list(data.keys()) columns = data_names = Index(keys) arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [ arr if not isinstance(arr, ABCIndexClass) else arr._data for arr in arrays ] arrays = [ arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays ] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ arrays: Union[Sequence[Any], "Series"] if columns is not None: from pandas.core.series import Series # noqa:F811 arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isna() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = np.dtype(object) else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = list(data.keys()) columns = data_names = Index(keys) arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [ arr if not isinstance(arr, ABCIndexClass) else arr._data for arr in arrays ] arrays = [ arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays ] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
https://github.com/pandas-dev/pandas/issues/33623
In [1]: import pandas as pd In [2]: pd.DataFrame({'a': ['b']}, dtype='string') Out[2]: a 0 b In [3]: pd.DataFrame(columns=['a'], dtype='string') --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-3-2e577cc30885> in <module> ----> 1 pd.DataFrame(columns=['a'], dtype='string') ~/lib/miniconda/envs/xport/lib/python3.8/site-packages/pandas/core/frame.py in __init__(self, data, index, columns, dtype, copy) 433 ) 434 elif isinstance(data, dict): --> 435 mgr = init_dict(data, index, columns, dtype=dtype) 436 elif isinstance(data, ma.MaskedArray): 437 import numpy.ma.mrecords as mrecords ~/lib/miniconda/envs/xport/lib/python3.8/site-packages/pandas/core/internals/construction.py in init_dict(data, index, columns, dtype) 232 # no obvious "empty" int column 233 if missing.any() and not is_integer_dtype(dtype): --> 234 if dtype is None or np.issubdtype(dtype, np.flexible): 235 # GH#1783 236 nan_dtype = object ~/lib/miniconda/envs/xport/lib/python3.8/site-packages/numpy/core/numerictypes.py in issubdtype(arg1, arg2) 391 """ 392 if not issubclass_(arg1, generic): --> 393 arg1 = dtype(arg1).type 394 if not issubclass_(arg2, generic): 395 arg2_orig = arg2 TypeError: data type not understood In [4]: pd.DataFrame(columns=['a'], dtype='float') Out[4]: Empty DataFrame Columns: [a] Index: []
TypeError
def putmask( self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False, ) -> List["Block"]: """ See Block.putmask.__doc__ """ inplace = validate_bool_kwarg(inplace, "inplace") mask = _extract_bool_array(mask) new_values = self.values if inplace else self.values.copy() if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask): new = new[mask] mask = _safe_reshape(mask, new_values.shape) new_values[mask] = new return [self.make_block(values=new_values)]
def putmask( self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False, ) -> List["Block"]: """ See Block.putmask.__doc__ """ inplace = validate_bool_kwarg(inplace, "inplace") mask = _extract_bool_array(mask) new_values = self.values if inplace else self.values.copy() if isinstance(new, np.ndarray) and len(new) == len(mask): new = new[mask] mask = _safe_reshape(mask, new_values.shape) new_values[mask] = new return [self.make_block(values=new_values)]
https://github.com/pandas-dev/pandas/issues/33980
Traceback (most recent call last): File "<ipython-input-15-b9da8f25067a>", line 1, in <module> a.update(b) File "C:\tools\anaconda3\envs\Simple\lib\site-packages\pandas\core\series.py", line 2810, in update self._data = self._data.putmask(mask=mask, new=other, inplace=True) File "C:\tools\anaconda3\envs\Simple\lib\site-packages\pandas\core\internals\managers.py", line 564, in putmask return self.apply("putmask", **kwargs) File "C:\tools\anaconda3\envs\Simple\lib\site-packages\pandas\core\internals\managers.py", line 442, in apply applied = getattr(b, f)(**kwargs) File "C:\tools\anaconda3\envs\Simple\lib\site-packages\pandas\core\internals\blocks.py", line 1676, in putmask new_values[mask] = new File "C:\tools\anaconda3\envs\Simple\lib\site-packages\pandas\core\arrays\string_.py", line 248, in __setitem__ super().__setitem__(key, value) File "C:\tools\anaconda3\envs\Simple\lib\site-packages\pandas\core\arrays\numpy_.py", line 252, in __setitem__ self._ndarray[key] = value ValueError: NumPy boolean array indexing assignment cannot assign 3 input values to the 1 output values where the mask is true
ValueError
def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): """ Transform a list-like of keys into a new index and an indexer. Parameters ---------- key : list-like Targeted labels. axis: int Dimension on which the indexing is being made. raise_missing: bool, default False Whether to raise a KeyError if some labels were not found. Will be removed in the future, and then this method will always behave as if ``raise_missing=True``. Raises ------ KeyError If at least one key was requested but none was found, and raise_missing=True. Returns ------- keyarr: Index New index (coinciding with 'key' if the axis is unique). values : array-like Indexer for the return object, -1 denotes keys not found. """ ax = self.obj._get_axis(axis) # Have the index compute an indexer or return None # if it cannot handle: indexer, keyarr = ax._convert_listlike_indexer(key) # We only act on all found values: if indexer is not None and (indexer != -1).all(): self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) return ax[indexer], indexer if ax.is_unique and not getattr(ax, "is_overlapping", False): indexer = ax.get_indexer_for(keyarr) keyarr = ax.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr) self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) return keyarr, indexer
def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): """ Transform a list-like of keys into a new index and an indexer. Parameters ---------- key : list-like Targeted labels. axis: int Dimension on which the indexing is being made. raise_missing: bool, default False Whether to raise a KeyError if some labels were not found. Will be removed in the future, and then this method will always behave as if ``raise_missing=True``. Raises ------ KeyError If at least one key was requested but none was found, and raise_missing=True. Returns ------- keyarr: Index New index (coinciding with 'key' if the axis is unique). values : array-like Indexer for the return object, -1 denotes keys not found. """ ax = self.obj._get_axis(axis) # Have the index compute an indexer or return None # if it cannot handle: indexer, keyarr = ax._convert_listlike_indexer(key) # We only act on all found values: if indexer is not None and (indexer != -1).all(): self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing) return ax[indexer], indexer if ax.is_unique and not getattr(ax, "is_overlapping", False): indexer = ax.get_indexer_for(key) keyarr = ax.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr) self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) return keyarr, indexer
https://github.com/pandas-dev/pandas/issues/11278
In [10]: series.loc[['2000-01-14', '2000-01-15']] --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-10-e922e8a9ed74> in <module>() ----> 1 series.loc[['2000-01-14', '2000-01-15']] /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/pandas/core/indexing.py in __getitem__(self, key) 1187 return self._getitem_tuple(key) 1188 else: -> 1189 return self._getitem_axis(key, axis=0) 1190 1191 def _getitem_axis(self, key, axis=0): /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/pandas/core/indexing.py in _getitem_axis(self, key, axis) 1321 raise ValueError('Cannot index with multidimensional key') 1322 -> 1323 return self._getitem_iterable(key, axis=axis) 1324 1325 # nested tuple slicing /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/pandas/core/indexing.py in _getitem_iterable(self, key, axis) 931 def _getitem_iterable(self, key, axis=0): 932 if self._should_validate_iterable(axis): --> 933 self._has_valid_type(key, axis) 934 935 labels = self.obj._get_axis(axis) /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/pandas/core/indexing.py in _has_valid_type(self, key, axis) 1269 1270 raise KeyError("None of [%s] are in the [%s]" % -> 1271 (key, self.obj._get_axis_name(axis))) 1272 1273 return True KeyError: "None of [['2000-01-14', '2000-01-15']] are in the [index]"
KeyError
def sanitize_array( data, index: Optional["Index"], dtype: Optional[DtypeObj] = None, copy: bool = False, raise_cast_failure: bool = False, ) -> ArrayLike: """ Sanitize input data to an ndarray or ExtensionArray, copy if specified, coerce to the dtype if specified. """ if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() # extract ndarray or ExtensionArray, ensure we have no PandasArray data = extract_array(data, extract_numpy=True) # GH#846 if isinstance(data, np.ndarray): if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype): # possibility of nan -> garbage try: subarr = _try_cast(data, dtype, copy, True) except ValueError: if copy: subarr = data.copy() else: subarr = np.array(data, copy=False) else: # we will try to copy be-definition here subarr = _try_cast(data, dtype, copy, raise_cast_failure) elif isinstance(data, ABCExtensionArray): # it is already ensured above this is not a PandasArray subarr = data if dtype is not None: subarr = subarr.astype(dtype, copy=copy) elif copy: subarr = subarr.copy() return subarr elif isinstance(data, (list, tuple)) and len(data) > 0: if dtype is not None: subarr = _try_cast(data, dtype, copy, raise_cast_failure) else: subarr = maybe_convert_platform(data) subarr = maybe_cast_to_datetime(subarr, dtype) elif isinstance(data, range): # GH#16804 arr = np.arange(data.start, data.stop, data.step, dtype="int64") subarr = _try_cast(arr, dtype, copy, raise_cast_failure) elif isinstance(data, abc.Set): raise TypeError("Set type is unordered") elif lib.is_scalar(data) and index is not None and dtype is not None: data = maybe_cast_to_datetime(data, dtype) if not lib.is_scalar(data): data = data[0] subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype) else: subarr = _try_cast(data, dtype, copy, raise_cast_failure) # scalar like, GH if getattr(subarr, "ndim", 0) == 0: if isinstance(data, list): # pragma: no cover subarr = np.array(data, dtype=object) elif index is not None: value = data # figure out the dtype from the value (upcast if necessary) if dtype is None: dtype, value = infer_dtype_from_scalar(value) else: # need to possibly convert the value here value = maybe_cast_to_datetime(value, dtype) subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype) else: return subarr.item() # the result that we want elif subarr.ndim == 1: if index is not None: # a 1-element ndarray if len(subarr) != len(index) and len(subarr) == 1: subarr = construct_1d_arraylike_from_scalar( subarr[0], len(index), subarr.dtype ) elif subarr.ndim > 1: if isinstance(data, np.ndarray): raise Exception("Data must be 1-dimensional") else: subarr = com.asarray_tuplesafe(data, dtype=dtype) if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)): # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. if issubclass(subarr.dtype.type, str): # GH#16605 # If not empty convert the data to dtype # GH#19853: If data is a scalar, subarr has already the result if not lib.is_scalar(data): if not np.all(isna(data)): data = np.array(data, dtype=dtype, copy=False) subarr = np.array(data, dtype=object, copy=copy) if is_object_dtype(subarr.dtype) and not is_object_dtype(dtype): inferred = lib.infer_dtype(subarr, skipna=False) if inferred in {"interval", "period"}: subarr = array(subarr) return subarr
def sanitize_array( data, index: Optional["Index"], dtype: Optional[DtypeObj] = None, copy: bool = False, raise_cast_failure: bool = False, ) -> ArrayLike: """ Sanitize input data to an ndarray or ExtensionArray, copy if specified, coerce to the dtype if specified. """ if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() # extract ndarray or ExtensionArray, ensure we have no PandasArray data = extract_array(data, extract_numpy=True) # GH#846 if isinstance(data, np.ndarray): if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype): # possibility of nan -> garbage try: subarr = _try_cast(data, dtype, copy, True) except ValueError: if copy: subarr = data.copy() else: subarr = np.array(data, copy=False) else: # we will try to copy be-definition here subarr = _try_cast(data, dtype, copy, raise_cast_failure) elif isinstance(data, ABCExtensionArray): # it is already ensured above this is not a PandasArray subarr = data if dtype is not None: subarr = subarr.astype(dtype, copy=copy) elif copy: subarr = subarr.copy() return subarr elif isinstance(data, (list, tuple)) and len(data) > 0: if dtype is not None: subarr = _try_cast(data, dtype, copy, raise_cast_failure) else: subarr = maybe_convert_platform(data) subarr = maybe_cast_to_datetime(subarr, dtype) elif isinstance(data, range): # GH#16804 arr = np.arange(data.start, data.stop, data.step, dtype="int64") subarr = _try_cast(arr, dtype, copy, raise_cast_failure) elif isinstance(data, abc.Set): raise TypeError("Set type is unordered") else: subarr = _try_cast(data, dtype, copy, raise_cast_failure) # scalar like, GH if getattr(subarr, "ndim", 0) == 0: if isinstance(data, list): # pragma: no cover subarr = np.array(data, dtype=object) elif index is not None: value = data # figure out the dtype from the value (upcast if necessary) if dtype is None: dtype, value = infer_dtype_from_scalar(value) else: # need to possibly convert the value here value = maybe_cast_to_datetime(value, dtype) subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype) else: return subarr.item() # the result that we want elif subarr.ndim == 1: if index is not None: # a 1-element ndarray if len(subarr) != len(index) and len(subarr) == 1: subarr = construct_1d_arraylike_from_scalar( subarr[0], len(index), subarr.dtype ) elif subarr.ndim > 1: if isinstance(data, np.ndarray): raise Exception("Data must be 1-dimensional") else: subarr = com.asarray_tuplesafe(data, dtype=dtype) if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)): # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. if issubclass(subarr.dtype.type, str): # GH#16605 # If not empty convert the data to dtype # GH#19853: If data is a scalar, subarr has already the result if not lib.is_scalar(data): if not np.all(isna(data)): data = np.array(data, dtype=dtype, copy=False) subarr = np.array(data, dtype=object, copy=copy) if is_object_dtype(subarr.dtype) and not is_object_dtype(dtype): inferred = lib.infer_dtype(subarr, skipna=False) if inferred in {"interval", "period"}: subarr = array(subarr) return subarr
https://github.com/pandas-dev/pandas/issues/26469
In [8]: pd.Series(None, index=[1, 2, 3], dtype='Int64') --------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/scipy/pandas/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure) 691 if is_integer_dtype(dtype): --> 692 subarr = maybe_cast_to_integer_array(arr, dtype) 693 ~/scipy/pandas/pandas/core/dtypes/cast.py in maybe_cast_to_integer_array(arr, dtype, copy) 1311 if not hasattr(arr, "astype"): -> 1312 casted = np.array(arr, dtype=dtype, copy=copy) 1313 else: TypeError: data type not understood During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-8-9447295feee6> in <module> ----> 1 pd.Series(None, index=[1, 2, 3], dtype='Int64') ~/scipy/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 202 data = data._data 203 elif isinstance(data, dict): --> 204 data, index = self._init_dict(data, index, dtype) 205 dtype = None 206 copy = False ~/scipy/pandas/pandas/core/series.py in _init_dict(self, data, index, dtype) 295 296 # Input is now list-like, so rely on "standard" construction: --> 297 s = Series(values, index=keys, dtype=dtype) 298 299 # Now we just make sure the order is respected, if any ~/scipy/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 253 else: 254 data = sanitize_array(data, index, dtype, copy, --> 255 raise_cast_failure=True) 256 257 data = SingleBlockManager(data, index, fastpath=True) ~/scipy/pandas/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure) 620 subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure) 621 else: --> 622 subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) 623 624 # scalar like, GH ~/scipy/pandas/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure) 711 # create an extension array from its dtype 712 array_type = dtype.construct_array_type()._from_sequence --> 713 subarr = array_type(arr, dtype=dtype, copy=copy) 714 elif dtype is not None and raise_cast_failure: 715 raise ~/scipy/pandas/pandas/core/arrays/integer.py in _from_sequence(cls, scalars, dtype, copy) 305 @classmethod 306 def _from_sequence(cls, scalars, dtype=None, copy=False): --> 307 return integer_array(scalars, dtype=dtype, copy=copy) 308 309 @classmethod ~/scipy/pandas/pandas/core/arrays/integer.py in integer_array(values, dtype, copy) 110 TypeError if incompatible types 111 """ --> 112 values, mask = coerce_to_array(values, dtype=dtype, copy=copy) 113 return IntegerArray(values, mask) 114 ~/scipy/pandas/pandas/core/arrays/integer.py in coerce_to_array(values, dtype, mask, copy) 202 203 if not values.ndim == 1: --> 204 raise TypeError("values must be a 1D list-like") 205 if not mask.ndim == 1: 206 raise TypeError("mask must be a 1D list-like") TypeError: values must be a 1D list-like
TypeError
def _try_cast( arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool, ): """ Convert input to numpy ndarray and optionally cast to a given dtype. Parameters ---------- arr : ndarray, scalar, list, tuple, iterator (catchall) Excludes: ExtensionArray, Series, Index. dtype : np.dtype, ExtensionDtype or None copy : bool If False, don't copy the data if not needed. raise_cast_failure : bool If True, and if a dtype is specified, raise errors during casting. Otherwise an object array is returned. """ # perf shortcut as this is the most common case if isinstance(arr, np.ndarray): if maybe_castable(arr) and not copy and dtype is None: return arr if isinstance(dtype, ExtensionDtype) and dtype.kind != "M": # create an extension array from its dtype # DatetimeTZ case needs to go through maybe_cast_to_datetime array_type = dtype.construct_array_type()._from_sequence subarr = array_type(arr, dtype=dtype, copy=copy) return subarr try: # GH#15832: Check if we are requesting a numeric dype and # that we can convert the data to the requested dtype. if is_integer_dtype(dtype): # this will raise if we have e.g. floats maybe_cast_to_integer_array(arr, dtype) subarr = arr else: subarr = maybe_cast_to_datetime(arr, dtype) # Take care in creating object arrays (but iterators are not # supported): if is_object_dtype(dtype) and ( is_list_like(subarr) and not (is_iterator(subarr) or isinstance(subarr, np.ndarray)) ): subarr = construct_1d_object_array_from_listlike(subarr) elif not is_extension_array_dtype(subarr): subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) except OutOfBoundsDatetime: # in case of out of bound datetime64 -> always raise raise except (ValueError, TypeError): if dtype is not None and raise_cast_failure: raise else: subarr = np.array(arr, dtype=object, copy=copy) return subarr
def _try_cast( arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool, ): """ Convert input to numpy ndarray and optionally cast to a given dtype. Parameters ---------- arr : ndarray, list, tuple, iterator (catchall) Excludes: ExtensionArray, Series, Index. dtype : np.dtype, ExtensionDtype or None copy : bool If False, don't copy the data if not needed. raise_cast_failure : bool If True, and if a dtype is specified, raise errors during casting. Otherwise an object array is returned. """ # perf shortcut as this is the most common case if isinstance(arr, np.ndarray): if maybe_castable(arr) and not copy and dtype is None: return arr if isinstance(dtype, ExtensionDtype) and dtype.kind != "M": # create an extension array from its dtype # DatetimeTZ case needs to go through maybe_cast_to_datetime array_type = dtype.construct_array_type()._from_sequence subarr = array_type(arr, dtype=dtype, copy=copy) return subarr try: # GH#15832: Check if we are requesting a numeric dype and # that we can convert the data to the requested dtype. if is_integer_dtype(dtype): # this will raise if we have e.g. floats maybe_cast_to_integer_array(arr, dtype) subarr = arr else: subarr = maybe_cast_to_datetime(arr, dtype) # Take care in creating object arrays (but iterators are not # supported): if is_object_dtype(dtype) and ( is_list_like(subarr) and not (is_iterator(subarr) or isinstance(subarr, np.ndarray)) ): subarr = construct_1d_object_array_from_listlike(subarr) elif not is_extension_array_dtype(subarr): subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) except OutOfBoundsDatetime: # in case of out of bound datetime64 -> always raise raise except (ValueError, TypeError): if dtype is not None and raise_cast_failure: raise else: subarr = np.array(arr, dtype=object, copy=copy) return subarr
https://github.com/pandas-dev/pandas/issues/26469
In [8]: pd.Series(None, index=[1, 2, 3], dtype='Int64') --------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/scipy/pandas/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure) 691 if is_integer_dtype(dtype): --> 692 subarr = maybe_cast_to_integer_array(arr, dtype) 693 ~/scipy/pandas/pandas/core/dtypes/cast.py in maybe_cast_to_integer_array(arr, dtype, copy) 1311 if not hasattr(arr, "astype"): -> 1312 casted = np.array(arr, dtype=dtype, copy=copy) 1313 else: TypeError: data type not understood During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-8-9447295feee6> in <module> ----> 1 pd.Series(None, index=[1, 2, 3], dtype='Int64') ~/scipy/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 202 data = data._data 203 elif isinstance(data, dict): --> 204 data, index = self._init_dict(data, index, dtype) 205 dtype = None 206 copy = False ~/scipy/pandas/pandas/core/series.py in _init_dict(self, data, index, dtype) 295 296 # Input is now list-like, so rely on "standard" construction: --> 297 s = Series(values, index=keys, dtype=dtype) 298 299 # Now we just make sure the order is respected, if any ~/scipy/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 253 else: 254 data = sanitize_array(data, index, dtype, copy, --> 255 raise_cast_failure=True) 256 257 data = SingleBlockManager(data, index, fastpath=True) ~/scipy/pandas/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure) 620 subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure) 621 else: --> 622 subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) 623 624 # scalar like, GH ~/scipy/pandas/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure) 711 # create an extension array from its dtype 712 array_type = dtype.construct_array_type()._from_sequence --> 713 subarr = array_type(arr, dtype=dtype, copy=copy) 714 elif dtype is not None and raise_cast_failure: 715 raise ~/scipy/pandas/pandas/core/arrays/integer.py in _from_sequence(cls, scalars, dtype, copy) 305 @classmethod 306 def _from_sequence(cls, scalars, dtype=None, copy=False): --> 307 return integer_array(scalars, dtype=dtype, copy=copy) 308 309 @classmethod ~/scipy/pandas/pandas/core/arrays/integer.py in integer_array(values, dtype, copy) 110 TypeError if incompatible types 111 """ --> 112 values, mask = coerce_to_array(values, dtype=dtype, copy=copy) 113 return IntegerArray(values, mask) 114 ~/scipy/pandas/pandas/core/arrays/integer.py in coerce_to_array(values, dtype, mask, copy) 202 203 if not values.ndim == 1: --> 204 raise TypeError("values must be a 1D list-like") 205 if not mask.ndim == 1: 206 raise TypeError("mask must be a 1D list-like") TypeError: values must be a 1D list-like
TypeError
def maybe_cast_to_datetime(value, dtype, errors: str = "raise"): """ try to cast the array/value to a datetimelike dtype, converting float nan to iNaT """ from pandas.core.tools.timedeltas import to_timedelta from pandas.core.tools.datetimes import to_datetime if dtype is not None: if isinstance(dtype, str): dtype = np.dtype(dtype) is_datetime64 = is_datetime64_dtype(dtype) is_datetime64tz = is_datetime64tz_dtype(dtype) is_timedelta64 = is_timedelta64_dtype(dtype) if is_datetime64 or is_datetime64tz or is_timedelta64: # Force the dtype if needed. msg = ( f"The '{dtype.name}' dtype has no unit. " f"Please pass in '{dtype.name}[ns]' instead." ) if is_datetime64 and not is_dtype_equal(dtype, DT64NS_DTYPE): # pandas supports dtype whose granularity is less than [ns] # e.g., [ps], [fs], [as] if dtype <= np.dtype("M8[ns]"): if dtype.name == "datetime64": raise ValueError(msg) dtype = DT64NS_DTYPE else: raise TypeError(f"cannot convert datetimelike to dtype [{dtype}]") elif is_datetime64tz: # our NaT doesn't support tz's # this will coerce to DatetimeIndex with # a matching dtype below if is_scalar(value) and isna(value): value = [value] elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE): # pandas supports dtype whose granularity is less than [ns] # e.g., [ps], [fs], [as] if dtype <= np.dtype("m8[ns]"): if dtype.name == "timedelta64": raise ValueError(msg) dtype = TD64NS_DTYPE else: raise TypeError(f"cannot convert timedeltalike to dtype [{dtype}]") if is_scalar(value): if value == iNaT or isna(value): value = iNaT else: value = np.array(value, copy=False) # have a scalar array-like (e.g. NaT) if value.ndim == 0: value = iNaT # we have an array of datetime or timedeltas & nulls elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype): try: if is_datetime64: value = to_datetime(value, errors=errors) # GH 25843: Remove tz information since the dtype # didn't specify one if value.tz is not None: value = value.tz_localize(None) value = value._values elif is_datetime64tz: # The string check can be removed once issue #13712 # is solved. String data that is passed with a # datetime64tz is assumed to be naive which should # be localized to the timezone. is_dt_string = is_string_dtype(value) value = to_datetime(value, errors=errors).array if is_dt_string: # Strings here are naive, so directly localize value = value.tz_localize(dtype.tz) else: # Numeric values are UTC at this point, # so localize and convert value = value.tz_localize("UTC").tz_convert(dtype.tz) elif is_timedelta64: value = to_timedelta(value, errors=errors)._values except OutOfBoundsDatetime: raise except (AttributeError, ValueError, TypeError): pass # coerce datetimelike to object elif is_datetime64_dtype( getattr(value, "dtype", None) ) and not is_datetime64_dtype(dtype): if is_object_dtype(dtype): if value.dtype != DT64NS_DTYPE: value = value.astype(DT64NS_DTYPE) ints = np.asarray(value).view("i8") return tslib.ints_to_pydatetime(ints) # we have a non-castable dtype that was passed raise TypeError(f"Cannot cast datetime64 to {dtype}") else: is_array = isinstance(value, np.ndarray) # catch a datetime/timedelta that is not of ns variety # and no coercion specified if is_array and value.dtype.kind in ["M", "m"]: dtype = value.dtype if dtype.kind == "M" and dtype != DT64NS_DTYPE: value = tslibs.conversion.ensure_datetime64ns(value) elif dtype.kind == "m" and dtype != TD64NS_DTYPE: value = to_timedelta(value) # only do this if we have an array and the dtype of the array is not # setup already we are not an integer/object, so don't bother with this # conversion elif not ( is_array and not ( issubclass(value.dtype.type, np.integer) or value.dtype == np.object_ ) ): value = maybe_infer_to_datetimelike(value) return value
def maybe_cast_to_datetime(value, dtype, errors: str = "raise"): """ try to cast the array/value to a datetimelike dtype, converting float nan to iNaT """ from pandas.core.tools.timedeltas import to_timedelta from pandas.core.tools.datetimes import to_datetime if dtype is not None: if isinstance(dtype, str): dtype = np.dtype(dtype) is_datetime64 = is_datetime64_dtype(dtype) is_datetime64tz = is_datetime64tz_dtype(dtype) is_timedelta64 = is_timedelta64_dtype(dtype) if is_datetime64 or is_datetime64tz or is_timedelta64: # Force the dtype if needed. msg = ( f"The '{dtype.name}' dtype has no unit. " f"Please pass in '{dtype.name}[ns]' instead." ) if is_datetime64 and not is_dtype_equal(dtype, DT64NS_DTYPE): # pandas supports dtype whose granularity is less than [ns] # e.g., [ps], [fs], [as] if dtype <= np.dtype("M8[ns]"): if dtype.name == "datetime64": raise ValueError(msg) dtype = DT64NS_DTYPE else: raise TypeError(f"cannot convert datetimelike to dtype [{dtype}]") elif is_datetime64tz: # our NaT doesn't support tz's # this will coerce to DatetimeIndex with # a matching dtype below if is_scalar(value) and isna(value): value = [value] elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE): # pandas supports dtype whose granularity is less than [ns] # e.g., [ps], [fs], [as] if dtype <= np.dtype("m8[ns]"): if dtype.name == "timedelta64": raise ValueError(msg) dtype = TD64NS_DTYPE else: raise TypeError(f"cannot convert timedeltalike to dtype [{dtype}]") if is_scalar(value): if value == iNaT or isna(value): value = iNaT else: value = np.array(value, copy=False) # have a scalar array-like (e.g. NaT) if value.ndim == 0: value = iNaT # we have an array of datetime or timedeltas & nulls elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype): try: if is_datetime64: value = to_datetime(value, errors=errors) # GH 25843: Remove tz information since the dtype # didn't specify one if value.tz is not None: value = value.tz_localize(None) value = value._values elif is_datetime64tz: # The string check can be removed once issue #13712 # is solved. String data that is passed with a # datetime64tz is assumed to be naive which should # be localized to the timezone. is_dt_string = is_string_dtype(value) value = to_datetime(value, errors=errors).array if is_dt_string: # Strings here are naive, so directly localize value = value.tz_localize(dtype.tz) else: # Numeric values are UTC at this point, # so localize and convert value = value.tz_localize("UTC").tz_convert(dtype.tz) elif is_timedelta64: value = to_timedelta(value, errors=errors)._values except OutOfBoundsDatetime: raise except (AttributeError, ValueError, TypeError): pass # coerce datetimelike to object elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype): if is_object_dtype(dtype): if value.dtype != DT64NS_DTYPE: value = value.astype(DT64NS_DTYPE) ints = np.asarray(value).view("i8") return tslib.ints_to_pydatetime(ints) # we have a non-castable dtype that was passed raise TypeError(f"Cannot cast datetime64 to {dtype}") else: is_array = isinstance(value, np.ndarray) # catch a datetime/timedelta that is not of ns variety # and no coercion specified if is_array and value.dtype.kind in ["M", "m"]: dtype = value.dtype if dtype.kind == "M" and dtype != DT64NS_DTYPE: value = tslibs.conversion.ensure_datetime64ns(value) elif dtype.kind == "m" and dtype != TD64NS_DTYPE: value = to_timedelta(value) # only do this if we have an array and the dtype of the array is not # setup already we are not an integer/object, so don't bother with this # conversion elif not ( is_array and not ( issubclass(value.dtype.type, np.integer) or value.dtype == np.object_ ) ): value = maybe_infer_to_datetimelike(value) return value
https://github.com/pandas-dev/pandas/issues/26469
In [8]: pd.Series(None, index=[1, 2, 3], dtype='Int64') --------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/scipy/pandas/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure) 691 if is_integer_dtype(dtype): --> 692 subarr = maybe_cast_to_integer_array(arr, dtype) 693 ~/scipy/pandas/pandas/core/dtypes/cast.py in maybe_cast_to_integer_array(arr, dtype, copy) 1311 if not hasattr(arr, "astype"): -> 1312 casted = np.array(arr, dtype=dtype, copy=copy) 1313 else: TypeError: data type not understood During handling of the above exception, another exception occurred: TypeError Traceback (most recent call last) <ipython-input-8-9447295feee6> in <module> ----> 1 pd.Series(None, index=[1, 2, 3], dtype='Int64') ~/scipy/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 202 data = data._data 203 elif isinstance(data, dict): --> 204 data, index = self._init_dict(data, index, dtype) 205 dtype = None 206 copy = False ~/scipy/pandas/pandas/core/series.py in _init_dict(self, data, index, dtype) 295 296 # Input is now list-like, so rely on "standard" construction: --> 297 s = Series(values, index=keys, dtype=dtype) 298 299 # Now we just make sure the order is respected, if any ~/scipy/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 253 else: 254 data = sanitize_array(data, index, dtype, copy, --> 255 raise_cast_failure=True) 256 257 data = SingleBlockManager(data, index, fastpath=True) ~/scipy/pandas/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure) 620 subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure) 621 else: --> 622 subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) 623 624 # scalar like, GH ~/scipy/pandas/pandas/core/internals/construction.py in _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure) 711 # create an extension array from its dtype 712 array_type = dtype.construct_array_type()._from_sequence --> 713 subarr = array_type(arr, dtype=dtype, copy=copy) 714 elif dtype is not None and raise_cast_failure: 715 raise ~/scipy/pandas/pandas/core/arrays/integer.py in _from_sequence(cls, scalars, dtype, copy) 305 @classmethod 306 def _from_sequence(cls, scalars, dtype=None, copy=False): --> 307 return integer_array(scalars, dtype=dtype, copy=copy) 308 309 @classmethod ~/scipy/pandas/pandas/core/arrays/integer.py in integer_array(values, dtype, copy) 110 TypeError if incompatible types 111 """ --> 112 values, mask = coerce_to_array(values, dtype=dtype, copy=copy) 113 return IntegerArray(values, mask) 114 ~/scipy/pandas/pandas/core/arrays/integer.py in coerce_to_array(values, dtype, mask, copy) 202 203 if not values.ndim == 1: --> 204 raise TypeError("values must be a 1D list-like") 205 if not mask.ndim == 1: 206 raise TypeError("mask must be a 1D list-like") TypeError: values must be a 1D list-like
TypeError
def _format_strings(self) -> List[str]: if self.float_format is None: float_format = get_option("display.float_format") if float_format is None: precision = get_option("display.precision") float_format = lambda x: f"{x: .{precision:d}g}" else: float_format = self.float_format formatter = ( self.formatter if self.formatter is not None else (lambda x: pprint_thing(x, escape_chars=("\t", "\r", "\n"))) ) def _format(x): if self.na_rep is not None and is_scalar(x) and isna(x): try: # try block for np.isnat specifically # determine na_rep if x is None or NaT-like if x is None: return "None" elif x is NA: return str(NA) elif x is NaT or np.isnat(x): return "NaT" except (TypeError, ValueError): # np.isnat only handles datetime or timedelta objects pass return self.na_rep elif isinstance(x, PandasObject): return str(x) else: # object dtype return str(formatter(x)) vals = extract_array(self.values, extract_numpy=True) is_float_type = ( lib.map_infer(vals, is_float) # vals may have 2 or more dimensions & np.all(notna(vals), axis=tuple(range(1, len(vals.shape)))) ) leading_space = self.leading_space if leading_space is None: leading_space = is_float_type.any() fmt_values = [] for i, v in enumerate(vals): if not is_float_type[i] and leading_space: fmt_values.append(f" {_format(v)}") elif is_float_type[i]: fmt_values.append(float_format(v)) else: if leading_space is False: # False specifically, so that the default is # to include a space if we get here. tpl = "{v}" else: tpl = " {v}" fmt_values.append(tpl.format(v=_format(v))) return fmt_values
def _format_strings(self) -> List[str]: if self.float_format is None: float_format = get_option("display.float_format") if float_format is None: precision = get_option("display.precision") float_format = lambda x: f"{x: .{precision:d}g}" else: float_format = self.float_format formatter = ( self.formatter if self.formatter is not None else (lambda x: pprint_thing(x, escape_chars=("\t", "\r", "\n"))) ) def _format(x): if self.na_rep is not None and is_scalar(x) and isna(x): try: # try block for np.isnat specifically # determine na_rep if x is None or NaT-like if x is None: return "None" elif x is NA: return str(NA) elif x is NaT or np.isnat(x): return "NaT" except (TypeError, ValueError): # np.isnat only handles datetime or timedelta objects pass return self.na_rep elif isinstance(x, PandasObject): return str(x) else: # object dtype return str(formatter(x)) vals = extract_array(self.values, extract_numpy=True) is_float_type = lib.map_infer(vals, is_float) & notna(vals) leading_space = self.leading_space if leading_space is None: leading_space = is_float_type.any() fmt_values = [] for i, v in enumerate(vals): if not is_float_type[i] and leading_space: fmt_values.append(f" {_format(v)}") elif is_float_type[i]: fmt_values.append(float_format(v)) else: if leading_space is False: # False specifically, so that the default is # to include a space if we get here. tpl = "{v}" else: tpl = " {v}" fmt_values.append(tpl.format(v=_format(v))) return fmt_values
https://github.com/pandas-dev/pandas/issues/33770
import pandas as pd import numpy as np import text_extensions_for_pandas as tp # Integers work int_tensors = np.array([[1, 2], [3, 4]]) int_tensor_series = pd.Series(tp.TensorArray(int_tensors)) int_tensor_series 0 [1 2] 1 [3 4] dtype: TensorType # Boolean values don't work bool_tensors = np.array([[True, False], [False, True]]) bool_tensor_series = pd.Series(tp.TensorArray(bool_tensors)) bool_tensor_series --------------------------------------------------------------------------- ValueError Traceback (most recent call last) ~/pd/covid-notebooks/env/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj) 700 type_pprinters=self.type_printers, 701 deferred_pprinters=self.deferred_printers) --> 702 printer.pretty(obj) 703 printer.flush() 704 return stream.getvalue() [...many lines of stack trace...] ~/pd/covid-notebooks/env/lib/python3.7/site-packages/pandas/io/formats/format.py in _format_strings(self) 1255 fmt_values = [] 1256 for i, v in enumerate(vals): -> 1257 if not is_float_type[i] and leading_space: 1258 fmt_values.append(" {v}".format(v=_format(v))) 1259 elif is_float_type[i]: ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
ValueError
def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex: if (levels is None and isinstance(keys[0], tuple)) or ( levels is not None and len(levels) > 1 ): zipped = list(zip(*keys)) if names is None: names = [None] * len(zipped) if levels is None: _, levels = factorize_from_iterables(zipped) else: levels = [ensure_index(x) for x in levels] else: zipped = [keys] if names is None: names = [None] if levels is None: levels = [ensure_index(keys)] else: levels = [ensure_index(x) for x in levels] if not all_indexes_same(indexes): codes_list = [] # things are potentially different sizes, so compute the exact codes # for each level and pass those to MultiIndex.from_arrays for hlevel, level in zip(zipped, levels): to_concat = [] for key, index in zip(hlevel, indexes): mask = level == key if not mask.any(): raise ValueError(f"Key {key} not in level {level}") i = np.nonzero(level == key)[0][0] to_concat.append(np.repeat(i, len(index))) codes_list.append(np.concatenate(to_concat)) concat_index = _concat_indexes(indexes) # these go at the end if isinstance(concat_index, MultiIndex): levels.extend(concat_index.levels) codes_list.extend(concat_index.codes) else: codes, categories = factorize_from_iterable(concat_index) levels.append(categories) codes_list.append(codes) if len(names) == len(levels): names = list(names) else: # make sure that all of the passed indices have the same nlevels if not len({idx.nlevels for idx in indexes}) == 1: raise AssertionError( "Cannot concat indices that do not have the same number of levels" ) # also copies names = names + get_consensus_names(indexes) return MultiIndex( levels=levels, codes=codes_list, names=names, verify_integrity=False ) new_index = indexes[0] n = len(new_index) kpieces = len(indexes) # also copies new_names = list(names) new_levels = list(levels) # construct codes new_codes = [] # do something a bit more speedy for hlevel, level in zip(zipped, levels): hlevel = ensure_index(hlevel) mapped = level.get_indexer(hlevel) mask = mapped == -1 if mask.any(): raise ValueError(f"Values not found in passed level: {hlevel[mask]!s}") new_codes.append(np.repeat(mapped, n)) if isinstance(new_index, MultiIndex): new_levels.extend(new_index.levels) new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes]) else: new_levels.append(new_index) new_codes.append(np.tile(np.arange(n), kpieces)) if len(new_names) < len(new_levels): new_names.extend(new_index.names) return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False )
def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex: if (levels is None and isinstance(keys[0], tuple)) or ( levels is not None and len(levels) > 1 ): zipped = list(zip(*keys)) if names is None: names = [None] * len(zipped) if levels is None: _, levels = factorize_from_iterables(zipped) else: levels = [ensure_index(x) for x in levels] else: zipped = [keys] if names is None: names = [None] if levels is None: levels = [ensure_index(keys)] else: levels = [ensure_index(x) for x in levels] if not all_indexes_same(indexes): codes_list = [] # things are potentially different sizes, so compute the exact codes # for each level and pass those to MultiIndex.from_arrays for hlevel, level in zip(zipped, levels): to_concat = [] for key, index in zip(hlevel, indexes): try: i = level.get_loc(key) except KeyError as err: raise ValueError(f"Key {key} not in level {level}") from err to_concat.append(np.repeat(i, len(index))) codes_list.append(np.concatenate(to_concat)) concat_index = _concat_indexes(indexes) # these go at the end if isinstance(concat_index, MultiIndex): levels.extend(concat_index.levels) codes_list.extend(concat_index.codes) else: codes, categories = factorize_from_iterable(concat_index) levels.append(categories) codes_list.append(codes) if len(names) == len(levels): names = list(names) else: # make sure that all of the passed indices have the same nlevels if not len({idx.nlevels for idx in indexes}) == 1: raise AssertionError( "Cannot concat indices that do not have the same number of levels" ) # also copies names = names + get_consensus_names(indexes) return MultiIndex( levels=levels, codes=codes_list, names=names, verify_integrity=False ) new_index = indexes[0] n = len(new_index) kpieces = len(indexes) # also copies new_names = list(names) new_levels = list(levels) # construct codes new_codes = [] # do something a bit more speedy for hlevel, level in zip(zipped, levels): hlevel = ensure_index(hlevel) mapped = level.get_indexer(hlevel) mask = mapped == -1 if mask.any(): raise ValueError(f"Values not found in passed level: {hlevel[mask]!s}") new_codes.append(np.repeat(mapped, n)) if isinstance(new_index, MultiIndex): new_levels.extend(new_index.levels) new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes]) else: new_levels.append(new_index) new_codes.append(np.tile(np.arange(n), kpieces)) if len(new_names) < len(new_levels): new_names.extend(new_index.names) return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False )
https://github.com/pandas-dev/pandas/issues/33654
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-2-f6a5f4790f76> in <module> 3 s1 = pd.Series([1,2,3], name='a') 4 s2 = pd.Series([1,2,3], name='a') ----> 5 pd.concat([df, s1, s2], axis=1, keys=['a', 'b', 'b']) ~/pandas-dev/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 269 ValueError: Indexes have overlapping values: ['a'] 270 """ --> 271 op = _Concatenator( 272 objs, 273 axis=axis, ~/pandas-dev/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 449 self.copy = copy 450 --> 451 self.new_axes = self._get_new_axes() 452 453 def get_result(self): ~/pandas-dev/pandas/core/reshape/concat.py in _get_new_axes(self) 512 def _get_new_axes(self) -> List[Index]: 513 ndim = self._get_result_dim() --> 514 return [ 515 self._get_concat_axis() if i == self.bm_axis else self._get_comb_axis(i) 516 for i in range(ndim) ~/pandas-dev/pandas/core/reshape/concat.py in <listcomp>(.0) 513 ndim = self._get_result_dim() 514 return [ --> 515 self._get_concat_axis() if i == self.bm_axis else self._get_comb_axis(i) 516 for i in range(ndim) 517 ] ~/pandas-dev/pandas/core/reshape/concat.py in _get_concat_axis(self) 569 concat_axis = _concat_indexes(indexes) 570 else: --> 571 concat_axis = _make_concat_multiindex( 572 indexes, self.keys, self.levels, self.names 573 ) ~/pandas-dev/pandas/core/reshape/concat.py in _make_concat_multiindex(indexes, keys, levels, names) 651 names = names + get_consensus_names(indexes) 652 --> 653 return MultiIndex( 654 levels=levels, codes=codes_list, names=names, verify_integrity=False 655 ) ~/pandas-dev/pandas/core/indexes/multi.py in __new__(cls, levels, codes, sortorder, names, dtype, copy, name, verify_integrity, _set_identity) 281 # we've already validated levels and codes, so shortcut here 282 result._set_levels(levels, copy=copy, validate=False) --> 283 result._set_codes(codes, copy=copy, validate=False) 284 285 result._names = [None] * len(levels) ~/pandas-dev/pandas/core/indexes/multi.py in _set_codes(self, codes, level, copy, validate, verify_integrity) 880 881 if level is None: --> 882 new_codes = FrozenList( 883 _coerce_indexer_frozen(level_codes, lev, copy=copy).view() 884 for lev, level_codes in zip(self._levels, codes) ~/pandas-dev/pandas/core/indexes/multi.py in <genexpr>(.0) 881 if level is None: 882 new_codes = FrozenList( --> 883 _coerce_indexer_frozen(level_codes, lev, copy=copy).view() 884 for lev, level_codes in zip(self._levels, codes) 885 ) ~/pandas-dev/pandas/core/indexes/multi.py in _coerce_indexer_frozen(array_like, categories, copy) 3681 Non-writeable. 3682 """ -> 3683 array_like = coerce_indexer_dtype(array_like, categories) 3684 if copy: 3685 array_like = array_like.copy() ~/pandas-dev/pandas/core/dtypes/cast.py in coerce_indexer_dtype(indexer, categories) 866 length = len(categories) 867 if length < _int8_max: --> 868 return ensure_int8(indexer) 869 elif length < _int16_max: 870 return ensure_int16(indexer) ~/pandas-dev/pandas/_libs/algos_common_helper.pxi in pandas._libs.algos.ensure_int8() 59 return arr 60 else: ---> 61 return arr.astype(np.int8, copy=copy) 62 else: 63 return np.array(arr, dtype=np.int8) TypeError: int() argument must be a string, a bytes-like object or a number, not 'slice'
TypeError
def __init__( self, obj, path_or_buf: Optional[FilePathOrBuffer[str]] = None, sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, cols=None, header: Union[bool, Sequence[Hashable]] = True, index: bool = True, index_label: Optional[Union[bool, Hashable, Sequence[Hashable]]] = None, mode: str = "w", encoding: Optional[str] = None, compression: Union[str, Mapping[str, str], None] = "infer", quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, quotechar='"', date_format: Optional[str] = None, doublequote: bool = True, escapechar: Optional[str] = None, decimal=".", ): self.obj = obj if path_or_buf is None: path_or_buf = StringIO() # Extract compression mode as given, if dict compression, self.compression_args = get_compression_method(compression) self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode ) self.sep = sep self.na_rep = na_rep self.float_format = float_format self.decimal = decimal self.header = header self.index = index self.index_label = index_label self.mode = mode if encoding is None: encoding = "utf-8" self.encoding = encoding self.compression = infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL self.quoting = quoting if quoting == csvlib.QUOTE_NONE: # prevents crash in _csv quotechar = None self.quotechar = quotechar self.doublequote = doublequote self.escapechar = escapechar self.line_terminator = line_terminator or os.linesep self.date_format = date_format self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex) # validate mi options if self.has_mi_columns: if cols is not None: raise TypeError("cannot specify cols with a MultiIndex on the columns") if cols is not None: if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) self.obj = self.obj.loc[:, cols] # update columns to include possible multiplicity of dupes # and make sure sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) # save it self.cols = cols # preallocate data 2d list ncols = self.obj.shape[-1] self.data = [None] * ncols if chunksize is None: chunksize = (100000 // (len(self.cols) or 1)) or 1 self.chunksize = int(chunksize) self.data_index = obj.index if ( isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and date_format is not None ): from pandas import Index self.data_index = Index( [x.strftime(date_format) if notna(x) else "" for x in self.data_index] ) self.nlevels = getattr(self.data_index, "nlevels", 1) if not index: self.nlevels = 0
def __init__( self, obj, path_or_buf: Optional[FilePathOrBuffer[str]] = None, sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, cols=None, header: Union[bool, Sequence[Hashable]] = True, index: bool = True, index_label: Optional[Union[bool, Hashable, Sequence[Hashable]]] = None, mode: str = "w", encoding: Optional[str] = None, compression: Union[str, Mapping[str, str], None] = "infer", quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, quotechar='"', date_format: Optional[str] = None, doublequote: bool = True, escapechar: Optional[str] = None, decimal=".", ): self.obj = obj if path_or_buf is None: path_or_buf = StringIO() # Extract compression mode as given, if dict compression, self.compression_args = get_compression_method(compression) self.path_or_buf, _, _, _ = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode ) self.sep = sep self.na_rep = na_rep self.float_format = float_format self.decimal = decimal self.header = header self.index = index self.index_label = index_label self.mode = mode if encoding is None: encoding = "utf-8" self.encoding = encoding self.compression = infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL self.quoting = quoting if quoting == csvlib.QUOTE_NONE: # prevents crash in _csv quotechar = None self.quotechar = quotechar self.doublequote = doublequote self.escapechar = escapechar self.line_terminator = line_terminator or os.linesep self.date_format = date_format self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex) # validate mi options if self.has_mi_columns: if cols is not None: raise TypeError("cannot specify cols with a MultiIndex on the columns") if cols is not None: if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) self.obj = self.obj.loc[:, cols] # update columns to include possible multiplicity of dupes # and make sure sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, ABCIndexClass): cols = cols.to_native_types( na_rep=na_rep, float_format=float_format, date_format=date_format, quoting=self.quoting, ) else: cols = list(cols) # save it self.cols = cols # preallocate data 2d list ncols = self.obj.shape[-1] self.data = [None] * ncols if chunksize is None: chunksize = (100000 // (len(self.cols) or 1)) or 1 self.chunksize = int(chunksize) self.data_index = obj.index if ( isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and date_format is not None ): from pandas import Index self.data_index = Index( [x.strftime(date_format) if notna(x) else "" for x in self.data_index] ) self.nlevels = getattr(self.data_index, "nlevels", 1) if not index: self.nlevels = 0
https://github.com/pandas-dev/pandas/issues/27679
Exception ignored in: <function AbstractBufferedFile.__del__ at 0x7fe0ae8db440> Traceback (most recent call last): File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 1137, in __del__ self.close() File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 1114, in close self.flush(force=True) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 986, in flush self._initiate_upload() File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 951, in _initiate_upload Bucket=self.bucket, Key=self.key, ACL=self.acl) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 939, in _call_s3 **kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 182, in _call_s3 return method(**additional_kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call return self._make_api_call(operation_name, kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 648, in _make_api_call operation_model, request_dict, request_context) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 667, in _make_request return self._endpoint.make_request(operation_model, request_dict) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 102, in make_request return self._send_request(request_dict, operation_model) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 132, in _send_request request = self.create_request(request_dict, operation_model) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 116, in create_request operation_name=operation_model.name) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 356, in emit return self._emitter.emit(aliased_event_name, **kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 228, in emit return self._emit(event_name, kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 211, in _emit response = handler(**kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/signers.py", line 90, in handler return self.sign(operation_name, request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/signers.py", line 157, in sign auth.add_auth(request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/auth.py", line 425, in add_auth super(S3SigV4Auth, self).add_auth(request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/auth.py", line 357, in add_auth raise NoCredentialsError botocore.exceptions.NoCredentialsError: Unable to locate credentials
botocore.exceptions.NoCredentialsError
def save(self) -> None: """ Create the writer & save. """ # GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, "write"): warnings.warn( "compression has no effect when passing file-like object as input.", RuntimeWarning, stacklevel=2, ) # when zip compression is called. is_zip = isinstance(self.path_or_buf, ZipFile) or ( not hasattr(self.path_or_buf, "write") and self.compression == "zip" ) if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, "write"): f = self.path_or_buf close = False else: f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, compression=dict(self.compression_args, method=self.compression), ) close = True try: # Note: self.encoding is irrelevant here self.writer = csvlib.writer( f, lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar, ) self._save() finally: if is_zip: # GH17778 handles zip compression separately. buf = f.getvalue() if hasattr(self.path_or_buf, "write"): self.path_or_buf.write(buf) else: compression = dict(self.compression_args, method=self.compression) f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, compression=compression, ) f.write(buf) close = True if close: f.close() for _fh in handles: _fh.close() elif self.should_close: f.close()
def save(self) -> None: """ Create the writer & save. """ # GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, "write"): warnings.warn( "compression has no effect when passing file-like object as input.", RuntimeWarning, stacklevel=2, ) # when zip compression is called. is_zip = isinstance(self.path_or_buf, ZipFile) or ( not hasattr(self.path_or_buf, "write") and self.compression == "zip" ) if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, "write"): f = self.path_or_buf close = False else: f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, compression=dict(self.compression_args, method=self.compression), ) close = True try: # Note: self.encoding is irrelevant here self.writer = csvlib.writer( f, lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar, ) self._save() finally: if is_zip: # GH17778 handles zip compression separately. buf = f.getvalue() if hasattr(self.path_or_buf, "write"): self.path_or_buf.write(buf) else: compression = dict(self.compression_args, method=self.compression) f, handles = get_handle( self.path_or_buf, self.mode, encoding=self.encoding, compression=compression, ) f.write(buf) close = True if close: f.close() for _fh in handles: _fh.close()
https://github.com/pandas-dev/pandas/issues/27679
Exception ignored in: <function AbstractBufferedFile.__del__ at 0x7fe0ae8db440> Traceback (most recent call last): File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 1137, in __del__ self.close() File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 1114, in close self.flush(force=True) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 986, in flush self._initiate_upload() File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 951, in _initiate_upload Bucket=self.bucket, Key=self.key, ACL=self.acl) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 939, in _call_s3 **kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 182, in _call_s3 return method(**additional_kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call return self._make_api_call(operation_name, kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 648, in _make_api_call operation_model, request_dict, request_context) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 667, in _make_request return self._endpoint.make_request(operation_model, request_dict) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 102, in make_request return self._send_request(request_dict, operation_model) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 132, in _send_request request = self.create_request(request_dict, operation_model) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 116, in create_request operation_name=operation_model.name) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 356, in emit return self._emitter.emit(aliased_event_name, **kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 228, in emit return self._emit(event_name, kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 211, in _emit response = handler(**kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/signers.py", line 90, in handler return self.sign(operation_name, request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/signers.py", line 157, in sign auth.add_auth(request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/auth.py", line 425, in add_auth super(S3SigV4Auth, self).add_auth(request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/auth.py", line 357, in add_auth raise NoCredentialsError botocore.exceptions.NoCredentialsError: Unable to locate credentials
botocore.exceptions.NoCredentialsError
def write( self, df: DataFrame, path, compression="snappy", index: Optional[bool] = None, partition_cols=None, **kwargs, ): self.validate_dataframe(df) path, _, _, should_close = get_filepath_or_buffer(path, mode="wb") from_pandas_kwargs: Dict[str, Any] = {"schema": kwargs.pop("schema", None)} if index is not None: from_pandas_kwargs["preserve_index"] = index table = self.api.Table.from_pandas(df, **from_pandas_kwargs) if partition_cols is not None: self.api.parquet.write_to_dataset( table, path, compression=compression, partition_cols=partition_cols, **kwargs, ) else: self.api.parquet.write_table(table, path, compression=compression, **kwargs) if should_close: path.close()
def write( self, df: DataFrame, path, compression="snappy", index: Optional[bool] = None, partition_cols=None, **kwargs, ): self.validate_dataframe(df) path, _, _, _ = get_filepath_or_buffer(path, mode="wb") from_pandas_kwargs: Dict[str, Any] = {"schema": kwargs.pop("schema", None)} if index is not None: from_pandas_kwargs["preserve_index"] = index table = self.api.Table.from_pandas(df, **from_pandas_kwargs) if partition_cols is not None: self.api.parquet.write_to_dataset( table, path, compression=compression, partition_cols=partition_cols, **kwargs, ) else: self.api.parquet.write_table(table, path, compression=compression, **kwargs)
https://github.com/pandas-dev/pandas/issues/27679
Exception ignored in: <function AbstractBufferedFile.__del__ at 0x7fe0ae8db440> Traceback (most recent call last): File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 1137, in __del__ self.close() File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 1114, in close self.flush(force=True) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/fsspec/spec.py", line 986, in flush self._initiate_upload() File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 951, in _initiate_upload Bucket=self.bucket, Key=self.key, ACL=self.acl) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 939, in _call_s3 **kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/s3fs/core.py", line 182, in _call_s3 return method(**additional_kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call return self._make_api_call(operation_name, kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 648, in _make_api_call operation_model, request_dict, request_context) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/client.py", line 667, in _make_request return self._endpoint.make_request(operation_model, request_dict) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 102, in make_request return self._send_request(request_dict, operation_model) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 132, in _send_request request = self.create_request(request_dict, operation_model) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/endpoint.py", line 116, in create_request operation_name=operation_model.name) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 356, in emit return self._emitter.emit(aliased_event_name, **kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 228, in emit return self._emit(event_name, kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/hooks.py", line 211, in _emit response = handler(**kwargs) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/signers.py", line 90, in handler return self.sign(operation_name, request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/signers.py", line 157, in sign auth.add_auth(request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/auth.py", line 425, in add_auth super(S3SigV4Auth, self).add_auth(request) File "/home/languitar/.pyenv/versions/analytics-3.7/lib/python3.7/site-packages/botocore/auth.py", line 357, in add_auth raise NoCredentialsError botocore.exceptions.NoCredentialsError: Unable to locate credentials
botocore.exceptions.NoCredentialsError
def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determin if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional(): return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key]
def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determin if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional(): return self.loc[key] else: return self.iloc[key] if isinstance(key, list): # handle the dup indexing case GH#4246 return self.loc[key] return self.reindex(key)
https://github.com/pandas-dev/pandas/issues/33642
ser = pd.Series(["A", "B"]) key = pd.Series(["C"]) ser[key] C NaN dtype: object ser[pd.Index(key)] C NaN dtype: object ser[np.array(key)] C NaN dtype: object ser[list(key)] Traceback (most recent call last): [...] File "/Users/bmendel/Desktop/pd/pandas/pandas/core/indexing.py", line 1312, in _validate_read_indexer raise KeyError(f"None of [{key}] are in the [{axis_name}]") KeyError: "None of [Index(['C'], dtype='object')] are in the [index]"
KeyError
def __getitem__(self, key): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (getting)!") return self.obj.loc[key] return super().__getitem__(key)
def __getitem__(self, key): if self.ndim != 1 or not is_scalar(key): # FIXME: is_scalar check is a kludge return super().__getitem__(key) # Like Index.get_value, but we do not allow positional fallback obj = self.obj loc = obj.index.get_loc(key) return obj.index._get_values_for_loc(obj, loc, key)
https://github.com/pandas-dev/pandas/issues/33041
arr = np.random.randn(6).reshape(3, 2) df = pd.DataFrame(arr, columns=["A", "A"]) df.at[0, "A"] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pandas/core/indexing.py", line 2069, in __getitem__ return super().__getitem__(key) File "pandas/core/indexing.py", line 2034, in __getitem__ return self.obj._get_value(*key, takeable=self._takeable) File "pandas/core/frame.py", line 2706, in _get_value series = self._get_item_cache(col) File "pandas/core/generic.py", line 3550, in _get_item_cache res = self._box_item_values(item, values) File "pandas/core/frame.py", line 2880, in _box_item_values return self._constructor(values.T, columns=items, index=self.index) AttributeError: 'BlockManager' object has no attribute 'T'
AttributeError
def __setitem__(self, key, value): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (setting)!") self.obj.loc[key] = value return return super().__setitem__(key, value)
def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = _tuplify(self.ndim, key) if len(key) != self.ndim: raise ValueError("Not enough indexers for scalar access (setting)!") key = list(self._convert_key(key, is_setter=True)) self.obj._set_value(*key, value=value, takeable=self._takeable)
https://github.com/pandas-dev/pandas/issues/33041
arr = np.random.randn(6).reshape(3, 2) df = pd.DataFrame(arr, columns=["A", "A"]) df.at[0, "A"] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pandas/core/indexing.py", line 2069, in __getitem__ return super().__getitem__(key) File "pandas/core/indexing.py", line 2034, in __getitem__ return self.obj._get_value(*key, takeable=self._takeable) File "pandas/core/frame.py", line 2706, in _get_value series = self._get_item_cache(col) File "pandas/core/generic.py", line 3550, in _get_item_cache res = self._box_item_values(item, values) File "pandas/core/frame.py", line 2880, in _box_item_values return self._constructor(values.T, columns=items, index=self.index) AttributeError: 'BlockManager' object has no attribute 'T'
AttributeError
def quantile(self, q=0.5, interpolation: str = "linear"): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """ from pandas import concat def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against 'object' dtypes!") inference = None if is_integer_dtype(vals.dtype): if is_extension_array_dtype(vals.dtype): vals = vals.to_numpy(dtype=float, na_value=np.nan) inference = np.int64 elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype): vals = vals.to_numpy(dtype=float, na_value=np.nan) elif is_datetime64_dtype(vals.dtype): inference = "datetime64[ns]" vals = np.asarray(vals).astype(np.float) return vals, inference def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray: if inference: # Check for edge case if not ( is_integer_dtype(inference) and interpolation in {"linear", "midpoint"} ): vals = vals.astype(inference) return vals if is_scalar(q): return self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=q, interpolation=interpolation, ) else: results = [ self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=qi, interpolation=interpolation, ) for qi in q ] result = concat(results, axis=0, keys=q) # fix levels to place quantiles on the inside # TODO(GH-10710): Ideally, we could write this as # >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :] # but this hits https://github.com/pandas-dev/pandas/issues/10710 # which doesn't reorder the list-like `q` on the inner level. order = list(range(1, result.index.nlevels)) + [0] # temporarily saves the index names index_names = np.array(result.index.names) # set index names to positions to avoid confusion result.index.names = np.arange(len(index_names)) # place quantiles on the inside result = result.reorder_levels(order) # restore the index names in order result.index.names = index_names[order] # reorder rows to keep things sorted indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten() return result.take(indices)
def quantile(self, q=0.5, interpolation: str = "linear"): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """ from pandas import concat def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against 'object' dtypes!") inference = None if is_integer_dtype(vals): inference = np.int64 elif is_datetime64_dtype(vals): inference = "datetime64[ns]" vals = np.asarray(vals).astype(np.float) return vals, inference def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray: if inference: # Check for edge case if not ( is_integer_dtype(inference) and interpolation in {"linear", "midpoint"} ): vals = vals.astype(inference) return vals if is_scalar(q): return self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=q, interpolation=interpolation, ) else: results = [ self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=qi, interpolation=interpolation, ) for qi in q ] result = concat(results, axis=0, keys=q) # fix levels to place quantiles on the inside # TODO(GH-10710): Ideally, we could write this as # >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :] # but this hits https://github.com/pandas-dev/pandas/issues/10710 # which doesn't reorder the list-like `q` on the inner level. order = list(range(1, result.index.nlevels)) + [0] # temporarily saves the index names index_names = np.array(result.index.names) # set index names to positions to avoid confusion result.index.names = np.arange(len(index_names)) # place quantiles on the inside result = result.reorder_levels(order) # restore the index names in order result.index.names = index_names[order] # reorder rows to keep things sorted indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten() return result.take(indices)
https://github.com/pandas-dev/pandas/issues/33136
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-ef27f953b945> in <module> 4 {"a": ["x", "x", "y", "y"], "b": pd.array([1, 2, 3, 4], dtype="Int64")} 5 ) ----> 6 df.groupby("a")["b"].quantile(0.5) ~/opt/miniconda3/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in quantile(self, q, interpolation) 1911 post_processing=post_processor, 1912 q=q, -> 1913 interpolation=interpolation, 1914 ) 1915 else: ~/opt/miniconda3/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _get_cythonized_result(self, how, cython_dtype, aggregate, needs_values, needs_mask, needs_ngroups, result_is_index, pre_processing, post_processing, **kwargs) 2289 func = partial(func, ngroups) 2290 -> 2291 func(**kwargs) # Call func to modify indexer values in place 2292 2293 if result_is_index: pandas/_libs/groupby.pyx in pandas._libs.groupby.__pyx_fused_cpdef() TypeError: No matching signature found
TypeError
def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against 'object' dtypes!") inference = None if is_integer_dtype(vals.dtype): if is_extension_array_dtype(vals.dtype): vals = vals.to_numpy(dtype=float, na_value=np.nan) inference = np.int64 elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype): vals = vals.to_numpy(dtype=float, na_value=np.nan) elif is_datetime64_dtype(vals.dtype): inference = "datetime64[ns]" vals = np.asarray(vals).astype(np.float) return vals, inference
def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against 'object' dtypes!") inference = None if is_integer_dtype(vals): inference = np.int64 elif is_datetime64_dtype(vals): inference = "datetime64[ns]" vals = np.asarray(vals).astype(np.float) return vals, inference
https://github.com/pandas-dev/pandas/issues/33136
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-ef27f953b945> in <module> 4 {"a": ["x", "x", "y", "y"], "b": pd.array([1, 2, 3, 4], dtype="Int64")} 5 ) ----> 6 df.groupby("a")["b"].quantile(0.5) ~/opt/miniconda3/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in quantile(self, q, interpolation) 1911 post_processing=post_processor, 1912 q=q, -> 1913 interpolation=interpolation, 1914 ) 1915 else: ~/opt/miniconda3/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _get_cythonized_result(self, how, cython_dtype, aggregate, needs_values, needs_mask, needs_ngroups, result_is_index, pre_processing, post_processing, **kwargs) 2289 func = partial(func, ngroups) 2290 -> 2291 func(**kwargs) # Call func to modify indexer values in place 2292 2293 if result_is_index: pandas/_libs/groupby.pyx in pandas._libs.groupby.__pyx_fused_cpdef() TypeError: No matching signature found
TypeError
def __init__( self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None ): self._encoding = encoding self._lines_read = 0 self._index = index self._chunksize = chunksize if isinstance(filepath_or_buffer, str): ( filepath_or_buffer, encoding, compression, should_close, ) = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding) if isinstance(filepath_or_buffer, (str, bytes)): self.filepath_or_buffer = open(filepath_or_buffer, "rb") else: # Since xport files include non-text byte sequences, xport files # should already be opened in binary mode in Python 3. self.filepath_or_buffer = filepath_or_buffer self._read_header()
def __init__( self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None ): self._encoding = encoding self._lines_read = 0 self._index = index self._chunksize = chunksize if isinstance(filepath_or_buffer, str): ( filepath_or_buffer, encoding, compression, should_close, ) = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding) if isinstance(filepath_or_buffer, (str, bytes)): self.filepath_or_buffer = open(filepath_or_buffer, "rb") else: # Copy to BytesIO, and ensure no encoding contents = filepath_or_buffer.read() try: contents = contents.encode(self._encoding) except UnicodeEncodeError: pass self.filepath_or_buffer = BytesIO(contents) self._read_header()
https://github.com/pandas-dev/pandas/issues/33069
Traceback (most recent call last): File "after.py", line 15, in <module> df = pd.read_sas(f, format="xport") File "/Users/swast/miniconda3/envs/scratch/lib/python3.7/site-packages/pandas/io/sas/sasreader.py", line 70, in read_sas filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize File "/Users/swast/miniconda3/envs/scratch/lib/python3.7/site-packages/pandas/io/sas/sas_xport.py", line 280, in __init__ contents = contents.encode(self._encoding) AttributeError: 'bytes' object has no attribute 'encode' (scratch)
AttributeError
def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, ABCMultiIndex): raise TypeError( f"Can only count levels on hierarchical {self._get_axis_name(axis)}." ) # Mask NaNs: Mask rows or columns where the index level is NaN, and all # values in the DataFrame that are NaN if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object values_mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes values_mask = notna(frame.values) index_mask = notna(count_axis.get_level_values(level=level)) if axis == 1: mask = index_mask & values_mask else: mask = index_mask.reshape(-1, 1) & values_mask if isinstance(level, str): level = count_axis._get_level_number(level) level_name = count_axis._names[level] level_index = count_axis.levels[level]._shallow_copy(name=level_name) level_codes = ensure_int64(count_axis.codes[level]) counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis) if axis == 1: result = DataFrame(counts, index=agg_axis, columns=level_index) else: result = DataFrame(counts, index=level_index, columns=agg_axis) return result
def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, ABCMultiIndex): raise TypeError( f"Can only count levels on hierarchical {self._get_axis_name(axis)}." ) if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes mask = notna(frame.values) if axis == 1: # We're transposing the mask rather than frame to avoid potential # upcasts to object, which induces a ~20x slowdown mask = mask.T if isinstance(level, str): level = count_axis._get_level_number(level) level_name = count_axis._names[level] level_index = count_axis.levels[level]._shallow_copy(name=level_name) level_codes = ensure_int64(count_axis.codes[level]) counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) if axis == 1: # Undo our earlier transpose return result.T else: return result
https://github.com/pandas-dev/pandas/issues/32841
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/pandas/pandas/core/groupby/generic.py", line 1784, in count blocks = [make_block(val, placement=loc) for val, loc in zip(counted, locs)] File "/pandas/pandas/core/groupby/generic.py", line 1784, in <listcomp> blocks = [make_block(val, placement=loc) for val, loc in zip(counted, locs)] File "/pandas/pandas/core/groupby/generic.py", line 1782, in <genexpr> lib.count_level_2d(x, labels=ids, max_bin=ngroups, axis=1) for x in vals File "pandas/_libs/lib.pyx", line 803, in pandas._libs.lib.count_level_2d counts[i, labels[j]] += mask[i, j] IndexError: Out of bounds on buffer access (axis 1)
IndexError
def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) is_positional = is_index_slice and not (self.is_integer() or self.is_categorical()) if kind == "getitem": """ called from the getitem slicers, validate that we are in fact integers """ if self.is_integer() or is_index_slice: self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional and self.is_mixed(): try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 warnings.warn( "Slicing a positional slice with .loc is not supported, " "and will raise TypeError in a future version. " "Use .loc with labels or .iloc with positions instead.", FutureWarning, stacklevel=6, ) indexer = key else: indexer = self.slice_indexer(start, stop, step, kind=kind) return indexer
def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) is_positional = is_index_slice and not (self.is_integer() or self.is_categorical()) if kind == "getitem": """ called from the getitem slicers, validate that we are in fact integers """ if self.is_integer() or is_index_slice: self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional and self.is_mixed(): try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): indexer = key elif is_positional: indexer = key else: indexer = self.slice_indexer(start, stop, step, kind=kind) return indexer
https://github.com/pandas-dev/pandas/issues/16121
In [2]: df = pd.DataFrame(-1, index=['i', 'ii', 'iii'], columns=pd.MultiIndex.from_tuples([['A', 'a'], ['B', 'b']])) In [3]: df.loc[1:, 'A'] = '' In [4]: df Out[4]: A B a b i -1 -1 ii -1 iii -1 In [5]: df.loc[1:, 'A'] --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-98b6153a5b09> in <module>() ----> 1 df.loc[1:, 'A'] /home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in __getitem__(self, key) 1322 except (KeyError, IndexError): 948 949 # we maybe be using a tuple to represent multiple dimensions here /home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_nested_tuple(self, tup) 1020 1021 current_ndim = obj.ndim -> 1022 obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) 1023 axis += 1 1024 /home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis) 1503 if isinstance(key, slice): 1504 self._has_valid_type(key, axis) -> 1505 return self._get_slice_axis(key, axis=axis) 1506 elif is_bool_indexer(key): 1507 return self._getbool_axis(key, axis=axis) /home/pietro/nobackup/repo/pandas/pandas/core/indexing.py in _get_slice_axis(self, slice_obj, axis) 1353 labels = obj._get_axis(axis) 1354 indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, -> 1355 slice_obj.step, kind=self.name) 1356 1357 if isinstance(indexer, slice): /home/pietro/nobackup/repo/pandas/pandas/indexes/base.py in slice_indexer(self, start, end, step, kind) 3247 """ 3248 start_slice, end_slice = self.slice_locs(start, end, step=step, -> 3249 kind=kind) 3250 3251 # return a slice /home/pietro/nobackup/repo/pandas/pandas/indexes/base.py in slice_locs(self, start, end, step, kind) 3428 start_slice = None 3429 if start is not None: -> 3430 start_slice = self.get_slice_bound(start, 'left', kind) 3431 if start_slice is None: 3432 start_slice = 0 /home/pietro/nobackup/repo/pandas/pandas/indexes/base.py in get_slice_bound(self, label, side, kind) 3367 # For datetime indices label may be a string that has to be converted 3368 # to datetime boundary according to its resolution. -> 3369 label = self._maybe_cast_slice_bound(label, side, kind) 3370 3371 # we need to look up the label /home/pietro/nobackup/repo/pandas/pandas/indexes/base.py in _maybe_cast_slice_bound(self, label, side, kind) 3325 # this is rejected (generally .loc gets you here) 3326 elif is_integer(label): -> 3327 self._invalid_indexer('slice', label) 3328 3329 return label /home/pietro/nobackup/repo/pandas/pandas/indexes/base.py in _invalid_indexer(self, form, key) 1447 "indexers [{key}] of {kind}".format( 1448 form=form, klass=type(self), key=key, -> 1449 kind=type(key))) 1450 1451 def get_duplicates(self): TypeError: cannot do slice indexing on <class 'pandas.indexes.base.Index'> with these indexers [1] of <class 'int'>
TypeError
def _make_reader(self, f): sep = self.delimiter if sep is None or len(sep) == 1: if self.lineterminator: raise ValueError( "Custom line terminators not supported in python parser (yet)" ) class MyDialect(csv.Dialect): delimiter = self.delimiter quotechar = self.quotechar escapechar = self.escapechar doublequote = self.doublequote skipinitialspace = self.skipinitialspace quoting = self.quoting lineterminator = "\n" dia = MyDialect if sep is not None: dia.delimiter = sep else: # attempt to sniff the delimiter from the first valid line, # i.e. no comment line and not in skiprows line = f.readline() lines = self._check_comments([[line]])[0] while self.skipfunc(self.pos) or not lines: self.pos += 1 line = f.readline() lines = self._check_comments([[line]])[0] # since `line` was a string, lines will be a list containing # only a single string line = lines[0] self.pos += 1 self.line_pos += 1 sniffed = csv.Sniffer().sniff(line) dia.delimiter = sniffed.delimiter # Note: self.encoding is irrelevant here line_rdr = csv.reader(StringIO(line), dialect=dia) self.buf.extend(list(line_rdr)) # Note: self.encoding is irrelevant here reader = csv.reader(f, dialect=dia, strict=True) else: def _read(): line = f.readline() pat = re.compile(sep) yield pat.split(line.strip()) for line in f: yield pat.split(line.strip()) reader = _read() self.data = reader
def _make_reader(self, f): sep = self.delimiter if sep is None or len(sep) == 1: if self.lineterminator: raise ValueError( "Custom line terminators not supported in python parser (yet)" ) class MyDialect(csv.Dialect): delimiter = self.delimiter quotechar = self.quotechar escapechar = self.escapechar doublequote = self.doublequote skipinitialspace = self.skipinitialspace quoting = self.quoting lineterminator = "\n" dia = MyDialect sniff_sep = True if sep is not None: sniff_sep = False dia.delimiter = sep # attempt to sniff the delimiter if sniff_sep: line = f.readline() while self.skipfunc(self.pos): self.pos += 1 line = f.readline() line = self._check_comments([line])[0] self.pos += 1 self.line_pos += 1 sniffed = csv.Sniffer().sniff(line) dia.delimiter = sniffed.delimiter # Note: self.encoding is irrelevant here line_rdr = csv.reader(StringIO(line), dialect=dia) self.buf.extend(list(line_rdr)) # Note: self.encoding is irrelevant here reader = csv.reader(f, dialect=dia, strict=True) else: def _read(): line = f.readline() pat = re.compile(sep) yield pat.split(line.strip()) for line in f: yield pat.split(line.strip()) reader = _read() self.data = reader
https://github.com/pandas-dev/pandas/issues/31396
TypeError Traceback (most recent call last) <ipython-input-17-c89b3c3e691f> in <module> ----> 1 pd.read_csv('data.csv', sep=None, comment='#') ~/.local/share/virtualenvs/openqlab/lib/python3.8/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfoote r, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirs t, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encodin g, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision) 683 ) 684 --> 685 return _read(filepath_or_buffer, kwds) 686 687 parser_f.__name__ = name ~/.local/share/virtualenvs/openqlab/lib/python3.8/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds) 455 456 # Create the parser. --> 457 parser = TextFileReader(fp_or_buf, **kwds) 458 459 if chunksize or iterator: ~/.local/share/virtualenvs/openqlab/lib/python3.8/site-packages/pandas/io/parsers.py in __init__(self, f, engine, **kwds) 893 self.options["has_index_names"] = kwds["has_index_names"] 894 --> 895 self._make_engine(self.engine) 896 897 def close(self): ~/.local/share/virtualenvs/openqlab/lib/python3.8/site-packages/pandas/io/parsers.py in _make_engine(self, engine) 1145 ' "python-fwf")'.format(engine=engine) 1146 ) -> 1147 self._engine = klass(self.f, **self.options) 1148 1149 def _failover_to_python(self): ~/.local/share/virtualenvs/openqlab/lib/python3.8/site-packages/pandas/io/parsers.py in __init__(self, f, **kwds) 2297 # Set self.data to something that can read lines. 2298 if hasattr(f, "readline"): -> 2299 self._make_reader(f) 2300 else: 2301 self.data = f ~/.local/share/virtualenvs/openqlab/lib/python3.8/site-packages/pandas/io/parsers.py in _make_reader(self, f) 2427 self.pos += 1 2428 self.line_pos += 1 -> 2429 sniffed = csv.Sniffer().sniff(line) 2430 dia.delimiter = sniffed.delimiter 2431 if self.encoding is not None: /usr/lib64/python3.8/csv.py in sniff(self, sample, delimiters) 179 180 quotechar, doublequote, delimiter, skipinitialspace = \ --> 181 self._guess_quote_and_delimiter(sample, delimiters) 182 if not delimiter: 183 delimiter, skipinitialspace = self._guess_delimiter(sample, /usr/lib64/python3.8/csv.py in _guess_quote_and_delimiter(self, data, delimiters) 220 r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) 221 regexp = re.compile(restr, re.DOTALL | re.MULTILINE) --> 222 matches = regexp.findall(data) 223 if matches: 224 break TypeError: expected string or bytes-like object
TypeError
def __setitem__(self, key, value) -> None: value = extract_array(value, extract_numpy=True) key = check_array_indexer(self, key) scalar_value = lib.is_scalar(value) if not scalar_value: value = np.asarray(value, dtype=self._ndarray.dtype) self._ndarray[key] = value
def __setitem__(self, key, value) -> None: value = extract_array(value, extract_numpy=True) key = check_array_indexer(self, key) scalar_key = lib.is_scalar(key) scalar_value = lib.is_scalar(value) if not scalar_key and scalar_value: key = np.asarray(key) if not scalar_value: value = np.asarray(value, dtype=self._ndarray.dtype) self._ndarray[key] = value
https://github.com/pandas-dev/pandas/issues/31772
In [33]: df = pd.DataFrame({'A': ['a', 'b', 'c']}, dtype='string') In [34]: df.loc[0:, 'A'] = "test" --------------------------------------------------------------------------- IndexError Traceback (most recent call last) <ipython-input-34-bc65087be9d7> in <module> ----> 1 df.loc[0:, 'A'] = "test" ~/scipy/pandas/pandas/core/indexing.py in __setitem__(self, key, value) 629 key = com.apply_if_callable(key, self.obj) 630 indexer = self._get_setitem_indexer(key) --> 631 self._setitem_with_indexer(indexer, value) 632 633 def _validate_key(self, key, axis: int): ~/scipy/pandas/pandas/core/indexing.py in _setitem_with_indexer(self, indexer, value) 1014 # actually do the set 1015 self.obj._consolidate_inplace() -> 1016 self.obj._data = self.obj._data.setitem(indexer=indexer, value=value) 1017 self.obj._maybe_update_cacher(clear=True) 1018 ~/scipy/pandas/pandas/core/internals/managers.py in setitem(self, **kwargs) 538 539 def setitem(self, **kwargs): --> 540 return self.apply("setitem", **kwargs) 541 542 def putmask(self, **kwargs): ~/scipy/pandas/pandas/core/internals/managers.py in apply(self, f, filter, **kwargs) 417 applied = b.apply(f, **kwargs) 418 else: --> 419 applied = getattr(b, f)(**kwargs) 420 result_blocks = _extend_blocks(applied, result_blocks) 421 ~/scipy/pandas/pandas/core/internals/blocks.py in setitem(self, indexer, value) 1801 1802 check_setitem_lengths(indexer, value, self.values) -> 1803 self.values[indexer] = value 1804 return self 1805 ~/scipy/pandas/pandas/core/arrays/string_.py in __setitem__(self, key, value) 260 raise ValueError("Must provide strings.") 261 --> 262 super().__setitem__(key, value) 263 264 def fillna(self, value=None, method=None, limit=None): ~/scipy/pandas/pandas/core/arrays/numpy_.py in __setitem__(self, key, value) 273 value = np.asarray(value, dtype=self._ndarray.dtype) 274 --> 275 self._ndarray[key] = value 276 277 def __len__(self) -> int: IndexError: arrays used as indices must be of integer (or boolean) type
IndexError
def from_codes(cls, codes, categories=None, ordered=None, dtype=None): """ Make a Categorical type from codes and categories or dtype. This constructor is useful if you already have codes and categories/dtype and so do not need the (computation intensive) factorization step, which is usually done on the constructor. If your data does not follow this convention, please use the normal constructor. Parameters ---------- codes : array-like of int An integer array, where each integer points to a category in categories or dtype.categories, or else is -1 for NaN. categories : index-like, optional The categories for the categorical. Items need to be unique. If the categories are not given here, then they must be provided in `dtype`. ordered : bool, optional Whether or not this categorical is treated as an ordered categorical. If not given here or in `dtype`, the resulting categorical will be unordered. dtype : CategoricalDtype or "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`. .. versionadded:: 0.24.0 When `dtype` is provided, neither `categories` nor `ordered` should be provided. Returns ------- Categorical Examples -------- >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True) >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype) [a, b, a, b] Categories (2, object): [a < b] """ dtype = CategoricalDtype._from_values_or_dtype( categories=categories, ordered=ordered, dtype=dtype ) if dtype.categories is None: msg = ( "The categories must be provided in 'categories' or " "'dtype'. Both were None." ) raise ValueError(msg) if is_extension_array_dtype(codes) and is_integer_dtype(codes): # Avoid the implicit conversion of Int to object if isna(codes).any(): raise ValueError("codes cannot contain NA values") codes = codes.to_numpy(dtype=np.int64) else: codes = np.asarray(codes) if len(codes) and not is_integer_dtype(codes): raise ValueError("codes need to be array-like integers") if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1): raise ValueError("codes need to be between -1 and len(categories)-1") return cls(codes, dtype=dtype, fastpath=True)
def from_codes(cls, codes, categories=None, ordered=None, dtype=None): """ Make a Categorical type from codes and categories or dtype. This constructor is useful if you already have codes and categories/dtype and so do not need the (computation intensive) factorization step, which is usually done on the constructor. If your data does not follow this convention, please use the normal constructor. Parameters ---------- codes : array-like of int An integer array, where each integer points to a category in categories or dtype.categories, or else is -1 for NaN. categories : index-like, optional The categories for the categorical. Items need to be unique. If the categories are not given here, then they must be provided in `dtype`. ordered : bool, optional Whether or not this categorical is treated as an ordered categorical. If not given here or in `dtype`, the resulting categorical will be unordered. dtype : CategoricalDtype or "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`. .. versionadded:: 0.24.0 When `dtype` is provided, neither `categories` nor `ordered` should be provided. Returns ------- Categorical Examples -------- >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True) >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype) [a, b, a, b] Categories (2, object): [a < b] """ dtype = CategoricalDtype._from_values_or_dtype( categories=categories, ordered=ordered, dtype=dtype ) if dtype.categories is None: msg = ( "The categories must be provided in 'categories' or " "'dtype'. Both were None." ) raise ValueError(msg) codes = np.asarray(codes) # #21767 if len(codes) and not is_integer_dtype(codes): raise ValueError("codes need to be array-like integers") if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1): raise ValueError("codes need to be between -1 and len(categories)-1") return cls(codes, dtype=dtype, fastpath=True)
https://github.com/pandas-dev/pandas/issues/31779
import pandas as pd codes = pd.Series([1, 0], dtype="Int64") pd.Categorical.from_codes(codes, categories=["foo", "bar"]) Traceback (most recent call last): File "<stdin>", line 1, in <module> File ".../lib/python3.7/site-packages/pandas/core/arrays/categorical.py", line 649, in from_codes raise ValueError("codes need to be array-like integers") ValueError: codes need to be array-like integers
ValueError
def insert_statement(self, *, num_rows): names = list(map(str, self.frame.columns)) wld = "?" # wildcard char escape = _get_valid_sqlite_name if self.index is not None: for idx in self.index[::-1]: names.insert(0, idx) bracketed_names = [escape(column) for column in names] col_names = ",".join(bracketed_names) row_wildcards = ",".join([wld] * len(names)) wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows)) insert_statement = ( f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}" ) return insert_statement
def insert_statement(self): names = list(map(str, self.frame.columns)) wld = "?" # wildcard char escape = _get_valid_sqlite_name if self.index is not None: for idx in self.index[::-1]: names.insert(0, idx) bracketed_names = [escape(column) for column in names] col_names = ",".join(bracketed_names) wildcards = ",".join([wld] * len(names)) insert_statement = ( f"INSERT INTO {escape(self.name)} ({col_names}) VALUES ({wildcards})" ) return insert_statement
https://github.com/pandas-dev/pandas/issues/29921
INFO - ========================================================================== INFO - Inserting data one per row. INFO - Data inserted successfully! INFO - ========================================================================== INFO - Inserting data with multirow method. ERROR - Error while inserting data with multirow method: Traceback (most recent call last): File "bug_multirow_sqlite.py", line 52, in store_df_data df.to_sql("financial_data", con=conn, if_exists="append", index=False, method="multi") File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\generic.py", line 2531, in to_sql dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 460, in to_sql chunksize=chunksize, dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1547, in to_sql table.insert(chunksize, method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 686, in insert exec_insert(conn, keys, chunk_iter) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 609, in _execute_insert_multi conn.execute(self.table.insert(data)) TypeError: insert() takes exactly 2 arguments (1 given) INFO - ========================================================================== INFO - Inserting data with multirow method and chunksize. ERROR - Error while inserting data with multirow method and chunksize: Traceback (most recent call last): File "bug_multirow_sqlite.py", line 64, in store_df_data df.to_sql("financial_data", con=conn, if_exists="append", index=False, method="multi", chunksize=10) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\generic.py", line 2531, in to_sql dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 460, in to_sql chunksize=chunksize, dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1547, in to_sql table.insert(chunksize, method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 686, in insert exec_insert(conn, keys, chunk_iter) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 609, in _execute_insert_multi conn.execute(self.table.insert(data)) TypeError: insert() takes exactly 2 arguments (1 given)
TypeError
def _execute_insert(self, conn, keys, data_iter): data_list = list(data_iter) conn.executemany(self.insert_statement(num_rows=1), data_list)
def _execute_insert(self, conn, keys, data_iter): data_list = list(data_iter) conn.executemany(self.insert_statement(), data_list)
https://github.com/pandas-dev/pandas/issues/29921
INFO - ========================================================================== INFO - Inserting data one per row. INFO - Data inserted successfully! INFO - ========================================================================== INFO - Inserting data with multirow method. ERROR - Error while inserting data with multirow method: Traceback (most recent call last): File "bug_multirow_sqlite.py", line 52, in store_df_data df.to_sql("financial_data", con=conn, if_exists="append", index=False, method="multi") File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\generic.py", line 2531, in to_sql dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 460, in to_sql chunksize=chunksize, dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1547, in to_sql table.insert(chunksize, method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 686, in insert exec_insert(conn, keys, chunk_iter) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 609, in _execute_insert_multi conn.execute(self.table.insert(data)) TypeError: insert() takes exactly 2 arguments (1 given) INFO - ========================================================================== INFO - Inserting data with multirow method and chunksize. ERROR - Error while inserting data with multirow method and chunksize: Traceback (most recent call last): File "bug_multirow_sqlite.py", line 64, in store_df_data df.to_sql("financial_data", con=conn, if_exists="append", index=False, method="multi", chunksize=10) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\generic.py", line 2531, in to_sql dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 460, in to_sql chunksize=chunksize, dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1547, in to_sql table.insert(chunksize, method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 686, in insert exec_insert(conn, keys, chunk_iter) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 609, in _execute_insert_multi conn.execute(self.table.insert(data)) TypeError: insert() takes exactly 2 arguments (1 given)
TypeError
def _execute_insert_multi(self, conn, keys, data_iter): data_list = list(data_iter) flattened_data = [x for row in data_list for x in row] conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
def _execute_insert_multi(self, conn, keys, data_iter): """Alternative to _execute_insert for DBs support multivalue INSERT. Note: multi-value insert is usually faster for analytics DBs and tables containing a few columns but performance degrades quickly with increase of columns. """ data = [dict(zip(keys, row)) for row in data_iter] conn.execute(self.table.insert(data))
https://github.com/pandas-dev/pandas/issues/29921
INFO - ========================================================================== INFO - Inserting data one per row. INFO - Data inserted successfully! INFO - ========================================================================== INFO - Inserting data with multirow method. ERROR - Error while inserting data with multirow method: Traceback (most recent call last): File "bug_multirow_sqlite.py", line 52, in store_df_data df.to_sql("financial_data", con=conn, if_exists="append", index=False, method="multi") File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\generic.py", line 2531, in to_sql dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 460, in to_sql chunksize=chunksize, dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1547, in to_sql table.insert(chunksize, method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 686, in insert exec_insert(conn, keys, chunk_iter) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 609, in _execute_insert_multi conn.execute(self.table.insert(data)) TypeError: insert() takes exactly 2 arguments (1 given) INFO - ========================================================================== INFO - Inserting data with multirow method and chunksize. ERROR - Error while inserting data with multirow method and chunksize: Traceback (most recent call last): File "bug_multirow_sqlite.py", line 64, in store_df_data df.to_sql("financial_data", con=conn, if_exists="append", index=False, method="multi", chunksize=10) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\generic.py", line 2531, in to_sql dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 460, in to_sql chunksize=chunksize, dtype=dtype, method=method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1547, in to_sql table.insert(chunksize, method) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 686, in insert exec_insert(conn, keys, chunk_iter) File "C:\Users\jconstanzo\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 609, in _execute_insert_multi conn.execute(self.table.insert(data)) TypeError: insert() takes exactly 2 arguments (1 given)
TypeError
def pivot(data: "DataFrame", index=None, columns=None, values=None) -> "DataFrame": if columns is None: raise TypeError("pivot() missing 1 required argument: 'columns'") columns = columns if is_list_like(columns) else [columns] if values is None: cols: List[str] = [] if index is None: pass elif is_list_like(index): cols = list(index) else: cols = [index] cols.extend(columns) append = index is None indexed = data.set_index(cols, append=append) else: if index is None: index = [Series(data.index, name=data.index.name)] elif is_list_like(index): index = [data[idx] for idx in index] else: index = [data[index]] data_columns = [data[col] for col in columns] index.extend(data_columns) index = MultiIndex.from_arrays(index) if is_list_like(values) and not isinstance(values, tuple): # Exclude tuple because it is seen as a single column name indexed = data._constructor( data[values].values, index=index, columns=values ) else: indexed = data._constructor_sliced(data[values].values, index=index) return indexed.unstack(columns)
def pivot(data: "DataFrame", index=None, columns=None, values=None) -> "DataFrame": if columns is None: raise TypeError("pivot() missing 1 required argument: 'columns'") if values is None: cols = [columns] if index is None else [index, columns] append = index is None indexed = data.set_index(cols, append=append) else: if index is None: index = data.index else: index = data[index] index = MultiIndex.from_arrays([index, data[columns]]) if is_list_like(values) and not isinstance(values, tuple): # Exclude tuple because it is seen as a single column name indexed = data._constructor( data[values].values, index=index, columns=values ) else: indexed = data._constructor_sliced(data[values].values, index=index) return indexed.unstack(columns)
https://github.com/pandas-dev/pandas/issues/21425
In [1]: df = pd.DataFrame({'lev1': [1, 1, 1, 1,2, 2, 2,2], 'lev2': [1, 1, 2, 2, 1, 1, 2, 2], 'lev3': [1, 2, 1, 2, 1, 2, 1, 2], 'values': range(8)}) In [2]: df Out[2]: lev1 lev2 lev3 values 0 1 1 1 0 1 1 1 2 1 2 1 2 1 2 3 1 2 2 3 4 2 1 1 4 5 2 1 2 5 6 2 2 1 6 7 2 2 2 7 In [3]: df.pivot(index=['lev1', 'lev2'], columns='lev3', values='values') --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-3-2fef29f9fd39> in <module>() ----> 1 df.pivot(index=['lev1', 'lev2'], columns='lev3', values='values') ~/scipy/pandas/pandas/core/frame.py in pivot(self, index, columns, values) 5191 """ 5192 from pandas.core.reshape.reshape import pivot -> 5193 return pivot(self, index=index, columns=columns, values=values) 5194 5195 _shared_docs['pivot_table'] = """ ~/scipy/pandas/pandas/core/reshape/reshape.py in pivot(self, index, columns, values) 406 else: 407 indexed = self._constructor_sliced(self[values].values, --> 408 index=index) 409 return indexed.unstack(columns) 410 ~/scipy/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 260 'Length of passed values is {val}, ' 261 'index implies {ind}' --> 262 .format(val=len(data), ind=len(index))) 263 except TypeError: 264 pass ValueError: Length of passed values is 8, index implies 2
ValueError
def holidays(self, start=None, end=None, return_name=False): """ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception( f"Holiday Calendar {self.name} does not have any rules specified" ) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) # If we don't have a cache or the dates are outside the prior cache, we # get them again if self._cache is None or start < self._cache[0] or end > self._cache[1]: holidays = [rule.dates(start, end, return_name=True) for rule in self.rules] if holidays: holidays = concat(holidays) else: holidays = Series(index=DatetimeIndex([]), dtype=object) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index
def holidays(self, start=None, end=None, return_name=False): """ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception( f"Holiday Calendar {self.name} does not have any rules specified" ) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if self._cache is None or start < self._cache[0] or end > self._cache[1]: for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays else: holidays = holidays.append(rule_holidays) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index
https://github.com/pandas-dev/pandas/issues/31415
In [58]: cal.holidays(pd.Timestamp('01-Jan-2020'), pd.Timestamp('01-Jan-2021')) Traceback (most recent call last): File "<ipython-input-58-022244d4e794>", line 1, in <module> cal.holidays(pd.Timestamp('01-Jan-2020'), pd.Timestamp('01-Jan-2021')) File "C:\Users\dhirschf\envs\dev\lib\site-packages\pandas\tseries\holiday.py", line 422, in holidays self._cache = (start, end, holidays.sort_index()) AttributeError: 'NoneType' object has no attribute 'sort_index' In [59]: pd.__version__ Out[59]: '0.25.3'
AttributeError
def astype(self, dtype, copy=True): """ Cast to a NumPy array or IntegerArray with 'dtype'. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. copy : bool, default True Whether to copy the data, even if not necessary. If False, a copy is made only if the old dtype does not match the new dtype. Returns ------- array : ndarray or IntegerArray NumPy ndarray or IntergerArray with 'dtype' for its dtype. Raises ------ TypeError if incompatible type with an IntegerDtype, equivalent of same_kind casting """ from pandas.core.arrays.boolean import BooleanArray, BooleanDtype dtype = pandas_dtype(dtype) # if we are astyping to an existing IntegerDtype we can fastpath if isinstance(dtype, _IntegerDtype): result = self._data.astype(dtype.numpy_dtype, copy=False) return type(self)(result, mask=self._mask, copy=False) elif isinstance(dtype, BooleanDtype): result = self._data.astype("bool", copy=False) return BooleanArray(result, mask=self._mask, copy=False) # coerce if is_float_dtype(dtype): # In astype, we consider dtype=float to also mean na_value=np.nan kwargs = dict(na_value=np.nan) else: kwargs = {} data = self.to_numpy(dtype=dtype, **kwargs) return astype_nansafe(data, dtype, copy=False)
def astype(self, dtype, copy=True): """ Cast to a NumPy array or IntegerArray with 'dtype'. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. copy : bool, default True Whether to copy the data, even if not necessary. If False, a copy is made only if the old dtype does not match the new dtype. Returns ------- array : ndarray or IntegerArray NumPy ndarray or IntergerArray with 'dtype' for its dtype. Raises ------ TypeError if incompatible type with an IntegerDtype, equivalent of same_kind casting """ # if we are astyping to an existing IntegerDtype we can fastpath if isinstance(dtype, _IntegerDtype): result = self._data.astype(dtype.numpy_dtype, copy=False) return type(self)(result, mask=self._mask, copy=False) # coerce if is_float_dtype(dtype): # In astype, we consider dtype=float to also mean na_value=np.nan kwargs = dict(na_value=np.nan) else: kwargs = {} data = self.to_numpy(dtype=dtype, **kwargs) return astype_nansafe(data, dtype, copy=False)
https://github.com/pandas-dev/pandas/issues/31102
In [23]: a = pd.array([1, 0, pd.NA]) In [24]: a Out[24]: <IntegerArray> [1, 0, <NA>] Length: 3, dtype: Int64 In [25]: a.astype("boolean") --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-25-41973ed53ee3> in <module> ----> 1 a.astype("boolean") ~/scipy/pandas/pandas/core/arrays/integer.py in astype(self, dtype, copy) 454 kwargs = {} 455 --> 456 data = self.to_numpy(dtype=dtype, **kwargs) 457 return astype_nansafe(data, dtype, copy=False) 458 ~/scipy/pandas/pandas/core/arrays/masked.py in to_numpy(self, dtype, copy, na_value) 124 ): 125 raise ValueError( --> 126 f"cannot convert to '{dtype}'-dtype NumPy array " 127 "with missing values. Specify an appropriate 'na_value' " 128 "for this dtype." ValueError: cannot convert to 'boolean'-dtype NumPy array with missing values. Specify an appropriate 'na_value' for this dtype. In [26]: a.astype(pd.BooleanDtype()) ... ValueError: cannot convert to 'boolean'-dtype NumPy array with missing values. Specify an appropriate 'na_value' for this dtype.
ValueError
def _concat_objects(self, keys, values, not_indexed_same: bool = False): from pandas.core.reshape.concat import concat def reset_identity(values): # reset the identities of the components # of the values to prevent aliasing for v in com.not_none(*values): ax = v._get_axis(self.axis) ax._reset_identity() return values if not not_indexed_same: result = concat(values, axis=self.axis) ax = self._selected_obj._get_axis(self.axis) # this is a very unfortunate situation # we can't use reindex to restore the original order # when the ax has duplicates # so we resort to this # GH 14776, 30667 if ax.has_duplicates: indexer, _ = result.index.get_indexer_non_unique(ax.values) indexer = algorithms.unique1d(indexer) result = result.take(indexer, axis=self.axis) else: result = result.reindex(ax, axis=self.axis) elif self.group_keys: values = reset_identity(values) if self.as_index: # possible MI return case group_keys = keys group_levels = self.grouper.levels group_names = self.grouper.names result = concat( values, axis=self.axis, keys=group_keys, levels=group_levels, names=group_names, sort=False, ) else: # GH5610, returns a MI, with the first level being a # range index keys = list(range(len(values))) result = concat(values, axis=self.axis, keys=keys) else: values = reset_identity(values) result = concat(values, axis=self.axis) if isinstance(result, Series) and self._selection_name is not None: result.name = self._selection_name return result
def _concat_objects(self, keys, values, not_indexed_same: bool = False): from pandas.core.reshape.concat import concat def reset_identity(values): # reset the identities of the components # of the values to prevent aliasing for v in com.not_none(*values): ax = v._get_axis(self.axis) ax._reset_identity() return values if not not_indexed_same: result = concat(values, axis=self.axis) ax = self._selected_obj._get_axis(self.axis) if isinstance(result, Series): result = result.reindex(ax) else: # this is a very unfortunate situation # we have a multi-index that is NOT lexsorted # and we have a result which is duplicated # we can't reindex, so we resort to this # GH 14776 if isinstance(ax, MultiIndex) and not ax.is_unique: indexer = algorithms.unique1d(result.index.get_indexer_for(ax.values)) result = result.take(indexer, axis=self.axis) else: result = result.reindex(ax, axis=self.axis) elif self.group_keys: values = reset_identity(values) if self.as_index: # possible MI return case group_keys = keys group_levels = self.grouper.levels group_names = self.grouper.names result = concat( values, axis=self.axis, keys=group_keys, levels=group_levels, names=group_names, sort=False, ) else: # GH5610, returns a MI, with the first level being a # range index keys = list(range(len(values))) result = concat(values, axis=self.axis, keys=keys) else: values = reset_identity(values) result = concat(values, axis=self.axis) if isinstance(result, Series) and self._selection_name is not None: result.name = self._selection_name return result
https://github.com/pandas-dev/pandas/issues/30667
Traceback (most recent call last): File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/groupby.py", line 725, in apply result = self._python_apply_general(f) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/groupby.py", line 745, in _python_apply_general keys, values, not_indexed_same=mutated or self.mutated File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/generic.py", line 372, in _wrap_applied_output return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/groupby.py", line 955, in _concat_objects result = result.reindex(ax, axis=self.axis) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/util/_decorators.py", line 221, in wrapper return func(*args, **kwargs) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/frame.py", line 3976, in reindex return super().reindex(**kwargs) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/generic.py", line 4514, in reindex axes, level, limit, tolerance, method, fill_value, copy File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/frame.py", line 3864, in _reindex_axes index, method, copy, level, fill_value, limit, tolerance File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/frame.py", line 3886, in _reindex_index allow_dups=False, File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/generic.py", line 4577, in _reindex_with_indexers copy=copy, File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/internals/managers.py", line 1251, in reindex_indexer self.axes[axis]._can_reindex(indexer) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 3362, in _can_reindex raise ValueError("cannot reindex from a duplicate axis") ValueError: cannot reindex from a duplicate axis During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/hm106930/ipv/web/groupby_bug.py", line 16, in <module> df=df.groupby(['Y']).apply(lambda x: x) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/groupby.py", line 737, in apply return self._python_apply_general(f) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/groupby.py", line 745, in _python_apply_general keys, values, not_indexed_same=mutated or self.mutated File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/generic.py", line 372, in _wrap_applied_output return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/groupby/groupby.py", line 955, in _concat_objects result = result.reindex(ax, axis=self.axis) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/util/_decorators.py", line 221, in wrapper return func(*args, **kwargs) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/frame.py", line 3976, in reindex return super().reindex(**kwargs) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/generic.py", line 4514, in reindex axes, level, limit, tolerance, method, fill_value, copy File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/frame.py", line 3864, in _reindex_axes index, method, copy, level, fill_value, limit, tolerance File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/frame.py", line 3886, in _reindex_index allow_dups=False, File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/generic.py", line 4577, in _reindex_with_indexers copy=copy, File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/internals/managers.py", line 1251, in reindex_indexer self.axes[axis]._can_reindex(indexer) File "/home/hm106930/.local/share/virtualenvs/ipv-work-62kMEXht/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 3362, in _can_reindex raise ValueError("cannot reindex from a duplicate axis") ValueError: cannot reindex from a duplicate axis
ValueError
def pivot_table( data, values=None, index=None, columns=None, aggfunc="mean", fill_value=None, margins=False, dropna=True, margins_name="All", observed=False, ) -> "DataFrame": index = _convert_by(index) columns = _convert_by(columns) if isinstance(aggfunc, list): pieces: List[DataFrame] = [] keys = [] for func in aggfunc: table = pivot_table( data, values=values, index=index, columns=columns, fill_value=fill_value, aggfunc=func, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, ) pieces.append(table) keys.append(getattr(func, "__name__", func)) return concat(pieces, keys=keys, axis=1) keys = index + columns values_passed = values is not None if values_passed: if is_list_like(values): values_multi = True values = list(values) else: values_multi = False values = [values] # GH14938 Make sure value labels are in data for i in values: if i not in data: raise KeyError(i) to_filter = [] for x in keys + values: if isinstance(x, Grouper): x = x.key try: if x in data: to_filter.append(x) except TypeError: pass if len(to_filter) < len(data.columns): data = data[to_filter] else: values = data.columns for key in keys: try: values = values.drop(key) except (TypeError, ValueError, KeyError): pass values = list(values) grouped = data.groupby(keys, observed=observed) agged = grouped.agg(aggfunc) if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns): agged = agged.dropna(how="all") # gh-21133 # we want to down cast if # the original values are ints # as we grouped with a NaN value # and then dropped, coercing to floats for v in values: if ( v in data and is_integer_dtype(data[v]) and v in agged and not is_integer_dtype(agged[v]) ): agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype) table = agged # GH17038, this check should only happen if index is defined (not None) if table.index.nlevels > 1 and index: # Related GH #17123 # If index_names are integers, determine whether the integers refer # to the level position or name. index_names = agged.index.names[: len(index)] to_unstack = [] for i in range(len(index), len(keys)): name = agged.index.names[i] if name is None or name in index_names: to_unstack.append(i) else: to_unstack.append(name) table = agged.unstack(to_unstack) if not dropna: if table.index.nlevels > 1: m = MultiIndex.from_arrays( cartesian_product(table.index.levels), names=table.index.names ) table = table.reindex(m, axis=0) if table.columns.nlevels > 1: m = MultiIndex.from_arrays( cartesian_product(table.columns.levels), names=table.columns.names ) table = table.reindex(m, axis=1) if isinstance(table, ABCDataFrame): table = table.sort_index(axis=1) if fill_value is not None: table = table._ensure_type(table.fillna(fill_value, downcast="infer")) if margins: if dropna: data = data[data.notna().all(axis=1)] table = _add_margins( table, data, values, rows=index, cols=columns, aggfunc=aggfunc, observed=dropna, margins_name=margins_name, fill_value=fill_value, ) # discard the top level if ( values_passed and not values_multi and not table.empty and (table.columns.nlevels > 1) ): table = table[values[0]] if len(index) == 0 and len(columns) > 0: table = table.T # GH 15193 Make sure empty columns are removed if dropna=True if isinstance(table, ABCDataFrame) and dropna: table = table.dropna(how="all", axis=1) return table
def pivot_table( data, values=None, index=None, columns=None, aggfunc="mean", fill_value=None, margins=False, dropna=True, margins_name="All", observed=False, ) -> "DataFrame": index = _convert_by(index) columns = _convert_by(columns) if isinstance(aggfunc, list): pieces: List[DataFrame] = [] keys = [] for func in aggfunc: table = pivot_table( data, values=values, index=index, columns=columns, fill_value=fill_value, aggfunc=func, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, ) pieces.append(table) keys.append(getattr(func, "__name__", func)) return concat(pieces, keys=keys, axis=1) keys = index + columns values_passed = values is not None if values_passed: if is_list_like(values): values_multi = True values = list(values) else: values_multi = False values = [values] # GH14938 Make sure value labels are in data for i in values: if i not in data: raise KeyError(i) to_filter = [] for x in keys + values: if isinstance(x, Grouper): x = x.key try: if x in data: to_filter.append(x) except TypeError: pass if len(to_filter) < len(data.columns): data = data[to_filter] else: values = data.columns for key in keys: try: values = values.drop(key) except (TypeError, ValueError, KeyError): pass values = list(values) grouped = data.groupby(keys, observed=observed) agged = grouped.agg(aggfunc) if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns): agged = agged.dropna(how="all") # gh-21133 # we want to down cast if # the original values are ints # as we grouped with a NaN value # and then dropped, coercing to floats for v in values: if ( v in data and is_integer_dtype(data[v]) and v in agged and not is_integer_dtype(agged[v]) ): agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype) table = agged if table.index.nlevels > 1: # Related GH #17123 # If index_names are integers, determine whether the integers refer # to the level position or name. index_names = agged.index.names[: len(index)] to_unstack = [] for i in range(len(index), len(keys)): name = agged.index.names[i] if name is None or name in index_names: to_unstack.append(i) else: to_unstack.append(name) table = agged.unstack(to_unstack) if not dropna: if table.index.nlevels > 1: m = MultiIndex.from_arrays( cartesian_product(table.index.levels), names=table.index.names ) table = table.reindex(m, axis=0) if table.columns.nlevels > 1: m = MultiIndex.from_arrays( cartesian_product(table.columns.levels), names=table.columns.names ) table = table.reindex(m, axis=1) if isinstance(table, ABCDataFrame): table = table.sort_index(axis=1) if fill_value is not None: table = table._ensure_type(table.fillna(fill_value, downcast="infer")) if margins: if dropna: data = data[data.notna().all(axis=1)] table = _add_margins( table, data, values, rows=index, cols=columns, aggfunc=aggfunc, observed=dropna, margins_name=margins_name, fill_value=fill_value, ) # discard the top level if ( values_passed and not values_multi and not table.empty and (table.columns.nlevels > 1) ): table = table[values[0]] if len(index) == 0 and len(columns) > 0: table = table.T # GH 15193 Make sure empty columns are removed if dropna=True if isinstance(table, ABCDataFrame) and dropna: table = table.dropna(how="all", axis=1) return table
https://github.com/pandas-dev/pandas/issues/17038
In [21]: df = pd.DataFrame({'k': [1, 2, 3], 'v': [4, 5, 6]}) In [22]: df.pivot_table(values='v', columns='k') Out[22]: k 1 2 3 v 4 5 6 In [23]: df.pivot_table(values='v', index='k') Out[23]: v k 1 4 2 5 3 6 In [24]: df2 = pd.DataFrame({'k1': [1, 2, 3], 'k2': [1, 2, 3], 'v': [4, 5, 6]}) In [25]: df2.pivot_table(values='v', index=('k1','k2')) Out[25]: v k1 k2 1 1 4 2 2 5 3 3 6 In [26]: df2.pivot_table(values='v', columns=('k1','k2')) --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-26-80d7fdeb9743> in <module>() ----> 1 df2.pivot_table(values='v', columns=('k1','k2')) ~\Anaconda\envs\py36\lib\site-packages\pandas\core\reshape\pivot.py in pivot_table(data, values, index, columns, aggfunc, fill_value, margins, dropna, margins_name) 172 # discard the top level 173 if values_passed and not values_multi and not table.empty and \ --> 174 (table.columns.nlevels > 1): 175 table = table[values[0]] 176 ~\Anaconda\envs\py36\lib\site-packages\pandas\core\generic.py in __getattr__(self, name) 3075 if (name in self._internal_names_set or name in self._metadata or 3076 name in self._accessors): -> 3077 return object.__getattribute__(self, name) 3078 else: 3079 if name in self._info_axis: AttributeError: 'Series' object has no attribute 'columns'
AttributeError
def rename( self, mapper: Optional[Renamer] = None, *, index: Optional[Renamer] = None, columns: Optional[Renamer] = None, axis: Optional[Axis] = None, copy: bool = True, inplace: bool = False, level: Optional[Level] = None, errors: str = "ignore", ) -> Optional["DataFrame"]: """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ return super().rename( mapper=mapper, index=index, columns=columns, axis=axis, copy=copy, inplace=inplace, level=level, errors=errors, )
def rename(self, *args, **kwargs): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename") kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop("axis", None) kwargs.pop("mapper", None) return super().rename(**kwargs)
https://github.com/pandas-dev/pandas/issues/29136
df = pd.DataFrame([[1]]) df.rename({0: 1}, columns={0: 2}, axis=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/williamayd/clones/pandas/pandas/util/_decorators.py", line 235, in wrapper return func(*args, **kwargs) File "/Users/williamayd/clones/pandas/pandas/core/frame.py", line 4143, in rename axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename") File "/Users/williamayd/clones/pandas/pandas/util/_validators.py", line 287, in validate_axis_style_args raise TypeError(msg) TypeError: Cannot specify both 'axis' and any of 'index' or 'columns'.
TypeError
def rename( self: FrameOrSeries, mapper: Optional[Renamer] = None, *, index: Optional[Renamer] = None, columns: Optional[Renamer] = None, axis: Optional[Axis] = None, copy: bool = True, inplace: bool = False, level: Optional[Level] = None, errors: str = "ignore", ) -> Optional[FrameOrSeries]: """ Alter axes input function or functions. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value (Series only). Parameters ---------- %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame. dict-like or functions are transformations to apply to that axis' values copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new %(klass)s. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- renamed : %(klass)s (new object) Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- NDFrame.rename_axis Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 Since ``DataFrame`` doesn't have a ``.name`` attribute, only mapping-type arguments are allowed. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(2) Traceback (most recent call last): ... TypeError: 'int' object is not callable ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 See the :ref:`user guide <basics.rename>` for more. """ if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) elif mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper result = self if inplace else self.copy(deep=copy) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) baxis = self._get_block_manager_axis(axis_no) f = com.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") result._data = result._data.rename_axis(f, axis=baxis, copy=copy, level=level) result._clear_item_cache() if inplace: self._update_inplace(result._data) return None else: return result.__finalize__(self)
def rename(self, *args, **kwargs): """ Alter axes input function or functions. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value (Series only). Parameters ---------- %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame. dict-like or functions are transformations to apply to that axis' values copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new %(klass)s. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- renamed : %(klass)s (new object) Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- NDFrame.rename_axis Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 Since ``DataFrame`` doesn't have a ``.name`` attribute, only mapping-type arguments are allowed. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(2) Traceback (most recent call last): ... TypeError: 'int' object is not callable ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 See the :ref:`user guide <basics.rename>` for more. """ axes, kwargs = self._construct_axes_from_arguments(args, kwargs) copy = kwargs.pop("copy", True) inplace = kwargs.pop("inplace", False) level = kwargs.pop("level", None) axis = kwargs.pop("axis", None) errors = kwargs.pop("errors", "ignore") if axis is not None: # Validate the axis self._get_axis_number(axis) if kwargs: raise TypeError( f'rename() got an unexpected keyword argument "{list(kwargs.keys())[0]}"' ) if com.count_not_none(*axes.values()) == 0: raise TypeError("must pass an index to rename") self._consolidate_inplace() result = self if inplace else self.copy(deep=copy) # start in the axis order to eliminate too many copies for axis in range(self._AXIS_LEN): v = axes.get(self._AXIS_NAMES[axis]) if v is None: continue f = com.get_rename_function(v) baxis = self._get_block_manager_axis(axis) if level is not None: level = self.axes[axis]._get_level_number(level) # GH 13473 if not callable(v): indexer = self.axes[axis].get_indexer_for(v) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(v) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") result._data = result._data.rename_axis(f, axis=baxis, copy=copy, level=level) result._clear_item_cache() if inplace: self._update_inplace(result._data) else: return result.__finalize__(self)
https://github.com/pandas-dev/pandas/issues/29136
df = pd.DataFrame([[1]]) df.rename({0: 1}, columns={0: 2}, axis=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/williamayd/clones/pandas/pandas/util/_decorators.py", line 235, in wrapper return func(*args, **kwargs) File "/Users/williamayd/clones/pandas/pandas/core/frame.py", line 4143, in rename axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename") File "/Users/williamayd/clones/pandas/pandas/util/_validators.py", line 287, in validate_axis_style_args raise TypeError(msg) TypeError: Cannot specify both 'axis' and any of 'index' or 'columns'.
TypeError
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{prefix}{}".format, prefix=prefix) mapper = {self._info_axis_name: f} return self.rename(**mapper) # type: ignore
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{prefix}{}".format, prefix=prefix) mapper = {self._info_axis_name: f} return self.rename(**mapper)
https://github.com/pandas-dev/pandas/issues/29136
df = pd.DataFrame([[1]]) df.rename({0: 1}, columns={0: 2}, axis=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/williamayd/clones/pandas/pandas/util/_decorators.py", line 235, in wrapper return func(*args, **kwargs) File "/Users/williamayd/clones/pandas/pandas/core/frame.py", line 4143, in rename axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename") File "/Users/williamayd/clones/pandas/pandas/util/_validators.py", line 287, in validate_axis_style_args raise TypeError(msg) TypeError: Cannot specify both 'axis' and any of 'index' or 'columns'.
TypeError
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{}{suffix}".format, suffix=suffix) mapper = {self._info_axis_name: f} return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{}{suffix}".format, suffix=suffix) mapper = {self._info_axis_name: f} return self.rename(**mapper)
https://github.com/pandas-dev/pandas/issues/29136
df = pd.DataFrame([[1]]) df.rename({0: 1}, columns={0: 2}, axis=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/williamayd/clones/pandas/pandas/util/_decorators.py", line 235, in wrapper return func(*args, **kwargs) File "/Users/williamayd/clones/pandas/pandas/core/frame.py", line 4143, in rename axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename") File "/Users/williamayd/clones/pandas/pandas/util/_validators.py", line 287, in validate_axis_style_args raise TypeError(msg) TypeError: Cannot specify both 'axis' and any of 'index' or 'columns'.
TypeError
def argsort(self, axis=0, kind="quicksort", order=None): """ Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or "index"} Has no effect but is accepted for compatibility with numpy. kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series Positions of values within the sort order with -1 indicating nan values. See Also -------- numpy.ndarray.argsort """ values = self._values mask = isna(values) if mask.any(): result = Series(-1, index=self.index, name=self.name, dtype="int64") notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) return self._constructor(result, index=self.index).__finalize__(self) else: return self._constructor( np.argsort(values, kind=kind), index=self.index, dtype="int64" ).__finalize__(self)
def argsort(self, axis=0, kind="quicksort", order=None): """ Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : int Has no effect but is accepted for compatibility with numpy. kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series Positions of values within the sort order with -1 indicating nan values. See Also -------- numpy.ndarray.argsort """ values = self._values mask = isna(values) if mask.any(): result = Series(-1, index=self.index, name=self.name, dtype="int64") notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) return self._constructor(result, index=self.index).__finalize__(self) else: return self._constructor( np.argsort(values, kind=kind), index=self.index, dtype="int64" ).__finalize__(self)
https://github.com/pandas-dev/pandas/issues/29136
df = pd.DataFrame([[1]]) df.rename({0: 1}, columns={0: 2}, axis=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/williamayd/clones/pandas/pandas/util/_decorators.py", line 235, in wrapper return func(*args, **kwargs) File "/Users/williamayd/clones/pandas/pandas/core/frame.py", line 4143, in rename axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename") File "/Users/williamayd/clones/pandas/pandas/util/_validators.py", line 287, in validate_axis_style_args raise TypeError(msg) TypeError: Cannot specify both 'axis' and any of 'index' or 'columns'.
TypeError
def rename( self, index=None, *, axis=None, copy=True, inplace=False, level=None, errors="ignore", ): """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- axis : {0 or "index"} Unused. Accepted for compatability with DataFrame method only. index : scalar, hashable sequence, dict-like or function, optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. **kwargs Additional keyword arguments passed to the function. Only the "inplace" keyword is used. Returns ------- Series Series with index labels or name altered. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if callable(index) or is_dict_like(index): return super().rename( index, copy=copy, inplace=inplace, level=level, errors=errors ) else: return self._set_name(index, inplace=inplace)
def rename(self, index=None, **kwargs): """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function, optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. **kwargs Additional keyword arguments passed to the function. Only the "inplace" keyword is used. Returns ------- Series Series with index labels or name altered. See Also -------- Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace") if callable(index) or is_dict_like(index): return super().rename(index=index, **kwargs) else: return self._set_name(index, inplace=kwargs.get("inplace"))
https://github.com/pandas-dev/pandas/issues/29136
df = pd.DataFrame([[1]]) df.rename({0: 1}, columns={0: 2}, axis=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/williamayd/clones/pandas/pandas/util/_decorators.py", line 235, in wrapper return func(*args, **kwargs) File "/Users/williamayd/clones/pandas/pandas/core/frame.py", line 4143, in rename axes = validate_axis_style_args(self, args, kwargs, "mapper", "rename") File "/Users/williamayd/clones/pandas/pandas/util/_validators.py", line 287, in validate_axis_style_args raise TypeError(msg) TypeError: Cannot specify both 'axis' and any of 'index' or 'columns'.
TypeError
def __init__(self, f: IO): self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __init__(self, f: BinaryIO, encoding: str): self.reader = codecs.getreader(encoding)(f)
https://github.com/pandas-dev/pandas/issues/24130
In [4]: with tempfile.TemporaryFile(mode='w+', encoding='utf16') as outfile: ...: outfile.write('foo') ...: outfile.seek(0) ...: pd.read_csv(outfile, encoding='utf16') UnicodeDecodeError: 'utf-16-le' codec can't decode byte 0x6f in position 2: truncated data In [4]: with tempfile.TemporaryFile(mode='w+', encoding='utf32') as outfile: ...: outfile.write('foo') ...: outfile.seek(0) ...: pd.read_csv(outfile, encoding='utf32') UnicodeDecodeError: 'utf-32-le' codec can't decode bytes in position 0-2: truncated data
UnicodeDecodeError
def __next__(self) -> str: newbytes = self.mmap.readline() # readline returns bytes, not str, but Python's CSV reader # expects str, so convert the output to str before continuing newline = newbytes.decode("utf-8") # mmap doesn't raise if reading past the allocated # data but instead returns an empty string, so raise # if that is returned if newline == "": raise StopIteration return newline
def __next__(self) -> bytes: return next(self.reader).encode("utf-8")
https://github.com/pandas-dev/pandas/issues/24130
In [4]: with tempfile.TemporaryFile(mode='w+', encoding='utf16') as outfile: ...: outfile.write('foo') ...: outfile.seek(0) ...: pd.read_csv(outfile, encoding='utf16') UnicodeDecodeError: 'utf-16-le' codec can't decode byte 0x6f in position 2: truncated data In [4]: with tempfile.TemporaryFile(mode='w+', encoding='utf32') as outfile: ...: outfile.write('foo') ...: outfile.seek(0) ...: pd.read_csv(outfile, encoding='utf32') UnicodeDecodeError: 'utf-32-le' codec can't decode bytes in position 0-2: truncated data
UnicodeDecodeError
def __init__(self, src, **kwds): self.kwds = kwds kwds = kwds.copy() ParserBase.__init__(self, kwds) encoding = kwds.get("encoding") if kwds.get("compression") is None and encoding: if isinstance(src, str): src = open(src, "rb") self.handles.append(src) # Handle the file object with universal line mode enabled. # We will handle the newline character ourselves later on. if isinstance(src, BufferedIOBase): src = TextIOWrapper(src, encoding=encoding, newline="") kwds["encoding"] = "utf-8" # #2442 kwds["allow_leading_cols"] = self.index_col is not False # GH20529, validate usecol arg before TextReader self.usecols, self.usecols_dtype = _validate_usecols_arg(kwds["usecols"]) kwds["usecols"] = self.usecols self._reader = parsers.TextReader(src, **kwds) self.unnamed_cols = self._reader.unnamed_cols passed_names = self.names is None if self._reader.header is None: self.names = None else: if len(self._reader.header) > 1: # we have a multi index in the columns ( self.names, self.index_names, self.col_names, passed_names, ) = self._extract_multi_indexer_columns( self._reader.header, self.index_names, self.col_names, passed_names ) else: self.names = list(self._reader.header[0]) if self.names is None: if self.prefix: self.names = [f"{self.prefix}{i}" for i in range(self._reader.table_width)] else: self.names = list(range(self._reader.table_width)) # gh-9755 # # need to set orig_names here first # so that proper indexing can be done # with _set_noconvert_columns # # once names has been filtered, we will # then set orig_names again to names self.orig_names = self.names[:] if self.usecols: usecols = _evaluate_usecols(self.usecols, self.orig_names) # GH 14671 if self.usecols_dtype == "string" and not set(usecols).issubset( self.orig_names ): _validate_usecols_names(usecols, self.orig_names) if len(self.names) > len(usecols): self.names = [ n for i, n in enumerate(self.names) if (i in usecols or n in usecols) ] if len(self.names) < len(usecols): _validate_usecols_names(usecols, self.names) self._set_noconvert_columns() self.orig_names = self.names if not self._has_complex_date_col: if self._reader.leading_cols == 0 and _is_index_col(self.index_col): self._name_processed = True (index_names, self.names, self.index_col) = _clean_index_names( self.names, self.index_col, self.unnamed_cols ) if self.index_names is None: self.index_names = index_names if self._reader.header is None and not passed_names: self.index_names = [None] * len(self.index_names) self._implicit_index = self._reader.leading_cols > 0
def __init__(self, src, **kwds): self.kwds = kwds kwds = kwds.copy() ParserBase.__init__(self, kwds) if kwds.get("compression") is None and "utf-16" in (kwds.get("encoding") or ""): # if source is utf-16 plain text, convert source to utf-8 if isinstance(src, str): src = open(src, "rb") self.handles.append(src) src = UTF8Recoder(src, kwds["encoding"]) kwds["encoding"] = "utf-8" # #2442 kwds["allow_leading_cols"] = self.index_col is not False # GH20529, validate usecol arg before TextReader self.usecols, self.usecols_dtype = _validate_usecols_arg(kwds["usecols"]) kwds["usecols"] = self.usecols self._reader = parsers.TextReader(src, **kwds) self.unnamed_cols = self._reader.unnamed_cols passed_names = self.names is None if self._reader.header is None: self.names = None else: if len(self._reader.header) > 1: # we have a multi index in the columns ( self.names, self.index_names, self.col_names, passed_names, ) = self._extract_multi_indexer_columns( self._reader.header, self.index_names, self.col_names, passed_names ) else: self.names = list(self._reader.header[0]) if self.names is None: if self.prefix: self.names = [f"{self.prefix}{i}" for i in range(self._reader.table_width)] else: self.names = list(range(self._reader.table_width)) # gh-9755 # # need to set orig_names here first # so that proper indexing can be done # with _set_noconvert_columns # # once names has been filtered, we will # then set orig_names again to names self.orig_names = self.names[:] if self.usecols: usecols = _evaluate_usecols(self.usecols, self.orig_names) # GH 14671 if self.usecols_dtype == "string" and not set(usecols).issubset( self.orig_names ): _validate_usecols_names(usecols, self.orig_names) if len(self.names) > len(usecols): self.names = [ n for i, n in enumerate(self.names) if (i in usecols or n in usecols) ] if len(self.names) < len(usecols): _validate_usecols_names(usecols, self.names) self._set_noconvert_columns() self.orig_names = self.names if not self._has_complex_date_col: if self._reader.leading_cols == 0 and _is_index_col(self.index_col): self._name_processed = True (index_names, self.names, self.index_col) = _clean_index_names( self.names, self.index_col, self.unnamed_cols ) if self.index_names is None: self.index_names = index_names if self._reader.header is None and not passed_names: self.index_names = [None] * len(self.index_names) self._implicit_index = self._reader.leading_cols > 0
https://github.com/pandas-dev/pandas/issues/24130
In [4]: with tempfile.TemporaryFile(mode='w+', encoding='utf16') as outfile: ...: outfile.write('foo') ...: outfile.seek(0) ...: pd.read_csv(outfile, encoding='utf16') UnicodeDecodeError: 'utf-16-le' codec can't decode byte 0x6f in position 2: truncated data In [4]: with tempfile.TemporaryFile(mode='w+', encoding='utf32') as outfile: ...: outfile.write('foo') ...: outfile.seek(0) ...: pd.read_csv(outfile, encoding='utf32') UnicodeDecodeError: 'utf-32-le' codec can't decode bytes in position 0-2: truncated data
UnicodeDecodeError
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs): # TODO: Not sure if above is correct - need someone to confirm. axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: data = self.fillna(method=fill_method, limit=limit, axis=axis) rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1 if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs): # TODO: Not sure if above is correct - need someone to confirm. axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: data = self.fillna(method=fill_method, limit=limit, axis=axis) rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1 rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) if freq is None: mask = isna(com.values_from_object(data)) np.putmask(rs.values, mask, np.nan) return rs
https://github.com/pandas-dev/pandas/issues/30463
data.reset_index().groupby('index')[0].pct_change(1, fill_method=None, limit=1) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-18-60898304743e> in <module> ----> 1 data.reset_index().groupby('index')[0].pct_change(1, fill_method=None, limit=1) ~/opt/anaconda3/lib/python3.7/site-packages/pandas/core/groupby/generic.py in pct_change(self, periods, fill_method, limit, freq) 1344 ) 1345 ) -> 1346 filled = getattr(self, fill_method)(limit=limit) 1347 fill_grp = filled.groupby(self.grouper.labels) 1348 shifted = fill_grp.shift(periods=periods, freq=freq) TypeError: getattr(): attribute name must be string
TypeError
def init_ndarray(values, index, columns, dtype=None, copy=False): # input must be a ndarray, list, Series, index if isinstance(values, ABCSeries): if columns is None: if values.name is not None: columns = [values.name] if index is None: index = values.index else: values = values.reindex(index) # zero len case (GH #2234) if not len(values) and columns is not None and len(columns): values = np.empty((0, 1), dtype=object) # we could have a categorical type passed or coerced to 'category' # recast this to an arrays_to_mgr if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype( dtype ): if not hasattr(values, "dtype"): values = prep_ndarray(values, copy=copy) values = values.ravel() elif copy: values = values.copy() index, columns = _get_axes(len(values), 1, index, columns) return arrays_to_mgr([values], columns, index, columns, dtype=dtype) elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype): # GH#19157 if isinstance(values, np.ndarray) and values.ndim > 1: # GH#12513 a EA dtype passed with a 2D array, split into # multiple EAs that view the values values = [values[:, n] for n in range(values.shape[1])] else: values = [values] if columns is None: columns = list(range(len(values))) return arrays_to_mgr(values, columns, index, columns, dtype=dtype) # by definition an array here # the dtypes will be coerced to a single dtype values = prep_ndarray(values, copy=copy) if dtype is not None: if not is_dtype_equal(values.dtype, dtype): try: values = values.astype(dtype) except Exception as orig: # e.g. ValueError when trying to cast object dtype to float64 raise ValueError( f"failed to cast to '{dtype}' (Exception was: {orig})" ) from orig index, columns = _get_axes(*values.shape, index=index, columns=columns) values = values.T # if we don't have a dtype specified, then try to convert objects # on the entire block; this is to convert if we have datetimelike's # embedded in an object type if dtype is None and is_object_dtype(values): if values.ndim == 2 and values.shape[0] != 1: # transpose and separate blocks dvals_list = [maybe_infer_to_datetimelike(row) for row in values] for n in range(len(dvals_list)): if isinstance(dvals_list[n], np.ndarray): dvals_list[n] = dvals_list[n].reshape(1, -1) from pandas.core.internals.blocks import make_block # TODO: What about re-joining object columns? block_values = [ make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list)) ] else: datelike_vals = maybe_infer_to_datetimelike(values) block_values = [datelike_vals] else: block_values = [values] return create_block_manager_from_blocks(block_values, [columns, index])
def init_ndarray(values, index, columns, dtype=None, copy=False): # input must be a ndarray, list, Series, index if isinstance(values, ABCSeries): if columns is None: if values.name is not None: columns = [values.name] if index is None: index = values.index else: values = values.reindex(index) # zero len case (GH #2234) if not len(values) and columns is not None and len(columns): values = np.empty((0, 1), dtype=object) # we could have a categorical type passed or coerced to 'category' # recast this to an arrays_to_mgr if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype( dtype ): if not hasattr(values, "dtype"): values = prep_ndarray(values, copy=copy) values = values.ravel() elif copy: values = values.copy() index, columns = _get_axes(len(values), 1, index, columns) return arrays_to_mgr([values], columns, index, columns, dtype=dtype) elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype): # GH#19157 if columns is None: columns = [0] return arrays_to_mgr([values], columns, index, columns, dtype=dtype) # by definition an array here # the dtypes will be coerced to a single dtype values = prep_ndarray(values, copy=copy) if dtype is not None: if not is_dtype_equal(values.dtype, dtype): try: values = values.astype(dtype) except Exception as orig: # e.g. ValueError when trying to cast object dtype to float64 raise ValueError( f"failed to cast to '{dtype}' (Exception was: {orig})" ) from orig index, columns = _get_axes(*values.shape, index=index, columns=columns) values = values.T # if we don't have a dtype specified, then try to convert objects # on the entire block; this is to convert if we have datetimelike's # embedded in an object type if dtype is None and is_object_dtype(values): if values.ndim == 2 and values.shape[0] != 1: # transpose and separate blocks dvals_list = [maybe_infer_to_datetimelike(row) for row in values] for n in range(len(dvals_list)): if isinstance(dvals_list[n], np.ndarray): dvals_list[n] = dvals_list[n].reshape(1, -1) from pandas.core.internals.blocks import make_block # TODO: What about re-joining object columns? block_values = [ make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list)) ] else: datelike_vals = maybe_infer_to_datetimelike(values) block_values = [datelike_vals] else: block_values = [values] return create_block_manager_from_blocks(block_values, [columns, index])
https://github.com/pandas-dev/pandas/issues/12513
TypeError Traceback (most recent call last) <ipython-input-4-7101cf798aa3> in <module>() ----> 1 df = pd.DataFrame(array_dim2 , dtype='datetime64[ns, UTC]') C:\D\Projects\Github\pandas\pandas\core\frame.py in __init__(self, data, index, columns, dtype, copy) 252 else: 253 mgr = self._init_ndarray(data, index, columns, dtype=dty pe, --> 254 copy=copy) 255 elif isinstance(data, (list, types.GeneratorType)): 256 if isinstance(data, types.GeneratorType): C:\D\Projects\Github\pandas\pandas\core\frame.py in _init_ndarray(self, values, index, columns, dtype, copy) 412 413 if dtype is not None: --> 414 if values.dtype != dtype: 415 try: 416 values = values.astype(dtype) TypeError: data type not understood
TypeError
def quantile(self, q=0.5, interpolation: str = "linear"): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """ from pandas import concat def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against 'object' dtypes!") inference = None if is_integer_dtype(vals): inference = np.int64 elif is_datetime64_dtype(vals): inference = "datetime64[ns]" vals = vals.astype(np.float) return vals, inference def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray: if inference: # Check for edge case if not ( is_integer_dtype(inference) and interpolation in {"linear", "midpoint"} ): vals = vals.astype(inference) return vals if is_scalar(q): return self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=q, interpolation=interpolation, ) else: results = [ self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=qi, interpolation=interpolation, ) for qi in q ] result = concat(results, axis=0, keys=q) # fix levels to place quantiles on the inside # TODO(GH-10710): Ideally, we could write this as # >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :] # but this hits https://github.com/pandas-dev/pandas/issues/10710 # which doesn't reorder the list-like `q` on the inner level. order = list(range(1, result.index.nlevels)) + [0] # temporarily saves the index names index_names = np.array(result.index.names) # set index names to positions to avoid confusion result.index.names = np.arange(len(index_names)) # place quantiles on the inside result = result.reorder_levels(order) # restore the index names in order result.index.names = index_names[order] # reorder rows to keep things sorted indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten() return result.take(indices)
def quantile(self, q=0.5, interpolation: str = "linear"): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """ from pandas import concat def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against 'object' dtypes!") inference = None if is_integer_dtype(vals): inference = np.int64 elif is_datetime64_dtype(vals): inference = "datetime64[ns]" vals = vals.astype(np.float) return vals, inference def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray: if inference: # Check for edge case if not ( is_integer_dtype(inference) and interpolation in {"linear", "midpoint"} ): vals = vals.astype(inference) return vals if is_scalar(q): return self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=q, interpolation=interpolation, ) else: results = [ self._get_cythonized_result( "group_quantile", aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.dtype(np.float64), pre_processing=pre_processor, post_processing=post_processor, q=qi, interpolation=interpolation, ) for qi in q ] result = concat(results, axis=0, keys=q) # fix levels to place quantiles on the inside # TODO(GH-10710): Ideally, we could write this as # >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :] # but this hits https://github.com/pandas-dev/pandas/issues/10710 # which doesn't reorder the list-like `q` on the inner level. order = np.roll(list(range(result.index.nlevels)), -1) result = result.reorder_levels(order) result = result.reindex(q, level=-1) # fix order. hi = len(q) * self.ngroups arr = np.arange(0, hi, self.ngroups) arrays = [] for i in range(self.ngroups): arr2 = arr + i arrays.append(arr2) indices = np.concatenate(arrays) assert len(indices) == len(result) return result.take(indices)
https://github.com/pandas-dev/pandas/issues/30289
# Your code here df = pd.DataFrame(np.array([10*[_%4] for _ in range(100)])) df.groupby(0).quantile(0.5) # Out[19]: # 1 2 3 4 5 6 7 8 9 # 0 # 0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 # 1 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 # 2 2.0 2.0 2.0 2.0 2.0 2.0 2.0 2.0 2.0 # 3 3.0 3.0 3.0 3.0 3.0 3.0 3.0 3.0 3.0 df.groupby(0).quantile([0.5,0.99]) --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-20-21c92d2481c9> in <module> ----> 1 df.groupby(0).quantile([0.5,0.99]) ~/PycharmProjects/netsim_stats/venv/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in quantile(self, q, interpolation) 1950 1951 indices = np.concatenate(arrays) -> 1952 assert len(indices) == len(result) 1953 return result.take(indices) 1954 AssertionError: df.quantile([0.5,0.99]) # 0 1 2 3 4 5 6 7 8 9 # 0.50 1.5 1.5 1.5 1.5 1.5 1.5 1.5 1.5 1.5 1.5 # 0.99 3.0 3.0 3.0 3.0 3.0 3.0 3.0 3.0 3.0 3.0 df.groupby(0)[1].quantile(0.5) # 0 # 0 0.0 # 1 1.0 # 2 2.0 # 3 3.0 # Name: 1, dtype: float64 df.groupby(0)[1].quantile([0.5,0.99]) --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-24-ebf6ade716ff> in <module> ----> 1 df.groupby(0)[1].quantile([0.5,0.99]) ~/PycharmProjects/netsim_stats/venv/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in quantile(self, q, interpolation) 1950 1951 indices = np.concatenate(arrays) -> 1952 assert len(indices) == len(result) 1953 return result.take(indices) 1954 AssertionError:
AssertionError
def __new__( cls, data=None, dtype=None, copy=False, name=None, tupleize_cols=True, **kwargs, ) -> "Index": from .range import RangeIndex from pandas import PeriodIndex, DatetimeIndex, TimedeltaIndex from .numeric import Float64Index, Int64Index, UInt64Index from .interval import IntervalIndex from .category import CategoricalIndex name = maybe_extract_name(name, data, cls) if isinstance(data, ABCPandasArray): # ensure users don't accidentally put a PandasArray in an index. data = data.to_numpy() # range if isinstance(data, RangeIndex): return RangeIndex(start=data, copy=copy, dtype=dtype, name=name) elif isinstance(data, range): return RangeIndex.from_range(data, dtype=dtype, name=name) # categorical elif is_categorical_dtype(data) or is_categorical_dtype(dtype): return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs) # interval elif (is_interval_dtype(data) or is_interval_dtype(dtype)) and not is_object_dtype( dtype ): closed = kwargs.get("closed", None) return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed) elif ( is_datetime64_any_dtype(data) or is_datetime64_any_dtype(dtype) or "tz" in kwargs ): if is_dtype_equal(_o_dtype, dtype): # GH#23524 passing `dtype=object` to DatetimeIndex is invalid, # will raise in the where `data` is already tz-aware. So # we leave it out of this step and cast to object-dtype after # the DatetimeIndex construction. # Note we can pass copy=False because the .astype below # will always make a copy return DatetimeIndex(data, copy=False, name=name, **kwargs).astype(object) else: return DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs) elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype): if is_dtype_equal(_o_dtype, dtype): # Note we can pass copy=False because the .astype below # will always make a copy return TimedeltaIndex(data, copy=False, name=name, **kwargs).astype(object) else: return TimedeltaIndex(data, copy=copy, name=name, dtype=dtype, **kwargs) elif is_period_dtype(data) and not is_object_dtype(dtype): return PeriodIndex(data, copy=copy, name=name, **kwargs) # extension dtype elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype): if not (dtype is None or is_object_dtype(dtype)): # coerce to the provided dtype ea_cls = dtype.construct_array_type() data = ea_cls._from_sequence(data, dtype=dtype, copy=False) else: data = np.asarray(data, dtype=object) # coerce to the object dtype data = data.astype(object) return Index(data, dtype=object, copy=copy, name=name, **kwargs) # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): if dtype is not None: # we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced # GH 11836 if is_integer_dtype(dtype): inferred = lib.infer_dtype(data, skipna=False) if inferred == "integer": data = maybe_cast_to_integer_array(data, dtype, copy=copy) elif inferred in ["floating", "mixed-integer-float"]: if isna(data).any(): raise ValueError("cannot convert float NaN to integer") if inferred == "mixed-integer-float": data = maybe_cast_to_integer_array(data, dtype) # If we are actually all equal to integers, # then coerce to integer. try: return cls._try_convert_to_int_index(data, copy, name, dtype) except ValueError: pass # Return an actual float index. return Float64Index(data, copy=copy, name=name) elif inferred == "string": pass else: data = data.astype(dtype) elif is_float_dtype(dtype): inferred = lib.infer_dtype(data, skipna=False) if inferred == "string": pass else: data = data.astype(dtype) else: data = np.array(data, dtype=dtype, copy=copy) # maybe coerce to a sub-class if is_signed_integer_dtype(data.dtype): return Int64Index(data, copy=copy, dtype=dtype, name=name) elif is_unsigned_integer_dtype(data.dtype): return UInt64Index(data, copy=copy, dtype=dtype, name=name) elif is_float_dtype(data.dtype): return Float64Index(data, copy=copy, dtype=dtype, name=name) elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype("object") else: subarr = com.asarray_tuplesafe(data, dtype=object) # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens if copy: subarr = subarr.copy() if dtype is None: inferred = lib.infer_dtype(subarr, skipna=False) if inferred == "integer": try: return cls._try_convert_to_int_index(subarr, copy, name, dtype) except ValueError: pass return Index(subarr, copy=copy, dtype=object, name=name) elif inferred in ["floating", "mixed-integer-float", "integer-na"]: # TODO: Returns IntegerArray for integer-na case in the future return Float64Index(subarr, copy=copy, name=name) elif inferred == "interval": try: return IntervalIndex(subarr, name=name, copy=copy) except ValueError: # GH27172: mixed closed Intervals --> object dtype pass elif inferred == "boolean": # don't support boolean explicitly ATM pass elif inferred != "string": if inferred.startswith("datetime"): try: return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) except (ValueError, OutOfBoundsDatetime): # GH 27011 # If we have mixed timezones, just send it # down the base constructor pass elif inferred.startswith("timedelta"): return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs) elif inferred == "period": try: return PeriodIndex(subarr, name=name, **kwargs) except IncompatibleFrequency: pass if kwargs: raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}") return cls._simple_new(subarr, name, **kwargs) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs) elif data is None or is_scalar(data): raise cls._scalar_data_error(data) else: if tupleize_cols and is_list_like(data): # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from .multi import MultiIndex return MultiIndex.from_tuples(data, names=name or kwargs.get("names")) # other iterable of some kind subarr = com.asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
def __new__( cls, data=None, dtype=None, copy=False, name=None, tupleize_cols=True, **kwargs, ) -> "Index": from .range import RangeIndex from pandas import PeriodIndex, DatetimeIndex, TimedeltaIndex from .numeric import Float64Index, Int64Index, UInt64Index from .interval import IntervalIndex from .category import CategoricalIndex if name is None and hasattr(data, "name"): name = data.name if isinstance(data, ABCPandasArray): # ensure users don't accidentally put a PandasArray in an index. data = data.to_numpy() # range if isinstance(data, RangeIndex): return RangeIndex(start=data, copy=copy, dtype=dtype, name=name) elif isinstance(data, range): return RangeIndex.from_range(data, dtype=dtype, name=name) # categorical elif is_categorical_dtype(data) or is_categorical_dtype(dtype): return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs) # interval elif (is_interval_dtype(data) or is_interval_dtype(dtype)) and not is_object_dtype( dtype ): closed = kwargs.get("closed", None) return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed) elif ( is_datetime64_any_dtype(data) or is_datetime64_any_dtype(dtype) or "tz" in kwargs ): if is_dtype_equal(_o_dtype, dtype): # GH#23524 passing `dtype=object` to DatetimeIndex is invalid, # will raise in the where `data` is already tz-aware. So # we leave it out of this step and cast to object-dtype after # the DatetimeIndex construction. # Note we can pass copy=False because the .astype below # will always make a copy return DatetimeIndex(data, copy=False, name=name, **kwargs).astype(object) else: return DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs) elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype): if is_dtype_equal(_o_dtype, dtype): # Note we can pass copy=False because the .astype below # will always make a copy return TimedeltaIndex(data, copy=False, name=name, **kwargs).astype(object) else: return TimedeltaIndex(data, copy=copy, name=name, dtype=dtype, **kwargs) elif is_period_dtype(data) and not is_object_dtype(dtype): return PeriodIndex(data, copy=copy, name=name, **kwargs) # extension dtype elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype): if not (dtype is None or is_object_dtype(dtype)): # coerce to the provided dtype ea_cls = dtype.construct_array_type() data = ea_cls._from_sequence(data, dtype=dtype, copy=False) else: data = np.asarray(data, dtype=object) # coerce to the object dtype data = data.astype(object) return Index(data, dtype=object, copy=copy, name=name, **kwargs) # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): if dtype is not None: # we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced # GH 11836 if is_integer_dtype(dtype): inferred = lib.infer_dtype(data, skipna=False) if inferred == "integer": data = maybe_cast_to_integer_array(data, dtype, copy=copy) elif inferred in ["floating", "mixed-integer-float"]: if isna(data).any(): raise ValueError("cannot convert float NaN to integer") if inferred == "mixed-integer-float": data = maybe_cast_to_integer_array(data, dtype) # If we are actually all equal to integers, # then coerce to integer. try: return cls._try_convert_to_int_index(data, copy, name, dtype) except ValueError: pass # Return an actual float index. return Float64Index(data, copy=copy, name=name) elif inferred == "string": pass else: data = data.astype(dtype) elif is_float_dtype(dtype): inferred = lib.infer_dtype(data, skipna=False) if inferred == "string": pass else: data = data.astype(dtype) else: data = np.array(data, dtype=dtype, copy=copy) # maybe coerce to a sub-class if is_signed_integer_dtype(data.dtype): return Int64Index(data, copy=copy, dtype=dtype, name=name) elif is_unsigned_integer_dtype(data.dtype): return UInt64Index(data, copy=copy, dtype=dtype, name=name) elif is_float_dtype(data.dtype): return Float64Index(data, copy=copy, dtype=dtype, name=name) elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype("object") else: subarr = com.asarray_tuplesafe(data, dtype=object) # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens if copy: subarr = subarr.copy() if dtype is None: inferred = lib.infer_dtype(subarr, skipna=False) if inferred == "integer": try: return cls._try_convert_to_int_index(subarr, copy, name, dtype) except ValueError: pass return Index(subarr, copy=copy, dtype=object, name=name) elif inferred in ["floating", "mixed-integer-float", "integer-na"]: # TODO: Returns IntegerArray for integer-na case in the future return Float64Index(subarr, copy=copy, name=name) elif inferred == "interval": try: return IntervalIndex(subarr, name=name, copy=copy) except ValueError: # GH27172: mixed closed Intervals --> object dtype pass elif inferred == "boolean": # don't support boolean explicitly ATM pass elif inferred != "string": if inferred.startswith("datetime"): try: return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) except (ValueError, OutOfBoundsDatetime): # GH 27011 # If we have mixed timezones, just send it # down the base constructor pass elif inferred.startswith("timedelta"): return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs) elif inferred == "period": try: return PeriodIndex(subarr, name=name, **kwargs) except IncompatibleFrequency: pass if kwargs: raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}") return cls._simple_new(subarr, name, **kwargs) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs) elif data is None or is_scalar(data): raise cls._scalar_data_error(data) else: if tupleize_cols and is_list_like(data): # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from .multi import MultiIndex return MultiIndex.from_tuples(data, names=name or kwargs.get("names")) # other iterable of some kind subarr = com.asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
https://github.com/pandas-dev/pandas/issues/29069
In [5]: pd.Series([], name=[]) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-2c73ddde103e> in <module> ----> 1 pd.Series([], name=[]) ~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 326 generic.NDFrame.__init__(self, data, fastpath=True) 327 --> 328 self.name = name 329 self._set_axis(0, index, fastpath=True) 330 ~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value) 5257 object.__setattr__(self, name, value) 5258 elif name in self._metadata: -> 5259 object.__setattr__(self, name, value) 5260 else: 5261 try: ~/sandbox/pandas/pandas/core/series.py in name(self, value) 468 def name(self, value): 469 if value is not None and not is_hashable(value): --> 470 raise TypeError("Series.name must be a hashable type") 471 object.__setattr__(self, "_name", value) 472 TypeError: Series.name must be a hashable type
TypeError
def _simple_new(cls, values, name=None, dtype=None): """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ if isinstance(values, (ABCSeries, ABCIndexClass)): # Index._data must always be an ndarray. # This is no-copy for when _values is an ndarray, # which should be always at this point. values = np.asarray(values._values) result = object.__new__(cls) result._data = values # _index_data is a (temporary?) fix to ensure that the direct data # manipulation we do in `_libs/reduction.pyx` continues to work. # We need access to the actual ndarray, since we're messing with # data buffers and strides. We don't re-use `_ndarray_values`, since # we actually set this value too. result._index_data = values result._name = name return result._reset_identity()
def _simple_new(cls, values, name=None, dtype=None): """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ if isinstance(values, (ABCSeries, ABCIndexClass)): # Index._data must always be an ndarray. # This is no-copy for when _values is an ndarray, # which should be always at this point. values = np.asarray(values._values) result = object.__new__(cls) result._data = values # _index_data is a (temporary?) fix to ensure that the direct data # manipulation we do in `_libs/reduction.pyx` continues to work. # We need access to the actual ndarray, since we're messing with # data buffers and strides. We don't re-use `_ndarray_values`, since # we actually set this value too. result._index_data = values result.name = name return result._reset_identity()
https://github.com/pandas-dev/pandas/issues/29069
In [5]: pd.Series([], name=[]) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-2c73ddde103e> in <module> ----> 1 pd.Series([], name=[]) ~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 326 generic.NDFrame.__init__(self, data, fastpath=True) 327 --> 328 self.name = name 329 self._set_axis(0, index, fastpath=True) 330 ~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value) 5257 object.__setattr__(self, name, value) 5258 elif name in self._metadata: -> 5259 object.__setattr__(self, name, value) 5260 else: 5261 try: ~/sandbox/pandas/pandas/core/series.py in name(self, value) 468 def name(self, value): 469 if value is not None and not is_hashable(value): --> 470 raise TypeError("Series.name must be a hashable type") 471 object.__setattr__(self, "_name", value) 472 TypeError: Series.name must be a hashable type
TypeError
def _set_names(self, values, level=None): """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: for name in values: if not is_hashable(name): raise TypeError(f"{type(self).__name__}.name must be a hashable type") self._name = values[0]
def _set_names(self, values, level=None): """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: for name in values: if not is_hashable(name): raise TypeError(f"{type(self).__name__}.name must be a hashable type") self.name = values[0]
https://github.com/pandas-dev/pandas/issues/29069
In [5]: pd.Series([], name=[]) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-2c73ddde103e> in <module> ----> 1 pd.Series([], name=[]) ~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 326 generic.NDFrame.__init__(self, data, fastpath=True) 327 --> 328 self.name = name 329 self._set_axis(0, index, fastpath=True) 330 ~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value) 5257 object.__setattr__(self, name, value) 5258 elif name in self._metadata: -> 5259 object.__setattr__(self, name, value) 5260 else: 5261 try: ~/sandbox/pandas/pandas/core/series.py in name(self, value) 468 def name(self, value): 469 if value is not None and not is_hashable(value): --> 470 raise TypeError("Series.name must be a hashable type") 471 object.__setattr__(self, "_name", value) 472 TypeError: Series.name must be a hashable type
TypeError
def droplevel(self, level=0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. .. versionadded:: 0.23.1 (support for non-MultiIndex) Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] if len(level) == 0: return self if len(level) >= self.nlevels: raise ValueError( f"Cannot remove {len(level)} levels from an index with {self.nlevels} " "levels: at least one level must be left." ) # The two checks above guarantee that here self is a MultiIndex new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from .multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, )
def droplevel(self, level=0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. .. versionadded:: 0.23.1 (support for non-MultiIndex) Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] if len(level) == 0: return self if len(level) >= self.nlevels: raise ValueError( f"Cannot remove {len(level)} levels from an index with {self.nlevels} " "levels: at least one level must be left." ) # The two checks above guarantee that here self is a MultiIndex new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result.name = new_names[0] return result else: from .multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, )
https://github.com/pandas-dev/pandas/issues/29069
In [5]: pd.Series([], name=[]) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-2c73ddde103e> in <module> ----> 1 pd.Series([], name=[]) ~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 326 generic.NDFrame.__init__(self, data, fastpath=True) 327 --> 328 self.name = name 329 self._set_axis(0, index, fastpath=True) 330 ~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value) 5257 object.__setattr__(self, name, value) 5258 elif name in self._metadata: -> 5259 object.__setattr__(self, name, value) 5260 else: 5261 try: ~/sandbox/pandas/pandas/core/series.py in name(self, value) 468 def name(self, value): 469 if value is not None and not is_hashable(value): --> 470 raise TypeError("Series.name must be a hashable type") 471 object.__setattr__(self, "_name", value) 472 TypeError: Series.name must be a hashable type
TypeError
def __setstate__(self, state): """ Necessary for making this object picklable. """ if isinstance(state, dict): self._data = state.pop("data") for k, v in state.items(): setattr(self, k, v) elif isinstance(state, tuple): if len(state) == 2: nd_state, own_state = state data = np.empty(nd_state[1], dtype=nd_state[2]) np.ndarray.__setstate__(data, nd_state) self._name = own_state[0] else: # pragma: no cover data = np.empty(state) np.ndarray.__setstate__(data, state) self._data = data self._reset_identity() else: raise Exception("invalid pickle state")
def __setstate__(self, state): """ Necessary for making this object picklable. """ if isinstance(state, dict): self._data = state.pop("data") for k, v in state.items(): setattr(self, k, v) elif isinstance(state, tuple): if len(state) == 2: nd_state, own_state = state data = np.empty(nd_state[1], dtype=nd_state[2]) np.ndarray.__setstate__(data, nd_state) self.name = own_state[0] else: # pragma: no cover data = np.empty(state) np.ndarray.__setstate__(data, state) self._data = data self._reset_identity() else: raise Exception("invalid pickle state")
https://github.com/pandas-dev/pandas/issues/29069
In [5]: pd.Series([], name=[]) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-2c73ddde103e> in <module> ----> 1 pd.Series([], name=[]) ~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 326 generic.NDFrame.__init__(self, data, fastpath=True) 327 --> 328 self.name = name 329 self._set_axis(0, index, fastpath=True) 330 ~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value) 5257 object.__setattr__(self, name, value) 5258 elif name in self._metadata: -> 5259 object.__setattr__(self, name, value) 5260 else: 5261 try: ~/sandbox/pandas/pandas/core/series.py in name(self, value) 468 def name(self, value): 469 if value is not None and not is_hashable(value): --> 470 raise TypeError("Series.name must be a hashable type") 471 object.__setattr__(self, "_name", value) 472 TypeError: Series.name must be a hashable type
TypeError
def __new__( cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None, ): dtype = CategoricalDtype._from_values_or_dtype(data, categories, ordered, dtype) name = maybe_extract_name(name, data, cls) if not is_categorical_dtype(data): # don't allow scalars # if data is None, then categories must be provided if is_scalar(data): if data is not None or categories is None: raise cls._scalar_data_error(data) data = [] data = cls._create_categorical(data, dtype=dtype) data = data.copy() if copy else data return cls._simple_new(data, name=name)
def __new__( cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None, ): dtype = CategoricalDtype._from_values_or_dtype(data, categories, ordered, dtype) if name is None and hasattr(data, "name"): name = data.name if not is_categorical_dtype(data): # don't allow scalars # if data is None, then categories must be provided if is_scalar(data): if data is not None or categories is None: raise cls._scalar_data_error(data) data = [] data = cls._create_categorical(data, dtype=dtype) data = data.copy() if copy else data return cls._simple_new(data, name=name)
https://github.com/pandas-dev/pandas/issues/29069
In [5]: pd.Series([], name=[]) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-2c73ddde103e> in <module> ----> 1 pd.Series([], name=[]) ~/sandbox/pandas/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath) 326 generic.NDFrame.__init__(self, data, fastpath=True) 327 --> 328 self.name = name 329 self._set_axis(0, index, fastpath=True) 330 ~/sandbox/pandas/pandas/core/generic.py in __setattr__(self, name, value) 5257 object.__setattr__(self, name, value) 5258 elif name in self._metadata: -> 5259 object.__setattr__(self, name, value) 5260 else: 5261 try: ~/sandbox/pandas/pandas/core/series.py in name(self, value) 468 def name(self, value): 469 if value is not None and not is_hashable(value): --> 470 raise TypeError("Series.name must be a hashable type") 471 object.__setattr__(self, "_name", value) 472 TypeError: Series.name must be a hashable type
TypeError