Code
stringlengths
103
85.9k
Summary
listlengths
0
94
Please provide a description of the function:def nunique(self, dropna=True): uniqs = self.unique() n = len(uniqs) if dropna and isna(uniqs).any(): n -= 1 return n
[ "\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the count.\n\n Returns\n -------\n int\n\n See Also\n --------\n ...
Please provide a description of the function:def memory_usage(self, deep=False): if hasattr(self.array, 'memory_usage'): return self.array.memory_usage(deep=deep) v = self.array.nbytes if deep and is_object_dtype(self) and not PYPY: v += lib.memory_usage_of_obje...
[ "\n Memory usage of the values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n See Also\n --------\n ...
Please provide a description of the function:def _expand_user(filepath_or_buffer): if isinstance(filepath_or_buffer, str): return os.path.expanduser(filepath_or_buffer) return filepath_or_buffer
[ "Return the argument with an initial component of ~ or ~user\n replaced by that user's home directory.\n\n Parameters\n ----------\n filepath_or_buffer : object to be converted if possible\n\n Returns\n -------\n expanded_filepath_or_buffer : an expanded filepath or the\n ...
Please provide a description of the function:def _stringify_path(filepath_or_buffer): try: import pathlib _PATHLIB_INSTALLED = True except ImportError: _PATHLIB_INSTALLED = False try: from py.path import local as LocalPath _PY_PATH_INSTALLED = True except Im...
[ "Attempt to convert a path-like object to a string.\n\n Parameters\n ----------\n filepath_or_buffer : object to be converted\n\n Returns\n -------\n str_filepath_or_buffer : maybe a string version of the object\n\n Notes\n -----\n Objects supporting the fspath protocol (python 3.6+) are ...
Please provide a description of the function:def get_filepath_or_buffer(filepath_or_buffer, encoding=None, compression=None, mode=None): filepath_or_buffer = _stringify_path(filepath_or_buffer) if _is_url(filepath_or_buffer): req = urlopen(filepath_or_buffer) con...
[ "\n If the filepath_or_buffer is a url, translate and return the buffer.\n Otherwise passthrough.\n\n Parameters\n ----------\n filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),\n or buffer\n compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optio...
Please provide a description of the function:def _infer_compression(filepath_or_buffer, compression): # No compression has been explicitly specified if compression is None: return None # Infer compression if compression == 'infer': # Convert all path types (e.g. pathlib.Path) to s...
[ "\n Get the compression method for filepath_or_buffer. If compression='infer',\n the inferred compression method is returned. Otherwise, the input\n compression method is returned unchanged, unless it's invalid, in which\n case an error is raised.\n\n Parameters\n ----------\n filepath_or_buffe...
Please provide a description of the function:def _get_handle(path_or_buf, mode, encoding=None, compression=None, memory_map=False, is_text=True): try: from s3fs import S3File need_text_wrapping = (BytesIO, S3File) except ImportError: need_text_wrapping = (BytesIO,) ...
[ "\n Get file handle for given path/buffer and mode.\n\n Parameters\n ----------\n path_or_buf :\n a path (str) or buffer\n mode : str\n mode to open path_or_buf with\n encoding : str or None\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None\n If 'inf...
Please provide a description of the function:def _td_array_cmp(cls, op): opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented if _is_co...
[ "\n Wrap comparison operations to convert timedelta-like to timedelta64\n " ]
Please provide a description of the function:def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): inferred_freq = None unit = parse_timedelta_unit(unit) # Unwrap whatever we have into a np.ndarray if not hasattr(data, 'dtype'): # e.g. list, tuple if np.ndim(data) ==...
[ "\n Parameters\n ----------\n array : list-like\n copy : bool, default False\n unit : str, default \"ns\"\n The timedelta unit to treat integers as multiples of.\n errors : {\"raise\", \"coerce\", \"ignore\"}, default \"raise\"\n How to handle elements that cannot be converted to tim...
Please provide a description of the function:def ints_to_td64ns(data, unit="ns"): copy_made = False unit = unit if unit is not None else "ns" if data.dtype != np.int64: # converting to int64 makes a copy, so we can avoid # re-copying later data = data.astype(np.int64) c...
[ "\n Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating\n the integers as multiples of the given timedelta unit.\n\n Parameters\n ----------\n data : numpy.ndarray with integer-dtype\n unit : str, default \"ns\"\n The timedelta unit to treat integers as multiples of.\n\...
Please provide a description of the function:def objects_to_td64ns(data, unit="ns", errors="raise"): # coerce Index to np.ndarray, converting string-dtype if necessary values = np.array(data, dtype=np.object_, copy=False) result = array_to_timedelta64(values, unit=uni...
[ "\n Convert a object-dtyped or string-dtyped array into an\n timedelta64[ns]-dtyped array.\n\n Parameters\n ----------\n data : ndarray or Index\n unit : str, default \"ns\"\n The timedelta unit to treat integers as multiples of.\n errors : {\"raise\", \"coerce\", \"ignore\"}, default \"...
Please provide a description of the function:def _add_datetime_arraylike(self, other): if isinstance(other, np.ndarray): # At this point we have already checked that dtype is datetime64 from pandas.core.arrays import DatetimeArray other = DatetimeArray(other) ...
[ "\n Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.\n " ]
Please provide a description of the function:def components(self): from pandas import DataFrame columns = ['days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds'] hasnans = self._hasnans if hasnans: def f(x): ...
[ "\n Return a dataframe of the components (days, hours, minutes,\n seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.\n\n Returns\n -------\n a DataFrame\n " ]
Please provide a description of the function:def register_writer(klass): if not callable(klass): raise ValueError("Can only register callables as engines") engine_name = klass.engine _writers[engine_name] = klass
[ "\n Add engine to the excel writer registry.io.excel.\n\n You must use this method to integrate with ``to_excel``.\n\n Parameters\n ----------\n klass : ExcelWriter\n " ]
Please provide a description of the function:def _excel2num(x): index = 0 for c in x.upper().strip(): cp = ord(c) if cp < ord("A") or cp > ord("Z"): raise ValueError("Invalid column name: {x}".format(x=x)) index = index * 26 + cp - ord("A") + 1 return index - 1
[ "\n Convert Excel column name like 'AB' to 0-based column index.\n\n Parameters\n ----------\n x : str\n The Excel column name to convert to a 0-based column index.\n\n Returns\n -------\n num : int\n The column index corresponding to the name.\n\n Raises\n ------\n Value...
Please provide a description of the function:def _range2cols(areas): cols = [] for rng in areas.split(","): if ":" in rng: rng = rng.split(":") cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) else: cols.append(_excel2num(rng)) return...
[ "\n Convert comma separated list of column names and ranges to indices.\n\n Parameters\n ----------\n areas : str\n A string containing a sequence of column ranges (or areas).\n\n Returns\n -------\n cols : list\n A list of 0-based column indices.\n\n Examples\n --------\n ...
Please provide a description of the function:def _maybe_convert_usecols(usecols): if usecols is None: return usecols if is_integer(usecols): warnings.warn(("Passing in an integer for `usecols` has been " "deprecated. Please pass in a list of int from " ...
[ "\n Convert `usecols` into a compatible format for parsing in `parsers.py`.\n\n Parameters\n ----------\n usecols : object\n The use-columns object to potentially convert.\n\n Returns\n -------\n converted : object\n The compatible format of `usecols`.\n " ]
Please provide a description of the function:def _fill_mi_header(row, control_row): last = row[0] for i in range(1, len(row)): if not control_row[i]: last = row[i] if row[i] == '' or row[i] is None: row[i] = last else: control_row[i] = False ...
[ "Forward fill blank entries in row but only inside the same parent index.\n\n Used for creating headers in Multiindex.\n Parameters\n ----------\n row : list\n List of items in a single row.\n control_row : list of bool\n Helps to determine if particular column is in same parent index a...
Please provide a description of the function:def _pop_header_name(row, index_col): # Pop out header name and fill w/blank. i = index_col if not is_list_like(index_col) else max(index_col) header_name = row[i] header_name = None if header_name == "" else header_name return header_name, row[:i]...
[ "\n Pop the header name for MultiIndex parsing.\n\n Parameters\n ----------\n row : list\n The data row to parse for the header name.\n index_col : int, list\n The index columns for our data. Assumed to be non-null.\n\n Returns\n -------\n header_name : str\n The extract...
Please provide a description of the function:def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(), target=None, **kwargs): return Scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target)
[ "Ensure that we are grabbing the correct scope." ]
Please provide a description of the function:def _replacer(x): # get the hex repr of the binary char and remove 0x and pad by pad_size # zeros try: hexin = ord(x) except TypeError: # bytes literals masquerade as ints when iterating in py3 hexin = x return hex(hexin)
[ "Replace a number with its hexadecimal representation. Used to tag\n temporary variables with their calling scope's id.\n " ]
Please provide a description of the function:def _raw_hex_id(obj): # interpret as a pointer since that's what really what id returns packed = struct.pack('@P', id(obj)) return ''.join(map(_replacer, packed))
[ "Return the padded hexadecimal id of ``obj``." ]
Please provide a description of the function:def _get_pretty_string(obj): sio = StringIO() pprint.pprint(obj, stream=sio) return sio.getvalue()
[ "Return a prettier version of obj\n\n Parameters\n ----------\n obj : object\n Object to pretty print\n\n Returns\n -------\n s : str\n Pretty print object repr\n " ]
Please provide a description of the function:def resolve(self, key, is_local): try: # only look for locals in outer scope if is_local: return self.scope[key] # not a local variable so check in resolvers if we have them if self.has_resolve...
[ "Resolve a variable name in a possibly local context\n\n Parameters\n ----------\n key : str\n A variable name\n is_local : bool\n Flag indicating whether the variable is local or not (prefixed with\n the '@' symbol)\n\n Returns\n -------\n ...
Please provide a description of the function:def swapkey(self, old_key, new_key, new_value=None): if self.has_resolvers: maps = self.resolvers.maps + self.scope.maps else: maps = self.scope.maps maps.append(self.temps) for mapping in maps: i...
[ "Replace a variable name, with a potentially new value.\n\n Parameters\n ----------\n old_key : str\n Current variable name to replace\n new_key : str\n New variable name to replace `old_key` with\n new_value : object\n Value to be replaced along w...
Please provide a description of the function:def _get_vars(self, stack, scopes): variables = itertools.product(scopes, stack) for scope, (frame, _, _, _, _, _) in variables: try: d = getattr(frame, 'f_' + scope) self.scope = self.scope.new_child(d) ...
[ "Get specifically scoped variables from a list of stack frames.\n\n Parameters\n ----------\n stack : list\n A list of stack frames as returned by ``inspect.stack()``\n scopes : sequence of strings\n A sequence containing valid stack frame attribute names that\n ...
Please provide a description of the function:def update(self, level): sl = level + 1 # add sl frames to the scope starting with the # most distant and overwriting with more current # makes sure that we can capture variable scope stack = inspect.stack() try: ...
[ "Update the current scope by going back `level` levels.\n\n Parameters\n ----------\n level : int or None, optional, default None\n " ]
Please provide a description of the function:def add_tmp(self, value): name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__, num=self.ntemps, hex_id=_raw_hex_id(self)) # add to inner most sco...
[ "Add a temporary variable to the scope.\n\n Parameters\n ----------\n value : object\n An arbitrary object to be assigned to a temporary variable.\n\n Returns\n -------\n name : basestring\n The name of the temporary variable created.\n " ]
Please provide a description of the function:def full_scope(self): maps = [self.temps] + self.resolvers.maps + self.scope.maps return DeepChainMap(*maps)
[ "Return the full scope for use with passing to engines transparently\n as a mapping.\n\n Returns\n -------\n vars : DeepChainMap\n All variables in this scope.\n " ]
Please provide a description of the function:def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, chunksize=None, iterator=False): if format is None: buffer_error_msg = ("If this is a buffer object rather " "than a string name, you must speci...
[ "\n Read SAS files stored as either XPORT or SAS7BDAT format files.\n\n Parameters\n ----------\n filepath_or_buffer : string or file-like object\n Path to the SAS file.\n format : string {'xport', 'sas7bdat'} or None\n If None, file format is inferred from file extension. If 'xport' or...
Please provide a description of the function:def _coerce_method(converter): def wrapper(self): if len(self) == 1: return converter(self.iloc[0]) raise TypeError("cannot convert the series to " "{0}".format(str(converter))) wrapper.__name__ = "__{name}__...
[ "\n Install the scalar coercion methods.\n " ]
Please provide a description of the function:def _init_dict(self, data, index=None, dtype=None): # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: keys, values = zip(*data.items()) ...
[ "\n Derive the \"_data\" and \"index\" attributes of a new Series from a\n dictionary input.\n\n Parameters\n ----------\n data : dict or dict-like\n Data used to populate the new Series\n index : Index or index-like, default None\n index for the new S...
Please provide a description of the function:def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False): warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.Series(..) " ...
[ "\n Construct Series from array.\n\n .. deprecated :: 0.23.0\n Use pd.Series(..) constructor instead.\n " ]
Please provide a description of the function:def _set_axis(self, axis, labels, fastpath=False): if not fastpath: labels = ensure_index(labels) is_all_dates = labels.is_all_dates if is_all_dates: if not isinstance(labels, (DatetimeI...
[ "\n Override generic, we want to set the _typ here.\n " ]
Please provide a description of the function:def asobject(self): warnings.warn("'asobject' is deprecated. Use 'astype(object)'" " instead", FutureWarning, stacklevel=2) return self.astype(object).values
[ "\n Return object Series which contains boxed values.\n\n .. deprecated :: 0.23.0\n\n Use ``astype(object)`` instead.\n\n *this is an internal non-public method*\n " ]
Please provide a description of the function:def compress(self, condition, *args, **kwargs): msg = ("Series.compress(condition) is deprecated. " "Use 'Series[condition]' or " "'np.asarray(series).compress(condition)' instead.") warnings.warn(msg, FutureWarning, sta...
[ "\n Return selected slices of an array along given axis as a Series.\n\n .. deprecated:: 0.24.0\n\n See Also\n --------\n numpy.ndarray.compress\n " ]
Please provide a description of the function:def nonzero(self): msg = ("Series.nonzero() is deprecated " "and will be removed in a future version." "Use Series.to_numpy().nonzero() instead") warnings.warn(msg, FutureWarning, stacklevel=2) return self._value...
[ "\n Return the *integer* indices of the elements that are non-zero.\n\n .. deprecated:: 0.24.0\n Please use .to_numpy().nonzero() as a replacement.\n\n This method is equivalent to calling `numpy.nonzero` on the\n series data. For compatibility with NumPy, the return value is\n...
Please provide a description of the function:def view(self, dtype=None): return self._constructor(self._values.view(dtype), index=self.index).__finalize__(self)
[ "\n Create a new view of the Series.\n\n This function will return a new Series with a view of the same\n underlying values in memory, optionally reinterpreted with a new data\n type. The new data type must preserve the same size in bytes as to not\n cause index misalignment.\n\n ...
Please provide a description of the function:def _ixs(self, i, axis=0): try: # dispatch to the values if we need values = self._values if isinstance(values, np.ndarray): return libindex.get_value_at(values, i) else: return...
[ "\n Return the i-th value or values in the Series by location.\n\n Parameters\n ----------\n i : int, slice, or sequence of integers\n\n Returns\n -------\n scalar (int) or Series (slice, sequence)\n " ]
Please provide a description of the function:def repeat(self, repeats, axis=None): nv.validate_repeat(tuple(), dict(axis=axis)) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, ...
[ "\n Repeat elements of a Series.\n\n Returns a new Series where each element of the current Series\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This...
Please provide a description of the function:def reset_index(self, level=None, drop=False, name=None, inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') if drop: new_index = ibase.default_index(len(self)) if level is not None: if not isinst...
[ "\n Generate a new DataFrame or Series with the index reset.\n\n This is useful when the index needs to be treated as a column, or\n when the index is meaningless and needs to be reset to the default\n before another operation.\n\n Parameters\n ----------\n level : i...
Please provide a description of the function:def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, index=True, length=False, dtype=False, name=False, max_rows=None): formatter = fmt.SeriesFormatter(self, name=name, length=length, ...
[ "\n Render a string representation of the Series.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n na_rep : str, optional\n String representation of NaN to use, default 'NaN'.\n float_format : one-parameter function, ...
Please provide a description of the function:def to_dict(self, into=dict): # GH16122 into_c = com.standardize_mapping(into) return into_c(self.items())
[ "\n Convert Series to {label -> value} dict or dict-like object.\n\n Parameters\n ----------\n into : class, default dict\n The collections.abc.Mapping subclass to use as the return\n object. Can be the actual class or an empty\n instance of the mapping t...
Please provide a description of the function:def to_frame(self, name=None): if name is None: df = self._constructor_expanddim(self) else: df = self._constructor_expanddim({name: self}) return df
[ "\n Convert Series to DataFrame.\n\n Parameters\n ----------\n name : object, default None\n The passed name should substitute for the series name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame representation of Seri...
Please provide a description of the function:def to_sparse(self, kind='block', fill_value=None): # TODO: deprecate from pandas.core.sparse.series import SparseSeries values = SparseArray(self, kind=kind, fill_value=fill_value) return SparseSeries( values, index=self...
[ "\n Convert Series to SparseSeries.\n\n Parameters\n ----------\n kind : {'block', 'integer'}, default 'block'\n fill_value : float, defaults to NaN (missing)\n Value to use for filling NaN values.\n\n Returns\n -------\n SparseSeries\n S...
Please provide a description of the function:def _set_name(self, name, inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') ser = self if inplace else self.copy() ser.name = name return ser
[ "\n Set the Series name.\n\n Parameters\n ----------\n name : str\n inplace : bool\n whether to modify `self` directly or return a copy\n " ]
Please provide a description of the function:def count(self, level=None): if level is None: return notna(com.values_from_object(self)).sum() if isinstance(level, str): level = self.index._get_level_number(level) lev = self.index.levels[level] level_code...
[ "\n Return number of non-NA/null observations in the Series.\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series.\n\n Returns\n ...
Please provide a description of the function:def drop_duplicates(self, keep='first', inplace=False): return super().drop_duplicates(keep=keep, inplace=inplace)
[ "\n Return Series with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n ...
Please provide a description of the function:def idxmin(self, axis=0, skipna=True, *args, **kwargs): skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) i = nanops.nanargmin(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self....
[ "\n Return the row label of the minimum value.\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\...
Please provide a description of the function:def idxmax(self, axis=0, skipna=True, *args, **kwargs): skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self....
[ "\n Return the row label of the maximum value.\n\n If multiple values equal the maximum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\...
Please provide a description of the function:def round(self, decimals=0, *args, **kwargs): nv.validate_round(args, kwargs) result = com.values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result
[ "\n Round each value in a Series to the given number of decimals.\n\n Parameters\n ----------\n decimals : int\n Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of\n positions to the left of the decima...
Please provide a description of the function:def quantile(self, q=0.5, interpolation='linear'): self._check_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolatio...
[ "\n Return value at the given quantile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n .. versionadded:: 0.1...
Please provide a description of the function:def corr(self, other, method='pearson', min_periods=None): this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan if method in ['pearson', 'spearman', 'kendall'] or callable(method): ...
[ "\n Compute correlation with `other` Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the correlation.\n method : {'pearson', 'kendall', 'spearman'} or callable\n * pearson : standard correlation co...
Please provide a description of the function:def cov(self, other, min_periods=None): this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan return nanops.nancov(this.values, other.values, min_periods=min_period...
[ "\n Compute covariance with Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the covariance.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n\n ...
Please provide a description of the function:def diff(self, periods=1): result = algorithms.diff(com.values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self)
[ "\n First discrete difference of element.\n\n Calculates the difference of a Series element compared with another\n element in the Series (default is element in previous row).\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculati...
Please provide a description of the function:def dot(self, other): from pandas.core.frame import DataFrame if isinstance(other, (Series, DataFrame)): common = self.index.union(other.index) if (len(common) > len(self.index) or len(common) > len(other.i...
[ "\n Compute the dot product between the Series and the columns of other.\n\n This method computes the dot product between the Series and another\n one, or the Series and each columns of a DataFrame, or the Series and\n each columns of an array.\n\n It can also be called using `sel...
Please provide a description of the function:def append(self, to_append, ignore_index=False, verify_integrity=False): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] + to_append else: to_concat = [self, t...
[ "\n Concatenate two or more Series.\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n Series to append with self.\n ignore_index : bool, default False\n If True, do not use the index labels.\n\n .. versionadded:: 0.19.0\n\n...
Please provide a description of the function:def _binop(self, other, func, level=None, fill_value=None): if not isinstance(other, Series): raise AssertionError('Other operand must be Series') new_index = self.index this = self if not self.index.equals(other.index)...
[ "\n Perform generic binary operation with optional fill value.\n\n Parameters\n ----------\n other : Series\n func : binary operator\n fill_value : float or object\n Value to substitute for NA/null values. If both Series are NA in a\n location, the res...
Please provide a description of the function:def combine(self, other, func, fill_value=None): if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, ...
[ "\n Combine the Series with a Series or scalar according to `func`.\n\n Combine the Series and `other` using `func` to perform elementwise\n selection for combined Series.\n `fill_value` is assumed when value is missing at some index\n from one of the two objects being combined.\n...
Please provide a description of the function:def combine_first(self, other): new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if is_datetimelike(this) and not is_datetimelike(other): oth...
[ "\n Combine Series values, choosing the calling Series's values first.\n\n Parameters\n ----------\n other : Series\n The value(s) to be combined with the `Series`.\n\n Returns\n -------\n Series\n The result of combining the Series with the oth...
Please provide a description of the function:def update(self, other): other = other.reindex_like(self) mask = notna(other) self._data = self._data.putmask(mask=mask, new=other, inplace=True) self._maybe_update_cacher()
[ "\n Modify Series in place using non-NA values from passed\n Series. Aligns on index.\n\n Parameters\n ----------\n other : Series\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, 5, 6]))\n >>> s\n 0 4...
Please provide a description of the function:def sort_values(self, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): inplace = validate_bool_kwarg(inplace, 'inplace') # Validate the axis parameter self._get_axis_number(axis) # GH ...
[ "\n Sort by the values.\n\n Sort a Series in ascending or descending order by some\n criterion.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Axis to direct sorting. The value 'index' is accepted for\n compatibility with DataFrame.so...
Please provide a description of the function:def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): # TODO: this can be combined with DataFrame.sort_index impl as # almost identical inplace =...
[ "\n Sort Series by index labels.\n\n Returns a new Series sorted by label if `inplace` argument is\n ``False``, otherwise updates the original series and returns None.\n\n Parameters\n ----------\n axis : int, default 0\n Axis to direct sorting. This can only be ...
Please provide a description of the function:def argsort(self, axis=0, kind='quicksort', order=None): values = self._values mask = isna(values) if mask.any(): result = Series(-1, index=self.index, name=self.name, dtype='int64') notmas...
[ "\n Override ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values.\n\n Parameters\n ----------\n axis : int\n Has no effect but is accepted for compatibility with numpy.\n kind : {'merges...
Please provide a description of the function:def nlargest(self, n=5, keep='first'): return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
[ "\n Return the largest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many descending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Serie...
Please provide a description of the function:def nsmallest(self, n=5, keep='first'): return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
[ "\n Return the smallest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many ascending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Serie...
Please provide a description of the function:def swaplevel(self, i=-2, j=-1, copy=True): new_index = self.index.swaplevel(i, j) return self._constructor(self._values, index=new_index, copy=copy).__finalize__(self)
[ "\n Swap levels i and j in a MultiIndex.\n\n Parameters\n ----------\n i, j : int, str (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n Series\n Series with levels swapped in MultiIndex.\n\n ...
Please provide a description of the function:def reorder_levels(self, order): if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception('Can only reorder levels on a hierarchical axis.') result = self.copy() result.index = result.index.reorder_levels(or...
[ "\n Rearrange index levels using input order.\n\n May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int representing new level order\n (reference level by number or key)\n\n Returns\n -------\n type of caller (new ob...
def map(self, arg, na_action=None):
    """Map values of the Series according to an input correspondence.

    Substitutes each value with another value derived from a function,
    a ``dict`` or a Series.

    Parameters
    ----------
    arg : function, dict, or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NA values without passing them to `arg`.

    Returns
    -------
    Series
        Series with the same index, holding the mapped values.
    """
    mapped = super()._map_values(arg, na_action=na_action)
    return self._constructor(mapped, index=self.index).__finalize__(self)
[ "\n Map values of Series according to input correspondence.\n\n Used for substituting each value in a Series with another value,\n that may be derived from a function, a ``dict`` or\n a :class:`Series`.\n\n Parameters\n ----------\n arg : function, dict, or Series\n ...
Please provide a description of the function:def apply(self, func, convert_dtype=True, args=(), **kwds): if len(self) == 0: return self._constructor(dtype=self.dtype, index=self.index).__finalize__(self) # dispatch to agg if isinstance(f...
[ "\n Invoke function on values of Series.\n\n Can be ufunc (a NumPy function that applies to the entire Series)\n or a Python function that only works on single values.\n\n Parameters\n ----------\n func : function\n Python function or NumPy ufunc to apply.\n ...
Please provide a description of the function:def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, Categorical): ...
[ "\n Perform a reduction operation.\n\n If we have an ndarray as a value, then simply perform the operation,\n otherwise delegate to the object.\n " ]
Please provide a description of the function:def rename(self, index=None, **kwargs): kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False), 'inplace') non_mapping = is_scalar(index) or (is_list_like(index) and ...
[ "\n Alter Series index labels or name.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n Alternatively, change ``Series.name`` with a scalar value.\n\n See the :r...
Please provide a description of the function:def reindex_axis(self, labels, axis=0, **kwargs): # for compatibility with higher dims if axis != 0: raise ValueError("cannot reindex series on non-zero axis!") msg = ("'.reindex_axis' is deprecated and will be removed in a future...
[ "\n Conform Series to new index with optional filling logic.\n\n .. deprecated:: 0.21.0\n Use ``Series.reindex`` instead.\n " ]
def memory_usage(self, index=True, deep=False):
    """Return the memory usage of the Series, in bytes.

    Parameters
    ----------
    index : bool, default True
        Whether to include the memory usage of the index.
    deep : bool, default False
        If True, introspect `object` dtype elements for their
        system-level memory consumption.

    Returns
    -------
    int
        Bytes of memory consumed.
    """
    total = super().memory_usage(deep=deep)
    if index:
        total += self.index.memory_usage(deep=deep)
    return total
[ "\n Return the memory usage of the Series.\n\n The memory usage can optionally include the contribution of\n the index and of elements of `object` dtype.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of t...
def isin(self, values):
    """Check whether elements of the Series are contained in `values`.

    Parameters
    ----------
    values : set or list-like
        The sequence of values to test membership against.

    Returns
    -------
    Series
        Boolean Series, True where the element matches one of `values`.
    """
    membership = algorithms.isin(self, values)
    return self._constructor(membership,
                             index=self.index).__finalize__(self)
[ "\n Check whether `values` are contained in Series.\n\n Return a boolean Series showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of v...
def between(self, left, right, inclusive=True):
    """Return a boolean mask equivalent to ``left <= self <= right``.

    Parameters
    ----------
    left : scalar
        Left boundary.
    right : scalar
        Right boundary.
    inclusive : bool, default True
        Whether the boundaries themselves count as "between".

    Returns
    -------
    Boolean mask, True wherever the value lies between the boundaries.
    NA values compare as False.
    """
    if inclusive:
        above_left = self >= left
        below_right = self <= right
    else:
        above_left = self > left
        below_right = self < right
    return above_left & below_right
[ "\n Return boolean Series equivalent to left <= series <= right.\n\n This function returns a boolean vector containing `True` wherever the\n corresponding Series element is between the boundary values `left` and\n `right`. NA values are treated as `False`.\n\n Parameters\n ...
Please provide a description of the function:def from_csv(cls, path, sep=',', parse_dates=True, header=None, index_col=0, encoding=None, infer_datetime_format=False): # We're calling `DataFrame.from_csv` in the implementation, # which will propagate a warning regarding `from_c...
[ "\n Read CSV file.\n\n .. deprecated:: 0.21.0\n Use :func:`pandas.read_csv` instead.\n\n It is preferable to use the more powerful :func:`pandas.read_csv`\n for most general purposes, but ``from_csv`` makes for an easy\n roundtrip to and from a file (the exact counterpa...
Please provide a description of the function:def dropna(self, axis=0, inplace=False, **kwargs): inplace = validate_bool_kwarg(inplace, 'inplace') kwargs.pop('how', None) if kwargs: raise TypeError('dropna() got an unexpected keyword ' 'argument "{...
[ "\n Return a new Series with missing values removed.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n There is only ...
def valid(self, inplace=False, **kwargs):
    """Return Series without null values.

    .. deprecated:: 0.23.0
        Use :meth:`Series.dropna` instead.
    """
    # Deprecated alias: warn, then delegate straight to dropna.
    msg = ("Method .valid will be removed in a future version. "
           "Use .dropna instead.")
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return self.dropna(inplace=inplace, **kwargs)
[ "\n Return Series without null values.\n\n .. deprecated:: 0.23.0\n Use :meth:`Series.dropna` instead.\n " ]
Please provide a description of the function:def to_timestamp(self, freq=None, how='start', copy=True): new_values = self._values if copy: new_values = new_values.copy() new_index = self.index.to_timestamp(freq=freq, how=how) return self._constructor(new_values, ...
[ "\n Cast to DatetimeIndex of Timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of per...
Please provide a description of the function:def to_period(self, freq=None, copy=True): new_values = self._values if copy: new_values = new_values.copy() new_index = self.index.to_period(freq=freq) return self._constructor(new_values, ...
[ "\n Convert Series from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed).\n\n Parameters\n ----------\n freq : str, default None\n Frequency associated with the PeriodIndex.\n copy : bool, default True\n Whether...
Please provide a description of the function:def to_numeric(arg, errors='raise', downcast=None): if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): raise ValueError('invalid downcasting method provided') is_series = False is_index = False is_scalars = False if isinst...
[ "\n Convert argument to a numeric type.\n\n The default return dtype is `float64` or `int64`\n depending on the data supplied. Use the `downcast` parameter\n to obtain other dtypes.\n\n Please note that precision loss may occur if really large numbers\n are passed in. Due to the internal limitatio...
Please provide a description of the function:def _get_fill(arr: ABCSparseArray) -> np.ndarray: try: return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) except ValueError: return np.asarray(arr.fill_value)
[ "\n Create a 0-dim ndarray containing the fill value\n\n Parameters\n ----------\n arr : SparseArray\n\n Returns\n -------\n fill_value : ndarray\n 0-dim ndarray with just the fill value.\n\n Notes\n -----\n coerce fill_value to arr dtype if possible\n int64 SparseArray can h...
Please provide a description of the function:def _sparse_array_op( left: ABCSparseArray, right: ABCSparseArray, op: Callable, name: str ) -> Any: if name.startswith('__'): # For lookups in _libs.sparse we need non-dunder op name name = name[2:-2] # dtype use...
[ "\n Perform a binary operation between two arrays.\n\n Parameters\n ----------\n left : Union[SparseArray, ndarray]\n right : Union[SparseArray, ndarray]\n op : Callable\n The binary operation to perform\n name str\n Name of the callable.\n\n Returns\n -------\n SparseArr...
Please provide a description of the function:def _wrap_result(name, data, sparse_index, fill_value, dtype=None): if name.startswith('__'): # e.g. __eq__ --> eq name = name[2:-2] if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'): dtype = np.bool fill_value = lib.item_from_zerodim...
[ "\n wrap op result to have correct dtype\n " ]
def _maybe_to_sparse(array):
    """Normalize input to a SparseArray.

    A SparseSeries is unwrapped to a copy of its underlying values;
    anything else is returned unchanged.
    """
    if not isinstance(array, ABCSparseSeries):
        return array
    return array.values.copy()
[ "\n array must be SparseSeries or SparseArray\n " ]
Please provide a description of the function:def _sanitize_values(arr): if hasattr(arr, 'values'): arr = arr.values else: # scalar if is_scalar(arr): arr = [arr] # ndarray if isinstance(arr, np.ndarray): pass elif is_list_like(arr)...
[ "\n return an ndarray for our input,\n in a platform independent manner\n " ]
Please provide a description of the function:def make_sparse(arr, kind='block', fill_value=None, dtype=None, copy=False): arr = _sanitize_values(arr) if arr.ndim > 1: raise TypeError("expected dimension <= 1 data") if fill_value is None: fill_value = na_value_for_dtype(arr.dtype) ...
[ "\n Convert ndarray to sparse format\n\n Parameters\n ----------\n arr : ndarray\n kind : {'block', 'integer'}\n fill_value : NaN or another value\n dtype : np.dtype, optional\n copy : bool, default False\n\n Returns\n -------\n (sparse_values, index, fill_value) : (ndarray, SparseI...
def density(self):
    """The fraction of points that are not the fill value, as a decimal.

    Examples
    --------
    >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
    >>> s.density
    0.6
    """
    index = self.sp_index
    return float(index.npoints) / float(index.length)
[ "\n The percent of non- ``fill_value`` points, as decimal.\n\n Examples\n --------\n >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)\n >>> s.density\n 0.6\n " ]
Please provide a description of the function:def fillna(self, value=None, method=None, limit=None): if ((method is None and value is None) or (method is not None and value is not None)): raise ValueError("Must specify one of 'method' or 'value'.") elif method is not...
[ "\n Fill missing values with `value`.\n\n Parameters\n ----------\n value : scalar, optional\n method : str, optional\n\n .. warning::\n\n Using 'method' will result in high memory use,\n as all `fill_value` methods will be converted to\n ...
Please provide a description of the function:def _first_fill_value_loc(self): if len(self) == 0 or self.sp_index.npoints == len(self): return -1 indices = self.sp_index.to_int_index().indices if not len(indices) or indices[0] > 0: return 0 diff = indice...
[ "\n Get the location of the first missing value.\n\n Returns\n -------\n int\n " ]
Please provide a description of the function:def value_counts(self, dropna=True): from pandas import Index, Series keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if f...
[ "\n Returns a Series containing counts of unique values.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include counts of NaN, even if NaN is in sp_values.\n\n Returns\n -------\n counts : Series\n " ]
Please provide a description of the function:def astype(self, dtype=None, copy=True): dtype = self.dtype.update_dtype(dtype) subtype = dtype._subtype_with_str sp_values = astype_nansafe(self.sp_values, subtype, copy=c...
[ "\n Change the dtype of a SparseArray.\n\n The output will always be a SparseArray. To convert to a dense\n ndarray with a certain dtype, use :meth:`numpy.asarray`.\n\n Parameters\n ----------\n dtype : np.dtype or ExtensionDtype\n For SparseDtype, this changes t...
Please provide a description of the function:def map(self, mapper): # this is used in apply. # We get hit since we're an "is_extension_type" but regular extension # types are not hit. This may be worth adding to the interface. if isinstance(mapper, ABCSeries): mapper...
[ "\n Map categories using input correspondence (dict, Series, or function).\n\n Parameters\n ----------\n mapper : dict, Series, callable\n The correspondence from old values to new.\n\n Returns\n -------\n SparseArray\n The output array will hav...
def all(self, axis=None, *args, **kwargs):
    """Test whether all elements evaluate to True.

    Returns
    -------
    all : bool

    See Also
    --------
    numpy.all
    """
    nv.validate_all(args, kwargs)
    stored = self.sp_values
    # If some positions are filled (not stored) and the fill value is
    # falsy, the answer is immediately False.
    has_fill_positions = len(stored) != len(self)
    if has_fill_positions and not np.all(self.fill_value):
        return False
    return stored.all()
[ "\n Tests whether all elements evaluate True\n\n Returns\n -------\n all : bool\n\n See Also\n --------\n numpy.all\n " ]
def any(self, axis=0, *args, **kwargs):
    """Test whether at least one element evaluates to True.

    Returns
    -------
    any : bool

    See Also
    --------
    numpy.any
    """
    nv.validate_any(args, kwargs)
    stored = self.sp_values
    # If some positions are filled (not stored) and the fill value is
    # truthy, the answer is immediately True.
    has_fill_positions = len(stored) != len(self)
    if has_fill_positions and np.any(self.fill_value):
        return True
    return stored.any().item()
[ "\n Tests whether at least one of elements evaluate True\n\n Returns\n -------\n any : bool\n\n See Also\n --------\n numpy.any\n " ]
Please provide a description of the function:def sum(self, axis=0, *args, **kwargs): nv.validate_sum(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() if self._null_fill_value: return sp_sum else: nsparse = self.sp_index.n...
[ "\n Sum of non-NA/null values\n\n Returns\n -------\n sum : float\n " ]
Please provide a description of the function:def cumsum(self, axis=0, *args, **kwargs): nv.validate_cumsum(args, kwargs) if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour. raise ValueError("axis(={axis}) out of bounds".format(axis=axis)) if not self._nu...
[ "\n Cumulative sum of non-NA/null values.\n\n When performing the cumulative summation, any non-NA/null values will\n be skipped. The resulting SparseArray will preserve the locations of\n NaN values, but the fill value will be `np.nan` regardless.\n\n Parameters\n --------...
Please provide a description of the function:def mean(self, axis=0, *args, **kwargs): nv.validate_mean(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() ct = len(valid_vals) if self._null_fill_value: return sp_sum / ct else: ...
[ "\n Mean of non-NA/null values\n\n Returns\n -------\n mean : float\n " ]
Please provide a description of the function:def tokenize_string(source): line_reader = StringIO(source).readline token_generator = tokenize.generate_tokens(line_reader) # Loop over all tokens till a backtick (`) is found. # Then, take all tokens till the next backtick to form a backtick quoted ...
[ "Tokenize a Python source code string.\n\n Parameters\n ----------\n source : str\n A Python source code string\n " ]