Dataset schema (field: type, observed value-length range):

repo: string, length 7 to 55
path: string, length 4 to 223
func_name: string, length 1 to 134
original_string: string, length 75 to 104k
language: string, 1 distinct value
code: string, length 75 to 104k
code_tokens: list, length 19 to 28.4k
docstring: string, length 1 to 46.9k
docstring_tokens: list, length 1 to 1.97k
sha: string, length 40
url: string, length 87 to 315
partition: string, 1 distinct value
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.all
language: python
code:

    def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
        """Return whether all elements are True over requested axis

        Note:
            If axis=None or axis=0, this call applies df.all(axis=1)
            to the transpose of df.
        """
        if axis is not None:
            axis = self._get_axis_number(axis)
            if bool_only and axis == 0:
                if hasattr(self, "dtype"):
                    raise NotImplementedError(
                        "{}.{} does not implement numeric_only.".format(
                            self.__name__, "all"
                        )
                    )
                data_for_compute = self[self.columns[self.dtypes == np.bool]]
                return data_for_compute.all(
                    axis=axis, bool_only=False, skipna=skipna, level=level, **kwargs
                )
            return self._reduce_dimension(
                self._query_compiler.all(
                    axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
                )
            )
        else:
            if bool_only:
                raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
            # Reduce to a scalar if axis is None.
            result = self._reduce_dimension(
                self._query_compiler.all(
                    axis=0, bool_only=bool_only, skipna=skipna, level=level, **kwargs
                )
            )
            if isinstance(result, BasePandasDataset):
                return result.all(
                    axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
                )
            return result
[ "def", "all", "(", "self", ",", "axis", "=", "0", ",", "bool_only", "=", "None", ",", "skipna", "=", "True", ",", "level", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "axis", "is", "not", "None", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "bool_only", "and", "axis", "==", "0", ":", "if", "hasattr", "(", "self", ",", "\"dtype\"", ")", ":", "raise", "NotImplementedError", "(", "\"{}.{} does not implement numeric_only.\"", ".", "format", "(", "self", ".", "__name__", ",", "\"all\"", ")", ")", "data_for_compute", "=", "self", "[", "self", ".", "columns", "[", "self", ".", "dtypes", "==", "np", ".", "bool", "]", "]", "return", "data_for_compute", ".", "all", "(", "axis", "=", "axis", ",", "bool_only", "=", "False", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "all", "(", "axis", "=", "axis", ",", "bool_only", "=", "bool_only", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "*", "*", "kwargs", ")", ")", "else", ":", "if", "bool_only", ":", "raise", "ValueError", "(", "\"Axis must be 0 or 1 (got {})\"", ".", "format", "(", "axis", ")", ")", "# Reduce to a scalar if axis is None.\r", "result", "=", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "all", "(", "axis", "=", "0", ",", "bool_only", "=", "bool_only", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "*", "*", "kwargs", ")", ")", "if", "isinstance", "(", "result", ",", "BasePandasDataset", ")", ":", "return", "result", ".", "all", "(", "axis", "=", "axis", ",", "bool_only", "=", "bool_only", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "*", "*", "kwargs", ")", "return", "result" ]
Return whether all elements are True over requested axis Note: If axis=None or axis=0, this call applies df.all(axis=1) to the transpose of df.
[ "Return", "whether", "all", "elements", "are", "True", "over", "requested", "axis", "Note", ":", "If", "axis", "=", "None", "or", "axis", "=", "0", "this", "call", "applies", "df", ".", "all", "(", "axis", "=", "1", ")", "to", "the", "transpose", "of", "df", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L377-L415
partition: train
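A minimal usage sketch (toy data assumed; requires modin installed, and per the docstring the behavior mirrors pandas.DataFrame.all):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [True, True], "b": [True, False]})
    print(df.all())           # per-column reduction: a -> True, b -> False
    print(df.all(axis=None))  # reduces over both axes to a single scalar: False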
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.apply
language: python
code:

    def apply(
        self,
        func,
        axis=0,
        broadcast=None,
        raw=False,
        reduce=None,
        result_type=None,
        convert_dtype=True,
        args=(),
        **kwds
    ):
        """Apply a function along input axis of DataFrame.

        Args:
            func: The function to apply
            axis: The axis over which to apply the func.
            broadcast: Whether or not to broadcast.
            raw: Whether or not to convert to a Series.
            reduce: Whether or not to try to apply reduction procedures.

        Returns:
            Series or DataFrame, depending on func.
        """
        axis = self._get_axis_number(axis)
        ErrorMessage.non_verified_udf()
        if isinstance(func, string_types):
            if axis == 1:
                kwds["axis"] = axis
            result = self._string_function(func, *args, **kwds)
            # Sometimes we can return a scalar here
            if isinstance(result, BasePandasDataset):
                return result._query_compiler
            return result
        elif isinstance(func, dict):
            if axis == 1:
                raise TypeError(
                    "(\"'dict' object is not callable\", "
                    "'occurred at index {0}'".format(self.index[0])
                )
            if len(self.columns) != len(set(self.columns)):
                warnings.warn(
                    "duplicate column names not supported with apply().",
                    FutureWarning,
                    stacklevel=2,
                )
        elif not callable(func) and not is_list_like(func):
            raise TypeError("{} object is not callable".format(type(func)))
        query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
        return query_compiler
[ "def", "apply", "(", "self", ",", "func", ",", "axis", "=", "0", ",", "broadcast", "=", "None", ",", "raw", "=", "False", ",", "reduce", "=", "None", ",", "result_type", "=", "None", ",", "convert_dtype", "=", "True", ",", "args", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "ErrorMessage", ".", "non_verified_udf", "(", ")", "if", "isinstance", "(", "func", ",", "string_types", ")", ":", "if", "axis", "==", "1", ":", "kwds", "[", "\"axis\"", "]", "=", "axis", "result", "=", "self", ".", "_string_function", "(", "func", ",", "*", "args", ",", "*", "*", "kwds", ")", "# Sometimes we can return a scalar here\r", "if", "isinstance", "(", "result", ",", "BasePandasDataset", ")", ":", "return", "result", ".", "_query_compiler", "return", "result", "elif", "isinstance", "(", "func", ",", "dict", ")", ":", "if", "axis", "==", "1", ":", "raise", "TypeError", "(", "\"(\\\"'dict' object is not callable\\\", \"", "\"'occurred at index {0}'\"", ".", "format", "(", "self", ".", "index", "[", "0", "]", ")", ")", "if", "len", "(", "self", ".", "columns", ")", "!=", "len", "(", "set", "(", "self", ".", "columns", ")", ")", ":", "warnings", ".", "warn", "(", "\"duplicate column names not supported with apply().\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ",", ")", "elif", "not", "callable", "(", "func", ")", "and", "not", "is_list_like", "(", "func", ")", ":", "raise", "TypeError", "(", "\"{} object is not callable\"", ".", "format", "(", "type", "(", "func", ")", ")", ")", "query_compiler", "=", "self", ".", "_query_compiler", ".", "apply", "(", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwds", ")", "return", "query_compiler" ]
Apply a function along input axis of DataFrame. Args: func: The function to apply axis: The axis over which to apply the func. broadcast: Whether or not to broadcast. raw: Whether or not to convert to a Series. reduce: Whether or not to try to apply reduction procedures. Returns: Series or DataFrame, depending on func.
[ "Apply", "a", "function", "along", "input", "axis", "of", "DataFrame", ".", "Args", ":", "func", ":", "The", "function", "to", "apply", "axis", ":", "The", "axis", "over", "which", "to", "apply", "the", "func", ".", "broadcast", ":", "Whether", "or", "not", "to", "broadcast", ".", "raw", ":", "Whether", "or", "not", "to", "convert", "to", "a", "Series", ".", "reduce", ":", "Whether", "or", "not", "to", "try", "to", "apply", "reduction", "procedures", ".", "Returns", ":", "Series", "or", "DataFrame", "depending", "on", "func", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L457-L506
partition: train
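A brief sketch of the call patterns this method dispatches on (toy data assumed; string names and callables behave as in pandas):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    print(df.apply("sum"))                              # string function name
    print(df.apply(lambda col: col.max() - col.min()))  # callable, applied per column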
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.bfill
language: python
code:

    def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
        """Synonym for DataFrame.fillna(method='bfill')"""
        return self.fillna(
            method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
        )
[ "def", "bfill", "(", "self", ",", "axis", "=", "None", ",", "inplace", "=", "False", ",", "limit", "=", "None", ",", "downcast", "=", "None", ")", ":", "return", "self", ".", "fillna", "(", "method", "=", "\"bfill\"", ",", "axis", "=", "axis", ",", "limit", "=", "limit", ",", "downcast", "=", "downcast", ",", "inplace", "=", "inplace", ")" ]
Synonym for DataFrame.fillna(method='bfill')
[ "Synonym", "for", "DataFrame", ".", "fillna", "(", "method", "=", "bfill", ")" ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L577-L581
partition: train
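A usage sketch (toy data assumed): each NaN takes the next valid value below it, since bfill simply forwards to fillna(method="bfill"):

    import numpy as np
    import modin.pandas as pd

    df = pd.DataFrame({"a": [np.nan, 2.0, np.nan, 4.0]})
    print(df.bfill())  # a -> 2.0, 2.0, 4.0, 4.0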
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.bool
language: python
code:

    def bool(self):
        """Return the bool of a single element PandasObject.

        This must be a boolean scalar value, either True or False. Raise a
        ValueError if the PandasObject does not have exactly 1 element, or that
        element is not boolean
        """
        shape = self.shape
        if shape != (1,) and shape != (1, 1):
            raise ValueError(
                """The PandasObject does not have exactly
                1 element. Return the bool of a single
                element PandasObject. The truth value is
                ambiguous. Use a.empty, a.item(), a.any()
                or a.all()."""
            )
        else:
            return self._to_pandas().bool()
[ "def", "bool", "(", "self", ")", ":", "shape", "=", "self", ".", "shape", "if", "shape", "!=", "(", "1", ",", ")", "and", "shape", "!=", "(", "1", ",", "1", ")", ":", "raise", "ValueError", "(", "\"\"\"The PandasObject does not have exactly\r\n 1 element. Return the bool of a single\r\n element PandasObject. The truth value is\r\n ambiguous. Use a.empty, a.item(), a.any()\r\n or a.all().\"\"\"", ")", "else", ":", "return", "self", ".", "_to_pandas", "(", ")", ".", "bool", "(", ")" ]
Return the bool of a single element PandasObject. This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean
[ "Return", "the", "bool", "of", "a", "single", "element", "PandasObject", ".", "This", "must", "be", "a", "boolean", "scalar", "value", "either", "True", "or", "False", ".", "Raise", "a", "ValueError", "if", "the", "PandasObject", "does", "not", "have", "exactly", "1", "element", "or", "that", "element", "is", "not", "boolean" ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L583-L600
partition: train
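A sketch (toy data assumed): bool() only succeeds on a frame with exactly one boolean element:

    import modin.pandas as pd

    print(pd.DataFrame([[True]]).bool())  # True
    # pd.DataFrame([[True, False]]).bool() raises ValueError: shape is (1, 2)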
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.copy
language: python
code:

    def copy(self, deep=True):
        """Creates a shallow copy of the DataFrame.

        Returns:
            A new DataFrame pointing to the same partitions as this one.
        """
        return self.__constructor__(query_compiler=self._query_compiler.copy())
[ "def", "copy", "(", "self", ",", "deep", "=", "True", ")", ":", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "copy", "(", ")", ")" ]
Creates a shallow copy of the DataFrame. Returns: A new DataFrame pointing to the same partitions as this one.
[ "Creates", "a", "shallow", "copy", "of", "the", "DataFrame", ".", "Returns", ":", "A", "new", "DataFrame", "pointing", "to", "the", "same", "partitions", "as", "this", "one", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L647-L653
partition: train
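A sketch (toy data assumed). Note that in this snapshot the deep argument is accepted but never used, matching the docstring's shallow-copy description:

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3]})
    df2 = df.copy()
    print(df2 is df)  # False: a new frame backed by the same partitions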
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.count
language: python
code:

    def count(self, axis=0, level=None, numeric_only=False):
        """Get the count of non-null objects in the DataFrame.

        Arguments:
            axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
            level: If the axis is a MultiIndex (hierarchical), count along a
                particular level, collapsing into a DataFrame.
            numeric_only: Include only float, int, boolean data

        Returns:
            The count, in a Series (or DataFrame if level is specified).
        """
        axis = self._get_axis_number(axis) if axis is not None else 0
        return self._reduce_dimension(
            self._query_compiler.count(
                axis=axis, level=level, numeric_only=numeric_only
            )
        )
[ "def", "count", "(", "self", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "numeric_only", "=", "False", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "count", "(", "axis", "=", "axis", ",", "level", "=", "level", ",", "numeric_only", "=", "numeric_only", ")", ")" ]
Get the count of non-null objects in the DataFrame. Arguments: axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise. level: If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a DataFrame. numeric_only: Include only float, int, boolean data Returns: The count, in a Series (or DataFrame if level is specified).
[ "Get", "the", "count", "of", "non", "-", "null", "objects", "in", "the", "DataFrame", ".", "Arguments", ":", "axis", ":", "0", "or", "index", "for", "row", "-", "wise", "1", "or", "columns", "for", "column", "-", "wise", ".", "level", ":", "If", "the", "axis", "is", "a", "MultiIndex", "(", "hierarchical", ")", "count", "along", "a", "particular", "level", "collapsing", "into", "a", "DataFrame", ".", "numeric_only", ":", "Include", "only", "float", "int", "boolean", "data", "Returns", ":", "The", "count", "in", "a", "Series", "(", "or", "DataFrame", "if", "level", "is", "specified", ")", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L655-L672
partition: train
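A usage sketch (toy data assumed; semantics follow pandas):

    import numpy as np
    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, np.nan, 3], "b": [4, 5, 6]})
    print(df.count())        # non-null count per column: a -> 2, b -> 3
    print(df.count(axis=1))  # non-null count per row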
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.cummax
language: python
code:

    def cummax(self, axis=None, skipna=True, *args, **kwargs):
        """Perform a cumulative maximum across the DataFrame.

        Args:
            axis (int): The axis to take maximum on.
            skipna (bool): True to skip NA values, false otherwise.

        Returns:
            The cumulative maximum of the DataFrame.
        """
        axis = self._get_axis_number(axis) if axis is not None else 0
        if axis:
            self._validate_dtypes()
        return self.__constructor__(
            query_compiler=self._query_compiler.cummax(
                axis=axis, skipna=skipna, **kwargs
            )
        )
[ "def", "cummax", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "if", "axis", ":", "self", ".", "_validate_dtypes", "(", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "cummax", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "*", "*", "kwargs", ")", ")" ]
Perform a cumulative maximum across the DataFrame. Args: axis (int): The axis to take maximum on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative maximum of the DataFrame.
[ "Perform", "a", "cumulative", "maximum", "across", "the", "DataFrame", ".", "Args", ":", "axis", "(", "int", ")", ":", "The", "axis", "to", "take", "maximum", "on", ".", "skipna", "(", "bool", ")", ":", "True", "to", "skip", "NA", "values", "false", "otherwise", ".", "Returns", ":", "The", "cumulative", "maximum", "of", "the", "DataFrame", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L674-L691
partition: train
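A usage sketch (toy data assumed):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 3, 2], "b": [5, 4, 6]})
    print(df.cummax())  # running maximum down each column: a -> 1, 3, 3; b -> 5, 5, 6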
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.cumprod
language: python
code:

    def cumprod(self, axis=None, skipna=True, *args, **kwargs):
        """Perform a cumulative product across the DataFrame.

        Args:
            axis (int): The axis to take product on.
            skipna (bool): True to skip NA values, false otherwise.

        Returns:
            The cumulative product of the DataFrame.
        """
        axis = self._get_axis_number(axis) if axis is not None else 0
        self._validate_dtypes(numeric_only=True)
        return self.__constructor__(
            query_compiler=self._query_compiler.cumprod(
                axis=axis, skipna=skipna, **kwargs
            )
        )
[ "def", "cumprod", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "self", ".", "_validate_dtypes", "(", "numeric_only", "=", "True", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "cumprod", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "*", "*", "kwargs", ")", ")" ]
Perform a cumulative product across the DataFrame. Args: axis (int): The axis to take product on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative product of the DataFrame.
[ "Perform", "a", "cumulative", "product", "across", "the", "DataFrame", ".", "Args", ":", "axis", "(", "int", ")", ":", "The", "axis", "to", "take", "product", "on", ".", "skipna", "(", "bool", ")", ":", "True", "to", "skip", "NA", "values", "false", "otherwise", ".", "Returns", ":", "The", "cumulative", "product", "of", "the", "DataFrame", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L712-L728
partition: train
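A usage sketch (toy data assumed; note the numeric-only dtype validation above):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
    print(df.cumprod())  # a -> 1, 2, 6; b -> 2, 4, 8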
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.describe
language: python
code:

    def describe(self, percentiles=None, include=None, exclude=None):
        """
        Generates descriptive statistics that summarize the central tendency,
        dispersion and shape of a dataset's distribution, excluding NaN values.

        Args:
            percentiles (list-like of numbers, optional):
                The percentiles to include in the output.
            include: White-list of data types to include in results
            exclude: Black-list of data types to exclude in results

        Returns: Series/DataFrame of summary statistics
        """
        if include is not None and (isinstance(include, np.dtype) or include != "all"):
            if not is_list_like(include):
                include = [include]
            include = [
                np.dtype(i)
                if not (isinstance(i, type) and i.__module__ == "numpy")
                else i
                for i in include
            ]
            if not any(
                (isinstance(inc, np.dtype) and inc == d)
                or (
                    not isinstance(inc, np.dtype)
                    and inc.__subclasscheck__(getattr(np, d.__str__()))
                )
                for d in self._get_dtypes()
                for inc in include
            ):
                # This is the error that pandas throws.
                raise ValueError("No objects to concatenate")
        if exclude is not None:
            if not is_list_like(exclude):
                exclude = [exclude]
            exclude = [np.dtype(e) for e in exclude]
            if all(
                (isinstance(exc, np.dtype) and exc == d)
                or (
                    not isinstance(exc, np.dtype)
                    and exc.__subclasscheck__(getattr(np, d.__str__()))
                )
                for d in self._get_dtypes()
                for exc in exclude
            ):
                # This is the error that pandas throws.
                raise ValueError("No objects to concatenate")
        if percentiles is not None:
            pandas.DataFrame()._check_percentile(percentiles)
        return self.__constructor__(
            query_compiler=self._query_compiler.describe(
                percentiles=percentiles, include=include, exclude=exclude
            )
        )
[ "def", "describe", "(", "self", ",", "percentiles", "=", "None", ",", "include", "=", "None", ",", "exclude", "=", "None", ")", ":", "if", "include", "is", "not", "None", "and", "(", "isinstance", "(", "include", ",", "np", ".", "dtype", ")", "or", "include", "!=", "\"all\"", ")", ":", "if", "not", "is_list_like", "(", "include", ")", ":", "include", "=", "[", "include", "]", "include", "=", "[", "np", ".", "dtype", "(", "i", ")", "if", "not", "(", "isinstance", "(", "i", ",", "type", ")", "and", "i", ".", "__module__", "==", "\"numpy\"", ")", "else", "i", "for", "i", "in", "include", "]", "if", "not", "any", "(", "(", "isinstance", "(", "inc", ",", "np", ".", "dtype", ")", "and", "inc", "==", "d", ")", "or", "(", "not", "isinstance", "(", "inc", ",", "np", ".", "dtype", ")", "and", "inc", ".", "__subclasscheck__", "(", "getattr", "(", "np", ",", "d", ".", "__str__", "(", ")", ")", ")", ")", "for", "d", "in", "self", ".", "_get_dtypes", "(", ")", "for", "inc", "in", "include", ")", ":", "# This is the error that pandas throws.\r", "raise", "ValueError", "(", "\"No objects to concatenate\"", ")", "if", "exclude", "is", "not", "None", ":", "if", "not", "is_list_like", "(", "exclude", ")", ":", "exclude", "=", "[", "exclude", "]", "exclude", "=", "[", "np", ".", "dtype", "(", "e", ")", "for", "e", "in", "exclude", "]", "if", "all", "(", "(", "isinstance", "(", "exc", ",", "np", ".", "dtype", ")", "and", "exc", "==", "d", ")", "or", "(", "not", "isinstance", "(", "exc", ",", "np", ".", "dtype", ")", "and", "exc", ".", "__subclasscheck__", "(", "getattr", "(", "np", ",", "d", ".", "__str__", "(", ")", ")", ")", ")", "for", "d", "in", "self", ".", "_get_dtypes", "(", ")", "for", "exc", "in", "exclude", ")", ":", "# This is the error that pandas throws.\r", "raise", "ValueError", "(", "\"No objects to concatenate\"", ")", "if", "percentiles", "is", "not", "None", ":", "pandas", ".", "DataFrame", "(", ")", ".", "_check_percentile", "(", "percentiles", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "describe", "(", "percentiles", "=", "percentiles", ",", "include", "=", "include", ",", "exclude", "=", "exclude", ")", ")" ]
Generates descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding NaN values. Args: percentiles (list-like of numbers, optional): The percentiles to include in the output. include: White-list of data types to include in results exclude: Black-list of data types to exclude in results Returns: Series/DataFrame of summary statistics
[ "Generates", "descriptive", "statistics", "that", "summarize", "the", "central", "tendency", "dispersion", "and", "shape", "of", "a", "dataset", "s", "distribution", "excluding", "NaN", "values", ".", "Args", ":", "percentiles", "(", "list", "-", "like", "of", "numbers", "optional", ")", ":", "The", "percentiles", "to", "include", "in", "the", "output", ".", "include", ":", "White", "-", "list", "of", "data", "types", "to", "include", "in", "results", "exclude", ":", "Black", "-", "list", "of", "data", "types", "to", "exclude", "in", "results", "Returns", ":", "Series", "/", "DataFrame", "of", "summary", "statistics" ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L748-L802
partition: train
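A usage sketch (toy data assumed; include/exclude and percentiles behave as in pandas):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["x", "y", "x", "z"]})
    print(df.describe())                        # numeric columns only
    print(df.describe(include="all"))           # include object columns too
    print(df.describe(percentiles=[0.1, 0.9]))  # custom percentiles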
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.diff
language: python
code:

    def diff(self, periods=1, axis=0):
        """Finds the difference between elements on the axis requested

        Args:
            periods: Periods to shift for forming difference
            axis: Take difference over rows or columns

        Returns:
            DataFrame with the diff applied
        """
        axis = self._get_axis_number(axis)
        return self.__constructor__(
            query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
        )
[ "def", "diff", "(", "self", ",", "periods", "=", "1", ",", "axis", "=", "0", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "diff", "(", "periods", "=", "periods", ",", "axis", "=", "axis", ")", ")" ]
Finds the difference between elements on the axis requested Args: periods: Periods to shift for forming difference axis: Take difference over rows or columns Returns: DataFrame with the diff applied
[ "Finds", "the", "difference", "between", "elements", "on", "the", "axis", "requested", "Args", ":", "periods", ":", "Periods", "to", "shift", "for", "forming", "difference", "axis", ":", "Take", "difference", "over", "rows", "or", "columns", "Returns", ":", "DataFrame", "with", "the", "diff", "applied" ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L804-L817
partition: train
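A usage sketch (toy data assumed):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 4, 9]})
    print(df.diff())           # a -> NaN, 3, 5
    print(df.diff(periods=2))  # a -> NaN, NaN, 8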
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.drop
language: python
code:

    def drop(
        self,
        labels=None,
        axis=0,
        index=None,
        columns=None,
        level=None,
        inplace=False,
        errors="raise",
    ):
        """Return new object with labels in requested axis removed.

        Args:
            labels: Index or column labels to drop.
            axis: Whether to drop labels from the index (0 / 'index') or
                columns (1 / 'columns').
            index, columns: Alternative to specifying axis (labels, axis=1 is
                equivalent to columns=labels).
            level: For MultiIndex
            inplace: If True, do operation inplace and return None.
            errors: If 'ignore', suppress error and existing labels are dropped.

        Returns:
            dropped : type of caller
        """
        # TODO implement level
        if level is not None:
            return self._default_to_pandas(
                "drop",
                labels=labels,
                axis=axis,
                index=index,
                columns=columns,
                level=level,
                inplace=inplace,
                errors=errors,
            )
        inplace = validate_bool_kwarg(inplace, "inplace")
        if labels is not None:
            if index is not None or columns is not None:
                raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
            axis = pandas.DataFrame()._get_axis_name(axis)
            axes = {axis: labels}
        elif index is not None or columns is not None:
            axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
                (index, columns), {}
            )
        else:
            raise ValueError(
                "Need to specify at least one of 'labels', 'index' or 'columns'"
            )
        # TODO Clean up this error checking
        if "index" not in axes:
            axes["index"] = None
        elif axes["index"] is not None:
            if not is_list_like(axes["index"]):
                axes["index"] = [axes["index"]]
            if errors == "raise":
                non_existant = [obj for obj in axes["index"] if obj not in self.index]
                if len(non_existant):
                    raise ValueError(
                        "labels {} not contained in axis".format(non_existant)
                    )
            else:
                axes["index"] = [obj for obj in axes["index"] if obj in self.index]
                # If the length is zero, we will just do nothing
                if not len(axes["index"]):
                    axes["index"] = None
        if "columns" not in axes:
            axes["columns"] = None
        elif axes["columns"] is not None:
            if not is_list_like(axes["columns"]):
                axes["columns"] = [axes["columns"]]
            if errors == "raise":
                non_existant = [
                    obj for obj in axes["columns"] if obj not in self.columns
                ]
                if len(non_existant):
                    raise ValueError(
                        "labels {} not contained in axis".format(non_existant)
                    )
            else:
                axes["columns"] = [
                    obj for obj in axes["columns"] if obj in self.columns
                ]
                # If the length is zero, we will just do nothing
                if not len(axes["columns"]):
                    axes["columns"] = None
        new_query_compiler = self._query_compiler.drop(
            index=axes["index"], columns=axes["columns"]
        )
        return self._create_or_update_from_compiler(new_query_compiler, inplace)
[ "def", "drop", "(", "self", ",", "labels", "=", "None", ",", "axis", "=", "0", ",", "index", "=", "None", ",", "columns", "=", "None", ",", "level", "=", "None", ",", "inplace", "=", "False", ",", "errors", "=", "\"raise\"", ",", ")", ":", "# TODO implement level\r", "if", "level", "is", "not", "None", ":", "return", "self", ".", "_default_to_pandas", "(", "\"drop\"", ",", "labels", "=", "labels", ",", "axis", "=", "axis", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "level", "=", "level", ",", "inplace", "=", "inplace", ",", "errors", "=", "errors", ",", ")", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "if", "labels", "is", "not", "None", ":", "if", "index", "is", "not", "None", "or", "columns", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot specify both 'labels' and 'index'/'columns'\"", ")", "axis", "=", "pandas", ".", "DataFrame", "(", ")", ".", "_get_axis_name", "(", "axis", ")", "axes", "=", "{", "axis", ":", "labels", "}", "elif", "index", "is", "not", "None", "or", "columns", "is", "not", "None", ":", "axes", ",", "_", "=", "pandas", ".", "DataFrame", "(", ")", ".", "_construct_axes_from_arguments", "(", "(", "index", ",", "columns", ")", ",", "{", "}", ")", "else", ":", "raise", "ValueError", "(", "\"Need to specify at least one of 'labels', 'index' or 'columns'\"", ")", "# TODO Clean up this error checking\r", "if", "\"index\"", "not", "in", "axes", ":", "axes", "[", "\"index\"", "]", "=", "None", "elif", "axes", "[", "\"index\"", "]", "is", "not", "None", ":", "if", "not", "is_list_like", "(", "axes", "[", "\"index\"", "]", ")", ":", "axes", "[", "\"index\"", "]", "=", "[", "axes", "[", "\"index\"", "]", "]", "if", "errors", "==", "\"raise\"", ":", "non_existant", "=", "[", "obj", "for", "obj", "in", "axes", "[", "\"index\"", "]", "if", "obj", "not", "in", "self", ".", "index", "]", "if", "len", "(", "non_existant", ")", ":", "raise", "ValueError", "(", "\"labels {} not contained in axis\"", ".", "format", "(", "non_existant", ")", ")", "else", ":", "axes", "[", "\"index\"", "]", "=", "[", "obj", "for", "obj", "in", "axes", "[", "\"index\"", "]", "if", "obj", "in", "self", ".", "index", "]", "# If the length is zero, we will just do nothing\r", "if", "not", "len", "(", "axes", "[", "\"index\"", "]", ")", ":", "axes", "[", "\"index\"", "]", "=", "None", "if", "\"columns\"", "not", "in", "axes", ":", "axes", "[", "\"columns\"", "]", "=", "None", "elif", "axes", "[", "\"columns\"", "]", "is", "not", "None", ":", "if", "not", "is_list_like", "(", "axes", "[", "\"columns\"", "]", ")", ":", "axes", "[", "\"columns\"", "]", "=", "[", "axes", "[", "\"columns\"", "]", "]", "if", "errors", "==", "\"raise\"", ":", "non_existant", "=", "[", "obj", "for", "obj", "in", "axes", "[", "\"columns\"", "]", "if", "obj", "not", "in", "self", ".", "columns", "]", "if", "len", "(", "non_existant", ")", ":", "raise", "ValueError", "(", "\"labels {} not contained in axis\"", ".", "format", "(", "non_existant", ")", ")", "else", ":", "axes", "[", "\"columns\"", "]", "=", "[", "obj", "for", "obj", "in", "axes", "[", "\"columns\"", "]", "if", "obj", "in", "self", ".", "columns", "]", "# If the length is zero, we will just do nothing\r", "if", "not", "len", "(", "axes", "[", "\"columns\"", "]", ")", ":", "axes", "[", "\"columns\"", "]", "=", "None", "new_query_compiler", "=", "self", ".", "_query_compiler", ".", "drop", "(", "index", "=", "axes", "[", "\"index\"", "]", ",", "columns", "=", "axes", "[", "\"columns\"", "]", ")", 
"return", "self", ".", "_create_or_update_from_compiler", "(", "new_query_compiler", ",", "inplace", ")" ]
Return new object with labels in requested axis removed. Args: labels: Index or column labels to drop. axis: Whether to drop labels from the index (0 / 'index') or columns (1 / 'columns'). index, columns: Alternative to specifying axis (labels, axis=1 is equivalent to columns=labels). level: For MultiIndex inplace: If True, do operation inplace and return None. errors: If 'ignore', suppress error and existing labels are dropped. Returns: dropped : type of caller
[ "Return", "new", "object", "with", "labels", "in", "requested", "axis", "removed", ".", "Args", ":", "labels", ":", "Index", "or", "column", "labels", "to", "drop", ".", "axis", ":", "Whether", "to", "drop", "labels", "from", "the", "index", "(", "0", "/", "index", ")", "or", "columns", "(", "1", "/", "columns", ")", ".", "index", "columns", ":", "Alternative", "to", "specifying", "axis", "(", "labels", "axis", "=", "1", "is", "equivalent", "to", "columns", "=", "labels", ")", ".", "level", ":", "For", "MultiIndex", "inplace", ":", "If", "True", "do", "operation", "inplace", "and", "return", "None", ".", "errors", ":", "If", "ignore", "suppress", "error", "and", "existing", "labels", "are", "dropped", ".", "Returns", ":", "dropped", ":", "type", "of", "caller" ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L824-L918
partition: train
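A usage sketch (toy data assumed; labels/index/columns are mutually exclusive as the validation above enforces):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["r1", "r2"])
    print(df.drop(columns="b"))                 # drop by column label
    print(df.drop("r1"))                        # drop by row label (axis=0 default)
    print(df.drop("missing", errors="ignore"))  # absent labels are skipped, not raised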
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.dropna
language: python
code:

    def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
        """Create a new DataFrame from the removed NA values from this one.

        Args:
            axis (int, tuple, or list): The axis to apply the drop.
            how (str): How to drop the NA values.
                'all': drop the label if all values are NA.
                'any': drop the label if any values are NA.
            thresh (int): The minimum number of NAs to require.
            subset ([label]): Labels to consider from other axis.
            inplace (bool): Change this DataFrame or return a new DataFrame.
                True: Modify the data for this DataFrame, return None.
                False: Create a new DataFrame and return it.

        Returns:
            If inplace is set to True, returns None, otherwise returns a new
            DataFrame with the dropna applied.
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        if is_list_like(axis):
            axis = [self._get_axis_number(ax) for ax in axis]
            result = self
            for ax in axis:
                result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
            return self._create_or_update_from_compiler(result._query_compiler, inplace)
        axis = self._get_axis_number(axis)
        if how is not None and how not in ["any", "all"]:
            raise ValueError("invalid how option: %s" % how)
        if how is None and thresh is None:
            raise TypeError("must specify how or thresh")
        if subset is not None:
            if axis == 1:
                indices = self.index.get_indexer_for(subset)
                check = indices == -1
                if check.any():
                    raise KeyError(list(np.compress(check, subset)))
            else:
                indices = self.columns.get_indexer_for(subset)
                check = indices == -1
                if check.any():
                    raise KeyError(list(np.compress(check, subset)))
        new_query_compiler = self._query_compiler.dropna(
            axis=axis, how=how, thresh=thresh, subset=subset
        )
        return self._create_or_update_from_compiler(new_query_compiler, inplace)
[ "def", "dropna", "(", "self", ",", "axis", "=", "0", ",", "how", "=", "\"any\"", ",", "thresh", "=", "None", ",", "subset", "=", "None", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "if", "is_list_like", "(", "axis", ")", ":", "axis", "=", "[", "self", ".", "_get_axis_number", "(", "ax", ")", "for", "ax", "in", "axis", "]", "result", "=", "self", "for", "ax", "in", "axis", ":", "result", "=", "result", ".", "dropna", "(", "axis", "=", "ax", ",", "how", "=", "how", ",", "thresh", "=", "thresh", ",", "subset", "=", "subset", ")", "return", "self", ".", "_create_or_update_from_compiler", "(", "result", ".", "_query_compiler", ",", "inplace", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "how", "is", "not", "None", "and", "how", "not", "in", "[", "\"any\"", ",", "\"all\"", "]", ":", "raise", "ValueError", "(", "\"invalid how option: %s\"", "%", "how", ")", "if", "how", "is", "None", "and", "thresh", "is", "None", ":", "raise", "TypeError", "(", "\"must specify how or thresh\"", ")", "if", "subset", "is", "not", "None", ":", "if", "axis", "==", "1", ":", "indices", "=", "self", ".", "index", ".", "get_indexer_for", "(", "subset", ")", "check", "=", "indices", "==", "-", "1", "if", "check", ".", "any", "(", ")", ":", "raise", "KeyError", "(", "list", "(", "np", ".", "compress", "(", "check", ",", "subset", ")", ")", ")", "else", ":", "indices", "=", "self", ".", "columns", ".", "get_indexer_for", "(", "subset", ")", "check", "=", "indices", "==", "-", "1", "if", "check", ".", "any", "(", ")", ":", "raise", "KeyError", "(", "list", "(", "np", ".", "compress", "(", "check", ",", "subset", ")", ")", ")", "new_query_compiler", "=", "self", ".", "_query_compiler", ".", "dropna", "(", "axis", "=", "axis", ",", "how", "=", "how", ",", "thresh", "=", "thresh", ",", "subset", "=", "subset", ")", "return", "self", ".", "_create_or_update_from_compiler", "(", "new_query_compiler", ",", "inplace", ")" ]
Create a new DataFrame from the removed NA values from this one. Args: axis (int, tuple, or list): The axis to apply the drop. how (str): How to drop the NA values. 'all': drop the label if all values are NA. 'any': drop the label if any values are NA. thresh (int): The minimum number of NAs to require. subset ([label]): Labels to consider from other axis. inplace (bool): Change this DataFrame or return a new DataFrame. True: Modify the data for this DataFrame, return None. False: Create a new DataFrame and return it. Returns: If inplace is set to True, returns None, otherwise returns a new DataFrame with the dropna applied.
[ "Create", "a", "new", "DataFrame", "from", "the", "removed", "NA", "values", "from", "this", "one", ".", "Args", ":", "axis", "(", "int", "tuple", "or", "list", ")", ":", "The", "axis", "to", "apply", "the", "drop", ".", "how", "(", "str", ")", ":", "How", "to", "drop", "the", "NA", "values", ".", "all", ":", "drop", "the", "label", "if", "all", "values", "are", "NA", ".", "any", ":", "drop", "the", "label", "if", "any", "values", "are", "NA", ".", "thresh", "(", "int", ")", ":", "The", "minimum", "number", "of", "NAs", "to", "require", ".", "subset", "(", "[", "label", "]", ")", ":", "Labels", "to", "consider", "from", "other", "axis", ".", "inplace", "(", "bool", ")", ":", "Change", "this", "DataFrame", "or", "return", "a", "new", "DataFrame", ".", "True", ":", "Modify", "the", "data", "for", "this", "DataFrame", "return", "None", ".", "False", ":", "Create", "a", "new", "DataFrame", "and", "return", "it", ".", "Returns", ":", "If", "inplace", "is", "set", "to", "True", "returns", "None", "otherwise", "returns", "a", "new", "DataFrame", "with", "the", "dropna", "applied", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L920-L967
partition: train
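A usage sketch (toy data assumed):

    import numpy as np
    import modin.pandas as pd

    df = pd.DataFrame({"a": [1.0, np.nan], "b": [np.nan, np.nan]})
    print(df.dropna())                   # how="any": drops every row here
    print(df.dropna(axis=1, how="all"))  # drops only column "b" (all NaN)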
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.drop_duplicates
language: python
code:

    def drop_duplicates(self, keep="first", inplace=False, **kwargs):
        """Return DataFrame with duplicate rows removed, optionally only
        considering certain columns

        Args:
            subset : column label or sequence of labels, optional
                Only consider certain columns for identifying duplicates, by
                default use all of the columns
            keep : {'first', 'last', False}, default 'first'
                - ``first`` : Drop duplicates except for the first occurrence.
                - ``last`` : Drop duplicates except for the last occurrence.
                - False : Drop all duplicates.
            inplace : boolean, default False
                Whether to drop duplicates in place or to return a copy

        Returns:
            deduplicated : DataFrame
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        if kwargs.get("subset", None) is not None:
            duplicates = self.duplicated(keep=keep, **kwargs)
        else:
            duplicates = self.duplicated(keep=keep, **kwargs)
        indices, = duplicates.values.nonzero()
        return self.drop(index=self.index[indices], inplace=inplace)
[ "def", "drop_duplicates", "(", "self", ",", "keep", "=", "\"first\"", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "if", "kwargs", ".", "get", "(", "\"subset\"", ",", "None", ")", "is", "not", "None", ":", "duplicates", "=", "self", ".", "duplicated", "(", "keep", "=", "keep", ",", "*", "*", "kwargs", ")", "else", ":", "duplicates", "=", "self", ".", "duplicated", "(", "keep", "=", "keep", ",", "*", "*", "kwargs", ")", "indices", ",", "=", "duplicates", ".", "values", ".", "nonzero", "(", ")", "return", "self", ".", "drop", "(", "index", "=", "self", ".", "index", "[", "indices", "]", ",", "inplace", "=", "inplace", ")" ]
Return DataFrame with duplicate rows removed, optionally only considering certain columns Args: subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns: deduplicated : DataFrame
[ "Return", "DataFrame", "with", "duplicate", "rows", "removed", "optionally", "only", "considering", "certain", "columns", "Args", ":", "subset", ":", "column", "label", "or", "sequence", "of", "labels", "optional", "Only", "consider", "certain", "columns", "for", "identifying", "duplicates", "by", "default", "use", "all", "of", "the", "columns", "keep", ":", "{", "first", "last", "False", "}", "default", "first", "-", "first", ":", "Drop", "duplicates", "except", "for", "the", "first", "occurrence", ".", "-", "last", ":", "Drop", "duplicates", "except", "for", "the", "last", "occurrence", ".", "-", "False", ":", "Drop", "all", "duplicates", ".", "inplace", ":", "boolean", "default", "False", "Whether", "to", "drop", "duplicates", "in", "place", "or", "to", "return", "a", "copy", "Returns", ":", "deduplicated", ":", "DataFrame" ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L980-L1003
partition: train
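A usage sketch (toy data assumed):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 3, 4]})
    print(df.drop_duplicates())            # keeps the first of each duplicate row
    print(df.drop_duplicates(keep=False))  # drops all rows that have a duplicate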
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.eq
language: python
code:

    def eq(self, other, axis="columns", level=None):
        """Checks element-wise that this is equal to other.

        Args:
            other: A DataFrame or Series or scalar to compare to.
            axis: The axis to perform the eq over.
            level: The Multilevel index level to apply eq over.

        Returns:
            A new DataFrame filled with Booleans.
        """
        return self._binary_op("eq", other, axis=axis, level=level)
[ "def", "eq", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"eq\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ")" ]
Checks element-wise that this is equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the eq over. level: The Multilevel index level to apply eq over. Returns: A new DataFrame filled with Booleans.
[ "Checks", "element", "-", "wise", "that", "this", "is", "equal", "to", "other", ".", "Args", ":", "other", ":", "A", "DataFrame", "or", "Series", "or", "scalar", "to", "compare", "to", ".", "axis", ":", "The", "axis", "to", "perform", "the", "eq", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "eq", "over", ".", "Returns", ":", "A", "new", "DataFrame", "filled", "with", "Booleans", "." ]
sha: 5b77d242596560c646b8405340c9ce64acb183cb
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1008-L1019
partition: train
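A usage sketch (toy data assumed; alignment of list-like other against columns follows the pandas binary-op convention):

    import modin.pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [2, 2]})
    print(df.eq(2))        # element-wise comparison against a scalar
    print(df.eq([1, 2]))   # list aligned along columns: a == 1, b == 2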
repo: modin-project/modin
path: modin/pandas/base.py
func_name: BasePandasDataset.fillna
python
def fillna( self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs ): """Fill NA/NaN values using the specified method. Args: value: Value to use to fill holes. This value cannot be a list. method: Method to use for filling holes in reindexed Series pad. ffill: propagate last valid observation forward to next valid backfill. bfill: use NEXT valid observation to fill gap. axis: 0 or 'index', 1 or 'columns'. inplace: If True, fill in place. Note: this will modify any other views on this object. limit: If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast: A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type. Returns: filled: DataFrame """ # TODO implement value passed as DataFrame/Series if isinstance(value, BasePandasDataset): new_query_compiler = self._default_to_pandas( "fillna", value=value._to_pandas(), method=method, axis=axis, inplace=False, limit=limit, downcast=downcast, **kwargs )._query_compiler return self._create_or_update_from_compiler(new_query_compiler, inplace) inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) if axis is not None else 0 if isinstance(value, (list, tuple)): raise TypeError( '"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__) ) if value is None and method is None: raise ValueError("must specify a fill method or value") if value is not None and method is not None: raise ValueError("cannot specify both a fill method and value") if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]: expecting = "pad (ffill) or backfill (bfill)" msg = "Invalid fill method. Expecting {expecting}. Got {method}".format( expecting=expecting, method=method ) raise ValueError(msg) new_query_compiler = self._query_compiler.fillna( value=value, method=method, axis=axis, inplace=False, limit=limit, downcast=downcast, **kwargs ) return self._create_or_update_from_compiler(new_query_compiler, inplace)
[ "def", "fillna", "(", "self", ",", "value", "=", "None", ",", "method", "=", "None", ",", "axis", "=", "None", ",", "inplace", "=", "False", ",", "limit", "=", "None", ",", "downcast", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# TODO implement value passed as DataFrame/Series\r", "if", "isinstance", "(", "value", ",", "BasePandasDataset", ")", ":", "new_query_compiler", "=", "self", ".", "_default_to_pandas", "(", "\"fillna\"", ",", "value", "=", "value", ".", "_to_pandas", "(", ")", ",", "method", "=", "method", ",", "axis", "=", "axis", ",", "inplace", "=", "False", ",", "limit", "=", "limit", ",", "downcast", "=", "downcast", ",", "*", "*", "kwargs", ")", ".", "_query_compiler", "return", "self", ".", "_create_or_update_from_compiler", "(", "new_query_compiler", ",", "inplace", ")", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'\"value\" parameter must be a scalar or dict, but '", "'you passed a \"{0}\"'", ".", "format", "(", "type", "(", "value", ")", ".", "__name__", ")", ")", "if", "value", "is", "None", "and", "method", "is", "None", ":", "raise", "ValueError", "(", "\"must specify a fill method or value\"", ")", "if", "value", "is", "not", "None", "and", "method", "is", "not", "None", ":", "raise", "ValueError", "(", "\"cannot specify both a fill method and value\"", ")", "if", "method", "is", "not", "None", "and", "method", "not", "in", "[", "\"backfill\"", ",", "\"bfill\"", ",", "\"pad\"", ",", "\"ffill\"", "]", ":", "expecting", "=", "\"pad (ffill) or backfill (bfill)\"", "msg", "=", "\"Invalid fill method. Expecting {expecting}. Got {method}\"", ".", "format", "(", "expecting", "=", "expecting", ",", "method", "=", "method", ")", "raise", "ValueError", "(", "msg", ")", "new_query_compiler", "=", "self", ".", "_query_compiler", ".", "fillna", "(", "value", "=", "value", ",", "method", "=", "method", ",", "axis", "=", "axis", ",", "inplace", "=", "False", ",", "limit", "=", "limit", ",", "downcast", "=", "downcast", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_create_or_update_from_compiler", "(", "new_query_compiler", ",", "inplace", ")" ]
Fill NA/NaN values using the specified method. Args: value: Value to use to fill holes. This value cannot be a list. method: Method to use for filling holes in reindexed Series. pad / ffill: propagate last valid observation forward to next valid. backfill / bfill: use NEXT valid observation to fill gap. axis: 0 or 'index', 1 or 'columns'. inplace: If True, fill in place. Note: this will modify any other views on this object. limit: If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast: A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type. Returns: filled: DataFrame
[ "Fill", "NA", "/", "NaN", "values", "using", "the", "specified", "method", ".", "Args", ":", "value", ":", "Value", "to", "use", "to", "fill", "holes", ".", "This", "value", "cannot", "be", "a", "list", ".", "method", ":", "Method", "to", "use", "for", "filling", "holes", "in", "reindexed", "Series", "pad", ".", "ffill", ":", "propagate", "last", "valid", "observation", "forward", "to", "next", "valid", "backfill", ".", "bfill", ":", "use", "NEXT", "valid", "observation", "to", "fill", "gap", ".", "axis", ":", "0", "or", "index", "1", "or", "columns", ".", "inplace", ":", "If", "True", "fill", "in", "place", ".", "Note", ":", "this", "will", "modify", "any", "other", "views", "on", "this", "object", ".", "limit", ":", "If", "method", "is", "specified", "this", "is", "the", "maximum", "number", "of", "consecutive", "NaN", "values", "to", "forward", "/", "backward", "fill", ".", "In", "other", "words", "if", "there", "is", "a", "gap", "with", "more", "than", "this", "number", "of", "consecutive", "NaNs", "it", "will", "only", "be", "partially", "filled", ".", "If", "method", "is", "not", "specified", "this", "is", "the", "maximum", "number", "of", "entries", "along", "the", "entire", "axis", "where", "NaNs", "will", "be", "filled", ".", "Must", "be", "greater", "than", "0", "if", "not", "None", ".", "downcast", ":", "A", "dict", "of", "item", "-", ">", "dtype", "of", "what", "to", "downcast", "if", "possible", "or", "the", "string", "infer", "which", "will", "try", "to", "downcast", "to", "an", "appropriate", "equal", "type", ".", "Returns", ":", "filled", ":", "DataFrame" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1056-L1136
train
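A hedged sketch of fillna with both a scalar value and a fill method (the data is illustrative):

import modin.pandas as pd

df = pd.DataFrame({"a": [1.0, None, 3.0]})
print(df.fillna(0))               # NaN replaced with 0
print(df.fillna(method="ffill"))  # NaN filled from the last valid value (1.0)
# Passing both value and method raises ValueError, per the checks above.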
modin-project/modin
modin/pandas/base.py
BasePandasDataset.filter
def filter(self, items=None, like=None, regex=None, axis=None): """Subset rows or columns based on their labels Args: items (list): list of labels to subset like (string): retain labels where `arg in label == True` regex (string): retain labels matching regex input axis: axis to filter on Returns: A new DataFrame with the filter applied. """ nkw = count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` are mutually exclusive" ) if nkw == 0: raise TypeError("Must pass either `items`, `like`, or `regex`") if axis is None: axis = "columns" # This is the default info axis for dataframes axis = self._get_axis_number(axis) labels = self.columns if axis else self.index if items is not None: bool_arr = labels.isin(items) elif like is not None: def f(x): return like in to_str(x) bool_arr = labels.map(f).tolist() else: def f(x): return matcher.search(to_str(x)) is not None matcher = re.compile(regex) bool_arr = labels.map(f).tolist() if not axis: return self[bool_arr] return self[self.columns[bool_arr]]
python
def filter(self, items=None, like=None, regex=None, axis=None): """Subset rows or columns based on their labels Args: items (list): list of labels to subset like (string): retain labels where `arg in label == True` regex (string): retain labels matching regex input axis: axis to filter on Returns: A new DataFrame with the filter applied. """ nkw = count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` are mutually exclusive" ) if nkw == 0: raise TypeError("Must pass either `items`, `like`, or `regex`") if axis is None: axis = "columns" # This is the default info axis for dataframes axis = self._get_axis_number(axis) labels = self.columns if axis else self.index if items is not None: bool_arr = labels.isin(items) elif like is not None: def f(x): return like in to_str(x) bool_arr = labels.map(f).tolist() else: def f(x): return matcher.search(to_str(x)) is not None matcher = re.compile(regex) bool_arr = labels.map(f).tolist() if not axis: return self[bool_arr] return self[self.columns[bool_arr]]
[ "def", "filter", "(", "self", ",", "items", "=", "None", ",", "like", "=", "None", ",", "regex", "=", "None", ",", "axis", "=", "None", ")", ":", "nkw", "=", "count_not_none", "(", "items", ",", "like", ",", "regex", ")", "if", "nkw", ">", "1", ":", "raise", "TypeError", "(", "\"Keyword arguments `items`, `like`, or `regex` are mutually exclusive\"", ")", "if", "nkw", "==", "0", ":", "raise", "TypeError", "(", "\"Must pass either `items`, `like`, or `regex`\"", ")", "if", "axis", "is", "None", ":", "axis", "=", "\"columns\"", "# This is the default info axis for dataframes\r", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "labels", "=", "self", ".", "columns", "if", "axis", "else", "self", ".", "index", "if", "items", "is", "not", "None", ":", "bool_arr", "=", "labels", ".", "isin", "(", "items", ")", "elif", "like", "is", "not", "None", ":", "def", "f", "(", "x", ")", ":", "return", "like", "in", "to_str", "(", "x", ")", "bool_arr", "=", "labels", ".", "map", "(", "f", ")", ".", "tolist", "(", ")", "else", ":", "def", "f", "(", "x", ")", ":", "return", "matcher", ".", "search", "(", "to_str", "(", "x", ")", ")", "is", "not", "None", "matcher", "=", "re", ".", "compile", "(", "regex", ")", "bool_arr", "=", "labels", ".", "map", "(", "f", ")", ".", "tolist", "(", ")", "if", "not", "axis", ":", "return", "self", "[", "bool_arr", "]", "return", "self", "[", "self", ".", "columns", "[", "bool_arr", "]", "]" ]
Subset rows or columns based on their labels Args: items (list): list of labels to subset like (string): retain labels where `arg in label == True` regex (string): retain labels matching regex input axis: axis to filter on Returns: A new DataFrame with the filter applied.
[ "Subset", "rows", "or", "columns", "based", "on", "their", "labels", "Args", ":", "items", "(", "list", ")", ":", "list", "of", "labels", "to", "subset", "like", "(", "string", ")", ":", "retain", "labels", "where", "arg", "in", "label", "==", "True", "regex", "(", "string", ")", ":", "retain", "labels", "matching", "regex", "input", "axis", ":", "axis", "to", "filter", "on", "Returns", ":", "A", "new", "DataFrame", "with", "the", "filter", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1138-L1180
train
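An illustrative sketch of the three mutually exclusive filter modes (column names are made up):

import modin.pandas as pd

df = pd.DataFrame({"one": [1], "two": [2], "three": [3]})
print(df.filter(items=["one"]))  # exact labels
print(df.filter(like="t"))       # substring match: two, three
print(df.filter(regex="e$"))     # regex match: one, three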
modin-project/modin
modin/pandas/base.py
BasePandasDataset.floordiv
def floordiv(self, other, axis="columns", level=None, fill_value=None): """Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied. """ return self._binary_op( "floordiv", other, axis=axis, level=level, fill_value=fill_value )
python
def floordiv(self, other, axis="columns", level=None, fill_value=None): """Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied. """ return self._binary_op( "floordiv", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "floordiv", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"floordiv\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Floor-divides this DataFrame by another DataFrame/Series/scalar (integer division, discarding the remainder). Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the floor division applied.
[ "Divides", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "divide", "against", "this", ".", "axis", ":", "The", "axis", "to", "divide", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "divide", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "Divide", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1193-L1207
train
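A small sketch, with assumed data, showing that floordiv rounds toward negative infinity like Python's // operator:

import modin.pandas as pd

df = pd.DataFrame({"a": [7, 9, -7]})
print(df.floordiv(2))  # a: 3, 4, -4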
modin-project/modin
modin/pandas/base.py
BasePandasDataset.ge
def ge(self, other, axis="columns", level=None): """Checks element-wise that this is greater than or equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the ge over. level: The Multilevel index level to apply ge over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("ge", other, axis=axis, level=level)
python
def ge(self, other, axis="columns", level=None): """Checks element-wise that this is greater than or equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the ge over. level: The Multilevel index level to apply ge over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("ge", other, axis=axis, level=level)
[ "def", "ge", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"ge\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ")" ]
Checks element-wise that this is greater than or equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the ge over. level: The Multilevel index level to apply ge over. Returns: A new DataFrame filled with Booleans.
[ "Checks", "element", "-", "wise", "that", "this", "is", "greater", "than", "or", "equal", "to", "other", ".", "Args", ":", "other", ":", "A", "DataFrame", "or", "Series", "or", "scalar", "to", "compare", "to", ".", "axis", ":", "The", "axis", "to", "perform", "the", "ge", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "ge", "over", ".", "Returns", ":", "A", "new", "DataFrame", "filled", "with", "Booleans", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1234-L1245
train
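A sketch of ge against a Series, which aligns on column labels by default (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
thresholds = pd.Series({"a": 2, "b": 3})
# Column "a" is compared to 2 and column "b" to 3.
print(df.ge(thresholds))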
modin-project/modin
modin/pandas/base.py
BasePandasDataset.get_dtype_counts
def get_dtype_counts(self): """Get the counts of dtypes in this object. Returns: The counts of dtypes in this object. """ if hasattr(self, "dtype"): return pandas.Series({str(self.dtype): 1}) result = self.dtypes.value_counts() result.index = result.index.map(lambda x: str(x)) return result
python
def get_dtype_counts(self): """Get the counts of dtypes in this object. Returns: The counts of dtypes in this object. """ if hasattr(self, "dtype"): return pandas.Series({str(self.dtype): 1}) result = self.dtypes.value_counts() result.index = result.index.map(lambda x: str(x)) return result
[ "def", "get_dtype_counts", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"dtype\"", ")", ":", "return", "pandas", ".", "Series", "(", "{", "str", "(", "self", ".", "dtype", ")", ":", "1", "}", ")", "result", "=", "self", ".", "dtypes", ".", "value_counts", "(", ")", "result", ".", "index", "=", "result", ".", "index", ".", "map", "(", "lambda", "x", ":", "str", "(", "x", ")", ")", "return", "result" ]
Get the counts of dtypes in this object. Returns: The counts of dtypes in this object.
[ "Get", "the", "counts", "of", "dtypes", "in", "this", "object", ".", "Returns", ":", "The", "counts", "of", "dtypes", "in", "this", "object", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1264-L1274
train
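An illustrative call (this accessor existed at this revision; later pandas releases deprecated it):

import modin.pandas as pd

df = pd.DataFrame({"a": [1], "b": [1.5], "c": ["x"]})
# One column each of int64, float64, and object.
print(df.get_dtype_counts())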
modin-project/modin
modin/pandas/base.py
BasePandasDataset.get_ftype_counts
def get_ftype_counts(self): """Get the counts of ftypes in this object. Returns: The counts of ftypes in this object. """ if hasattr(self, "ftype"): return pandas.Series({self.ftype: 1}) return self.ftypes.value_counts().sort_index()
python
def get_ftype_counts(self): """Get the counts of ftypes in this object. Returns: The counts of ftypes in this object. """ if hasattr(self, "ftype"): return pandas.Series({self.ftype: 1}) return self.ftypes.value_counts().sort_index()
[ "def", "get_ftype_counts", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"ftype\"", ")", ":", "return", "pandas", ".", "Series", "(", "{", "self", ".", "ftype", ":", "1", "}", ")", "return", "self", ".", "ftypes", ".", "value_counts", "(", ")", ".", "sort_index", "(", ")" ]
Get the counts of ftypes in this object. Returns: The counts of ftypes in this object.
[ "Get", "the", "counts", "of", "ftypes", "in", "this", "object", ".", "Returns", ":", "The", "counts", "of", "ftypes", "in", "this", "object", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1276-L1284
train
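A similar illustrative call for ftypes, which append the storage kind (e.g. "int64:dense"); like get_dtype_counts, it was deprecated in later pandas releases:

import modin.pandas as pd

df = pd.DataFrame({"a": [1], "b": [1.5]})
print(df.get_ftype_counts())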
modin-project/modin
modin/pandas/base.py
BasePandasDataset.gt
def gt(self, other, axis="columns", level=None): """Checks element-wise that this is greater than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the gt over. level: The Multilevel index level to apply gt over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("gt", other, axis=axis, level=level)
python
def gt(self, other, axis="columns", level=None): """Checks element-wise that this is greater than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the gt over. level: The Multilevel index level to apply gt over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("gt", other, axis=axis, level=level)
[ "def", "gt", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"gt\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ")" ]
Checks element-wise that this is greater than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the gt over. level: The Multilevel index level to apply gt over. Returns: A new DataFrame filled with Booleans.
[ "Checks", "element", "-", "wise", "that", "this", "is", "greater", "than", "other", ".", "Args", ":", "other", ":", "A", "DataFrame", "or", "Series", "or", "scalar", "to", "compare", "to", ".", "axis", ":", "The", "axis", "to", "perform", "the", "gt", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "gt", "over", ".", "Returns", ":", "A", "new", "DataFrame", "filled", "with", "Booleans", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1289-L1300
train
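A one-line sketch with assumed data:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
print(df.gt(2))  # a: False, False, True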
modin-project/modin
modin/pandas/base.py
BasePandasDataset.head
def head(self, n=5): """Get the first n rows of the DataFrame. Args: n (int): The number of rows to return. Returns: A new DataFrame with the first n rows of the DataFrame. """ if n >= len(self.index): return self.copy() return self.__constructor__(query_compiler=self._query_compiler.head(n))
python
def head(self, n=5): """Get the first n rows of the DataFrame. Args: n (int): The number of rows to return. Returns: A new DataFrame with the first n rows of the DataFrame. """ if n >= len(self.index): return self.copy() return self.__constructor__(query_compiler=self._query_compiler.head(n))
[ "def", "head", "(", "self", ",", "n", "=", "5", ")", ":", "if", "n", ">=", "len", "(", "self", ".", "index", ")", ":", "return", "self", ".", "copy", "(", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "head", "(", "n", ")", ")" ]
Get the first n rows of the DataFrame. Args: n (int): The number of rows to return. Returns: A new DataFrame with the first n rows of the DataFrame.
[ "Get", "the", "first", "n", "rows", "of", "the", "DataFrame", ".", "Args", ":", "n", "(", "int", ")", ":", "The", "number", "of", "rows", "to", "return", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "first", "n", "rows", "of", "the", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1302-L1313
train
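A sketch showing both branches of head (data assumed): the fast path when n covers the whole frame, and the partitioned head otherwise:

import modin.pandas as pd

df = pd.DataFrame({"a": range(10)})
print(df.head(3))     # first three rows
whole = df.head(100)  # n >= len(index): a copy of the entire frame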
modin-project/modin
modin/pandas/base.py
BasePandasDataset.idxmax
def idxmax(self, axis=0, skipna=True, *args, **kwargs): """Get the index of the first occurrence of the max value of the axis. Args: axis (int): Identify the max over the rows (1) or columns (0). skipna (bool): Whether or not to skip NA values. Returns: A Series with the index for each maximum value for the axis specified. """ if not all(d != np.dtype("O") for d in self._get_dtypes()): raise TypeError("reduction operation 'argmax' not allowed for this dtype") axis = self._get_axis_number(axis) return self._reduce_dimension( self._query_compiler.idxmax(axis=axis, skipna=skipna) )
python
def idxmax(self, axis=0, skipna=True, *args, **kwargs): """Get the index of the first occurrence of the max value of the axis. Args: axis (int): Identify the max over the rows (1) or columns (0). skipna (bool): Whether or not to skip NA values. Returns: A Series with the index for each maximum value for the axis specified. """ if not all(d != np.dtype("O") for d in self._get_dtypes()): raise TypeError("reduction operation 'argmax' not allowed for this dtype") axis = self._get_axis_number(axis) return self._reduce_dimension( self._query_compiler.idxmax(axis=axis, skipna=skipna) )
[ "def", "idxmax", "(", "self", ",", "axis", "=", "0", ",", "skipna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "all", "(", "d", "!=", "np", ".", "dtype", "(", "\"O\"", ")", "for", "d", "in", "self", ".", "_get_dtypes", "(", ")", ")", ":", "raise", "TypeError", "(", "\"reduction operation 'argmax' not allowed for this dtype\"", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "idxmax", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ")", ")" ]
Get the index of the first occurrence of the max value of the axis. Args: axis (int): 0 or 'index' to find the row label of the max for each column, 1 or 'columns' to find the column label of the max for each row. skipna (bool): Whether or not to skip NA values. Returns: A Series with the index for each maximum value for the axis specified.
[ "Get", "the", "index", "of", "the", "first", "occurrence", "of", "the", "max", "value", "of", "the", "axis", ".", "Args", ":", "axis", "(", "int", ")", ":", "Identify", "the", "max", "over", "the", "rows", "(", "1", ")", "or", "columns", "(", "0", ")", ".", "skipna", "(", "bool", ")", ":", "Whether", "or", "not", "to", "skip", "NA", "values", ".", "Returns", ":", "A", "Series", "with", "the", "index", "for", "each", "maximum", "value", "for", "the", "axis", "specified", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1321-L1337
train
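An illustrative sketch; note the frame must not hold object-dtype columns, per the check above:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 3, 2]}, index=["x", "y", "z"])
print(df.idxmax())  # a: "y" -- the row label of each column's maximum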
modin-project/modin
modin/pandas/base.py
BasePandasDataset.isin
def isin(self, values): """Fill a DataFrame with booleans for cells contained in values. Args: values (iterable, DataFrame, Series, or dict): The values to find. Returns: A new DataFrame with booleans representing whether or not a cell is in values. True: cell is contained in values. False: otherwise """ return self.__constructor__( query_compiler=self._query_compiler.isin(values=values) )
python
def isin(self, values): """Fill a DataFrame with booleans for cells contained in values. Args: values (iterable, DataFrame, Series, or dict): The values to find. Returns: A new DataFrame with booleans representing whether or not a cell is in values. True: cell is contained in values. False: otherwise """ return self.__constructor__( query_compiler=self._query_compiler.isin(values=values) )
[ "def", "isin", "(", "self", ",", "values", ")", ":", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "isin", "(", "values", "=", "values", ")", ")" ]
Fill a DataFrame with booleans for cells contained in values. Args: values (iterable, DataFrame, Series, or dict): The values to find. Returns: A new DataFrame with booleans representing whether or not a cell is in values. True: cell is contained in values. False: otherwise
[ "Fill", "a", "DataFrame", "with", "booleans", "for", "cells", "contained", "in", "values", ".", "Args", ":", "values", "(", "iterable", "DataFrame", "Series", "or", "dict", ")", ":", "The", "values", "to", "find", ".", "Returns", ":", "A", "new", "DataFrame", "with", "booleans", "representing", "whether", "or", "not", "a", "cell", "is", "in", "values", ".", "True", ":", "cell", "is", "contained", "in", "values", ".", "False", ":", "otherwise" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1360-L1374
train
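A short sketch with assumed values:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
print(df.isin([1, 3]))  # a: True, False, True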
modin-project/modin
modin/pandas/base.py
BasePandasDataset.le
def le(self, other, axis="columns", level=None): """Checks element-wise that this is less than or equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the le over. level: The Multilevel index level to apply le over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("le", other, axis=axis, level=level)
python
def le(self, other, axis="columns", level=None): """Checks element-wise that this is less than or equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the le over. level: The Multilevel index level to apply le over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("le", other, axis=axis, level=level)
[ "def", "le", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"le\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ")" ]
Checks element-wise that this is less than or equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the le over. level: The Multilevel index level to apply le over. Returns: A new DataFrame filled with Booleans.
[ "Checks", "element", "-", "wise", "that", "this", "is", "less", "than", "or", "equal", "to", "other", ".", "Args", ":", "other", ":", "A", "DataFrame", "or", "Series", "or", "scalar", "to", "compare", "to", ".", "axis", ":", "The", "axis", "to", "perform", "the", "le", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "le", "over", ".", "Returns", ":", "A", "new", "DataFrame", "filled", "with", "Booleans", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1435-L1446
train
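A sketch comparing against a list, which is matched positionally to the columns (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 5]})
# Column "a" is compared to 2, column "b" to 4.
print(df.le([2, 4]))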
modin-project/modin
modin/pandas/base.py
BasePandasDataset.lt
def lt(self, other, axis="columns", level=None): """Checks element-wise that this is less than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the lt over. level: The Multilevel index level to apply lt over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("lt", other, axis=axis, level=level)
python
def lt(self, other, axis="columns", level=None): """Checks element-wise that this is less than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the lt over. level: The Multilevel index level to apply lt over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("lt", other, axis=axis, level=level)
[ "def", "lt", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"lt\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ")" ]
Checks element-wise that this is less than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the lt over. level: The Multilevel index level to apply lt over. Returns: A new DataFrame filled with Booleans.
[ "Checks", "element", "-", "wise", "that", "this", "is", "less", "than", "other", ".", "Args", ":", "other", ":", "A", "DataFrame", "or", "Series", "or", "scalar", "to", "compare", "to", ".", "axis", ":", "The", "axis", "to", "perform", "the", "lt", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "lt", "over", ".", "Returns", ":", "A", "new", "DataFrame", "filled", "with", "Booleans", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1448-L1459
train
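A sketch using axis=0 to align a Series with the row index instead of the columns (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
row_limits = pd.Series([2, 3])
# Row 0 is compared to 2 and row 1 to 3, in every column.
print(df.lt(row_limits, axis=0))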
modin-project/modin
modin/pandas/base.py
BasePandasDataset.mean
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): """Computes mean across the DataFrame. Args: axis (int): The axis to take the mean on. skipna (bool): True to skip NA values, false otherwise. Returns: The mean of the DataFrame. (Pandas series) """ axis = self._get_axis_number(axis) if axis is not None else 0 data = self._validate_dtypes_sum_prod_mean( axis, numeric_only, ignore_axis=False ) return data._reduce_dimension( data._query_compiler.mean( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs ) )
python
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): """Computes mean across the DataFrame. Args: axis (int): The axis to take the mean on. skipna (bool): True to skip NA values, false otherwise. Returns: The mean of the DataFrame. (Pandas series) """ axis = self._get_axis_number(axis) if axis is not None else 0 data = self._validate_dtypes_sum_prod_mean( axis, numeric_only, ignore_axis=False ) return data._reduce_dimension( data._query_compiler.mean( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs ) )
[ "def", "mean", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "None", ",", "level", "=", "None", ",", "numeric_only", "=", "None", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "data", "=", "self", ".", "_validate_dtypes_sum_prod_mean", "(", "axis", ",", "numeric_only", ",", "ignore_axis", "=", "False", ")", "return", "data", ".", "_reduce_dimension", "(", "data", ".", "_query_compiler", ".", "mean", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "numeric_only", "=", "numeric_only", ",", "*", "*", "kwargs", ")", ")" ]
Computes mean across the DataFrame. Args: axis (int): The axis to take the mean on. skipna (bool): True to skip NA values, false otherwise. Returns: The mean of the DataFrame. (Pandas series)
[ "Computes", "mean", "across", "the", "DataFrame", ".", "Args", ":", "axis", "(", "int", ")", ":", "The", "axis", "to", "take", "the", "mean", "on", ".", "skipna", "(", "bool", ")", ":", "True", "to", "skip", "NA", "values", "false", "otherwise", ".", "Returns", ":", "The", "mean", "of", "the", "DataFrame", ".", "(", "Pandas", "series", ")" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1522-L1544
train
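A sketch of the two axes (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 5.0]})
print(df.mean())        # per-column means: a 1.5, b 4.0
print(df.mean(axis=1))  # per-row means: 2.0, 3.5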
modin-project/modin
modin/pandas/base.py
BasePandasDataset.median
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): """Computes median across the DataFrame. Args: axis (int): The axis to take the median on. skipna (bool): True to skip NA values, false otherwise. Returns: The median of the DataFrame. (Pandas series) """ axis = self._get_axis_number(axis) if axis is not None else 0 if numeric_only is not None and not numeric_only: self._validate_dtypes(numeric_only=True) return self._reduce_dimension( self._query_compiler.median( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs ) )
python
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): """Computes median across the DataFrame. Args: axis (int): The axis to take the median on. skipna (bool): True to skip NA values, false otherwise. Returns: The median of the DataFrame. (Pandas series) """ axis = self._get_axis_number(axis) if axis is not None else 0 if numeric_only is not None and not numeric_only: self._validate_dtypes(numeric_only=True) return self._reduce_dimension( self._query_compiler.median( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs ) )
[ "def", "median", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "None", ",", "level", "=", "None", ",", "numeric_only", "=", "None", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "if", "numeric_only", "is", "not", "None", "and", "not", "numeric_only", ":", "self", ".", "_validate_dtypes", "(", "numeric_only", "=", "True", ")", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "median", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "numeric_only", "=", "numeric_only", ",", "*", "*", "kwargs", ")", ")" ]
Computes median across the DataFrame. Args: axis (int): The axis to take the median on. skipna (bool): True to skip NA values, false otherwise. Returns: The median of the DataFrame. (Pandas series)
[ "Computes", "median", "across", "the", "DataFrame", ".", "Args", ":", "axis", "(", "int", ")", ":", "The", "axis", "to", "take", "the", "median", "on", ".", "skipna", "(", "bool", ")", ":", "True", "to", "skip", "NA", "values", "false", "otherwise", ".", "Returns", ":", "The", "median", "of", "the", "DataFrame", ".", "(", "Pandas", "series", ")" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1546-L1567
train
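An illustrative sketch; unlike the mean, the median is unaffected by the outlier:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 3, 100]})
print(df.median())  # a: 3.0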
modin-project/modin
modin/pandas/base.py
BasePandasDataset.memory_usage
def memory_usage(self, index=True, deep=False): """Returns the memory usage of each column in bytes Args: index (bool): Whether to include the memory usage of the DataFrame's index in returned Series. Defaults to True deep (bool): If True, introspect the data deeply by interrogating objects dtypes for system-level memory consumption. Defaults to False Returns: A Series where the index are the column names and the values are the memory usage of each of the columns in bytes. If `index=true`, then the first value of the Series will be 'Index' with its memory usage. """ assert not index, "Internal Error. Index must be evaluated in child class" return self._reduce_dimension( self._query_compiler.memory_usage(index=index, deep=deep) )
python
def memory_usage(self, index=True, deep=False): """Returns the memory usage of each column in bytes Args: index (bool): Whether to include the memory usage of the DataFrame's index in returned Series. Defaults to True deep (bool): If True, introspect the data deeply by interrogating objects dtypes for system-level memory consumption. Defaults to False Returns: A Series where the index are the column names and the values are the memory usage of each of the columns in bytes. If `index=true`, then the first value of the Series will be 'Index' with its memory usage. """ assert not index, "Internal Error. Index must be evaluated in child class" return self._reduce_dimension( self._query_compiler.memory_usage(index=index, deep=deep) )
[ "def", "memory_usage", "(", "self", ",", "index", "=", "True", ",", "deep", "=", "False", ")", ":", "assert", "not", "index", ",", "\"Internal Error. Index must be evaluated in child class\"", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "memory_usage", "(", "index", "=", "index", ",", "deep", "=", "deep", ")", ")" ]
Returns the memory usage of each column in bytes Args: index (bool): Whether to include the memory usage of the DataFrame's index in returned Series. Defaults to True deep (bool): If True, introspect the data deeply by interrogating objects' dtypes for system-level memory consumption. Defaults to False Returns: A Series where the index are the column names and the values are the memory usage of each of the columns in bytes. If `index=True`, then the first value of the Series will be 'Index' with its memory usage.
[ "Returns", "the", "memory", "usage", "of", "each", "column", "in", "bytes", "Args", ":", "index", "(", "bool", ")", ":", "Whether", "to", "include", "the", "memory", "usage", "of", "the", "DataFrame", "s", "index", "in", "returned", "Series", ".", "Defaults", "to", "True", "deep", "(", "bool", ")", ":", "If", "True", "introspect", "the", "data", "deeply", "by", "interrogating", "objects", "dtypes", "for", "system", "-", "level", "memory", "consumption", ".", "Defaults", "to", "False", "Returns", ":", "A", "Series", "where", "the", "index", "are", "the", "column", "names", "and", "the", "values", "are", "the", "memory", "usage", "of", "each", "of", "the", "columns", "in", "bytes", ".", "If", "index", "=", "true", "then", "the", "first", "value", "of", "the", "Series", "will", "be", "Index", "with", "its", "memory", "usage", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1569-L1586
train
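A sketch of the user-facing call (data assumed); the assert above only guards the base class, since the DataFrame subclass is expected to handle index=True itself:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
print(df.memory_usage())             # includes an 'Index' entry
print(df.memory_usage(index=False))  # column usage only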
modin-project/modin
modin/pandas/base.py
BasePandasDataset.min
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): """Perform min across the DataFrame. Args: axis (int): The axis to take the min on. skipna (bool): True to skip NA values, false otherwise. Returns: The min of the DataFrame. """ axis = self._get_axis_number(axis) if axis is not None else 0 data = self._validate_dtypes_min_max(axis, numeric_only) return data._reduce_dimension( data._query_compiler.min( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs ) )
python
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): """Perform min across the DataFrame. Args: axis (int): The axis to take the min on. skipna (bool): True to skip NA values, false otherwise. Returns: The min of the DataFrame. """ axis = self._get_axis_number(axis) if axis is not None else 0 data = self._validate_dtypes_min_max(axis, numeric_only) return data._reduce_dimension( data._query_compiler.min( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs ) )
[ "def", "min", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "None", ",", "level", "=", "None", ",", "numeric_only", "=", "None", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "data", "=", "self", ".", "_validate_dtypes_min_max", "(", "axis", ",", "numeric_only", ")", "return", "data", ".", "_reduce_dimension", "(", "data", ".", "_query_compiler", ".", "min", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "numeric_only", "=", "numeric_only", ",", "*", "*", "kwargs", ")", ")" ]
Perform min across the DataFrame. Args: axis (int): The axis to take the min on. skipna (bool): True to skip NA values, false otherwise. Returns: The min of the DataFrame.
[ "Perform", "min", "across", "the", "DataFrame", ".", "Args", ":", "axis", "(", "int", ")", ":", "The", "axis", "to", "take", "the", "min", "on", ".", "skipna", "(", "bool", ")", ":", "True", "to", "skip", "NA", "values", "false", "otherwise", ".", "Returns", ":", "The", "min", "of", "the", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1588-L1608
train
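A one-line sketch with assumed data:

import modin.pandas as pd

df = pd.DataFrame({"a": [3, 1, 2]})
print(df.min())  # a: 1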
modin-project/modin
modin/pandas/base.py
BasePandasDataset.mod
def mod(self, other, axis="columns", level=None, fill_value=None): """Mods this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the mod against this. axis: The axis to mod over. level: The Multilevel index level to apply mod over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Mod applied. """ return self._binary_op( "mod", other, axis=axis, level=level, fill_value=fill_value )
python
def mod(self, other, axis="columns", level=None, fill_value=None): """Mods this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the mod against this. axis: The axis to mod over. level: The Multilevel index level to apply mod over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Mod applied. """ return self._binary_op( "mod", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "mod", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"mod\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Mods this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the mod against this. axis: The axis to mod over. level: The Multilevel index level to apply mod over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Mod applied.
[ "Mods", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "mod", "against", "this", ".", "axis", ":", "The", "axis", "to", "mod", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "mod", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "Mod", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1610-L1624
train
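A short sketch of element-wise remainders (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [7, 9]})
print(df.mod(4))  # a: 3, 1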
modin-project/modin
modin/pandas/base.py
BasePandasDataset.mode
def mode(self, axis=0, numeric_only=False, dropna=True): """Perform mode across the DataFrame. Args: axis (int): The axis to take the mode on. numeric_only (bool): if True, only apply to numeric columns. Returns: DataFrame: The mode of the DataFrame. """ axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.mode( axis=axis, numeric_only=numeric_only, dropna=dropna ) )
python
def mode(self, axis=0, numeric_only=False, dropna=True): """Perform mode across the DataFrame. Args: axis (int): The axis to take the mode on. numeric_only (bool): if True, only apply to numeric columns. Returns: DataFrame: The mode of the DataFrame. """ axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.mode( axis=axis, numeric_only=numeric_only, dropna=dropna ) )
[ "def", "mode", "(", "self", ",", "axis", "=", "0", ",", "numeric_only", "=", "False", ",", "dropna", "=", "True", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "mode", "(", "axis", "=", "axis", ",", "numeric_only", "=", "numeric_only", ",", "dropna", "=", "dropna", ")", ")" ]
Perform mode across the DataFrame. Args: axis (int): The axis to take the mode on. numeric_only (bool): if True, only apply to numeric columns. dropna (bool): if True, don't count NaN values. Returns: DataFrame: The mode of the DataFrame.
[ "Perform", "mode", "across", "the", "DataFrame", ".", "Args", ":", "axis", "(", "int", ")", ":", "The", "axis", "to", "take", "the", "mode", "on", ".", "numeric_only", "(", "bool", ")", ":", "if", "True", "only", "apply", "to", "numeric", "columns", ".", "Returns", ":", "DataFrame", ":", "The", "mode", "of", "the", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1626-L1641
train
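An illustrative sketch; the result is a DataFrame because a column can have several modes:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 2], "b": [3, 3, 4]})
print(df.mode())  # a: 2, b: 3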
modin-project/modin
modin/pandas/base.py
BasePandasDataset.mul
def mul(self, other, axis="columns", level=None, fill_value=None): """Multiplies this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the multiply against this. axis: The axis to multiply over. level: The Multilevel index level to apply multiply over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Multiply applied. """ return self._binary_op( "mul", other, axis=axis, level=level, fill_value=fill_value )
python
def mul(self, other, axis="columns", level=None, fill_value=None): """Multiplies this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the multiply against this. axis: The axis to multiply over. level: The Multilevel index level to apply multiply over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Multiply applied. """ return self._binary_op( "mul", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "mul", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"mul\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Multiplies this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the multiply against this. axis: The axis to multiply over. level: The Multilevel index level to apply multiply over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Multiply applied.
[ "Multiplies", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "multiply", "against", "this", ".", "axis", ":", "The", "axis", "to", "multiply", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "multiply", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "Multiply", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1643-L1657
train
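A sketch with a scalar and with another frame (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2]})
print(df.mul(10))  # a: 10, 20
print(df.mul(df))  # element-wise square: 1, 4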
modin-project/modin
modin/pandas/base.py
BasePandasDataset.ne
def ne(self, other, axis="columns", level=None): """Checks element-wise that this is not equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the ne over. level: The Multilevel index level to apply ne over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("ne", other, axis=axis, level=level)
python
def ne(self, other, axis="columns", level=None): """Checks element-wise that this is not equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the ne over. level: The Multilevel index level to apply ne over. Returns: A new DataFrame filled with Booleans. """ return self._binary_op("ne", other, axis=axis, level=level)
[ "def", "ne", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"ne\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ")" ]
Checks element-wise that this is not equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the ne over. level: The Multilevel index level to apply ne over. Returns: A new DataFrame filled with Booleans.
[ "Checks", "element", "-", "wise", "that", "this", "is", "not", "equal", "to", "other", ".", "Args", ":", "other", ":", "A", "DataFrame", "or", "Series", "or", "scalar", "to", "compare", "to", ".", "axis", ":", "The", "axis", "to", "perform", "the", "ne", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "ne", "over", ".", "Returns", ":", "A", "new", "DataFrame", "filled", "with", "Booleans", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1661-L1672
train
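A one-line sketch with assumed data:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
print(df.ne(2))  # a: True, False, True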
modin-project/modin
modin/pandas/base.py
BasePandasDataset.nunique
def nunique(self, axis=0, dropna=True): """Return Series with number of distinct observations over requested axis. Args: axis : {0 or 'index', 1 or 'columns'}, default 0 dropna : boolean, default True Returns: nunique : Series """ axis = self._get_axis_number(axis) if axis is not None else 0 return self._reduce_dimension( self._query_compiler.nunique(axis=axis, dropna=dropna) )
python
def nunique(self, axis=0, dropna=True): """Return Series with number of distinct observations over requested axis. Args: axis : {0 or 'index', 1 or 'columns'}, default 0 dropna : boolean, default True Returns: nunique : Series """ axis = self._get_axis_number(axis) if axis is not None else 0 return self._reduce_dimension( self._query_compiler.nunique(axis=axis, dropna=dropna) )
[ "def", "nunique", "(", "self", ",", "axis", "=", "0", ",", "dropna", "=", "True", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "nunique", "(", "axis", "=", "axis", ",", "dropna", "=", "dropna", ")", ")" ]
Return Series with number of distinct observations over requested axis. Args: axis : {0 or 'index', 1 or 'columns'}, default 0 dropna : boolean, default True Returns: nunique : Series
[ "Return", "Series", "with", "number", "of", "distinct", "observations", "over", "requested", "axis", ".", "Args", ":", "axis", ":", "{", "0", "or", "index", "1", "or", "columns", "}", "default", "0", "dropna", ":", "boolean", "default", "True", "Returns", ":", "nunique", ":", "Series" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1685-L1699
train
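A short sketch counting distinct values per column (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 3, 3]})
print(df.nunique())  # a: 2, b: 1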
modin-project/modin
modin/pandas/base.py
BasePandasDataset.pow
def pow(self, other, axis="columns", level=None, fill_value=None): """Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied. """ return self._binary_op( "pow", other, axis=axis, level=level, fill_value=fill_value )
python
def pow(self, other, axis="columns", level=None, fill_value=None): """Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied. """ return self._binary_op( "pow", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "pow", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"pow\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied.
[ "Pow", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "pow", "against", "this", ".", "axis", ":", "The", "axis", "to", "pow", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "pow", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "Pow", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1738-L1752
train
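A short sketch (data assumed):

import modin.pandas as pd

df = pd.DataFrame({"a": [2, 3]})
print(df.pow(2))  # a: 4, 9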
modin-project/modin
modin/pandas/base.py
BasePandasDataset.prod
def prod( self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0, **kwargs ): """Return the product of the values for the requested axis Args: axis : {index (0), columns (1)} skipna : boolean, default True level : int or level name, default None numeric_only : boolean, default None min_count : int, default 0 Returns: prod : Series or DataFrame (if level specified) """ axis = self._get_axis_number(axis) if axis is not None else 0 data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True) return data._reduce_dimension( data._query_compiler.prod( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, min_count=min_count, **kwargs ) )
python
def prod( self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0, **kwargs ): """Return the product of the values for the requested axis Args: axis : {index (0), columns (1)} skipna : boolean, default True level : int or level name, default None numeric_only : boolean, default None min_count : int, default 0 Returns: prod : Series or DataFrame (if level specified) """ axis = self._get_axis_number(axis) if axis is not None else 0 data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True) return data._reduce_dimension( data._query_compiler.prod( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, min_count=min_count, **kwargs ) )
[ "def", "prod", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "None", ",", "level", "=", "None", ",", "numeric_only", "=", "None", ",", "min_count", "=", "0", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "data", "=", "self", ".", "_validate_dtypes_sum_prod_mean", "(", "axis", ",", "numeric_only", ",", "ignore_axis", "=", "True", ")", "return", "data", ".", "_reduce_dimension", "(", "data", ".", "_query_compiler", ".", "prod", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "numeric_only", "=", "numeric_only", ",", "min_count", "=", "min_count", ",", "*", "*", "kwargs", ")", ")" ]
Return the product of the values for the requested axis Args: axis : {index (0), columns (1)} skipna : boolean, default True level : int or level name, default None numeric_only : boolean, default None min_count : int, default 0 Returns: prod : Series or DataFrame (if level specified)
[ "Return", "the", "product", "of", "the", "values", "for", "the", "requested", "axis", "Args", ":", "axis", ":", "{", "index", "(", "0", ")", "columns", "(", "1", ")", "}", "skipna", ":", "boolean", "default", "True", "level", ":", "int", "or", "level", "name", "default", "None", "numeric_only", ":", "boolean", "default", "None", "min_count", ":", "int", "default", "0", "Returns", ":", "prod", ":", "Series", "or", "DataFrame", "(", "if", "level", "specified", ")" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1754-L1786
train
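A short illustrative sketch of `prod`, including the `min_count` threshold described in the record above (hypothetical data; assumes a working modin installation):

```python
import numpy as np
import modin.pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, np.nan], "b": [3.0, 4.0, 5.0]})

print(df.prod())             # NaNs are skipped by default: a -> 2.0, b -> 60.0
print(df.prod(min_count=3))  # column "a" has only 2 valid values, so it becomes NaN
```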
modin-project/modin
modin/pandas/base.py
BasePandasDataset.quantile
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"): """Return values at the given quantile over requested axis, a la numpy.percentile. Args: q (float): 0 <= q <= 1, the quantile(s) to compute axis (int): 0 or 'index' for row-wise, 1 or 'columns' for column-wise interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Specifies which interpolation method to use Returns: quantiles : Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. """ axis = self._get_axis_number(axis) if axis is not None else 0 def check_dtype(t): return is_numeric_dtype(t) or is_datetime_or_timedelta_dtype(t) if not numeric_only: # If not numeric_only and columns, then check all columns are either # numeric, timestamp, or timedelta if not axis and not all(check_dtype(t) for t in self._get_dtypes()): raise TypeError("can't multiply sequence by non-int of type 'float'") # If over rows, then make sure that all dtypes are equal for not # numeric_only elif axis: for i in range(1, len(self._get_dtypes())): pre_dtype = self._get_dtypes()[i - 1] curr_dtype = self._get_dtypes()[i] if not is_dtype_equal(pre_dtype, curr_dtype): raise TypeError( "Cannot compare type '{0}' with type '{1}'".format( pre_dtype, curr_dtype ) ) else: # Normally pandas returns this near the end of the quantile, but we # can't afford the overhead of running the entire operation before # we error. if not any(is_numeric_dtype(t) for t in self._get_dtypes()): raise ValueError("need at least one array to concatenate") # check that all qs are between 0 and 1 pandas.DataFrame()._check_percentile(q) axis = self._get_axis_number(axis) if isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)): return self.__constructor__( query_compiler=self._query_compiler.quantile_for_list_of_values( q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation, ) ) else: return self._reduce_dimension( self._query_compiler.quantile_for_single_value( q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation, ) )
python
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"): """Return values at the given quantile over requested axis, a la numpy.percentile. Args: q (float): 0 <= q <= 1, the quantile(s) to compute axis (int): 0 or 'index' for row-wise, 1 or 'columns' for column-wise interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Specifies which interpolation method to use Returns: quantiles : Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. """ axis = self._get_axis_number(axis) if axis is not None else 0 def check_dtype(t): return is_numeric_dtype(t) or is_datetime_or_timedelta_dtype(t) if not numeric_only: # If not numeric_only and columns, then check all columns are either # numeric, timestamp, or timedelta if not axis and not all(check_dtype(t) for t in self._get_dtypes()): raise TypeError("can't multiply sequence by non-int of type 'float'") # If over rows, then make sure that all dtypes are equal for not # numeric_only elif axis: for i in range(1, len(self._get_dtypes())): pre_dtype = self._get_dtypes()[i - 1] curr_dtype = self._get_dtypes()[i] if not is_dtype_equal(pre_dtype, curr_dtype): raise TypeError( "Cannot compare type '{0}' with type '{1}'".format( pre_dtype, curr_dtype ) ) else: # Normally pandas returns this near the end of the quantile, but we # can't afford the overhead of running the entire operation before # we error. if not any(is_numeric_dtype(t) for t in self._get_dtypes()): raise ValueError("need at least one array to concatenate") # check that all qs are between 0 and 1 pandas.DataFrame()._check_percentile(q) axis = self._get_axis_number(axis) if isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)): return self.__constructor__( query_compiler=self._query_compiler.quantile_for_list_of_values( q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation, ) ) else: return self._reduce_dimension( self._query_compiler.quantile_for_single_value( q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation, ) )
[ "def", "quantile", "(", "self", ",", "q", "=", "0.5", ",", "axis", "=", "0", ",", "numeric_only", "=", "True", ",", "interpolation", "=", "\"linear\"", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "def", "check_dtype", "(", "t", ")", ":", "return", "is_numeric_dtype", "(", "t", ")", "or", "is_datetime_or_timedelta_dtype", "(", "t", ")", "if", "not", "numeric_only", ":", "# If not numeric_only and columns, then check all columns are either\r", "# numeric, timestamp, or timedelta\r", "if", "not", "axis", "and", "not", "all", "(", "check_dtype", "(", "t", ")", "for", "t", "in", "self", ".", "_get_dtypes", "(", ")", ")", ":", "raise", "TypeError", "(", "\"can't multiply sequence by non-int of type 'float'\"", ")", "# If over rows, then make sure that all dtypes are equal for not\r", "# numeric_only\r", "elif", "axis", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "self", ".", "_get_dtypes", "(", ")", ")", ")", ":", "pre_dtype", "=", "self", ".", "_get_dtypes", "(", ")", "[", "i", "-", "1", "]", "curr_dtype", "=", "self", ".", "_get_dtypes", "(", ")", "[", "i", "]", "if", "not", "is_dtype_equal", "(", "pre_dtype", ",", "curr_dtype", ")", ":", "raise", "TypeError", "(", "\"Cannot compare type '{0}' with type '{1}'\"", ".", "format", "(", "pre_dtype", ",", "curr_dtype", ")", ")", "else", ":", "# Normally pandas returns this near the end of the quantile, but we\r", "# can't afford the overhead of running the entire operation before\r", "# we error.\r", "if", "not", "any", "(", "is_numeric_dtype", "(", "t", ")", "for", "t", "in", "self", ".", "_get_dtypes", "(", ")", ")", ":", "raise", "ValueError", "(", "\"need at least one array to concatenate\"", ")", "# check that all qs are between 0 and 1\r", "pandas", ".", "DataFrame", "(", ")", ".", "_check_percentile", "(", "q", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "isinstance", "(", "q", ",", "(", "pandas", ".", "Series", ",", "np", ".", "ndarray", ",", "pandas", ".", "Index", ",", "list", ")", ")", ":", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "quantile_for_list_of_values", "(", "q", "=", "q", ",", "axis", "=", "axis", ",", "numeric_only", "=", "numeric_only", ",", "interpolation", "=", "interpolation", ",", ")", ")", "else", ":", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "quantile_for_single_value", "(", "q", "=", "q", ",", "axis", "=", "axis", ",", "numeric_only", "=", "numeric_only", ",", "interpolation", "=", "interpolation", ",", ")", ")" ]
Return values at the given quantile over requested axis, a la numpy.percentile. Args: q (float): 0 <= q <= 1, the quantile(s) to compute axis (int): 0 or 'index' for row-wise, 1 or 'columns' for column-wise interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Specifies which interpolation method to use Returns: quantiles : Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles.
[ "Return", "values", "at", "the", "given", "quantile", "over", "requested", "axis", "a", "la", "numpy", ".", "percentile", ".", "Args", ":", "q", "(", "float", ")", ":", "0", "<", "=", "q", "<", "=", "1", "the", "quantile", "(", "s", ")", "to", "compute", "axis", "(", "int", ")", ":", "0", "or", "index", "for", "row", "-", "wise", "1", "or", "columns", "for", "column", "-", "wise", "interpolation", ":", "{", "linear", "lower", "higher", "midpoint", "nearest", "}", "Specifies", "which", "interpolation", "method", "to", "use", "Returns", ":", "quantiles", ":", "Series", "or", "DataFrame", "If", "q", "is", "an", "array", "a", "DataFrame", "will", "be", "returned", "where", "the", "index", "is", "q", "the", "columns", "are", "the", "columns", "of", "self", "and", "the", "values", "are", "the", "quantiles", ".", "If", "q", "is", "a", "float", "a", "Series", "will", "be", "returned", "where", "the", "index", "is", "the", "columns", "of", "self", "and", "the", "values", "are", "the", "quantiles", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1791-L1862
train
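As the docstring above notes, the return type of `quantile` depends on whether `q` is a scalar or list-like; a small illustrative sketch (assumes modin is installed):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [10, 20, 30, 40]})

print(df.quantile(0.5))           # scalar q -> Series indexed by column name
print(df.quantile([0.25, 0.75]))  # list q   -> DataFrame indexed by the quantiles
```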
modin-project/modin
modin/pandas/base.py
BasePandasDataset.rank
def rank( self, axis=0, method="average", numeric_only=None, na_option="keep", ascending=True, pct=False, ): """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the [method] of the ranks of those values. Args: axis (int): 0 or 'index' for row-wise, 1 or 'columns' for column-wise method: {'average', 'min', 'max', 'first', 'dense'} Specifies which method to use for equal vals numeric_only (boolean) Include only float, int, boolean data. na_option: {'keep', 'top', 'bottom'} Specifies how to handle NA options ascending (boolean): Decides ranking order pct (boolean): Computes percentage ranking of data Returns: A new DataFrame """ axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.rank( axis=axis, method=method, numeric_only=numeric_only, na_option=na_option, ascending=ascending, pct=pct, ) )
python
def rank( self, axis=0, method="average", numeric_only=None, na_option="keep", ascending=True, pct=False, ): """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the [method] of the ranks of those values. Args: axis (int): 0 or 'index' for row-wise, 1 or 'columns' for column-wise method: {'average', 'min', 'max', 'first', 'dense'} Specifies which method to use for equal vals numeric_only (boolean) Include only float, int, boolean data. na_option: {'keep', 'top', 'bottom'} Specifies how to handle NA options ascending (boolean): Decides ranking order pct (boolean): Computes percentage ranking of data Returns: A new DataFrame """ axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.rank( axis=axis, method=method, numeric_only=numeric_only, na_option=na_option, ascending=ascending, pct=pct, ) )
[ "def", "rank", "(", "self", ",", "axis", "=", "0", ",", "method", "=", "\"average\"", ",", "numeric_only", "=", "None", ",", "na_option", "=", "\"keep\"", ",", "ascending", "=", "True", ",", "pct", "=", "False", ",", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "rank", "(", "axis", "=", "axis", ",", "method", "=", "method", ",", "numeric_only", "=", "numeric_only", ",", "na_option", "=", "na_option", ",", "ascending", "=", "ascending", ",", "pct", "=", "pct", ",", ")", ")" ]
Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the [method] of the ranks of those values. Args: axis (int): 0 or 'index' for row-wise, 1 or 'columns' for column-wise method: {'average', 'min', 'max', 'first', 'dense'} Specifies which method to use for equal vals numeric_only (boolean) Include only float, int, boolean data. na_option: {'keep', 'top', 'bottom'} Specifies how to handle NA options ascending (boolean): Decides ranking order pct (boolean): Computes percentage ranking of data Returns: A new DataFrame
[ "Compute", "numerical", "data", "ranks", "(", "1", "through", "n", ")", "along", "axis", ".", "Equal", "values", "are", "assigned", "a", "rank", "that", "is", "the", "[", "method", "]", "of", "the", "ranks", "of", "those", "values", ".", "Args", ":", "axis", "(", "int", ")", ":", "0", "or", "index", "for", "row", "-", "wise", "1", "or", "columns", "for", "column", "-", "wise", "method", ":", "{", "average", "min", "max", "first", "dense", "}", "Specifies", "which", "method", "to", "use", "for", "equal", "vals", "numeric_only", "(", "boolean", ")", "Include", "only", "float", "int", "boolean", "data", ".", "na_option", ":", "{", "keep", "top", "bottom", "}", "Specifies", "how", "to", "handle", "NA", "options", "ascending", "(", "boolean", ")", ":", "Decedes", "ranking", "order", "pct", "(", "boolean", ")", ":", "Computes", "percentage", "ranking", "of", "data", "Returns", ":", "A", "new", "DataFrame" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1864-L1904
train
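A brief sketch of how the `method` argument changes tie handling in `rank` (illustrative data; assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"score": [3, 1, 4, 1, 5]})

print(df.rank(method="average"))                 # tied 1s share the mean rank 1.5
print(df.rank(method="dense", ascending=False))  # ties share a rank, with no gaps after them
```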
modin-project/modin
modin/pandas/base.py
BasePandasDataset.reset_index
def reset_index( self, level=None, drop=False, inplace=False, col_level=0, col_fill="" ): """Reset this index to default and create column from current index. Args: level: Only remove the given levels from the index. Removes all levels by default drop: Do not try to insert index into DataFrame columns. This resets the index to the default integer index. inplace: Modify the DataFrame in place (do not create a new object) col_level : If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill: If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns: A new DataFrame if inplace is False, None otherwise. """ inplace = validate_bool_kwarg(inplace, "inplace") # TODO Implement level if level is not None: new_query_compiler = self._default_to_pandas( "reset_index", level=level, drop=drop, inplace=inplace, col_level=col_level, col_fill=col_fill, ) # Error checking for matching Pandas. Pandas does not allow you to # insert a dropped index into a DataFrame if these columns already # exist. elif ( not drop and not isinstance(self.index, pandas.MultiIndex) and all(n in self.columns for n in ["level_0", "index"]) ): raise ValueError("cannot insert level_0, already exists") else: new_query_compiler = self._query_compiler.reset_index( drop=drop, level=level ) return self._create_or_update_from_compiler(new_query_compiler, inplace)
python
def reset_index( self, level=None, drop=False, inplace=False, col_level=0, col_fill="" ): """Reset this index to default and create column from current index. Args: level: Only remove the given levels from the index. Removes all levels by default drop: Do not try to insert index into DataFrame columns. This resets the index to the default integer index. inplace: Modify the DataFrame in place (do not create a new object) col_level : If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill: If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns: A new DataFrame if inplace is False, None otherwise. """ inplace = validate_bool_kwarg(inplace, "inplace") # TODO Implement level if level is not None: new_query_compiler = self._default_to_pandas( "reset_index", level=level, drop=drop, inplace=inplace, col_level=col_level, col_fill=col_fill, ) # Error checking for matching Pandas. Pandas does not allow you to # insert a dropped index into a DataFrame if these columns already # exist. elif ( not drop and not isinstance(self.index, pandas.MultiIndex) and all(n in self.columns for n in ["level_0", "index"]) ): raise ValueError("cannot insert level_0, already exists") else: new_query_compiler = self._query_compiler.reset_index( drop=drop, level=level ) return self._create_or_update_from_compiler(new_query_compiler, inplace)
[ "def", "reset_index", "(", "self", ",", "level", "=", "None", ",", "drop", "=", "False", ",", "inplace", "=", "False", ",", "col_level", "=", "0", ",", "col_fill", "=", "\"\"", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "# TODO Implement level\r", "if", "level", "is", "not", "None", ":", "new_query_compiler", "=", "self", ".", "_default_to_pandas", "(", "\"reset_index\"", ",", "level", "=", "level", ",", "drop", "=", "drop", ",", "inplace", "=", "inplace", ",", "col_level", "=", "col_level", ",", "col_fill", "=", "col_fill", ",", ")", "# Error checking for matching Pandas. Pandas does not allow you to\r", "# insert a dropped index into a DataFrame if these columns already\r", "# exist.\r", "elif", "(", "not", "drop", "and", "not", "isinstance", "(", "self", ".", "index", ",", "pandas", ".", "MultiIndex", ")", "and", "all", "(", "n", "in", "self", ".", "columns", "for", "n", "in", "[", "\"level_0\"", ",", "\"index\"", "]", ")", ")", ":", "raise", "ValueError", "(", "\"cannot insert level_0, already exists\"", ")", "else", ":", "new_query_compiler", "=", "self", ".", "_query_compiler", ".", "reset_index", "(", "drop", "=", "drop", ",", "level", "=", "level", ")", "return", "self", ".", "_create_or_update_from_compiler", "(", "new_query_compiler", ",", "inplace", ")" ]
Reset this index to default and create column from current index. Args: level: Only remove the given levels from the index. Removes all levels by default drop: Do not try to insert index into DataFrame columns. This resets the index to the default integer index. inplace: Modify the DataFrame in place (do not create a new object) col_level : If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill: If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns: A new DataFrame if inplace is False, None otherwise.
[ "Reset", "this", "index", "to", "default", "and", "create", "column", "from", "current", "index", ".", "Args", ":", "level", ":", "Only", "remove", "the", "given", "levels", "from", "the", "index", ".", "Removes", "all", "levels", "by", "default", "drop", ":", "Do", "not", "try", "to", "insert", "index", "into", "DataFrame", "columns", ".", "This", "resets", "the", "index", "to", "the", "default", "integer", "index", ".", "inplace", ":", "Modify", "the", "DataFrame", "in", "place", "(", "do", "not", "create", "a", "new", "object", ")", "col_level", ":", "If", "the", "columns", "have", "multiple", "levels", "determines", "which", "level", "the", "labels", "are", "inserted", "into", ".", "By", "default", "it", "is", "inserted", "into", "the", "first", "level", ".", "col_fill", ":", "If", "the", "columns", "have", "multiple", "levels", "determines", "how", "the", "other", "levels", "are", "named", ".", "If", "None", "then", "the", "index", "name", "is", "repeated", ".", "Returns", ":", "A", "new", "DataFrame", "if", "inplace", "is", "False", "None", "otherwise", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2114-L2159
train
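An illustrative sketch of the `drop` flag described in the record above (hypothetical labels; assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])

print(df.reset_index())           # old index is inserted as an "index" column
print(df.reset_index(drop=True))  # old index is discarded entirely
```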
modin-project/modin
modin/pandas/base.py
BasePandasDataset.rmod
def rmod(self, other, axis="columns", level=None, fill_value=None): """Mod this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the mod against this. axis: The axis to mod over. level: The Multilevel index level to apply mod over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the rmod applied. """ return self._binary_op( "rmod", other, axis=axis, level=level, fill_value=fill_value )
python
def rmod(self, other, axis="columns", level=None, fill_value=None): """Mod this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the mod against this. axis: The axis to mod over. level: The Multilevel index level to apply mod over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the rmod applied. """ return self._binary_op( "rmod", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "rmod", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"rmod\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Mod this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the mod against this. axis: The axis to mod over. level: The Multilevel index level to apply mod over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the rmod applied.
[ "Mod", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "div", "against", "this", ".", "axis", ":", "The", "axis", "to", "div", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "div", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "rdiv", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2166-L2180
train
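`rmod` is the reflected form of `mod`, i.e. the operands are swapped; a minimal sketch (illustrative values; assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [3, 4, 5]})

print(df.rmod(10))  # computes 10 % df -> 1, 2, 0
print(df.mod(10))   # computes df % 10 -> 3, 4, 5, for contrast
```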
modin-project/modin
modin/pandas/base.py
BasePandasDataset.round
def round(self, decimals=0, *args, **kwargs): """Round each element in the DataFrame. Args: decimals: The number of decimals to round to. Returns: A new DataFrame. """ return self.__constructor__( query_compiler=self._query_compiler.round(decimals=decimals, **kwargs) )
python
def round(self, decimals=0, *args, **kwargs): """Round each element in the DataFrame. Args: decimals: The number of decimals to round to. Returns: A new DataFrame. """ return self.__constructor__( query_compiler=self._query_compiler.round(decimals=decimals, **kwargs) )
[ "def", "round", "(", "self", ",", "decimals", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "round", "(", "decimals", "=", "decimals", ",", "*", "*", "kwargs", ")", ")" ]
Round each element in the DataFrame. Args: decimals: The number of decimals to round to. Returns: A new DataFrame.
[ "Round", "each", "element", "in", "the", "DataFrame", ".", "Args", ":", "decimals", ":", "The", "number", "of", "decimals", "to", "round", "to", ".", "Returns", ":", "A", "new", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2205-L2216
train
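A small sketch of `round` (illustrative values; assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1.234, 5.678]})

print(df.round())   # zero decimals by default -> 1.0, 6.0
print(df.round(2))  # two decimal places       -> 1.23, 5.68
```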
modin-project/modin
modin/pandas/base.py
BasePandasDataset.rpow
def rpow(self, other, axis="columns", level=None, fill_value=None): """Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied. """ return self._binary_op( "rpow", other, axis=axis, level=level, fill_value=fill_value )
python
def rpow(self, other, axis="columns", level=None, fill_value=None): """Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied. """ return self._binary_op( "rpow", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "rpow", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"rpow\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Pow this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the pow against this. axis: The axis to pow over. level: The Multilevel index level to apply pow over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Pow applied.
[ "Pow", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "pow", "against", "this", ".", "axis", ":", "The", "axis", "to", "pow", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "pow", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "Pow", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2218-L2232
train
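As with the other reflected operators in this file, `rpow` swaps the operands relative to `pow`; a minimal sketch (assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

print(df.rpow(2))  # computes 2 ** df -> 2, 4, 8
print(df.pow(2))   # computes df ** 2 -> 1, 4, 9
```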
modin-project/modin
modin/pandas/base.py
BasePandasDataset.rsub
def rsub(self, other, axis="columns", level=None, fill_value=None): """Subtract a DataFrame/Series/scalar from this DataFrame. Args: other: The object to use to apply the subtraction to this. axis: The axis to apply the subtraction over. level: Multilevel index level to subtract over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the subtraction applied. """ return self._binary_op( "rsub", other, axis=axis, level=level, fill_value=fill_value )
python
def rsub(self, other, axis="columns", level=None, fill_value=None): """Subtract a DataFrame/Series/scalar from this DataFrame. Args: other: The object to use to apply the subtraction to this. axis: The axis to apply the subtraction over. level: Multilevel index level to subtract over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the subtraction applied. """ return self._binary_op( "rsub", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "rsub", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"rsub\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Subtract a DataFrame/Series/scalar from this DataFrame. Args: other: The object to use to apply the subtraction to this. axis: The axis to apply the subtraction over. level: Multilevel index level to subtract over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the subtraction applied.
[ "Subtract", "a", "DataFrame", "/", "Series", "/", "scalar", "from", "this", "DataFrame", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "subtraction", "to", "this", ".", "axis", ":", "The", "axis", "to", "apply", "the", "subtraction", "over", ".", "level", ":", "Mutlilevel", "index", "level", "to", "subtract", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "subtraciont", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2234-L2248
train
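A minimal sketch contrasting `rsub` with `sub` (illustrative data; assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

print(df.rsub(10))  # 10 - df -> 9, 8, 7
print(df.sub(10))   # df - 10 -> -9, -8, -7
```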
modin-project/modin
modin/pandas/base.py
BasePandasDataset.rtruediv
def rtruediv(self, other, axis="columns", level=None, fill_value=None): """Div this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the div against this. axis: The axis to div over. level: The Multilevel index level to apply div over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the rdiv applied. """ return self._binary_op( "rtruediv", other, axis=axis, level=level, fill_value=fill_value )
python
def rtruediv(self, other, axis="columns", level=None, fill_value=None): """Div this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the div against this. axis: The axis to div over. level: The Multilevel index level to apply div over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the rdiv applied. """ return self._binary_op( "rtruediv", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "rtruediv", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"rtruediv\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Div this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the div against this. axis: The axis to div over. level: The Multilevel index level to apply div over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the rdiv applied.
[ "Div", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "div", "against", "this", ".", "axis", ":", "The", "axis", "to", "div", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "div", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "rdiv", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2250-L2264
train
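A minimal sketch of `rtruediv`, the reflected true division (illustrative data; assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 4.0]})

print(df.rtruediv(8))  # 8 / df -> 8.0, 4.0, 2.0
```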
modin-project/modin
modin/pandas/base.py
BasePandasDataset.sample
def sample( self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None, ): """Returns a random sample of items from an axis of object. Args: n: Number of items from axis to return. Cannot be used with frac. Default = 1 if frac = None. frac: Fraction of axis items to return. Cannot be used with n. replace: Sample with or without replacement. Default = False. weights: Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. inf and -inf values not allowed. random_state: Seed for the random number generator (if int), or numpy RandomState object. axis: Axis to sample. Accepts axis number or name. Returns: A new DataFrame """ axis = self._get_axis_number(axis) if axis is not None else 0 if axis: axis_labels = self.columns axis_length = len(axis_labels) else: # Getting rows requires indices instead of labels. RangeIndex provides this. axis_labels = pandas.RangeIndex(len(self.index)) axis_length = len(axis_labels) if weights is not None: # Index of the weights Series should correspond to the index of the # Dataframe in order to sample if isinstance(weights, BasePandasDataset): weights = weights.reindex(self.axes[axis]) # If weights arg is a string, the weights used for sampling will # be the values in the column corresponding to that string if isinstance(weights, string_types): if axis == 0: try: weights = self[weights] except KeyError: raise KeyError("String passed to weights not a valid column") else: raise ValueError( "Strings can only be passed to " "weights when sampling from rows on " "a DataFrame" ) weights = pandas.Series(weights, dtype="float64") if len(weights) != axis_length: raise ValueError( "Weights and axis to be sampled must be of same length" ) if (weights == np.inf).any() or (weights == -np.inf).any(): raise ValueError("weight vector may not include `inf` values") if (weights < 0).any(): raise ValueError("weight vector may not include negative values") # weights cannot be NaN when sampling, so we must set all nan # values to 0 weights = weights.fillna(0) # If passed in weights are not equal to 1, renormalize them # otherwise numpy sampling function will error weights_sum = weights.sum() if weights_sum != 1: if weights_sum != 0: weights = weights / weights_sum else: raise ValueError("Invalid weights: weights sum to zero") weights = weights.values if n is None and frac is None: # default to n = 1 if n and frac are both None (in accordance with # Pandas specification) n = 1 elif n is not None and frac is None and n % 1 != 0: # n must be an integer raise ValueError("Only integers accepted as `n` values") elif n is None and frac is not None: # compute the number of samples based on frac n = int(round(frac * axis_length)) elif n is not None and frac is not None: # Pandas specification does not allow both n and frac to be passed # in raise ValueError("Please enter a value for `frac` OR `n`, not both") if n < 0: raise ValueError( "A negative number of rows requested. Please provide positive value." ) if n == 0: # This returns an empty object, and since it is a weird edge case that # doesn't need to be distributed, we default to pandas for n=0. return self._default_to_pandas( "sample", n=n, frac=frac, replace=replace, weights=weights, random_state=random_state, axis=axis, ) if random_state is not None: # Get a random number generator depending on the type of # random_state that is passed in if isinstance(random_state, int): random_num_gen = np.random.RandomState(random_state) elif isinstance(random_state, np.random.RandomState): random_num_gen = random_state else: # random_state must be an int or a numpy RandomState object raise ValueError( "Please enter an `int` OR a " "np.random.RandomState for random_state" ) # choose random numbers and then get corresponding labels from # chosen axis sample_indices = random_num_gen.choice( np.arange(0, axis_length), size=n, replace=replace, p=weights ) samples = axis_labels[sample_indices] else: # randomly select labels from chosen axis samples = np.random.choice( a=axis_labels, size=n, replace=replace, p=weights ) if axis: query_compiler = self._query_compiler.getitem_column_array(samples) return self.__constructor__(query_compiler=query_compiler) else: query_compiler = self._query_compiler.getitem_row_array(samples) return self.__constructor__(query_compiler=query_compiler)
python
def sample( self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None, ): """Returns a random sample of items from an axis of object. Args: n: Number of items from axis to return. Cannot be used with frac. Default = 1 if frac = None. frac: Fraction of axis items to return. Cannot be used with n. replace: Sample with or without replacement. Default = False. weights: Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. inf and -inf values not allowed. random_state: Seed for the random number generator (if int), or numpy RandomState object. axis: Axis to sample. Accepts axis number or name. Returns: A new DataFrame """ axis = self._get_axis_number(axis) if axis is not None else 0 if axis: axis_labels = self.columns axis_length = len(axis_labels) else: # Getting rows requires indices instead of labels. RangeIndex provides this. axis_labels = pandas.RangeIndex(len(self.index)) axis_length = len(axis_labels) if weights is not None: # Index of the weights Series should correspond to the index of the # Dataframe in order to sample if isinstance(weights, BasePandasDataset): weights = weights.reindex(self.axes[axis]) # If weights arg is a string, the weights used for sampling will # be the values in the column corresponding to that string if isinstance(weights, string_types): if axis == 0: try: weights = self[weights] except KeyError: raise KeyError("String passed to weights not a valid column") else: raise ValueError( "Strings can only be passed to " "weights when sampling from rows on " "a DataFrame" ) weights = pandas.Series(weights, dtype="float64") if len(weights) != axis_length: raise ValueError( "Weights and axis to be sampled must be of same length" ) if (weights == np.inf).any() or (weights == -np.inf).any(): raise ValueError("weight vector may not include `inf` values") if (weights < 0).any(): raise ValueError("weight vector may not include negative values") # weights cannot be NaN when sampling, so we must set all nan # values to 0 weights = weights.fillna(0) # If passed in weights are not equal to 1, renormalize them # otherwise numpy sampling function will error weights_sum = weights.sum() if weights_sum != 1: if weights_sum != 0: weights = weights / weights_sum else: raise ValueError("Invalid weights: weights sum to zero") weights = weights.values if n is None and frac is None: # default to n = 1 if n and frac are both None (in accordance with # Pandas specification) n = 1 elif n is not None and frac is None and n % 1 != 0: # n must be an integer raise ValueError("Only integers accepted as `n` values") elif n is None and frac is not None: # compute the number of samples based on frac n = int(round(frac * axis_length)) elif n is not None and frac is not None: # Pandas specification does not allow both n and frac to be passed # in raise ValueError("Please enter a value for `frac` OR `n`, not both") if n < 0: raise ValueError( "A negative number of rows requested. Please provide positive value." ) if n == 0: # This returns an empty object, and since it is a weird edge case that # doesn't need to be distributed, we default to pandas for n=0. return self._default_to_pandas( "sample", n=n, frac=frac, replace=replace, weights=weights, random_state=random_state, axis=axis, ) if random_state is not None: # Get a random number generator depending on the type of # random_state that is passed in if isinstance(random_state, int): random_num_gen = np.random.RandomState(random_state) elif isinstance(random_state, np.random.RandomState): random_num_gen = random_state else: # random_state must be an int or a numpy RandomState object raise ValueError( "Please enter an `int` OR a " "np.random.RandomState for random_state" ) # choose random numbers and then get corresponding labels from # chosen axis sample_indices = random_num_gen.choice( np.arange(0, axis_length), size=n, replace=replace, p=weights ) samples = axis_labels[sample_indices] else: # randomly select labels from chosen axis samples = np.random.choice( a=axis_labels, size=n, replace=replace, p=weights ) if axis: query_compiler = self._query_compiler.getitem_column_array(samples) return self.__constructor__(query_compiler=query_compiler) else: query_compiler = self._query_compiler.getitem_row_array(samples) return self.__constructor__(query_compiler=query_compiler)
[ "def", "sample", "(", "self", ",", "n", "=", "None", ",", "frac", "=", "None", ",", "replace", "=", "False", ",", "weights", "=", "None", ",", "random_state", "=", "None", ",", "axis", "=", "None", ",", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "if", "axis", ":", "axis_labels", "=", "self", ".", "columns", "axis_length", "=", "len", "(", "axis_labels", ")", "else", ":", "# Getting rows requires indices instead of labels. RangeIndex provides this.\r", "axis_labels", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "self", ".", "index", ")", ")", "axis_length", "=", "len", "(", "axis_labels", ")", "if", "weights", "is", "not", "None", ":", "# Index of the weights Series should correspond to the index of the\r", "# Dataframe in order to sample\r", "if", "isinstance", "(", "weights", ",", "BasePandasDataset", ")", ":", "weights", "=", "weights", ".", "reindex", "(", "self", ".", "axes", "[", "axis", "]", ")", "# If weights arg is a string, the weights used for sampling will\r", "# the be values in the column corresponding to that string\r", "if", "isinstance", "(", "weights", ",", "string_types", ")", ":", "if", "axis", "==", "0", ":", "try", ":", "weights", "=", "self", "[", "weights", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"String passed to weights not a valid column\"", ")", "else", ":", "raise", "ValueError", "(", "\"Strings can only be passed to \"", "\"weights when sampling from rows on \"", "\"a DataFrame\"", ")", "weights", "=", "pandas", ".", "Series", "(", "weights", ",", "dtype", "=", "\"float64\"", ")", "if", "len", "(", "weights", ")", "!=", "axis_length", ":", "raise", "ValueError", "(", "\"Weights and axis to be sampled must be of same length\"", ")", "if", "(", "weights", "==", "np", ".", "inf", ")", ".", "any", "(", ")", "or", "(", "weights", "==", "-", "np", ".", "inf", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"weight vector may not include `inf` values\"", ")", "if", "(", "weights", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"weight vector many not include negative values\"", ")", "# weights cannot be NaN when sampling, so we must set all nan\r", "# values to 0\r", "weights", "=", "weights", ".", "fillna", "(", "0", ")", "# If passed in weights are not equal to 1, renormalize them\r", "# otherwise numpy sampling function will error\r", "weights_sum", "=", "weights", ".", "sum", "(", ")", "if", "weights_sum", "!=", "1", ":", "if", "weights_sum", "!=", "0", ":", "weights", "=", "weights", "/", "weights_sum", "else", ":", "raise", "ValueError", "(", "\"Invalid weights: weights sum to zero\"", ")", "weights", "=", "weights", ".", "values", "if", "n", "is", "None", "and", "frac", "is", "None", ":", "# default to n = 1 if n and frac are both None (in accordance with\r", "# Pandas specification)\r", "n", "=", "1", "elif", "n", "is", "not", "None", "and", "frac", "is", "None", "and", "n", "%", "1", "!=", "0", ":", "# n must be an integer\r", "raise", "ValueError", "(", "\"Only integers accepted as `n` values\"", ")", "elif", "n", "is", "None", "and", "frac", "is", "not", "None", ":", "# compute the number of samples based on frac\r", "n", "=", "int", "(", "round", "(", "frac", "*", "axis_length", ")", ")", "elif", "n", "is", "not", "None", "and", "frac", "is", "not", "None", ":", "# Pandas specification does not allow both n and frac to be passed\r", "# in\r", "raise", "ValueError", "(", "\"Please enter a value for `frac` OR `n`, 
not both\"", ")", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "\"A negative number of rows requested. Please provide positive value.\"", ")", "if", "n", "==", "0", ":", "# This returns an empty object, and since it is a weird edge case that\r", "# doesn't need to be distributed, we default to pandas for n=0.\r", "return", "self", ".", "_default_to_pandas", "(", "\"sample\"", ",", "n", "=", "n", ",", "frac", "=", "frac", ",", "replace", "=", "replace", ",", "weights", "=", "weights", ",", "random_state", "=", "random_state", ",", "axis", "=", "axis", ",", ")", "if", "random_state", "is", "not", "None", ":", "# Get a random number generator depending on the type of\r", "# random_state that is passed in\r", "if", "isinstance", "(", "random_state", ",", "int", ")", ":", "random_num_gen", "=", "np", ".", "random", ".", "RandomState", "(", "random_state", ")", "elif", "isinstance", "(", "random_state", ",", "np", ".", "random", ".", "randomState", ")", ":", "random_num_gen", "=", "random_state", "else", ":", "# random_state must be an int or a numpy RandomState object\r", "raise", "ValueError", "(", "\"Please enter an `int` OR a \"", "\"np.random.RandomState for random_state\"", ")", "# choose random numbers and then get corresponding labels from\r", "# chosen axis\r", "sample_indices", "=", "random_num_gen", ".", "choice", "(", "np", ".", "arange", "(", "0", ",", "axis_length", ")", ",", "size", "=", "n", ",", "replace", "=", "replace", ",", "p", "=", "weights", ")", "samples", "=", "axis_labels", "[", "sample_indices", "]", "else", ":", "# randomly select labels from chosen axis\r", "samples", "=", "np", ".", "random", ".", "choice", "(", "a", "=", "axis_labels", ",", "size", "=", "n", ",", "replace", "=", "replace", ",", "p", "=", "weights", ")", "if", "axis", ":", "query_compiler", "=", "self", ".", "_query_compiler", ".", "getitem_column_array", "(", "samples", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "query_compiler", ")", "else", ":", "query_compiler", "=", "self", ".", "_query_compiler", ".", "getitem_row_array", "(", "samples", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "query_compiler", ")" ]
Returns a random sample of items from an axis of object. Args: n: Number of items from axis to return. Cannot be used with frac. Default = 1 if frac = None. frac: Fraction of axis items to return. Cannot be used with n. replace: Sample with or without replacement. Default = False. weights: Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. inf and -inf values not allowed. random_state: Seed for the random number generator (if int), or numpy RandomState object. axis: Axis to sample. Accepts axis number or name. Returns: A new DataFrame
[ "Returns", "a", "random", "sample", "of", "items", "from", "an", "axis", "of", "object", ".", "Args", ":", "n", ":", "Number", "of", "items", "from", "axis", "to", "return", ".", "Cannot", "be", "used", "with", "frac", ".", "Default", "=", "1", "if", "frac", "=", "None", ".", "frac", ":", "Fraction", "of", "axis", "items", "to", "return", ".", "Cannot", "be", "used", "with", "n", ".", "replace", ":", "Sample", "with", "or", "without", "replacement", ".", "Default", "=", "False", ".", "weights", ":", "Default", "None", "results", "in", "equal", "probability", "weighting", ".", "If", "passed", "a", "Series", "will", "align", "with", "target", "object", "on", "index", ".", "Index", "values", "in", "weights", "not", "found", "in", "sampled", "object", "will", "be", "ignored", "and", "index", "values", "in", "sampled", "object", "not", "in", "weights", "will", "be", "assigned", "weights", "of", "zero", ".", "If", "called", "on", "a", "DataFrame", "will", "accept", "the", "name", "of", "a", "column", "when", "axis", "=", "0", ".", "Unless", "weights", "are", "a", "Series", "weights", "must", "be", "same", "length", "as", "axis", "being", "sampled", ".", "If", "weights", "do", "not", "sum", "to", "1", "they", "will", "be", "normalized", "to", "sum", "to", "1", ".", "Missing", "values", "in", "the", "weights", "column", "will", "be", "treated", "as", "zero", ".", "inf", "and", "-", "inf", "values", "not", "allowed", ".", "random_state", ":", "Seed", "for", "the", "random", "number", "generator", "(", "if", "int", ")", "or", "numpy", "RandomState", "object", ".", "axis", ":", "Axis", "to", "sample", ".", "Accepts", "axis", "number", "or", "name", ".", "Returns", ":", "A", "new", "Dataframe" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2268-L2410
train
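An illustrative sketch of `sample` covering the seeded and weighted paths described in the record above (hypothetical data; assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": range(10), "w": [1] * 5 + [3] * 5})

print(df.sample(n=3, random_state=42))   # int seed -> reproducible row sample
print(df.sample(frac=0.5, weights="w"))  # column "w" biases the draw toward later rows
```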
modin-project/modin
modin/pandas/base.py
BasePandasDataset.set_axis
def set_axis(self, labels, axis=0, inplace=None): """Assign desired index to given axis. Args: labels (pandas.Index or list-like): The Index to assign. axis (string or int): The axis to reassign. inplace (bool): Whether to make these modifications inplace. Returns: If inplace is False, returns a new DataFrame, otherwise None. """ if is_scalar(labels): warnings.warn( 'set_axis now takes "labels" as first argument, and ' '"axis" as named parameter. The old form, with "axis" as ' 'first parameter and "labels" as second, is still supported ' "but will be deprecated in a future version of pandas.", FutureWarning, stacklevel=2, ) labels, axis = axis, labels if inplace is None: warnings.warn( "set_axis currently defaults to operating inplace.\nThis " "will change in a future version of pandas, use " "inplace=True to avoid this warning.", FutureWarning, stacklevel=2, ) inplace = True if inplace: setattr(self, pandas.DataFrame()._get_axis_name(axis), labels) else: obj = self.copy() obj.set_axis(labels, axis=axis, inplace=True) return obj
python
def set_axis(self, labels, axis=0, inplace=None): """Assign desired index to given axis. Args: labels (pandas.Index or list-like): The Index to assign. axis (string or int): The axis to reassign. inplace (bool): Whether to make these modifications inplace. Returns: If inplace is False, returns a new DataFrame, otherwise None. """ if is_scalar(labels): warnings.warn( 'set_axis now takes "labels" as first argument, and ' '"axis" as named parameter. The old form, with "axis" as ' 'first parameter and "labels" as second, is still supported ' "but will be deprecated in a future version of pandas.", FutureWarning, stacklevel=2, ) labels, axis = axis, labels if inplace is None: warnings.warn( "set_axis currently defaults to operating inplace.\nThis " "will change in a future version of pandas, use " "inplace=True to avoid this warning.", FutureWarning, stacklevel=2, ) inplace = True if inplace: setattr(self, pandas.DataFrame()._get_axis_name(axis), labels) else: obj = self.copy() obj.set_axis(labels, axis=axis, inplace=True) return obj
[ "def", "set_axis", "(", "self", ",", "labels", ",", "axis", "=", "0", ",", "inplace", "=", "None", ")", ":", "if", "is_scalar", "(", "labels", ")", ":", "warnings", ".", "warn", "(", "'set_axis now takes \"labels\" as first argument, and '", "'\"axis\" as named parameter. The old form, with \"axis\" as '", "'first parameter and \"labels\" as second, is still supported '", "\"but will be deprecated in a future version of pandas.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ",", ")", "labels", ",", "axis", "=", "axis", ",", "labels", "if", "inplace", "is", "None", ":", "warnings", ".", "warn", "(", "\"set_axis currently defaults to operating inplace.\\nThis \"", "\"will change in a future version of pandas, use \"", "\"inplace=True to avoid this warning.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ",", ")", "inplace", "=", "True", "if", "inplace", ":", "setattr", "(", "self", ",", "pandas", ".", "DataFrame", "(", ")", ".", "_get_axis_name", "(", "axis", ")", ",", "labels", ")", "else", ":", "obj", "=", "self", ".", "copy", "(", ")", "obj", ".", "set_axis", "(", "labels", ",", "axis", "=", "axis", ",", "inplace", "=", "True", ")", "return", "obj" ]
Assign desired index to given axis. Args: labels (pandas.Index or list-like): The Index to assign. axis (string or int): The axis to reassign. inplace (bool): Whether to make these modifications inplace. Returns: If inplace is False, returns a new DataFrame, otherwise None.
[ "Assign", "desired", "index", "to", "given", "axis", ".", "Args", ":", "labels", "(", "pandas", ".", "Index", "or", "list", "-", "like", ")", ":", "The", "Index", "to", "assign", ".", "axis", "(", "string", "or", "int", ")", ":", "The", "axis", "to", "reassign", ".", "inplace", "(", "bool", ")", ":", "Whether", "to", "make", "these", "modifications", "inplace", ".", "Returns", ":", "If", "inplace", "is", "False", "returns", "a", "new", "DataFrame", "otherwise", "None", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2428-L2463
train
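A minimal sketch of `set_axis`; passing `inplace=False` explicitly sidesteps the FutureWarning about the inplace default mentioned in the record above (assumes modin):

```python
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2]})

out = df.set_axis(["r1", "r2"], axis=0, inplace=False)  # returns a relabeled copy
print(out.index)
```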
modin-project/modin
modin/pandas/base.py
BasePandasDataset.sort_index
def sort_index( self, axis=0, level=None, ascending=True, inplace=False, kind="quicksort", na_position="last", sort_remaining=True, by=None, ): """Sort a DataFrame by one of the indices (columns or index). Args: axis: The axis to sort over. level: The MultiIndex level to sort over. ascending: Ascending or descending inplace: Whether or not to update this DataFrame inplace. kind: How to perform the sort. na_position: Where to position NA on the sort. sort_remaining: On Multilevel Index sort based on all levels. by: (Deprecated) argument to pass to sort_values. Returns: A sorted DataFrame """ axis = self._get_axis_number(axis) if level is not None: new_query_compiler = self._default_to_pandas( "sort_index", axis=axis, level=level, ascending=ascending, inplace=False, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ) return self._create_or_update_from_compiler(new_query_compiler, inplace) if by is not None: warnings.warn( "by argument to sort_index is deprecated, " "please use .sort_values(by=...)", FutureWarning, stacklevel=2, ) if level is not None: raise ValueError("unable to simultaneously sort by and level") return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace) new_query_compiler = self._query_compiler.sort_index( axis=axis, ascending=ascending, kind=kind, na_position=na_position ) if inplace: self._update_inplace(new_query_compiler=new_query_compiler) else: return self.__constructor__(query_compiler=new_query_compiler)
python
def sort_index( self, axis=0, level=None, ascending=True, inplace=False, kind="quicksort", na_position="last", sort_remaining=True, by=None, ): """Sort a DataFrame by one of the indices (columns or index). Args: axis: The axis to sort over. level: The MultiIndex level to sort over. ascending: Ascending or descending inplace: Whether or not to update this DataFrame inplace. kind: How to perform the sort. na_position: Where to position NA on the sort. sort_remaining: On Multilevel Index sort based on all levels. by: (Deprecated) argument to pass to sort_values. Returns: A sorted DataFrame """ axis = self._get_axis_number(axis) if level is not None: new_query_compiler = self._default_to_pandas( "sort_index", axis=axis, level=level, ascending=ascending, inplace=False, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ) return self._create_or_update_from_compiler(new_query_compiler, inplace) if by is not None: warnings.warn( "by argument to sort_index is deprecated, " "please use .sort_values(by=...)", FutureWarning, stacklevel=2, ) if level is not None: raise ValueError("unable to simultaneously sort by and level") return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace) new_query_compiler = self._query_compiler.sort_index( axis=axis, ascending=ascending, kind=kind, na_position=na_position ) if inplace: self._update_inplace(new_query_compiler=new_query_compiler) else: return self.__constructor__(query_compiler=new_query_compiler)
[ "def", "sort_index", "(", "self", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "ascending", "=", "True", ",", "inplace", "=", "False", ",", "kind", "=", "\"quicksort\"", ",", "na_position", "=", "\"last\"", ",", "sort_remaining", "=", "True", ",", "by", "=", "None", ",", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "level", "is", "not", "None", ":", "new_query_compiler", "=", "self", ".", "_default_to_pandas", "(", "\"sort_index\"", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "ascending", "=", "ascending", ",", "inplace", "=", "False", ",", "kind", "=", "kind", ",", "na_position", "=", "na_position", ",", "sort_remaining", "=", "sort_remaining", ",", ")", "return", "self", ".", "_create_or_update_from_compiler", "(", "new_query_compiler", ",", "inplace", ")", "if", "by", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"by argument to sort_index is deprecated, \"", "\"please use .sort_values(by=...)\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ",", ")", "if", "level", "is", "not", "None", ":", "raise", "ValueError", "(", "\"unable to simultaneously sort by and level\"", ")", "return", "self", ".", "sort_values", "(", "by", ",", "axis", "=", "axis", ",", "ascending", "=", "ascending", ",", "inplace", "=", "inplace", ")", "new_query_compiler", "=", "self", ".", "_query_compiler", ".", "sort_index", "(", "axis", "=", "axis", ",", "ascending", "=", "ascending", ",", "kind", "=", "kind", ",", "na_position", "=", "na_position", ")", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "new_query_compiler", "=", "new_query_compiler", ")", "else", ":", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "new_query_compiler", ")" ]
Sort a DataFrame by one of the indices (columns or index). Args: axis: The axis to sort over. level: The MultiIndex level to sort over. ascending: Ascending or descending inplace: Whether or not to update this DataFrame inplace. kind: How to perform the sort. na_position: Where to position NA on the sort. sort_remaining: On Multilevel Index sort based on all levels. by: (Deprecated) argument to pass to sort_values. Returns: A sorted DataFrame
[ "Sort", "a", "DataFrame", "by", "one", "of", "the", "indices", "(", "columns", "or", "index", ")", ".", "Args", ":", "axis", ":", "The", "axis", "to", "sort", "over", ".", "level", ":", "The", "MultiIndex", "level", "to", "sort", "over", ".", "ascending", ":", "Ascending", "or", "descending", "inplace", ":", "Whether", "or", "not", "to", "update", "this", "DataFrame", "inplace", ".", "kind", ":", "How", "to", "perform", "the", "sort", ".", "na_position", ":", "Where", "to", "position", "NA", "on", "the", "sort", ".", "sort_remaining", ":", "On", "Multilevel", "Index", "sort", "based", "on", "all", "levels", ".", "by", ":", "(", "Deprecated", ")", "argument", "to", "pass", "to", "sort_values", ".", "Returns", ":", "A", "sorted", "DataFrame" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2504-L2559
train
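For reference, the index-sort behavior documented above can be seen with plain pandas (modin mirrors it, and defers to pandas outright for level-based sorts); the frame contents here are illustrative:

import pandas as pd

df = pd.DataFrame({"v": [1, 2, 3]}, index=["c", "a", "b"])
# Rows are reordered by index label; each value follows its label.
print(df.sort_index())
#    v
# a  2
# b  3
# c  1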
modin-project/modin
modin/pandas/base.py
BasePandasDataset.sort_values
def sort_values( self, by, axis=0, ascending=True, inplace=False, kind="quicksort", na_position="last", ): """Sorts by a column/row or list of columns/rows. Args: by: A list of labels for the axis to sort over. axis: The axis to sort. ascending: Sort in ascending or descending order. inplace: If true, do the operation inplace. kind: How to sort. na_position: Where to put np.nan values. Returns: A sorted DataFrame. """ axis = self._get_axis_number(axis) if not is_list_like(by): by = [by] # Currently, sort_values will just reindex based on the sorted values. # TODO create a more efficient way to sort if axis == 0: broadcast_value_dict = {col: self[col] for col in by} broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index) new_index = broadcast_values.sort_values( by=by, axis=axis, ascending=ascending, kind=kind, na_position=na_position, ).index return self.reindex(index=new_index, copy=not inplace) else: broadcast_value_list = [ self[row :: len(self.index)]._to_pandas() for row in by ] index_builder = list(zip(broadcast_value_list, by)) broadcast_values = pandas.concat( [row for row, idx in index_builder], copy=False ) broadcast_values.columns = self.columns new_columns = broadcast_values.sort_values( by=by, axis=axis, ascending=ascending, kind=kind, na_position=na_position, ).columns return self.reindex(columns=new_columns, copy=not inplace)
python
def sort_values( self, by, axis=0, ascending=True, inplace=False, kind="quicksort", na_position="last", ): """Sorts by a column/row or list of columns/rows. Args: by: A list of labels for the axis to sort over. axis: The axis to sort. ascending: Sort in ascending or descending order. inplace: If true, do the operation inplace. kind: How to sort. na_position: Where to put np.nan values. Returns: A sorted DataFrame. """ axis = self._get_axis_number(axis) if not is_list_like(by): by = [by] # Currently, sort_values will just reindex based on the sorted values. # TODO create a more efficient way to sort if axis == 0: broadcast_value_dict = {col: self[col] for col in by} broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index) new_index = broadcast_values.sort_values( by=by, axis=axis, ascending=ascending, kind=kind, na_position=na_position, ).index return self.reindex(index=new_index, copy=not inplace) else: broadcast_value_list = [ self[row :: len(self.index)]._to_pandas() for row in by ] index_builder = list(zip(broadcast_value_list, by)) broadcast_values = pandas.concat( [row for row, idx in index_builder], copy=False ) broadcast_values.columns = self.columns new_columns = broadcast_values.sort_values( by=by, axis=axis, ascending=ascending, kind=kind, na_position=na_position, ).columns return self.reindex(columns=new_columns, copy=not inplace)
[ "def", "sort_values", "(", "self", ",", "by", ",", "axis", "=", "0", ",", "ascending", "=", "True", ",", "inplace", "=", "False", ",", "kind", "=", "\"quicksort\"", ",", "na_position", "=", "\"last\"", ",", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "not", "is_list_like", "(", "by", ")", ":", "by", "=", "[", "by", "]", "# Currently, sort_values will just reindex based on the sorted values.\r", "# TODO create a more efficient way to sort\r", "if", "axis", "==", "0", ":", "broadcast_value_dict", "=", "{", "col", ":", "self", "[", "col", "]", "for", "col", "in", "by", "}", "broadcast_values", "=", "pandas", ".", "DataFrame", "(", "broadcast_value_dict", ",", "index", "=", "self", ".", "index", ")", "new_index", "=", "broadcast_values", ".", "sort_values", "(", "by", "=", "by", ",", "axis", "=", "axis", ",", "ascending", "=", "ascending", ",", "kind", "=", "kind", ",", "na_position", "=", "na_position", ",", ")", ".", "index", "return", "self", ".", "reindex", "(", "index", "=", "new_index", ",", "copy", "=", "not", "inplace", ")", "else", ":", "broadcast_value_list", "=", "[", "self", "[", "row", ":", ":", "len", "(", "self", ".", "index", ")", "]", ".", "_to_pandas", "(", ")", "for", "row", "in", "by", "]", "index_builder", "=", "list", "(", "zip", "(", "broadcast_value_list", ",", "by", ")", ")", "broadcast_values", "=", "pandas", ".", "concat", "(", "[", "row", "for", "row", ",", "idx", "in", "index_builder", "]", ",", "copy", "=", "False", ")", "broadcast_values", ".", "columns", "=", "self", ".", "columns", "new_columns", "=", "broadcast_values", ".", "sort_values", "(", "by", "=", "by", ",", "axis", "=", "axis", ",", "ascending", "=", "ascending", ",", "kind", "=", "kind", ",", "na_position", "=", "na_position", ",", ")", ".", "columns", "return", "self", ".", "reindex", "(", "columns", "=", "new_columns", ",", "copy", "=", "not", "inplace", ")" ]
Sorts by a column/row or list of columns/rows. Args: by: A list of labels for the axis to sort over. axis: The axis to sort. ascending: Sort in ascending or descending order. inplace: If true, do the operation inplace. kind: How to sort. na_position: Where to put np.nan values. Returns: A sorted DataFrame.
[ "Sorts", "by", "a", "column", "/", "row", "or", "list", "of", "columns", "/", "rows", ".", "Args", ":", "by", ":", "A", "list", "of", "labels", "for", "the", "axis", "to", "sort", "over", ".", "axis", ":", "The", "axis", "to", "sort", ".", "ascending", ":", "Sort", "in", "ascending", "or", "descending", "order", ".", "inplace", ":", "If", "true", "do", "the", "operation", "inplace", ".", "kind", ":", "How", "to", "sort", ".", "na_position", ":", "Where", "to", "put", "np", ".", "nan", "values", ".", "Returns", ":", "A", "sorted", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2561-L2615
train
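The axis=0 branch above sorts by projecting out just the `by` columns, sorting that projection, and reindexing the full frame; a minimal sketch of the same trick in plain pandas (not modin's internal API):

import pandas as pd

df = pd.DataFrame({"a": [3, 1, 2], "b": ["x", "y", "z"]})
# Sort a one-column projection, then reorder the whole frame by its index.
new_index = df[["a"]].sort_values(by="a").index
print(df.reindex(index=new_index))
#    a  b
# 1  1  y
# 2  2  z
# 0  3  x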
modin-project/modin
modin/pandas/base.py
BasePandasDataset.sub
def sub(self, other, axis="columns", level=None, fill_value=None): """Subtract a DataFrame/Series/scalar from this DataFrame. Args: other: The object to use to apply the subtraction to this. axis: The axis to apply the subtraction over. level: Multilevel index level to subtract over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the subtraction applied. """ return self._binary_op( "sub", other, axis=axis, level=level, fill_value=fill_value )
python
def sub(self, other, axis="columns", level=None, fill_value=None): """Subtract a DataFrame/Series/scalar from this DataFrame. Args: other: The object to use to apply the subtraction to this. axis: The axis to apply the subtraction over. level: Multilevel index level to subtract over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the subtraction applied. """ return self._binary_op( "sub", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "sub", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"sub\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Subtract a DataFrame/Series/scalar from this DataFrame. Args: other: The object to use to apply the subtraction to this. axis: The axis to apply the subtraction over. level: Multilevel index level to subtract over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the subtraction applied.
[ "Subtract", "a", "DataFrame", "/", "Series", "/", "scalar", "from", "this", "DataFrame", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "subtraction", "to", "this", ".", "axis", ":", "The", "axis", "to", "apply", "the", "subtraction", "over", ".", "level", ":", "Mutlilevel", "index", "level", "to", "subtract", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "subtraciont", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2644-L2658
train
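A short demonstration of the fill_value semantics mentioned in the docstring above, on plain pandas Series (the values are made up):

import pandas as pd

a = pd.Series([1.0, 2.0], index=["x", "y"])
b = pd.Series([10.0], index=["x"])
# fill_value stands in for entries missing on either side before subtracting.
print(a.sub(b, fill_value=0))
# x   -9.0
# y    2.0
# dtype: float64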
modin-project/modin
modin/pandas/base.py
BasePandasDataset.to_numpy
def to_numpy(self, dtype=None, copy=False): """Convert the DataFrame to a NumPy array. Args: dtype: The dtype to pass to numpy.asarray() copy: Whether to ensure that the returned value is not a view on another array. Returns: A numpy array. """ return self._default_to_pandas("to_numpy", dtype=dtype, copy=copy)
python
def to_numpy(self, dtype=None, copy=False): """Convert the DataFrame to a NumPy array. Args: dtype: The dtype to pass to numpy.asarray() copy: Whether to ensure that the returned value is not a view on another array. Returns: A numpy array. """ return self._default_to_pandas("to_numpy", dtype=dtype, copy=copy)
[ "def", "to_numpy", "(", "self", ",", "dtype", "=", "None", ",", "copy", "=", "False", ")", ":", "return", "self", ".", "_default_to_pandas", "(", "\"to_numpy\"", ",", "dtype", "=", "dtype", ",", "copy", "=", "copy", ")" ]
Convert the DataFrame to a NumPy array. Args: dtype: The dtype to pass to numpy.asarray() copy: Whether to ensure that the returned value is not a view on another array. Returns: A numpy array.
[ "Convert", "the", "DataFrame", "to", "a", "NumPy", "array", ".", "Args", ":", "dtype", ":", "The", "dtype", "to", "pass", "to", "numpy", ".", "asarray", "()", "copy", ":", "Whether", "to", "ensure", "that", "the", "returned", "value", "is", "a", "not", "a", "view", "on", "another", "array", ".", "Returns", ":", "A", "numpy", "array", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2901-L2912
train
modin-project/modin
modin/pandas/base.py
BasePandasDataset.truediv
def truediv(self, other, axis="columns", level=None, fill_value=None): """Divides this DataFrame by another DataFrame/Series/scalar. Args: other: The object to use to apply the division to this. axis: The axis to divide over. level: The Multilevel index level to apply the division over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the division applied. """ return self._binary_op( "truediv", other, axis=axis, level=level, fill_value=fill_value )
python
def truediv(self, other, axis="columns", level=None, fill_value=None): """Divides this DataFrame by another DataFrame/Series/scalar. Args: other: The object to use to apply the division to this. axis: The axis to divide over. level: The Multilevel index level to apply the division over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the division applied. """ return self._binary_op( "truediv", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "truediv", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"truediv\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Divides this DataFrame by another DataFrame/Series/scalar. Args: other: The object to use to apply the division to this. axis: The axis to divide over. level: The Multilevel index level to apply the division over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the division applied.
[ "Divides", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "divide", "against", "this", ".", "axis", ":", "The", "axis", "to", "divide", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "divide", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "Divide", "applied", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L3012-L3026
train
modin-project/modin
modin/pandas/base.py
BasePandasDataset.var
def var( self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs ): """Computes variance across the DataFrame. Args: axis (int): The axis to take the variance on. skipna (bool): True to skip NA values, false otherwise. ddof (int): degrees of freedom Returns: The variance of the DataFrame. """ axis = self._get_axis_number(axis) if axis is not None else 0 if numeric_only is not None and not numeric_only: self._validate_dtypes(numeric_only=True) return self._reduce_dimension( self._query_compiler.var( axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only, **kwargs ) )
python
def var( self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs ): """Computes variance across the DataFrame. Args: axis (int): The axis to take the variance on. skipna (bool): True to skip NA values, false otherwise. ddof (int): degrees of freedom Returns: The variance of the DataFrame. """ axis = self._get_axis_number(axis) if axis is not None else 0 if numeric_only is not None and not numeric_only: self._validate_dtypes(numeric_only=True) return self._reduce_dimension( self._query_compiler.var( axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only, **kwargs ) )
[ "def", "var", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "None", ",", "level", "=", "None", ",", "ddof", "=", "1", ",", "numeric_only", "=", "None", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "if", "numeric_only", "is", "not", "None", "and", "not", "numeric_only", ":", "self", ".", "_validate_dtypes", "(", "numeric_only", "=", "True", ")", "return", "self", ".", "_reduce_dimension", "(", "self", ".", "_query_compiler", ".", "var", "(", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "level", "=", "level", ",", "ddof", "=", "ddof", ",", "numeric_only", "=", "numeric_only", ",", "*", "*", "kwargs", ")", ")" ]
Computes variance across the DataFrame. Args: axis (int): The axis to take the variance on. skipna (bool): True to skip NA values, false otherwise. ddof (int): degrees of freedom Returns: The variance of the DataFrame.
[ "Computes", "variance", "across", "the", "DataFrame", ".", "Args", ":", "axis", "(", "int", ")", ":", "The", "axis", "to", "take", "the", "variance", "on", ".", "skipna", "(", "bool", ")", ":", "True", "to", "skip", "NA", "values", "false", "otherwise", ".", "ddof", "(", "int", ")", ":", "degrees", "of", "freedom", "Returns", ":", "The", "variance", "of", "the", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L3068-L3093
train
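A worked check of the ddof (degrees of freedom) argument described above, using plain pandas:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])
# Squared deviations from the mean (2.0) sum to 2.0 here.
print(s.var(ddof=1))  # 1.0: sample variance, divides by n - 1 = 2
print(s.var(ddof=0))  # 0.666...: population variance, divides by n = 3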
modin-project/modin
modin/pandas/base.py
BasePandasDataset.size
def size(self): """Get the number of elements in the DataFrame. Returns: The number of elements in the DataFrame. """ return len(self._query_compiler.index) * len(self._query_compiler.columns)
python
def size(self): """Get the number of elements in the DataFrame. Returns: The number of elements in the DataFrame. """ return len(self._query_compiler.index) * len(self._query_compiler.columns)
[ "def", "size", "(", "self", ")", ":", "return", "len", "(", "self", ".", "_query_compiler", ".", "index", ")", "*", "len", "(", "self", ".", "_query_compiler", ".", "columns", ")" ]
Get the number of elements in the DataFrame. Returns: The number of elements in the DataFrame.
[ "Get", "the", "number", "of", "elements", "in", "the", "DataFrame", ".", "Returns", ":", "The", "number", "of", "elements", "in", "the", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L3238-L3244
train
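The size property above is just the product of the two axis lengths; the same identity holds in plain pandas:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(df.size)                          # 6
print(len(df.index) * len(df.columns))  # 6 == 3 rows * 2 columns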
modin-project/modin
modin/engines/python/pandas_on_python/frame/partition.py
PandasOnPythonFramePartition.get
def get(self): """Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`. """ if self.call_queue: return self.apply(lambda df: df).data else: return self.data.copy()
python
def get(self): """Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`. """ if self.call_queue: return self.apply(lambda df: df).data else: return self.data.copy()
[ "def", "get", "(", "self", ")", ":", "if", "self", ".", "call_queue", ":", "return", "self", ".", "apply", "(", "lambda", "df", ":", "df", ")", ".", "data", "else", ":", "return", "self", ".", "data", ".", "copy", "(", ")" ]
Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`.
[ "Flushes", "the", "call_queue", "and", "returns", "the", "data", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/python/pandas_on_python/frame/partition.py#L23-L34
train
modin-project/modin
modin/engines/python/pandas_on_python/frame/partition.py
PandasOnPythonFramePartition.apply
def apply(self, func, **kwargs): """Apply some callable function to the data in this partition. Note: It is up to the implementation how kwargs are handled. They are an important part of many implementations. As of right now, they are not serialized. Args: func: The lambda to apply (may already be correctly formatted) Returns: A new `BaseFramePartition` containing the object that has had `func` applied to it. """ self.call_queue.append((func, kwargs)) def call_queue_closure(data, call_queues): result = data.copy() for func, kwargs in call_queues: try: result = func(result, **kwargs) except Exception as e: self.call_queue = [] raise e return result new_data = call_queue_closure(self.data, self.call_queue) self.call_queue = [] return PandasOnPythonFramePartition(new_data)
python
def apply(self, func, **kwargs): """Apply some callable function to the data in this partition. Note: It is up to the implementation how kwargs are handled. They are an important part of many implementations. As of right now, they are not serialized. Args: func: The lambda to apply (may already be correctly formatted) Returns: A new `BaseFramePartition` containing the object that has had `func` applied to it. """ self.call_queue.append((func, kwargs)) def call_queue_closure(data, call_queues): result = data.copy() for func, kwargs in call_queues: try: result = func(result, **kwargs) except Exception as e: self.call_queue = [] raise e return result new_data = call_queue_closure(self.data, self.call_queue) self.call_queue = [] return PandasOnPythonFramePartition(new_data)
[ "def", "apply", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "self", ".", "call_queue", ".", "append", "(", "(", "func", ",", "kwargs", ")", ")", "def", "call_queue_closure", "(", "data", ",", "call_queues", ")", ":", "result", "=", "data", ".", "copy", "(", ")", "for", "func", ",", "kwargs", "in", "call_queues", ":", "try", ":", "result", "=", "func", "(", "result", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "self", ".", "call_queue", "=", "[", "]", "raise", "e", "return", "result", "new_data", "=", "call_queue_closure", "(", "self", ".", "data", ",", "self", ".", "call_queue", ")", "self", ".", "call_queue", "=", "[", "]", "return", "PandasOnPythonFramePartition", "(", "new_data", ")" ]
Apply some callable function to the data in this partition. Note: It is up to the implementation how kwargs are handled. They are an important part of many implementations. As of right now, they are not serialized. Args: func: The lambda to apply (may already be correctly formatted) Returns: A new `BaseFramePartition` containing the object that has had `func` applied to it.
[ "Apply", "some", "callable", "function", "to", "the", "data", "in", "this", "partition", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/python/pandas_on_python/frame/partition.py#L36-L64
train
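The partition above defers work by queueing (func, kwargs) pairs and replaying them when the data is materialized; a self-contained sketch of that call-queue pattern (LazyPartition is a hypothetical class, not modin's):

import pandas as pd

class LazyPartition:
    def __init__(self, data):
        self.data = data
        self.call_queue = []  # pending (func, kwargs) pairs

    def add_call(self, func, **kwargs):
        self.call_queue.append((func, kwargs))
        return self

    def get(self):
        # Replay queued calls in insertion order, then clear the queue.
        result = self.data.copy()
        for func, kwargs in self.call_queue:
            result = func(result, **kwargs)
        self.call_queue = []
        return result

part = LazyPartition(pd.DataFrame({"x": [1, 2]}))
part.add_call(lambda df: df * 2).add_call(lambda df, n: df + n, n=1)
print(part.get())  # the x column becomes [3, 5]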
modin-project/modin
modin/engines/dask/pandas_on_dask_delayed/frame/partition.py
DaskFramePartition.apply
def apply(self, func, **kwargs): """Apply some callable function to the data in this partition. Note: It is up to the implementation how kwargs are handled. They are an important part of many implementations. As of right now, they are not serialized. Args: func: The lambda to apply (may already be correctly formatted) Returns: A new `BaseFramePartition` containing the object that has had `func` applied to it. """ import dask # applies the func lazily delayed_call = self.delayed_call self.delayed_call = self.dask_obj return self.__class__(dask.delayed(func)(delayed_call, **kwargs))
python
def apply(self, func, **kwargs): """Apply some callable function to the data in this partition. Note: It is up to the implementation how kwargs are handled. They are an important part of many implementations. As of right now, they are not serialized. Args: func: The lambda to apply (may already be correctly formatted) Returns: A new `BaseFramePartition` containing the object that has had `func` applied to it. """ import dask # applies the func lazily delayed_call = self.delayed_call self.delayed_call = self.dask_obj return self.__class__(dask.delayed(func)(delayed_call, **kwargs))
[ "def", "apply", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "import", "dask", "# applies the func lazily", "delayed_call", "=", "self", ".", "delayed_call", "self", ".", "delayed_call", "=", "self", ".", "dask_obj", "return", "self", ".", "__class__", "(", "dask", ".", "delayed", "(", "func", ")", "(", "delayed_call", ",", "*", "*", "kwargs", ")", ")" ]
Apply some callable function to the data in this partition. Note: It is up to the implementation how kwargs are handled. They are an important part of many implementations. As of right now, they are not serialized. Args: func: The lambda to apply (may already be correctly formatted) Returns: A new `BaseFramePartition` containing the object that has had `func` applied to it.
[ "Apply", "some", "callable", "function", "to", "the", "data", "in", "this", "partition", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/dask/pandas_on_dask_delayed/frame/partition.py#L29-L48
train
modin-project/modin
modin/engines/dask/pandas_on_dask_delayed/frame/partition.py
DaskFramePartition.add_to_apply_calls
def add_to_apply_calls(self, func, **kwargs): """Add the function to the apply function call stack. This function will be executed when apply is called. It will be executed in the order inserted; apply's func operates last and returns the result. """ import dask self.delayed_call = dask.delayed(func)(self.delayed_call, **kwargs) return self
python
def add_to_apply_calls(self, func, **kwargs): """Add the function to the apply function call stack. This function will be executed when apply is called. It will be executed in the order inserted; apply's func operates last and returns the result. """ import dask self.delayed_call = dask.delayed(func)(self.delayed_call, **kwargs) return self
[ "def", "add_to_apply_calls", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "import", "dask", "self", ".", "delayed_call", "=", "dask", ".", "delayed", "(", "func", ")", "(", "self", ".", "delayed_call", ",", "*", "*", "kwargs", ")", "return", "self" ]
Add the function to the apply function call stack. This function will be executed when apply is called. It will be executed in the order inserted; apply's func operates last and returns the result.
[ "Add", "the", "function", "to", "the", "apply", "function", "call", "stack", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/dask/pandas_on_dask_delayed/frame/partition.py#L50-L59
train
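Both Dask methods above build the task graph lazily by wrapping each function in dask.delayed; a minimal standalone illustration of that chaining (assumes dask is installed):

import dask

def add_one(x):
    return x + 1

# Each dask.delayed(...) call extends the graph; nothing runs until compute().
chained = dask.delayed(add_one)(dask.delayed(add_one)(10))
print(chained.compute())  # 12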
modin-project/modin
modin/experimental/engines/pyarrow_on_ray/io.py
_read_csv_with_offset_pyarrow_on_ray
def _read_csv_with_offset_pyarrow_on_ray( fname, num_splits, start, end, kwargs, header ): # pragma: no cover """Use a Ray task to read a chunk of a CSV into a pyarrow Table. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the pyarrow `read_csv` function. header: The header of the file. Returns: A list containing the split pyarrow Tables and the number of rows of the tables as the last element. This is used to determine the total length of the DataFrame to build a default Index. """ bio = open(fname, "rb") # The header line for the CSV file first_line = bio.readline() bio.seek(start) to_read = header + first_line + bio.read(end - start) bio.close() table = csv.read_csv( BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1) ) chunksize = get_default_chunksize(table.num_columns, num_splits) chunks = [ pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)]) for i in range(num_splits) ] return chunks + [table.num_rows]
python
def _read_csv_with_offset_pyarrow_on_ray( fname, num_splits, start, end, kwargs, header ): # pragma: no cover """Use a Ray task to read a chunk of a CSV into a pyarrow Table. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the pyarrow `read_csv` function. header: The header of the file. Returns: A list containing the split pyarrow Tables and the number of rows of the tables as the last element. This is used to determine the total length of the DataFrame to build a default Index. """ bio = open(fname, "rb") # The header line for the CSV file first_line = bio.readline() bio.seek(start) to_read = header + first_line + bio.read(end - start) bio.close() table = csv.read_csv( BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1) ) chunksize = get_default_chunksize(table.num_columns, num_splits) chunks = [ pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)]) for i in range(num_splits) ] return chunks + [table.num_rows]
[ "def", "_read_csv_with_offset_pyarrow_on_ray", "(", "fname", ",", "num_splits", ",", "start", ",", "end", ",", "kwargs", ",", "header", ")", ":", "# pragma: no cover", "bio", "=", "open", "(", "fname", ",", "\"rb\"", ")", "# The header line for the CSV file", "first_line", "=", "bio", ".", "readline", "(", ")", "bio", ".", "seek", "(", "start", ")", "to_read", "=", "header", "+", "first_line", "+", "bio", ".", "read", "(", "end", "-", "start", ")", "bio", ".", "close", "(", ")", "table", "=", "csv", ".", "read_csv", "(", "BytesIO", "(", "to_read", ")", ",", "parse_options", "=", "csv", ".", "ParseOptions", "(", "header_rows", "=", "1", ")", ")", "chunksize", "=", "get_default_chunksize", "(", "table", ".", "num_columns", ",", "num_splits", ")", "chunks", "=", "[", "pa", ".", "Table", ".", "from_arrays", "(", "table", ".", "columns", "[", "chunksize", "*", "i", ":", "chunksize", "*", "(", "i", "+", "1", ")", "]", ")", "for", "i", "in", "range", "(", "num_splits", ")", "]", "return", "chunks", "+", "[", "table", ".", "num_rows", "]" ]
Use a Ray task to read a chunk of a CSV into a pyarrow Table. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the pyarrow `read_csv` function. header: The header of the file. Returns: A list containing the split pyarrow Tables and the number of rows of the tables as the last element. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "a", "chunk", "of", "a", "CSV", "into", "a", "pyarrow", "Table", ".", "Note", ":", "Ray", "functions", "are", "not", "detected", "by", "codecov", "(", "thus", "pragma", ":", "no", "cover", ")", "Args", ":", "fname", ":", "The", "filename", "of", "the", "file", "to", "open", ".", "num_splits", ":", "The", "number", "of", "splits", "(", "partitions", ")", "to", "separate", "the", "DataFrame", "into", ".", "start", ":", "The", "start", "byte", "offset", ".", "end", ":", "The", "end", "byte", "offset", ".", "kwargs", ":", "The", "kwargs", "for", "the", "pyarrow", "read_csv", "function", ".", "header", ":", "The", "header", "of", "the", "file", ".", "Returns", ":", "A", "list", "containing", "the", "split", "pyarrow", "Tables", "and", "the", "the", "number", "of", "rows", "of", "the", "tables", "as", "the", "last", "element", ".", "This", "is", "used", "to", "determine", "the", "total", "length", "of", "the", "DataFrame", "to", "build", "a", "default", "Index", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/io.py#L23-L54
train
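The reader above stitches the file's header line onto an arbitrary byte range so each chunk parses with the right column names; a simplified sketch of the same idea with plain pandas (read_csv_chunk is a hypothetical helper, and start/end are assumed to fall on line boundaries):

from io import BytesIO
import pandas as pd

def read_csv_chunk(fname, start, end):
    with open(fname, "rb") as f:
        header = f.readline()  # the first line holds the column names
        f.seek(start)
        body = f.read(end - start)
    # Prepend the header so the chunk parses as a complete CSV.
    return pd.read_csv(BytesIO(header + body))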
modin-project/modin
modin/data_management/utils.py
compute_chunksize
def compute_chunksize(df, num_splits, default_block_size=32, axis=None): """Computes the number of rows and/or columns to include in each partition. Args: df: The DataFrame to split. num_splits: The maximum number of splits to separate the DataFrame into. default_block_size: Minimum number of rows/columns (default set to 32x32). axis: The axis to split. (0: Index, 1: Columns, None: Both) Returns: If axis is 1 or 0, returns an integer number of rows/columns to split the DataFrame. If axis is None, return a tuple containing both. """ if axis == 0 or axis is None: row_chunksize = get_default_chunksize(len(df.index), num_splits) # Take the max of the computed chunksize and the default block size to avoid a # large number of small partitions. row_chunksize = max(1, row_chunksize, default_block_size) if axis == 0: return row_chunksize # We always execute this because we can only get here if axis is 1 or None. col_chunksize = get_default_chunksize(len(df.columns), num_splits) # Take the max of the computed chunksize and the default block size to avoid a # large number of small partitions. col_chunksize = max(1, col_chunksize, default_block_size) if axis == 1: return col_chunksize return row_chunksize, col_chunksize
python
def compute_chunksize(df, num_splits, default_block_size=32, axis=None): """Computes the number of rows and/or columns to include in each partition. Args: df: The DataFrame to split. num_splits: The maximum number of splits to separate the DataFrame into. default_block_size: Minimum number of rows/columns (default set to 32x32). axis: The axis to split. (0: Index, 1: Columns, None: Both) Returns: If axis is 1 or 0, returns an integer number of rows/columns to split the DataFrame. If axis is None, return a tuple containing both. """ if axis == 0 or axis is None: row_chunksize = get_default_chunksize(len(df.index), num_splits) # Take the max of the computed chunksize and the default block size to avoid a # large number of small partitions. row_chunksize = max(1, row_chunksize, default_block_size) if axis == 0: return row_chunksize # We always execute this because we can only get here if axis is 1 or None. col_chunksize = get_default_chunksize(len(df.columns), num_splits) # Take the max of the computed chunksize and the default block size to avoid a # large number of small partitions. col_chunksize = max(1, col_chunksize, default_block_size) if axis == 1: return col_chunksize return row_chunksize, col_chunksize
[ "def", "compute_chunksize", "(", "df", ",", "num_splits", ",", "default_block_size", "=", "32", ",", "axis", "=", "None", ")", ":", "if", "axis", "==", "0", "or", "axis", "is", "None", ":", "row_chunksize", "=", "get_default_chunksize", "(", "len", "(", "df", ".", "index", ")", ",", "num_splits", ")", "# Take the min of the default and the memory-usage chunksize first to avoid a", "# large amount of small partitions.", "row_chunksize", "=", "max", "(", "1", ",", "row_chunksize", ",", "default_block_size", ")", "if", "axis", "==", "0", ":", "return", "row_chunksize", "# We always execute this because we can only get here if axis is 1 or None.", "col_chunksize", "=", "get_default_chunksize", "(", "len", "(", "df", ".", "columns", ")", ",", "num_splits", ")", "# Take the min of the default and the memory-usage chunksize first to avoid a", "# large amount of small partitions.", "col_chunksize", "=", "max", "(", "1", ",", "col_chunksize", ",", "default_block_size", ")", "if", "axis", "==", "1", ":", "return", "col_chunksize", "return", "row_chunksize", ",", "col_chunksize" ]
Computes the number of rows and/or columns to include in each partition. Args: df: The DataFrame to split. num_splits: The maximum number of splits to separate the DataFrame into. default_block_size: Minimum number of rows/columns (default set to 32x32). axis: The axis to split. (0: Index, 1: Columns, None: Both) Returns: If axis is 1 or 0, returns an integer number of rows/columns to split the DataFrame. If axis is None, return a tuple containing both.
[ "Computes", "the", "number", "of", "rows", "and", "/", "or", "columns", "to", "include", "in", "each", "partition", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/data_management/utils.py#L24-L52
train
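A worked example of the chunk-size arithmetic above, assuming get_default_chunksize is a ceiling division of the axis length by the number of splits (an assumption about that helper, not shown in this snippet):

import math

def default_chunksize(length, num_splits):
    # Assumed behavior of get_default_chunksize: ceiling division.
    return max(1, math.ceil(length / num_splits))

# 100 rows over 8 splits gives 13-row chunks, but the 32-row floor wins:
print(max(1, default_chunksize(100, 8), 32))  # 32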
modin-project/modin
modin/data_management/utils.py
_get_nan_block_id
def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False): """A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block. """ global _NAN_BLOCKS if transpose: n_row, n_col = n_col, n_row shape = (n_row, n_col) if shape not in _NAN_BLOCKS: arr = np.tile(np.array(np.NaN), shape) # TODO Not use pandas.DataFrame here, but something more general. _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr)) return _NAN_BLOCKS[shape]
python
def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False): """A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block. """ global _NAN_BLOCKS if transpose: n_row, n_col = n_col, n_row shape = (n_row, n_col) if shape not in _NAN_BLOCKS: arr = np.tile(np.array(np.NaN), shape) # TODO Not use pandas.DataFrame here, but something more general. _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr)) return _NAN_BLOCKS[shape]
[ "def", "_get_nan_block_id", "(", "partition_class", ",", "n_row", "=", "1", ",", "n_col", "=", "1", ",", "transpose", "=", "False", ")", ":", "global", "_NAN_BLOCKS", "if", "transpose", ":", "n_row", ",", "n_col", "=", "n_col", ",", "n_row", "shape", "=", "(", "n_row", ",", "n_col", ")", "if", "shape", "not", "in", "_NAN_BLOCKS", ":", "arr", "=", "np", ".", "tile", "(", "np", ".", "array", "(", "np", ".", "NaN", ")", ",", "shape", ")", "# TODO Not use pandas.DataFrame here, but something more general.", "_NAN_BLOCKS", "[", "shape", "]", "=", "partition_class", ".", "put", "(", "pandas", ".", "DataFrame", "(", "data", "=", "arr", ")", ")", "return", "_NAN_BLOCKS", "[", "shape", "]" ]
A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block.
[ "A", "memory", "efficient", "way", "to", "get", "a", "block", "of", "NaNs", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/data_management/utils.py#L55-L75
train
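The function above memoizes one NaN frame per shape so repeated fills share a single object; the same caching pattern in miniature (cache and helper names are illustrative):

import numpy as np
import pandas as pd

_NAN_CACHE = {}

def nan_block(n_row=1, n_col=1):
    shape = (n_row, n_col)
    if shape not in _NAN_CACHE:
        # Built once per shape; later callers get the cached frame.
        _NAN_CACHE[shape] = pd.DataFrame(np.full(shape, np.nan))
    return _NAN_CACHE[shape]

assert nan_block(2, 3) is nan_block(2, 3)  # the second call hits the cache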
modin-project/modin
modin/data_management/utils.py
split_result_of_axis_func_pandas
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None): """Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames. """ if num_splits == 1: return result if length_list is not None: length_list.insert(0, 0) sums = np.cumsum(length_list) if axis == 0: return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] else: return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] # We do this to restore block partitioning chunksize = compute_chunksize(result, num_splits, axis=axis) if axis == 0: return [ result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ] else: return [ result.iloc[:, chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ]
python
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None): """Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames. """ if num_splits == 1: return result if length_list is not None: length_list.insert(0, 0) sums = np.cumsum(length_list) if axis == 0: return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] else: return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] # We do this to restore block partitioning chunksize = compute_chunksize(result, num_splits, axis=axis) if axis == 0: return [ result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ] else: return [ result.iloc[:, chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ]
[ "def", "split_result_of_axis_func_pandas", "(", "axis", ",", "num_splits", ",", "result", ",", "length_list", "=", "None", ")", ":", "if", "num_splits", "==", "1", ":", "return", "result", "if", "length_list", "is", "not", "None", ":", "length_list", ".", "insert", "(", "0", ",", "0", ")", "sums", "=", "np", ".", "cumsum", "(", "length_list", ")", "if", "axis", "==", "0", ":", "return", "[", "result", ".", "iloc", "[", "sums", "[", "i", "]", ":", "sums", "[", "i", "+", "1", "]", "]", "for", "i", "in", "range", "(", "len", "(", "sums", ")", "-", "1", ")", "]", "else", ":", "return", "[", "result", ".", "iloc", "[", ":", ",", "sums", "[", "i", "]", ":", "sums", "[", "i", "+", "1", "]", "]", "for", "i", "in", "range", "(", "len", "(", "sums", ")", "-", "1", ")", "]", "# We do this to restore block partitioning", "chunksize", "=", "compute_chunksize", "(", "result", ",", "num_splits", ",", "axis", "=", "axis", ")", "if", "axis", "==", "0", ":", "return", "[", "result", ".", "iloc", "[", "chunksize", "*", "i", ":", "chunksize", "*", "(", "i", "+", "1", ")", "]", "for", "i", "in", "range", "(", "num_splits", ")", "]", "else", ":", "return", "[", "result", ".", "iloc", "[", ":", ",", "chunksize", "*", "i", ":", "chunksize", "*", "(", "i", "+", "1", ")", "]", "for", "i", "in", "range", "(", "num_splits", ")", "]" ]
Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames.
[ "Split", "the", "Pandas", "result", "evenly", "based", "on", "the", "provided", "number", "of", "splits", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/data_management/utils.py#L78-L111
train
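The length_list branch above restores an existing partitioning by slicing at cumulative offsets; a small demonstration of that cumsum slicing:

import numpy as np
import pandas as pd

df = pd.DataFrame({"x": range(10)})
length_list = [4, 3, 3]              # target partition lengths
sums = np.cumsum([0] + length_list)  # [0, 4, 7, 10]
parts = [df.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)]
print([len(p) for p in parts])  # [4, 3, 3]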
modin-project/modin
modin/pandas/indexing.py
_parse_tuple
def _parse_tuple(tup): """Unpack the user input for getitem and setitem and compute ndim loc[a] -> ([a], :), 1D loc[[a,b],] -> ([a,b], :), 2D loc[a,b] -> ([a], [b]), 0D """ row_loc, col_loc = slice(None), slice(None) if is_tuple(tup): row_loc = tup[0] if len(tup) == 2: col_loc = tup[1] if len(tup) > 2: raise IndexingError("Too many indexers") else: row_loc = tup ndim = _compute_ndim(row_loc, col_loc) row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) row_loc = [row_loc] if row_scaler else row_loc col_loc = [col_loc] if col_scaler else col_loc return row_loc, col_loc, ndim, row_scaler, col_scaler
python
def _parse_tuple(tup): """Unpack the user input for getitem and setitem and compute ndim loc[a] -> ([a], :), 1D loc[[a,b],] -> ([a,b], :), 2D loc[a,b] -> ([a], [b]), 0D """ row_loc, col_loc = slice(None), slice(None) if is_tuple(tup): row_loc = tup[0] if len(tup) == 2: col_loc = tup[1] if len(tup) > 2: raise IndexingError("Too many indexers") else: row_loc = tup ndim = _compute_ndim(row_loc, col_loc) row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) row_loc = [row_loc] if row_scaler else row_loc col_loc = [col_loc] if col_scaler else col_loc return row_loc, col_loc, ndim, row_scaler, col_scaler
[ "def", "_parse_tuple", "(", "tup", ")", ":", "row_loc", ",", "col_loc", "=", "slice", "(", "None", ")", ",", "slice", "(", "None", ")", "if", "is_tuple", "(", "tup", ")", ":", "row_loc", "=", "tup", "[", "0", "]", "if", "len", "(", "tup", ")", "==", "2", ":", "col_loc", "=", "tup", "[", "1", "]", "if", "len", "(", "tup", ")", ">", "2", ":", "raise", "IndexingError", "(", "\"Too many indexers\"", ")", "else", ":", "row_loc", "=", "tup", "ndim", "=", "_compute_ndim", "(", "row_loc", ",", "col_loc", ")", "row_scaler", "=", "is_scalar", "(", "row_loc", ")", "col_scaler", "=", "is_scalar", "(", "col_loc", ")", "row_loc", "=", "[", "row_loc", "]", "if", "row_scaler", "else", "row_loc", "col_loc", "=", "[", "col_loc", "]", "if", "col_scaler", "else", "col_loc", "return", "row_loc", ",", "col_loc", ",", "ndim", ",", "row_scaler", ",", "col_scaler" ]
Unpack the user input for getitem and setitem and compute ndim loc[a] -> ([a], :), 1D loc[[a,b],] -> ([a,b], :), 2D loc[a,b] -> ([a], [b]), 0D
[ "Unpack", "the", "user", "input", "for", "getitem", "and", "setitem", "and", "compute", "ndim" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L77-L101
train
modin-project/modin
modin/pandas/indexing.py
_is_enlargement
def _is_enlargement(locator, global_index): """Determine if a locator will enlarge the global index. Enlargement happens when you try to locate using labels that aren't in the original index. In other words, enlargement == adding NaNs! """ if ( is_list_like(locator) and not is_slice(locator) and len(locator) > 0 and not is_boolean_array(locator) and (isinstance(locator, type(global_index[0])) and locator not in global_index) ): n_diff_elems = len(pandas.Index(locator).difference(global_index)) is_enlargement_boolean = n_diff_elems > 0 return is_enlargement_boolean return False
python
def _is_enlargement(locator, global_index): """Determine if a locator will enlarge the global index. Enlargement happens when you try to locate using labels that aren't in the original index. In other words, enlargement == adding NaNs! """ if ( is_list_like(locator) and not is_slice(locator) and len(locator) > 0 and not is_boolean_array(locator) and (isinstance(locator, type(global_index[0])) and locator not in global_index) ): n_diff_elems = len(pandas.Index(locator).difference(global_index)) is_enlargement_boolean = n_diff_elems > 0 return is_enlargement_boolean return False
[ "def", "_is_enlargement", "(", "locator", ",", "global_index", ")", ":", "if", "(", "is_list_like", "(", "locator", ")", "and", "not", "is_slice", "(", "locator", ")", "and", "len", "(", "locator", ")", ">", "0", "and", "not", "is_boolean_array", "(", "locator", ")", "and", "(", "isinstance", "(", "locator", ",", "type", "(", "global_index", "[", "0", "]", ")", ")", "and", "locator", "not", "in", "global_index", ")", ")", ":", "n_diff_elems", "=", "len", "(", "pandas", ".", "Index", "(", "locator", ")", ".", "difference", "(", "global_index", ")", ")", "is_enlargement_boolean", "=", "n_diff_elems", ">", "0", "return", "is_enlargement_boolean", "return", "False" ]
Determine if a locator will enlarge the global index. Enlargement happens when you try to locate using labels that aren't in the original index. In other words, enlargement == adding NaNs!
[ "Determine", "if", "a", "locator", "will", "enlarge", "the", "global", "index", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L104-L120
train
modin-project/modin
modin/pandas/indexing.py
_compute_ndim
def _compute_ndim(row_loc, col_loc): """Compute the ndim of result from locators """ row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) if row_scaler and col_scaler: ndim = 0 elif row_scaler ^ col_scaler: ndim = 1 else: ndim = 2 return ndim
python
def _compute_ndim(row_loc, col_loc): """Compute the ndim of result from locators """ row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) if row_scaler and col_scaler: ndim = 0 elif row_scaler ^ col_scaler: ndim = 1 else: ndim = 2 return ndim
[ "def", "_compute_ndim", "(", "row_loc", ",", "col_loc", ")", ":", "row_scaler", "=", "is_scalar", "(", "row_loc", ")", "col_scaler", "=", "is_scalar", "(", "col_loc", ")", "if", "row_scaler", "and", "col_scaler", ":", "ndim", "=", "0", "elif", "row_scaler", "^", "col_scaler", ":", "ndim", "=", "1", "else", ":", "ndim", "=", "2", "return", "ndim" ]
Compute the ndim of result from locators
[ "Compute", "the", "ndim", "of", "result", "from", "locators" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L127-L140
train
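The XOR rule above maps locator shapes to the dimensionality of the result; a runnable restatement using pandas' is_scalar (ndim_of is an illustrative name):

from pandas.api.types import is_scalar

def ndim_of(row_loc, col_loc):
    row_scalar, col_scalar = is_scalar(row_loc), is_scalar(col_loc)
    # Two scalars -> element (0D); exactly one -> Series (1D); none -> frame (2D).
    return 0 if (row_scalar and col_scalar) else (1 if row_scalar ^ col_scalar else 2)

print(ndim_of("a", "b"))          # 0
print(ndim_of("a", slice(None)))  # 1
print(ndim_of(["a"], ["b"]))      # 2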
modin-project/modin
modin/pandas/indexing.py
_LocationIndexerBase._broadcast_item
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape): """Use numpy to broadcast or reshape item. Notes: - Numpy is memory efficient, there shouldn't be a performance issue. """ # It is valid to pass a DataFrame or Series to __setitem__ that is larger than # the target the user is trying to overwrite. This case is handled by reindexing # the item against the row and column lookups below. if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)): if not all(idx in item.index for idx in row_lookup): raise ValueError( "Must have equal len keys and value when setting with " "an iterable" ) if hasattr(item, "columns"): if not all(idx in item.columns for idx in col_lookup): raise ValueError( "Must have equal len keys and value when setting " "with an iterable" ) item = item.reindex(index=row_lookup, columns=col_lookup) else: item = item.reindex(index=row_lookup) try: item = np.array(item) if np.prod(to_shape) == np.prod(item.shape): return item.reshape(to_shape) else: return np.broadcast_to(item, to_shape) except ValueError: from_shape = np.array(item).shape raise ValueError( "could not broadcast input array from shape {from_shape} into shape " "{to_shape}".format(from_shape=from_shape, to_shape=to_shape) )
python
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape): """Use numpy to broadcast or reshape item. Notes: - Numpy is memory efficient, there shouldn't be a performance issue. """ # It is valid to pass a DataFrame or Series to __setitem__ that is larger than # the target the user is trying to overwrite. This case is handled by reindexing # the item against the row and column lookups below. if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)): if not all(idx in item.index for idx in row_lookup): raise ValueError( "Must have equal len keys and value when setting with " "an iterable" ) if hasattr(item, "columns"): if not all(idx in item.columns for idx in col_lookup): raise ValueError( "Must have equal len keys and value when setting " "with an iterable" ) item = item.reindex(index=row_lookup, columns=col_lookup) else: item = item.reindex(index=row_lookup) try: item = np.array(item) if np.prod(to_shape) == np.prod(item.shape): return item.reshape(to_shape) else: return np.broadcast_to(item, to_shape) except ValueError: from_shape = np.array(item).shape raise ValueError( "could not broadcast input array from shape {from_shape} into shape " "{to_shape}".format(from_shape=from_shape, to_shape=to_shape) )
[ "def", "_broadcast_item", "(", "self", ",", "row_lookup", ",", "col_lookup", ",", "item", ",", "to_shape", ")", ":", "# It is valid to pass a DataFrame or Series to __setitem__ that is larger than", "# the target the user is trying to overwrite. This", "if", "isinstance", "(", "item", ",", "(", "pandas", ".", "Series", ",", "pandas", ".", "DataFrame", ",", "DataFrame", ")", ")", ":", "if", "not", "all", "(", "idx", "in", "item", ".", "index", "for", "idx", "in", "row_lookup", ")", ":", "raise", "ValueError", "(", "\"Must have equal len keys and value when setting with \"", "\"an iterable\"", ")", "if", "hasattr", "(", "item", ",", "\"columns\"", ")", ":", "if", "not", "all", "(", "idx", "in", "item", ".", "columns", "for", "idx", "in", "col_lookup", ")", ":", "raise", "ValueError", "(", "\"Must have equal len keys and value when setting \"", "\"with an iterable\"", ")", "item", "=", "item", ".", "reindex", "(", "index", "=", "row_lookup", ",", "columns", "=", "col_lookup", ")", "else", ":", "item", "=", "item", ".", "reindex", "(", "index", "=", "row_lookup", ")", "try", ":", "item", "=", "np", ".", "array", "(", "item", ")", "if", "np", ".", "prod", "(", "to_shape", ")", "==", "np", ".", "prod", "(", "item", ".", "shape", ")", ":", "return", "item", ".", "reshape", "(", "to_shape", ")", "else", ":", "return", "np", ".", "broadcast_to", "(", "item", ",", "to_shape", ")", "except", "ValueError", ":", "from_shape", "=", "np", ".", "array", "(", "item", ")", ".", "shape", "raise", "ValueError", "(", "\"could not broadcast input array from shape {from_shape} into shape \"", "\"{to_shape}\"", ".", "format", "(", "from_shape", "=", "from_shape", ",", "to_shape", "=", "to_shape", ")", ")" ]
Use numpy to broadcast or reshape item. Notes: - Numpy is memory efficient, there shouldn't be a performance issue.
[ "Use", "numpy", "to", "broadcast", "or", "reshape", "item", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L187-L221
train
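The reshape-or-broadcast decision at the end of the method above, isolated in plain numpy:

import numpy as np

item = np.array([1, 2, 3])
to_shape = (2, 3)
# Element counts differ (3 vs 6), so broadcasting repeats the row instead of reshaping.
out = item.reshape(to_shape) if np.prod(to_shape) == item.size else np.broadcast_to(item, to_shape)
print(out)
# [[1 2 3]
#  [1 2 3]]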
modin-project/modin
modin/pandas/indexing.py
_LocationIndexerBase._write_items
def _write_items(self, row_lookup, col_lookup, item): """Perform remote write and replace blocks. """ self.qc.write_items(row_lookup, col_lookup, item)
python
def _write_items(self, row_lookup, col_lookup, item): """Perform remote write and replace blocks. """ self.qc.write_items(row_lookup, col_lookup, item)
[ "def", "_write_items", "(", "self", ",", "row_lookup", ",", "col_lookup", ",", "item", ")", ":", "self", ".", "qc", ".", "write_items", "(", "row_lookup", ",", "col_lookup", ",", "item", ")" ]
Perform remote write and replace blocks.
[ "Perform", "remote", "write", "and", "replace", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L223-L226
train
modin-project/modin
modin/pandas/indexing.py
_LocIndexer._handle_enlargement
def _handle_enlargement(self, row_loc, col_loc): """Handle Enlargement (if there is one). Returns: None """ if _is_enlargement(row_loc, self.qc.index) or _is_enlargement( col_loc, self.qc.columns ): _warn_enlargement() self.qc.enlarge_partitions( new_row_labels=self._compute_enlarge_labels(row_loc, self.qc.index), new_col_labels=self._compute_enlarge_labels(col_loc, self.qc.columns), )
python
def _handle_enlargement(self, row_loc, col_loc): """Handle Enlargement (if there is one). Returns: None """ if _is_enlargement(row_loc, self.qc.index) or _is_enlargement( col_loc, self.qc.columns ): _warn_enlargement() self.qc.enlarge_partitions( new_row_labels=self._compute_enlarge_labels(row_loc, self.qc.index), new_col_labels=self._compute_enlarge_labels(col_loc, self.qc.columns), )
[ "def", "_handle_enlargement", "(", "self", ",", "row_loc", ",", "col_loc", ")", ":", "if", "_is_enlargement", "(", "row_loc", ",", "self", ".", "qc", ".", "index", ")", "or", "_is_enlargement", "(", "col_loc", ",", "self", ".", "qc", ".", "columns", ")", ":", "_warn_enlargement", "(", ")", "self", ".", "qc", ".", "enlarge_partitions", "(", "new_row_labels", "=", "self", ".", "_compute_enlarge_labels", "(", "row_loc", ",", "self", ".", "qc", ".", "index", ")", ",", "new_col_labels", "=", "self", ".", "_compute_enlarge_labels", "(", "col_loc", ",", "self", ".", "qc", ".", "columns", ")", ",", ")" ]
Handle Enlargement (if there is one). Returns: None
[ "Handle", "Enlargement", "(", "if", "there", "is", "one", ")", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L279-L292
train
modin-project/modin
modin/pandas/indexing.py
_LocIndexer._compute_enlarge_labels
def _compute_enlarge_labels(self, locator, base_index): """Helper for _enlarge_axis, compute common labels and extra labels. Returns: nan_labels: The labels that need to be added """ # base_index_type can be pd.Index or pd.DatetimeIndex # depending on user input and pandas behavior # See issue #2264 base_index_type = type(base_index) locator_as_index = base_index_type(locator) nan_labels = locator_as_index.difference(base_index) common_labels = locator_as_index.intersection(base_index) if len(common_labels) == 0: raise KeyError( "None of [{labels}] are in the [{base_index_name}]".format( labels=list(locator_as_index), base_index_name=base_index ) ) return nan_labels
python
def _compute_enlarge_labels(self, locator, base_index): """Helper for _enlarge_axis, compute common labels and extra labels. Returns: nan_labels: The labels that need to be added """ # base_index_type can be pd.Index or pd.DatetimeIndex # depending on user input and pandas behavior # See issue #2264 base_index_type = type(base_index) locator_as_index = base_index_type(locator) nan_labels = locator_as_index.difference(base_index) common_labels = locator_as_index.intersection(base_index) if len(common_labels) == 0: raise KeyError( "None of [{labels}] are in the [{base_index_name}]".format( labels=list(locator_as_index), base_index_name=base_index ) ) return nan_labels
[ "def", "_compute_enlarge_labels", "(", "self", ",", "locator", ",", "base_index", ")", ":", "# base_index_type can be pd.Index or pd.DatetimeIndex", "# depending on user input and pandas behavior", "# See issue #2264", "base_index_type", "=", "type", "(", "base_index", ")", "locator_as_index", "=", "base_index_type", "(", "locator", ")", "nan_labels", "=", "locator_as_index", ".", "difference", "(", "base_index", ")", "common_labels", "=", "locator_as_index", ".", "intersection", "(", "base_index", ")", "if", "len", "(", "common_labels", ")", "==", "0", ":", "raise", "KeyError", "(", "\"None of [{labels}] are in the [{base_index_name}]\"", ".", "format", "(", "labels", "=", "list", "(", "locator_as_index", ")", ",", "base_index_name", "=", "base_index", ")", ")", "return", "nan_labels" ]
Helper for _enlarge_axis, compute common labels and extra labels. Returns: nan_labels: The labels that need to be added
[ "Helper", "for", "_enlarge_axis", "compute", "common", "labels", "and", "extra", "labels", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/indexing.py#L294-L315
train
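The label arithmetic in `_compute_enlarge_labels` is plain `pandas.Index` set logic; a small sketch with invented labels:

    import pandas as pd

    base_index = pd.Index(["a", "b", "c"])
    locator_as_index = pd.Index(["b", "c", "d"])
    nan_labels = locator_as_index.difference(base_index)       # Index(['d'])
    common_labels = locator_as_index.intersection(base_index)  # Index(['b', 'c'])
    # an empty common_labels is what triggers the KeyError in the method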
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_split_result_for_readers
def _split_result_for_readers(axis, num_splits, df): # pragma: no cover """Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames. """ splits = split_result_of_axis_func_pandas(axis, num_splits, df) if not isinstance(splits, list): splits = [splits] return splits
python
def _split_result_for_readers(axis, num_splits, df): # pragma: no cover """Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames. """ splits = split_result_of_axis_func_pandas(axis, num_splits, df) if not isinstance(splits, list): splits = [splits] return splits
[ "def", "_split_result_for_readers", "(", "axis", ",", "num_splits", ",", "df", ")", ":", "# pragma: no cover", "splits", "=", "split_result_of_axis_func_pandas", "(", "axis", ",", "num_splits", ",", "df", ")", "if", "not", "isinstance", "(", "splits", ",", "list", ")", ":", "splits", "=", "[", "splits", "]", "return", "splits" ]
Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames.
[ "Splits", "the", "DataFrame", "read", "into", "smaller", "DataFrames", "and", "handles", "all", "edge", "cases", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L18-L32
train
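A rough equivalent of what the splitting helper produces for axis 0, written with plain `iloc` slicing since `split_result_of_axis_func_pandas` is a Modin internal; the frame and split count are invented:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(np.arange(12).reshape(6, 2))
    num_splits = 3
    chunk = len(df) // num_splits
    splits = [df.iloc[i * chunk : (i + 1) * chunk] for i in range(num_splits)]
    # three 2-row DataFrames, the shape of result each reader task returns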
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_parquet_columns
def _read_parquet_columns(path, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ import pyarrow.parquet as pq df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas() # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
python
def _read_parquet_columns(path, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ import pyarrow.parquet as pq df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas() # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
[ "def", "_read_parquet_columns", "(", "path", ",", "columns", ",", "num_splits", ",", "kwargs", ")", ":", "# pragma: no cover", "import", "pyarrow", ".", "parquet", "as", "pq", "df", "=", "pq", ".", "read_pandas", "(", "path", ",", "columns", "=", "columns", ",", "*", "*", "kwargs", ")", ".", "to_pandas", "(", ")", "# Append the length of the index here to build it externally", "return", "_split_result_for_readers", "(", "0", ",", "num_splits", ",", "df", ")", "+", "[", "len", "(", "df", ".", "index", ")", "]" ]
Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is no `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "columns", "from", "Parquet", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L36-L56
train
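The single pyarrow call at the heart of this task can be tried standalone; the file name and column names below are hypothetical, and pyarrow must be installed:

    import pyarrow.parquet as pq

    df = pq.read_pandas("example.parquet", columns=["col_a", "col_b"]).to_pandas()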
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_csv_with_offset_pandas_on_ray
def _read_csv_with_offset_pandas_on_ray( fname, num_splits, start, end, kwargs, header ): # pragma: no cover """Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the Pandas `read_csv` function. header: The header of the file. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ index_col = kwargs.get("index_col", None) bio = file_open(fname, "rb") bio.seek(start) to_read = header + bio.read(end - start) bio.close() pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs) pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns)) if index_col is not None: index = pandas_df.index # Partitions must have RangeIndex pandas_df.index = pandas.RangeIndex(0, len(pandas_df)) else: # We will use the lengths to build the index if we are not given an # `index_col`. index = len(pandas_df) return _split_result_for_readers(1, num_splits, pandas_df) + [index]
python
def _read_csv_with_offset_pandas_on_ray( fname, num_splits, start, end, kwargs, header ): # pragma: no cover """Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the Pandas `read_csv` function. header: The header of the file. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ index_col = kwargs.get("index_col", None) bio = file_open(fname, "rb") bio.seek(start) to_read = header + bio.read(end - start) bio.close() pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs) pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns)) if index_col is not None: index = pandas_df.index # Partitions must have RangeIndex pandas_df.index = pandas.RangeIndex(0, len(pandas_df)) else: # We will use the lengths to build the index if we are not given an # `index_col`. index = len(pandas_df) return _split_result_for_readers(1, num_splits, pandas_df) + [index]
[ "def", "_read_csv_with_offset_pandas_on_ray", "(", "fname", ",", "num_splits", ",", "start", ",", "end", ",", "kwargs", ",", "header", ")", ":", "# pragma: no cover", "index_col", "=", "kwargs", ".", "get", "(", "\"index_col\"", ",", "None", ")", "bio", "=", "file_open", "(", "fname", ",", "\"rb\"", ")", "bio", ".", "seek", "(", "start", ")", "to_read", "=", "header", "+", "bio", ".", "read", "(", "end", "-", "start", ")", "bio", ".", "close", "(", ")", "pandas_df", "=", "pandas", ".", "read_csv", "(", "BytesIO", "(", "to_read", ")", ",", "*", "*", "kwargs", ")", "pandas_df", ".", "columns", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "pandas_df", ".", "columns", ")", ")", "if", "index_col", "is", "not", "None", ":", "index", "=", "pandas_df", ".", "index", "# Partitions must have RangeIndex", "pandas_df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "0", ",", "len", "(", "pandas_df", ")", ")", "else", ":", "# We will use the lengths to build the index if we are not given an", "# `index_col`.", "index", "=", "len", "(", "pandas_df", ")", "return", "_split_result_for_readers", "(", "1", ",", "num_splits", ",", "pandas_df", ")", "+", "[", "index", "]" ]
Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the Pandas `read_csv` function. header: The header of the file. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is no `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "a", "chunk", "of", "a", "CSV", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L60-L96
train
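The byte-offset trick is reusable outside Ray: seek to a precomputed offset, read a chunk, and prepend the header bytes so every chunk parses with the original column names. A sketch with a hypothetical file, using offsets that happen to span the whole body:

    import pandas
    from io import BytesIO

    with open("example.csv", "rb") as bio:
        header = bio.readline()   # keep the header bytes for every chunk
        start = bio.tell()
        bio.seek(0, 2)            # jump to the end of the file for the last offset
        end = bio.tell()
        bio.seek(start)
        to_read = header + bio.read(end - start)
    pandas_df = pandas.read_csv(BytesIO(to_read))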
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_hdf_columns
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
python
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
[ "def", "_read_hdf_columns", "(", "path_or_buf", ",", "columns", ",", "num_splits", ",", "kwargs", ")", ":", "# pragma: no cover", "df", "=", "pandas", ".", "read_hdf", "(", "path_or_buf", ",", "columns", "=", "columns", ",", "*", "*", "kwargs", ")", "# Append the length of the index here to build it externally", "return", "_split_result_for_readers", "(", "0", ",", "num_splits", ",", "df", ")", "+", "[", "len", "(", "df", ".", "index", ")", "]" ]
Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is no `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "columns", "from", "HDF5", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L100-L119
train
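The underlying pandas call can be exercised directly; the path below is hypothetical, and column selection only works for stores written in `table` format:

    import pandas

    df = pandas.read_hdf("example.h5", columns=["col_a"])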
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_feather_columns
def _read_feather_columns(path, columns, num_splits): # pragma: no cover """Use a Ray task to read columns from Feather into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Feather file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ from pyarrow import feather df = feather.read_feather(path, columns=columns) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
python
def _read_feather_columns(path, columns, num_splits): # pragma: no cover """Use a Ray task to read columns from Feather into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Feather file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ from pyarrow import feather df = feather.read_feather(path, columns=columns) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
[ "def", "_read_feather_columns", "(", "path", ",", "columns", ",", "num_splits", ")", ":", "# pragma: no cover", "from", "pyarrow", "import", "feather", "df", "=", "feather", ".", "read_feather", "(", "path", ",", "columns", "=", "columns", ")", "# Append the length of the index here to build it externally", "return", "_split_result_for_readers", "(", "0", ",", "num_splits", ",", "df", ")", "+", "[", "len", "(", "df", ".", "index", ")", "]" ]
Use a Ray task to read columns from Feather into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Feather file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is no `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "columns", "from", "Feather", "into", "a", "Pandas", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L123-L143
train
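The pyarrow call this task wraps, runnable standalone with a hypothetical path:

    from pyarrow import feather

    df = feather.read_feather("example.feather", columns=["col_a"])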
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
_read_sql_with_limit_offset
def _read_sql_with_limit_offset( num_splits, sql, con, index_col, kwargs ): # pragma: no cover """Use a Ray task to read a chunk of SQL source. Note: Ray functions are not detected by codecov (thus pragma: no cover) """ pandas_df = pandas.read_sql(sql, con, index_col=index_col, **kwargs) if index_col is None: index = len(pandas_df) else: index = pandas_df.index return _split_result_for_readers(1, num_splits, pandas_df) + [index]
python
def _read_sql_with_limit_offset( num_splits, sql, con, index_col, kwargs ): # pragma: no cover """Use a Ray task to read a chunk of SQL source. Note: Ray functions are not detected by codecov (thus pragma: no cover) """ pandas_df = pandas.read_sql(sql, con, index_col=index_col, **kwargs) if index_col is None: index = len(pandas_df) else: index = pandas_df.index return _split_result_for_readers(1, num_splits, pandas_df) + [index]
[ "def", "_read_sql_with_limit_offset", "(", "num_splits", ",", "sql", ",", "con", ",", "index_col", ",", "kwargs", ")", ":", "# pragma: no cover", "pandas_df", "=", "pandas", ".", "read_sql", "(", "sql", ",", "con", ",", "index_col", "=", "index_col", ",", "*", "*", "kwargs", ")", "if", "index_col", "is", "None", ":", "index", "=", "len", "(", "pandas_df", ")", "else", ":", "index", "=", "pandas_df", ".", "index", "return", "_split_result_for_readers", "(", "1", ",", "num_splits", ",", "pandas_df", ")", "+", "[", "index", "]" ]
Use a Ray task to read a chunk of SQL source. Note: Ray functions are not detected by codecov (thus pragma: no cover)
[ "Use", "a", "Ray", "task", "to", "read", "a", "chunk", "of", "SQL", "source", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L147-L159
train
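As the function name suggests, the caller is assumed to hand each task a LIMIT/OFFSET query; a self-contained sketch against an in-memory SQLite database with invented data:

    import sqlite3
    import pandas

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE t (id INTEGER, val TEXT)")
    con.execute("INSERT INTO t VALUES (1, 'a'), (2, 'b'), (3, 'c')")
    chunk = pandas.read_sql("SELECT * FROM t LIMIT 2 OFFSET 1", con)  # one partition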
modin-project/modin
modin/engines/ray/generic/io.py
get_index
def get_index(index_name, *partition_indices): # pragma: no cover """Get the index from the indices returned by the workers. Note: Ray functions are not detected by codecov (thus pragma: no cover)""" index = partition_indices[0].append(partition_indices[1:]) index.names = index_name return index
python
def get_index(index_name, *partition_indices): # pragma: no cover """Get the index from the indices returned by the workers. Note: Ray functions are not detected by codecov (thus pragma: no cover)""" index = partition_indices[0].append(partition_indices[1:]) index.names = index_name return index
[ "def", "get_index", "(", "index_name", ",", "*", "partition_indices", ")", ":", "# pragma: no cover", "index", "=", "partition_indices", "[", "0", "]", ".", "append", "(", "partition_indices", "[", "1", ":", "]", ")", "index", ".", "names", "=", "index_name", "return", "index" ]
Get the index from the indices returned by the workers. Note: Ray functions are not detected by codecov (thus pragma: no cover)
[ "Get", "the", "index", "from", "the", "indices", "returned", "by", "the", "workers", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L64-L70
train
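The same append-and-rename pattern with plain pandas indexes (invented values):

    import pandas

    partition_indices = [pandas.Index([0, 1]), pandas.Index([2, 3])]
    index = partition_indices[0].append(partition_indices[1:])  # append takes a list
    index.names = ["id"]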
modin-project/modin
modin/engines/ray/generic/io.py
RayIO.read_parquet
def read_parquet(cls, path, engine, columns, **kwargs): """Load a parquet object from the file path, returning a DataFrame. Ray DataFrame only supports pyarrow engine for now. Args: path: The filepath of the parquet file. We only support local files for now. engine: Ray only support pyarrow reader. This argument doesn't do anything for now. kwargs: Pass into parquet's read_pandas function. Notes: ParquetFile API is used. Please refer to the documentation here https://arrow.apache.org/docs/python/parquet.html """ from pyarrow.parquet import ParquetFile if cls.read_parquet_remote_task is None: return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs) if not columns: pf = ParquetFile(path) columns = [ name for name in pf.metadata.schema.names if not PQ_INDEX_REGEX.match(name) ] num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] # Each item in this list will be a list of columns of original df # partitioned to smaller pieces along rows. # We need to transpose the oids array to fit our schema. blk_partitions = np.array( [ cls.read_parquet_remote_task._remote( args=(path, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
python
def read_parquet(cls, path, engine, columns, **kwargs): """Load a parquet object from the file path, returning a DataFrame. Ray DataFrame only supports pyarrow engine for now. Args: path: The filepath of the parquet file. We only support local files for now. engine: Ray only support pyarrow reader. This argument doesn't do anything for now. kwargs: Pass into parquet's read_pandas function. Notes: ParquetFile API is used. Please refer to the documentation here https://arrow.apache.org/docs/python/parquet.html """ from pyarrow.parquet import ParquetFile if cls.read_parquet_remote_task is None: return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs) if not columns: pf = ParquetFile(path) columns = [ name for name in pf.metadata.schema.names if not PQ_INDEX_REGEX.match(name) ] num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] # Each item in this list will be a list of columns of original df # partitioned to smaller pieces along rows. # We need to transpose the oids array to fit our schema. blk_partitions = np.array( [ cls.read_parquet_remote_task._remote( args=(path, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
[ "def", "read_parquet", "(", "cls", ",", "path", ",", "engine", ",", "columns", ",", "*", "*", "kwargs", ")", ":", "from", "pyarrow", ".", "parquet", "import", "ParquetFile", "if", "cls", ".", "read_parquet_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_parquet", "(", "path", ",", "engine", ",", "columns", ",", "*", "*", "kwargs", ")", "if", "not", "columns", ":", "pf", "=", "ParquetFile", "(", "path", ")", "columns", "=", "[", "name", "for", "name", "in", "pf", ".", "metadata", ".", "schema", ".", "names", "if", "not", "PQ_INDEX_REGEX", ".", "match", "(", "name", ")", "]", "num_partitions", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "num_splits", "=", "min", "(", "len", "(", "columns", ")", ",", "num_partitions", ")", "# Each item in this list will be a list of column names of the original df", "column_splits", "=", "(", "len", "(", "columns", ")", "//", "num_partitions", "if", "len", "(", "columns", ")", "%", "num_partitions", "==", "0", "else", "len", "(", "columns", ")", "//", "num_partitions", "+", "1", ")", "col_partitions", "=", "[", "columns", "[", "i", ":", "i", "+", "column_splits", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "columns", ")", ",", "column_splits", ")", "]", "# Each item in this list will be a list of columns of original df", "# partitioned to smaller pieces along rows.", "# We need to transpose the oids array to fit our schema.", "blk_partitions", "=", "np", ".", "array", "(", "[", "cls", ".", "read_parquet_remote_task", ".", "_remote", "(", "args", "=", "(", "path", ",", "cols", ",", "num_splits", ",", "kwargs", ")", ",", "num_return_vals", "=", "num_splits", "+", "1", ",", ")", "for", "cols", "in", "col_partitions", "]", ")", ".", "T", "remote_partitions", "=", "np", ".", "array", "(", "[", "[", "cls", ".", "frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "row", "]", "for", "row", "in", "blk_partitions", "[", ":", "-", "1", "]", "]", ")", "index_len", "=", "ray", ".", "get", "(", "blk_partitions", "[", "-", "1", "]", "[", "0", "]", ")", "index", "=", "pandas", ".", "RangeIndex", "(", "index_len", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "remote_partitions", ")", ",", "index", ",", "columns", ")", "return", "new_query_compiler" ]
Load a parquet object from the file path, returning a DataFrame. Ray DataFrame only supports pyarrow engine for now. Args: path: The filepath of the parquet file. We only support local files for now. engine: Ray only supports the pyarrow reader. This argument doesn't do anything for now. kwargs: Pass into parquet's read_pandas function. Notes: ParquetFile API is used. Please refer to the documentation here https://arrow.apache.org/docs/python/parquet.html
[ "Load", "a", "parquet", "object", "from", "the", "file", "path", "returning", "a", "DataFrame", ".", "Ray", "DataFrame", "only", "supports", "pyarrow", "engine", "for", "now", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L129-L193
train
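The column partitioning in `read_parquet` is a ceil division followed by fixed-size slicing; a pure-Python sketch with invented column names:

    columns = ["c%d" % i for i in range(10)]
    num_partitions = 4
    column_splits = (
        len(columns) // num_partitions
        if len(columns) % num_partitions == 0
        else len(columns) // num_partitions + 1
    )
    col_partitions = [
        columns[i : i + column_splits]
        for i in range(0, len(columns), column_splits)
    ]
    # [['c0', 'c1', 'c2'], ['c3', 'c4', 'c5'], ['c6', 'c7', 'c8'], ['c9']]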
modin-project/modin
modin/engines/ray/generic/io.py
RayIO._read_csv_from_file_pandas_on_ray
def _read_csv_from_file_pandas_on_ray(cls, filepath, kwargs={}): """Constructs a DataFrame from a CSV file. Args: filepath (str): path to the CSV file. npartitions (int): number of partitions for the DataFrame. kwargs (dict): args excluding filepath provided to read_csv. Returns: DataFrame or Series constructed from CSV file. """ names = kwargs.get("names", None) index_col = kwargs.get("index_col", None) if names is None: # For the sake of the empty df, we assume no `index_col` to get the correct # column names before we build the index. Because we pass `names` in, this # step has to happen without removing the `index_col` otherwise it will not # be assigned correctly kwargs["index_col"] = None names = pandas.read_csv( file_open(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0) ).columns kwargs["index_col"] = index_col empty_pd_df = pandas.read_csv( file_open(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0) ) column_names = empty_pd_df.columns skipfooter = kwargs.get("skipfooter", None) skiprows = kwargs.pop("skiprows", None) parse_dates = kwargs.pop("parse_dates", False) partition_kwargs = dict( kwargs, header=None, names=names, skipfooter=0, skiprows=None, parse_dates=parse_dates, ) with file_open(filepath, "rb") as f: # Get the BOM if necessary prefix = b"" if kwargs.get("encoding", None) is not None: prefix = f.readline() partition_kwargs["skiprows"] = 1 f.seek(0, os.SEEK_SET) # Return to beginning of file prefix_id = ray.put(prefix) partition_kwargs_id = ray.put(partition_kwargs) # Skip the header since we already have the header information and skip the # rows we are told to skip. kwargs["skiprows"] = skiprows cls._skip_header(f, kwargs) # Launch tasks to read partitions partition_ids = [] index_ids = [] total_bytes = file_size(f) # Max number of partitions available num_parts = cls.frame_mgr_cls._compute_num_partitions() # This is the number of splits for the columns num_splits = min(len(column_names), num_parts) # This is the chunksize each partition will read chunk_size = max(1, (total_bytes - f.tell()) // num_parts) while f.tell() < total_bytes: start = f.tell() f.seek(chunk_size, os.SEEK_CUR) f.readline() # Read a whole number of lines partition_id = cls.read_csv_remote_task._remote( args=( filepath, num_splits, start, f.tell(), partition_kwargs_id, prefix_id, ), num_return_vals=num_splits + 1, ) partition_ids.append( [cls.frame_partition_cls(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) if index_col is None: new_index = pandas.RangeIndex(sum(ray.get(index_ids))) else: new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids) new_index = ray.get(new_index_ids) # If parse_dates is present, the column names that we have might not be # the same length as the returned column names. If we do need to modify # the column names, we remove the old names from the column names and # insert the new one at the front of the Index. if parse_dates is not None: # Check if is list of lists if isinstance(parse_dates, list) and isinstance(parse_dates[0], list): for group in parse_dates: new_col_name = "_".join(group) column_names = column_names.drop(group).insert(0, new_col_name) # Check if it is a dictionary elif isinstance(parse_dates, dict): for new_col_name, group in parse_dates.items(): column_names = column_names.drop(group).insert(0, new_col_name) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, column_names ) if skipfooter: new_query_compiler = new_query_compiler.drop( new_query_compiler.index[-skipfooter:] ) if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1: return new_query_compiler[new_query_compiler.columns[0]] return new_query_compiler
python
def _read_csv_from_file_pandas_on_ray(cls, filepath, kwargs={}): """Constructs a DataFrame from a CSV file. Args: filepath (str): path to the CSV file. npartitions (int): number of partitions for the DataFrame. kwargs (dict): args excluding filepath provided to read_csv. Returns: DataFrame or Series constructed from CSV file. """ names = kwargs.get("names", None) index_col = kwargs.get("index_col", None) if names is None: # For the sake of the empty df, we assume no `index_col` to get the correct # column names before we build the index. Because we pass `names` in, this # step has to happen without removing the `index_col` otherwise it will not # be assigned correctly kwargs["index_col"] = None names = pandas.read_csv( file_open(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0) ).columns kwargs["index_col"] = index_col empty_pd_df = pandas.read_csv( file_open(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0) ) column_names = empty_pd_df.columns skipfooter = kwargs.get("skipfooter", None) skiprows = kwargs.pop("skiprows", None) parse_dates = kwargs.pop("parse_dates", False) partition_kwargs = dict( kwargs, header=None, names=names, skipfooter=0, skiprows=None, parse_dates=parse_dates, ) with file_open(filepath, "rb") as f: # Get the BOM if necessary prefix = b"" if kwargs.get("encoding", None) is not None: prefix = f.readline() partition_kwargs["skiprows"] = 1 f.seek(0, os.SEEK_SET) # Return to beginning of file prefix_id = ray.put(prefix) partition_kwargs_id = ray.put(partition_kwargs) # Skip the header since we already have the header information and skip the # rows we are told to skip. kwargs["skiprows"] = skiprows cls._skip_header(f, kwargs) # Launch tasks to read partitions partition_ids = [] index_ids = [] total_bytes = file_size(f) # Max number of partitions available num_parts = cls.frame_mgr_cls._compute_num_partitions() # This is the number of splits for the columns num_splits = min(len(column_names), num_parts) # This is the chunksize each partition will read chunk_size = max(1, (total_bytes - f.tell()) // num_parts) while f.tell() < total_bytes: start = f.tell() f.seek(chunk_size, os.SEEK_CUR) f.readline() # Read a whole number of lines partition_id = cls.read_csv_remote_task._remote( args=( filepath, num_splits, start, f.tell(), partition_kwargs_id, prefix_id, ), num_return_vals=num_splits + 1, ) partition_ids.append( [cls.frame_partition_cls(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) if index_col is None: new_index = pandas.RangeIndex(sum(ray.get(index_ids))) else: new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids) new_index = ray.get(new_index_ids) # If parse_dates is present, the column names that we have might not be # the same length as the returned column names. If we do need to modify # the column names, we remove the old names from the column names and # insert the new one at the front of the Index. if parse_dates is not None: # Check if is list of lists if isinstance(parse_dates, list) and isinstance(parse_dates[0], list): for group in parse_dates: new_col_name = "_".join(group) column_names = column_names.drop(group).insert(0, new_col_name) # Check if it is a dictionary elif isinstance(parse_dates, dict): for new_col_name, group in parse_dates.items(): column_names = column_names.drop(group).insert(0, new_col_name) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, column_names ) if skipfooter: new_query_compiler = new_query_compiler.drop( new_query_compiler.index[-skipfooter:] ) if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1: return new_query_compiler[new_query_compiler.columns[0]] return new_query_compiler
[ "def", "_read_csv_from_file_pandas_on_ray", "(", "cls", ",", "filepath", ",", "kwargs", "=", "{", "}", ")", ":", "names", "=", "kwargs", ".", "get", "(", "\"names\"", ",", "None", ")", "index_col", "=", "kwargs", ".", "get", "(", "\"index_col\"", ",", "None", ")", "if", "names", "is", "None", ":", "# For the sake of the empty df, we assume no `index_col` to get the correct", "# column names before we build the index. Because we pass `names` in, this", "# step has to happen without removing the `index_col` otherwise it will not", "# be assigned correctly", "kwargs", "[", "\"index_col\"", "]", "=", "None", "names", "=", "pandas", ".", "read_csv", "(", "file_open", "(", "filepath", ",", "\"rb\"", ")", ",", "*", "*", "dict", "(", "kwargs", ",", "nrows", "=", "0", ",", "skipfooter", "=", "0", ")", ")", ".", "columns", "kwargs", "[", "\"index_col\"", "]", "=", "index_col", "empty_pd_df", "=", "pandas", ".", "read_csv", "(", "file_open", "(", "filepath", ",", "\"rb\"", ")", ",", "*", "*", "dict", "(", "kwargs", ",", "nrows", "=", "0", ",", "skipfooter", "=", "0", ")", ")", "column_names", "=", "empty_pd_df", ".", "columns", "skipfooter", "=", "kwargs", ".", "get", "(", "\"skipfooter\"", ",", "None", ")", "skiprows", "=", "kwargs", ".", "pop", "(", "\"skiprows\"", ",", "None", ")", "parse_dates", "=", "kwargs", ".", "pop", "(", "\"parse_dates\"", ",", "False", ")", "partition_kwargs", "=", "dict", "(", "kwargs", ",", "header", "=", "None", ",", "names", "=", "names", ",", "skipfooter", "=", "0", ",", "skiprows", "=", "None", ",", "parse_dates", "=", "parse_dates", ",", ")", "with", "file_open", "(", "filepath", ",", "\"rb\"", ")", "as", "f", ":", "# Get the BOM if necessary", "prefix", "=", "b\"\"", "if", "kwargs", ".", "get", "(", "\"encoding\"", ",", "None", ")", "is", "not", "None", ":", "prefix", "=", "f", ".", "readline", "(", ")", "partition_kwargs", "[", "\"skiprows\"", "]", "=", "1", "f", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "# Return to beginning of file", "prefix_id", "=", "ray", ".", "put", "(", "prefix", ")", "partition_kwargs_id", "=", "ray", ".", "put", "(", "partition_kwargs", ")", "# Skip the header since we already have the header information and skip the", "# rows we are told to skip.", "kwargs", "[", "\"skiprows\"", "]", "=", "skiprows", "cls", ".", "_skip_header", "(", "f", ",", "kwargs", ")", "# Launch tasks to read partitions", "partition_ids", "=", "[", "]", "index_ids", "=", "[", "]", "total_bytes", "=", "file_size", "(", "f", ")", "# Max number of partitions available", "num_parts", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "# This is the number of splits for the columns", "num_splits", "=", "min", "(", "len", "(", "column_names", ")", ",", "num_parts", ")", "# This is the chunksize each partition will read", "chunk_size", "=", "max", "(", "1", ",", "(", "total_bytes", "-", "f", ".", "tell", "(", ")", ")", "//", "num_parts", ")", "while", "f", ".", "tell", "(", ")", "<", "total_bytes", ":", "start", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "chunk_size", ",", "os", ".", "SEEK_CUR", ")", "f", ".", "readline", "(", ")", "# Read a whole number of lines", "partition_id", "=", "cls", ".", "read_csv_remote_task", ".", "_remote", "(", "args", "=", "(", "filepath", ",", "num_splits", ",", "start", ",", "f", ".", "tell", "(", ")", ",", "partition_kwargs_id", ",", "prefix_id", ",", ")", ",", "num_return_vals", "=", "num_splits", "+", "1", ",", ")", "partition_ids", ".", "append", "(", "[", "cls", ".", 
"frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "partition_id", "[", ":", "-", "1", "]", "]", ")", "index_ids", ".", "append", "(", "partition_id", "[", "-", "1", "]", ")", "if", "index_col", "is", "None", ":", "new_index", "=", "pandas", ".", "RangeIndex", "(", "sum", "(", "ray", ".", "get", "(", "index_ids", ")", ")", ")", "else", ":", "new_index_ids", "=", "get_index", ".", "remote", "(", "[", "empty_pd_df", ".", "index", ".", "name", "]", ",", "*", "index_ids", ")", "new_index", "=", "ray", ".", "get", "(", "new_index_ids", ")", "# If parse_dates is present, the column names that we have might not be", "# the same length as the returned column names. If we do need to modify", "# the column names, we remove the old names from the column names and", "# insert the new one at the front of the Index.", "if", "parse_dates", "is", "not", "None", ":", "# Check if is list of lists", "if", "isinstance", "(", "parse_dates", ",", "list", ")", "and", "isinstance", "(", "parse_dates", "[", "0", "]", ",", "list", ")", ":", "for", "group", "in", "parse_dates", ":", "new_col_name", "=", "\"_\"", ".", "join", "(", "group", ")", "column_names", "=", "column_names", ".", "drop", "(", "group", ")", ".", "insert", "(", "0", ",", "new_col_name", ")", "# Check if it is a dictionary", "elif", "isinstance", "(", "parse_dates", ",", "dict", ")", ":", "for", "new_col_name", ",", "group", "in", "parse_dates", ".", "items", "(", ")", ":", "column_names", "=", "column_names", ".", "drop", "(", "group", ")", ".", "insert", "(", "0", ",", "new_col_name", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "np", ".", "array", "(", "partition_ids", ")", ")", ",", "new_index", ",", "column_names", ")", "if", "skipfooter", ":", "new_query_compiler", "=", "new_query_compiler", ".", "drop", "(", "new_query_compiler", ".", "index", "[", "-", "skipfooter", ":", "]", ")", "if", "kwargs", ".", "get", "(", "\"squeeze\"", ",", "False", ")", "and", "len", "(", "new_query_compiler", ".", "columns", ")", "==", "1", ":", "return", "new_query_compiler", "[", "new_query_compiler", ".", "columns", "[", "0", "]", "]", "return", "new_query_compiler" ]
Constructs a DataFrame from a CSV file. Args: filepath (str): path to the CSV file. npartitions (int): number of partitions for the DataFrame. kwargs (dict): args excluding filepath provided to read_csv. Returns: DataFrame or Series constructed from CSV file.
[ "Constructs", "a", "DataFrame", "from", "a", "CSV", "file", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L242-L357
train
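The `parse_dates` column-name rewriting is observable with a bare `pandas.Index` (invented names):

    import pandas

    column_names = pandas.Index(["year", "month", "day", "value"])
    parse_dates = [["year", "month", "day"]]
    for group in parse_dates:
        new_col_name = "_".join(group)
        column_names = column_names.drop(group).insert(0, new_col_name)
    # Index(['year_month_day', 'value'])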
modin-project/modin
modin/engines/ray/generic/io.py
RayIO._read
def _read(cls, filepath_or_buffer, **kwargs): """Read csv file from local disk. Args: filepath_or_buffer: The filepath of the csv file. We only support local files for now. kwargs: Keyword arguments in pandas.read_csv """ # The intention of the inspection code is to reduce the amount of # communication we have to do between processes and nodes. We take a quick # pass over the arguments and remove those that are default values so we # don't have to serialize and send them to the workers. Because the # arguments list is so long, this does end up saving time based on the # number of nodes in the cluster. try: args, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.read_csv) defaults = dict(zip(args[2:], defaults)) filtered_kwargs = { kw: kwargs[kw] for kw in kwargs if kw in defaults and not isinstance(kwargs[kw], type(defaults[kw])) or kwargs[kw] != defaults[kw] } # This happens on Python2, we will just default to serializing the entire dictionary except AttributeError: filtered_kwargs = kwargs if isinstance(filepath_or_buffer, str): if not file_exists(filepath_or_buffer): ErrorMessage.default_to_pandas("File path could not be resolved") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) elif not isinstance(filepath_or_buffer, py.path.local): read_from_pandas = True # Pandas read_csv supports pathlib.Path try: import pathlib if isinstance(filepath_or_buffer, pathlib.Path): read_from_pandas = False except ImportError: # pragma: no cover pass if read_from_pandas: ErrorMessage.default_to_pandas("Reading from buffer.") return cls._read_csv_from_pandas(filepath_or_buffer, kwargs) if ( _infer_compression(filepath_or_buffer, kwargs.get("compression")) is not None ): ErrorMessage.default_to_pandas("Compression detected.") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) chunksize = kwargs.get("chunksize") if chunksize is not None: ErrorMessage.default_to_pandas("Reading chunks from a file.") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) skiprows = kwargs.get("skiprows") if skiprows is not None and not isinstance(skiprows, int): ErrorMessage.default_to_pandas("skiprows parameter not optimized yet.") return cls._read_csv_from_pandas(filepath_or_buffer, kwargs) # TODO: replace this by reading lines from file. if kwargs.get("nrows") is not None: ErrorMessage.default_to_pandas("`read_csv` with `nrows`") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) else: return cls._read_csv_from_file_pandas_on_ray( filepath_or_buffer, filtered_kwargs )
python
def _read(cls, filepath_or_buffer, **kwargs): """Read csv file from local disk. Args: filepath_or_buffer: The filepath of the csv file. We only support local files for now. kwargs: Keyword arguments in pandas.read_csv """ # The intention of the inspection code is to reduce the amount of # communication we have to do between processes and nodes. We take a quick # pass over the arguments and remove those that are default values so we # don't have to serialize and send them to the workers. Because the # arguments list is so long, this does end up saving time based on the # number of nodes in the cluster. try: args, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.read_csv) defaults = dict(zip(args[2:], defaults)) filtered_kwargs = { kw: kwargs[kw] for kw in kwargs if kw in defaults and not isinstance(kwargs[kw], type(defaults[kw])) or kwargs[kw] != defaults[kw] } # This happens on Python2, we will just default to serializing the entire dictionary except AttributeError: filtered_kwargs = kwargs if isinstance(filepath_or_buffer, str): if not file_exists(filepath_or_buffer): ErrorMessage.default_to_pandas("File path could not be resolved") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) elif not isinstance(filepath_or_buffer, py.path.local): read_from_pandas = True # Pandas read_csv supports pathlib.Path try: import pathlib if isinstance(filepath_or_buffer, pathlib.Path): read_from_pandas = False except ImportError: # pragma: no cover pass if read_from_pandas: ErrorMessage.default_to_pandas("Reading from buffer.") return cls._read_csv_from_pandas(filepath_or_buffer, kwargs) if ( _infer_compression(filepath_or_buffer, kwargs.get("compression")) is not None ): ErrorMessage.default_to_pandas("Compression detected.") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) chunksize = kwargs.get("chunksize") if chunksize is not None: ErrorMessage.default_to_pandas("Reading chunks from a file.") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) skiprows = kwargs.get("skiprows") if skiprows is not None and not isinstance(skiprows, int): ErrorMessage.default_to_pandas("skiprows parameter not optimized yet.") return cls._read_csv_from_pandas(filepath_or_buffer, kwargs) # TODO: replace this by reading lines from file. if kwargs.get("nrows") is not None: ErrorMessage.default_to_pandas("`read_csv` with `nrows`") return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs) else: return cls._read_csv_from_file_pandas_on_ray( filepath_or_buffer, filtered_kwargs )
[ "def", "_read", "(", "cls", ",", "filepath_or_buffer", ",", "*", "*", "kwargs", ")", ":", "# The intention of the inspection code is to reduce the amount of", "# communication we have to do between processes and nodes. We take a quick", "# pass over the arguments and remove those that are default values so we", "# don't have to serialize and send them to the workers. Because the", "# arguments list is so long, this does end up saving time based on the", "# number of nodes in the cluster.", "try", ":", "args", ",", "_", ",", "_", ",", "defaults", ",", "_", ",", "_", ",", "_", "=", "inspect", ".", "getfullargspec", "(", "cls", ".", "read_csv", ")", "defaults", "=", "dict", "(", "zip", "(", "args", "[", "2", ":", "]", ",", "defaults", ")", ")", "filtered_kwargs", "=", "{", "kw", ":", "kwargs", "[", "kw", "]", "for", "kw", "in", "kwargs", "if", "kw", "in", "defaults", "and", "not", "isinstance", "(", "kwargs", "[", "kw", "]", ",", "type", "(", "defaults", "[", "kw", "]", ")", ")", "or", "kwargs", "[", "kw", "]", "!=", "defaults", "[", "kw", "]", "}", "# This happens on Python2, we will just default to serializing the entire dictionary", "except", "AttributeError", ":", "filtered_kwargs", "=", "kwargs", "if", "isinstance", "(", "filepath_or_buffer", ",", "str", ")", ":", "if", "not", "file_exists", "(", "filepath_or_buffer", ")", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"File path could not be resolved\"", ")", "return", "cls", ".", "_read_csv_from_pandas", "(", "filepath_or_buffer", ",", "filtered_kwargs", ")", "elif", "not", "isinstance", "(", "filepath_or_buffer", ",", "py", ".", "path", ".", "local", ")", ":", "read_from_pandas", "=", "True", "# Pandas read_csv supports pathlib.Path", "try", ":", "import", "pathlib", "if", "isinstance", "(", "filepath_or_buffer", ",", "pathlib", ".", "Path", ")", ":", "read_from_pandas", "=", "False", "except", "ImportError", ":", "# pragma: no cover", "pass", "if", "read_from_pandas", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"Reading from buffer.\"", ")", "return", "cls", ".", "_read_csv_from_pandas", "(", "filepath_or_buffer", ",", "kwargs", ")", "if", "(", "_infer_compression", "(", "filepath_or_buffer", ",", "kwargs", ".", "get", "(", "\"compression\"", ")", ")", "is", "not", "None", ")", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"Compression detected.\"", ")", "return", "cls", ".", "_read_csv_from_pandas", "(", "filepath_or_buffer", ",", "filtered_kwargs", ")", "chunksize", "=", "kwargs", ".", "get", "(", "\"chunksize\"", ")", "if", "chunksize", "is", "not", "None", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"Reading chunks from a file.\"", ")", "return", "cls", ".", "_read_csv_from_pandas", "(", "filepath_or_buffer", ",", "filtered_kwargs", ")", "skiprows", "=", "kwargs", ".", "get", "(", "\"skiprows\"", ")", "if", "skiprows", "is", "not", "None", "and", "not", "isinstance", "(", "skiprows", ",", "int", ")", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"skiprows parameter not optimized yet.\"", ")", "return", "cls", ".", "_read_csv_from_pandas", "(", "filepath_or_buffer", ",", "kwargs", ")", "# TODO: replace this by reading lines from file.", "if", "kwargs", ".", "get", "(", "\"nrows\"", ")", "is", "not", "None", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"`read_csv` with `nrows`\"", ")", "return", "cls", ".", "_read_csv_from_pandas", "(", "filepath_or_buffer", ",", "filtered_kwargs", ")", "else", ":", "return", "cls", ".", "_read_csv_from_file_pandas_on_ray", "(", 
"filepath_or_buffer", ",", "filtered_kwargs", ")" ]
Read csv file from local disk. Args: filepath_or_buffer: The filepath of the csv file. We only support local files for now. kwargs: Keyword arguments in pandas.read_csv
[ "Read", "csv", "file", "from", "local", "disk", ".", "Args", ":", "filepath_or_buffer", ":", "The", "filepath", "of", "the", "csv", "file", ".", "We", "only", "support", "local", "files", "for", "now", ".", "kwargs", ":", "Keyword", "arguments", "in", "pandas", ".", "read_csv" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L483-L551
train
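The default-stripping logic in `_read` can be replayed against a hypothetical stand-in whose first two positional arguments mirror `(cls, filepath_or_buffer)`:

    import inspect

    def read_csv(cls, filepath_or_buffer, sep=",", header="infer"):
        pass  # hypothetical stand-in; only the defaults matter here

    args, _, _, defaults, _, _, _ = inspect.getfullargspec(read_csv)
    defaults = dict(zip(args[2:], defaults))
    kwargs = {"sep": ",", "header": 0}
    filtered_kwargs = {
        kw: kwargs[kw]
        for kw in kwargs
        if kw in defaults
        and not isinstance(kwargs[kw], type(defaults[kw]))
        or kwargs[kw] != defaults[kw]
    }
    # {'header': 0} -- "sep" equals its default, so it is not shipped to workers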
modin-project/modin
modin/engines/ray/generic/io.py
RayIO.read_hdf
def read_hdf(cls, path_or_buf, **kwargs): """Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file. """ if cls.read_hdf_remote_task is None: return super(RayIO, cls).read_hdf(path_or_buf, **kwargs) format = cls._validate_hdf_format(path_or_buf=path_or_buf) if format is None: ErrorMessage.default_to_pandas( "File format seems to be `fixed`. For better distribution consider saving the file in `table` format. " "df.to_hdf(format=`table`)." ) return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs)) columns = kwargs.get("columns", None) if not columns: empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0) columns = empty_pd_df.columns num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_hdf_remote_task._remote( args=(path_or_buf, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
python
def read_hdf(cls, path_or_buf, **kwargs): """Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file. """ if cls.read_hdf_remote_task is None: return super(RayIO, cls).read_hdf(path_or_buf, **kwargs) format = cls._validate_hdf_format(path_or_buf=path_or_buf) if format is None: ErrorMessage.default_to_pandas( "File format seems to be `fixed`. For better distribution consider saving the file in `table` format. " "df.to_hdf(format=`table`)." ) return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs)) columns = kwargs.get("columns", None) if not columns: empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0) columns = empty_pd_df.columns num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_hdf_remote_task._remote( args=(path_or_buf, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
[ "def", "read_hdf", "(", "cls", ",", "path_or_buf", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "read_hdf_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_hdf", "(", "path_or_buf", ",", "*", "*", "kwargs", ")", "format", "=", "cls", ".", "_validate_hdf_format", "(", "path_or_buf", "=", "path_or_buf", ")", "if", "format", "is", "None", ":", "ErrorMessage", ".", "default_to_pandas", "(", "\"File format seems to be `fixed`. For better distribution consider saving the file in `table` format. \"", "\"df.to_hdf(format=`table`).\"", ")", "return", "cls", ".", "from_pandas", "(", "pandas", ".", "read_hdf", "(", "path_or_buf", "=", "path_or_buf", ",", "*", "*", "kwargs", ")", ")", "columns", "=", "kwargs", ".", "get", "(", "\"columns\"", ",", "None", ")", "if", "not", "columns", ":", "empty_pd_df", "=", "pandas", ".", "read_hdf", "(", "path_or_buf", ",", "start", "=", "0", ",", "stop", "=", "0", ")", "columns", "=", "empty_pd_df", ".", "columns", "num_partitions", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "num_splits", "=", "min", "(", "len", "(", "columns", ")", ",", "num_partitions", ")", "# Each item in this list will be a list of column names of the original df", "column_splits", "=", "(", "len", "(", "columns", ")", "//", "num_partitions", "if", "len", "(", "columns", ")", "%", "num_partitions", "==", "0", "else", "len", "(", "columns", ")", "//", "num_partitions", "+", "1", ")", "col_partitions", "=", "[", "columns", "[", "i", ":", "i", "+", "column_splits", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "columns", ")", ",", "column_splits", ")", "]", "blk_partitions", "=", "np", ".", "array", "(", "[", "cls", ".", "read_hdf_remote_task", ".", "_remote", "(", "args", "=", "(", "path_or_buf", ",", "cols", ",", "num_splits", ",", "kwargs", ")", ",", "num_return_vals", "=", "num_splits", "+", "1", ",", ")", "for", "cols", "in", "col_partitions", "]", ")", ".", "T", "remote_partitions", "=", "np", ".", "array", "(", "[", "[", "cls", ".", "frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "row", "]", "for", "row", "in", "blk_partitions", "[", ":", "-", "1", "]", "]", ")", "index_len", "=", "ray", ".", "get", "(", "blk_partitions", "[", "-", "1", "]", "[", "0", "]", ")", "index", "=", "pandas", ".", "RangeIndex", "(", "index_len", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "remote_partitions", ")", ",", "index", ",", "columns", ")", "return", "new_query_compiler" ]
Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file.
[ "Load", "a", "h5", "file", "from", "the", "file", "path", "or", "buffer", "returning", "a", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L565-L625
train
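A minimal usage sketch for the reader above, assuming PyTables and a Ray-backed Modin install; the file name example.h5 and key "data" are illustrative. Writing with format="table" keeps the distributed, column-partitioned path instead of the pandas fallback.

import pandas as pd
import modin.pandas as mpd

# Write in `table` format so read_hdf can split the file by column.
pd.DataFrame({"a": range(10), "b": range(10)}).to_hdf(
    "example.h5", key="data", format="table"
)

df = mpd.read_hdf("example.h5", key="data")  # distributed, column-partitioned read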
modin-project/modin
modin/engines/ray/generic/io.py
RayIO.read_feather
def read_feather(cls, path, columns=None, use_threads=True):
    """Read a pandas.DataFrame from Feather format.
       Ray DataFrame only supports pyarrow engine for now.

    Args:
        path: The filepath of the feather file.
              We only support local files for now.
        columns: Not supported by the pandas API, but can be passed here to
            read only specific columns.
        use_threads: Whether or not to use threads when reading.
            Multithreading is set to True by default.

    Notes:
        pyarrow feather is used. Please refer to the documentation here
        https://arrow.apache.org/docs/python/api.html#feather-format
    """
    if cls.read_feather_remote_task is None:
        return super(RayIO, cls).read_feather(
            path, columns=columns, use_threads=use_threads
        )

    if columns is None:
        from pyarrow.feather import FeatherReader

        fr = FeatherReader(path)
        columns = [fr.get_column_name(i) for i in range(fr.num_columns)]

    num_partitions = cls.frame_mgr_cls._compute_num_partitions()
    num_splits = min(len(columns), num_partitions)
    # Each item in this list will be a list of column names of the original df
    column_splits = (
        len(columns) // num_partitions
        if len(columns) % num_partitions == 0
        else len(columns) // num_partitions + 1
    )
    col_partitions = [
        columns[i : i + column_splits]
        for i in range(0, len(columns), column_splits)
    ]
    blk_partitions = np.array(
        [
            cls.read_feather_remote_task._remote(
                args=(path, cols, num_splits), num_return_vals=num_splits + 1
            )
            for cols in col_partitions
        ]
    ).T
    remote_partitions = np.array(
        [
            [cls.frame_partition_cls(obj) for obj in row]
            for row in blk_partitions[:-1]
        ]
    )
    index_len = ray.get(blk_partitions[-1][0])
    index = pandas.RangeIndex(index_len)
    new_query_compiler = cls.query_compiler_cls(
        cls.frame_mgr_cls(remote_partitions), index, columns
    )
    return new_query_compiler
python
def read_feather(cls, path, columns=None, use_threads=True):
    """Read a pandas.DataFrame from Feather format.
       Ray DataFrame only supports pyarrow engine for now.

    Args:
        path: The filepath of the feather file.
              We only support local files for now.
        columns: Not supported by the pandas API, but can be passed here to
            read only specific columns.
        use_threads: Whether or not to use threads when reading.
            Multithreading is set to True by default.

    Notes:
        pyarrow feather is used. Please refer to the documentation here
        https://arrow.apache.org/docs/python/api.html#feather-format
    """
    if cls.read_feather_remote_task is None:
        return super(RayIO, cls).read_feather(
            path, columns=columns, use_threads=use_threads
        )

    if columns is None:
        from pyarrow.feather import FeatherReader

        fr = FeatherReader(path)
        columns = [fr.get_column_name(i) for i in range(fr.num_columns)]

    num_partitions = cls.frame_mgr_cls._compute_num_partitions()
    num_splits = min(len(columns), num_partitions)
    # Each item in this list will be a list of column names of the original df
    column_splits = (
        len(columns) // num_partitions
        if len(columns) % num_partitions == 0
        else len(columns) // num_partitions + 1
    )
    col_partitions = [
        columns[i : i + column_splits]
        for i in range(0, len(columns), column_splits)
    ]
    blk_partitions = np.array(
        [
            cls.read_feather_remote_task._remote(
                args=(path, cols, num_splits), num_return_vals=num_splits + 1
            )
            for cols in col_partitions
        ]
    ).T
    remote_partitions = np.array(
        [
            [cls.frame_partition_cls(obj) for obj in row]
            for row in blk_partitions[:-1]
        ]
    )
    index_len = ray.get(blk_partitions[-1][0])
    index = pandas.RangeIndex(index_len)
    new_query_compiler = cls.query_compiler_cls(
        cls.frame_mgr_cls(remote_partitions), index, columns
    )
    return new_query_compiler
[ "def", "read_feather", "(", "cls", ",", "path", ",", "columns", "=", "None", ",", "use_threads", "=", "True", ")", ":", "if", "cls", ".", "read_feather_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_feather", "(", "path", ",", "columns", "=", "columns", ",", "use_threads", "=", "use_threads", ")", "if", "columns", "is", "None", ":", "from", "pyarrow", ".", "feather", "import", "FeatherReader", "fr", "=", "FeatherReader", "(", "path", ")", "columns", "=", "[", "fr", ".", "get_column_name", "(", "i", ")", "for", "i", "in", "range", "(", "fr", ".", "num_columns", ")", "]", "num_partitions", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "num_splits", "=", "min", "(", "len", "(", "columns", ")", ",", "num_partitions", ")", "# Each item in this list will be a list of column names of the original df", "column_splits", "=", "(", "len", "(", "columns", ")", "//", "num_partitions", "if", "len", "(", "columns", ")", "%", "num_partitions", "==", "0", "else", "len", "(", "columns", ")", "//", "num_partitions", "+", "1", ")", "col_partitions", "=", "[", "columns", "[", "i", ":", "i", "+", "column_splits", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "columns", ")", ",", "column_splits", ")", "]", "blk_partitions", "=", "np", ".", "array", "(", "[", "cls", ".", "read_feather_remote_task", ".", "_remote", "(", "args", "=", "(", "path", ",", "cols", ",", "num_splits", ")", ",", "num_return_vals", "=", "num_splits", "+", "1", ")", "for", "cols", "in", "col_partitions", "]", ")", ".", "T", "remote_partitions", "=", "np", ".", "array", "(", "[", "[", "cls", ".", "frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "row", "]", "for", "row", "in", "blk_partitions", "[", ":", "-", "1", "]", "]", ")", "index_len", "=", "ray", ".", "get", "(", "blk_partitions", "[", "-", "1", "]", "[", "0", "]", ")", "index", "=", "pandas", ".", "RangeIndex", "(", "index_len", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "remote_partitions", ")", ",", "index", ",", "columns", ")", "return", "new_query_compiler" ]
Read a pandas.DataFrame from Feather format.
   Ray DataFrame only supports pyarrow engine for now.

Args:
    path: The filepath of the feather file.
          We only support local files for now.
    columns: Not supported by the pandas API, but can be passed here to
        read only specific columns.
    use_threads: Whether or not to use threads when reading.
        Multithreading is set to True by default.

Notes:
    pyarrow feather is used. Please refer to the documentation here
    https://arrow.apache.org/docs/python/api.html#feather-format
[ "Read", "a", "pandas", ".", "DataFrame", "from", "Feather", "format", ".", "Ray", "DataFrame", "only", "supports", "pyarrow", "engine", "for", "now", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L628-L686
train
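A short usage sketch, assuming pyarrow is installed; the file name is illustrative.

import pandas as pd
import modin.pandas as mpd

pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]}).to_feather("example.feather")
df = mpd.read_feather("example.feather")  # each column chunk is read in parallel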
modin-project/modin
modin/engines/ray/generic/io.py
RayIO.to_sql
def to_sql(cls, qc, **kwargs): """Write records stored in a DataFrame to a SQL database. Args: qc: the query compiler of the DF that we want to run to_sql on kwargs: parameters for pandas.to_sql(**kwargs) """ # we first insert an empty DF in order to create the full table in the database # This also helps to validate the input against pandas # we would like to_sql() to complete only when all rows have been inserted into the database # since the mapping operation is non-blocking, each partition will return an empty DF # so at the end, the blocking operation will be this empty DF to_pandas empty_df = qc.head(1).to_pandas().head(0) empty_df.to_sql(**kwargs) # so each partition will append its respective DF kwargs["if_exists"] = "append" columns = qc.columns def func(df, **kwargs): df.columns = columns df.to_sql(**kwargs) return pandas.DataFrame() map_func = qc._prepare_method(func, **kwargs) result = qc._map_across_full_axis(1, map_func) # blocking operation result.to_pandas()
python
def to_sql(cls, qc, **kwargs): """Write records stored in a DataFrame to a SQL database. Args: qc: the query compiler of the DF that we want to run to_sql on kwargs: parameters for pandas.to_sql(**kwargs) """ # we first insert an empty DF in order to create the full table in the database # This also helps to validate the input against pandas # we would like to_sql() to complete only when all rows have been inserted into the database # since the mapping operation is non-blocking, each partition will return an empty DF # so at the end, the blocking operation will be this empty DF to_pandas empty_df = qc.head(1).to_pandas().head(0) empty_df.to_sql(**kwargs) # so each partition will append its respective DF kwargs["if_exists"] = "append" columns = qc.columns def func(df, **kwargs): df.columns = columns df.to_sql(**kwargs) return pandas.DataFrame() map_func = qc._prepare_method(func, **kwargs) result = qc._map_across_full_axis(1, map_func) # blocking operation result.to_pandas()
[ "def", "to_sql", "(", "cls", ",", "qc", ",", "*", "*", "kwargs", ")", ":", "# we first insert an empty DF in order to create the full table in the database", "# This also helps to validate the input against pandas", "# we would like to_sql() to complete only when all rows have been inserted into the database", "# since the mapping operation is non-blocking, each partition will return an empty DF", "# so at the end, the blocking operation will be this empty DF to_pandas", "empty_df", "=", "qc", ".", "head", "(", "1", ")", ".", "to_pandas", "(", ")", ".", "head", "(", "0", ")", "empty_df", ".", "to_sql", "(", "*", "*", "kwargs", ")", "# so each partition will append its respective DF", "kwargs", "[", "\"if_exists\"", "]", "=", "\"append\"", "columns", "=", "qc", ".", "columns", "def", "func", "(", "df", ",", "*", "*", "kwargs", ")", ":", "df", ".", "columns", "=", "columns", "df", ".", "to_sql", "(", "*", "*", "kwargs", ")", "return", "pandas", ".", "DataFrame", "(", ")", "map_func", "=", "qc", ".", "_prepare_method", "(", "func", ",", "*", "*", "kwargs", ")", "result", "=", "qc", ".", "_map_across_full_axis", "(", "1", ",", "map_func", ")", "# blocking operation", "result", ".", "to_pandas", "(", ")" ]
Write records stored in a DataFrame to a SQL database. Args: qc: the query compiler of the DF that we want to run to_sql on kwargs: parameters for pandas.to_sql(**kwargs)
[ "Write", "records", "stored", "in", "a", "DataFrame", "to", "a", "SQL", "database", ".", "Args", ":", "qc", ":", "the", "query", "compiler", "of", "the", "DF", "that", "we", "want", "to", "run", "to_sql", "on", "kwargs", ":", "parameters", "for", "pandas", ".", "to_sql", "(", "**", "kwargs", ")" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L689-L715
train
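The create-empty-then-append pattern above can be reproduced with plain pandas and SQLAlchemy; this sketch stands in for what the partitions do, with an illustrative SQLite database.

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///example.db")
df = pd.DataFrame({"a": range(6)})

# Create the table from an empty slice first (validates schema against pandas),
# then append each chunk with if_exists="append", as each partition does.
df.head(0).to_sql("t", engine, index=False, if_exists="replace")
for chunk in (df.iloc[:3], df.iloc[3:]):
    chunk.to_sql("t", engine, if_exists="append", index=False)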
modin-project/modin
modin/engines/ray/generic/io.py
RayIO.read_sql
def read_sql(cls, sql, con, index_col=None, **kwargs): """Reads a SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). kwargs: Pass into pandas.read_sql function. """ if cls.read_sql_remote_task is None: return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs) row_cnt_query = "SELECT COUNT(*) FROM ({})".format(sql) row_cnt = pandas.read_sql(row_cnt_query, con).squeeze() cols_names_df = pandas.read_sql( "SELECT * FROM ({}) LIMIT 0".format(sql), con, index_col=index_col ) cols_names = cols_names_df.columns num_parts = cls.frame_mgr_cls._compute_num_partitions() partition_ids = [] index_ids = [] limit = math.ceil(row_cnt / num_parts) for part in range(num_parts): offset = part * limit query = "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, limit, offset) partition_id = cls.read_sql_remote_task._remote( args=(num_parts, query, con, index_col, kwargs), num_return_vals=num_parts + 1, ) partition_ids.append( [cls.frame_partition_cls(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) if index_col is None: # sum all lens returned from partitions index_lens = ray.get(index_ids) new_index = pandas.RangeIndex(sum(index_lens)) else: # concat index returned from partitions index_lst = [x for part_index in ray.get(index_ids) for x in part_index] new_index = pandas.Index(index_lst).set_names(index_col) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names ) return new_query_compiler
python
def read_sql(cls, sql, con, index_col=None, **kwargs): """Reads a SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). kwargs: Pass into pandas.read_sql function. """ if cls.read_sql_remote_task is None: return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs) row_cnt_query = "SELECT COUNT(*) FROM ({})".format(sql) row_cnt = pandas.read_sql(row_cnt_query, con).squeeze() cols_names_df = pandas.read_sql( "SELECT * FROM ({}) LIMIT 0".format(sql), con, index_col=index_col ) cols_names = cols_names_df.columns num_parts = cls.frame_mgr_cls._compute_num_partitions() partition_ids = [] index_ids = [] limit = math.ceil(row_cnt / num_parts) for part in range(num_parts): offset = part * limit query = "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, limit, offset) partition_id = cls.read_sql_remote_task._remote( args=(num_parts, query, con, index_col, kwargs), num_return_vals=num_parts + 1, ) partition_ids.append( [cls.frame_partition_cls(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) if index_col is None: # sum all lens returned from partitions index_lens = ray.get(index_ids) new_index = pandas.RangeIndex(sum(index_lens)) else: # concat index returned from partitions index_lst = [x for part_index in ray.get(index_ids) for x in part_index] new_index = pandas.Index(index_lst).set_names(index_col) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names ) return new_query_compiler
[ "def", "read_sql", "(", "cls", ",", "sql", ",", "con", ",", "index_col", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "read_sql_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_sql", "(", "sql", ",", "con", ",", "index_col", "=", "index_col", ",", "*", "*", "kwargs", ")", "row_cnt_query", "=", "\"SELECT COUNT(*) FROM ({})\"", ".", "format", "(", "sql", ")", "row_cnt", "=", "pandas", ".", "read_sql", "(", "row_cnt_query", ",", "con", ")", ".", "squeeze", "(", ")", "cols_names_df", "=", "pandas", ".", "read_sql", "(", "\"SELECT * FROM ({}) LIMIT 0\"", ".", "format", "(", "sql", ")", ",", "con", ",", "index_col", "=", "index_col", ")", "cols_names", "=", "cols_names_df", ".", "columns", "num_parts", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "partition_ids", "=", "[", "]", "index_ids", "=", "[", "]", "limit", "=", "math", ".", "ceil", "(", "row_cnt", "/", "num_parts", ")", "for", "part", "in", "range", "(", "num_parts", ")", ":", "offset", "=", "part", "*", "limit", "query", "=", "\"SELECT * FROM ({}) LIMIT {} OFFSET {}\"", ".", "format", "(", "sql", ",", "limit", ",", "offset", ")", "partition_id", "=", "cls", ".", "read_sql_remote_task", ".", "_remote", "(", "args", "=", "(", "num_parts", ",", "query", ",", "con", ",", "index_col", ",", "kwargs", ")", ",", "num_return_vals", "=", "num_parts", "+", "1", ",", ")", "partition_ids", ".", "append", "(", "[", "cls", ".", "frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "partition_id", "[", ":", "-", "1", "]", "]", ")", "index_ids", ".", "append", "(", "partition_id", "[", "-", "1", "]", ")", "if", "index_col", "is", "None", ":", "# sum all lens returned from partitions", "index_lens", "=", "ray", ".", "get", "(", "index_ids", ")", "new_index", "=", "pandas", ".", "RangeIndex", "(", "sum", "(", "index_lens", ")", ")", "else", ":", "# concat index returned from partitions", "index_lst", "=", "[", "x", "for", "part_index", "in", "ray", ".", "get", "(", "index_ids", ")", "for", "x", "in", "part_index", "]", "new_index", "=", "pandas", ".", "Index", "(", "index_lst", ")", ".", "set_names", "(", "index_col", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "np", ".", "array", "(", "partition_ids", ")", ")", ",", "new_index", ",", "cols_names", ")", "return", "new_query_compiler" ]
Reads a SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). kwargs: Pass into pandas.read_sql function.
[ "Reads", "a", "SQL", "query", "or", "database", "table", "into", "a", "DataFrame", ".", "Args", ":", "sql", ":", "string", "or", "SQLAlchemy", "Selectable", "(", "select", "or", "text", "object", ")", "SQL", "query", "to", "be", "executed", "or", "a", "table", "name", ".", "con", ":", "SQLAlchemy", "connectable", "(", "engine", "/", "connection", ")", "or", "database", "string", "URI", "or", "DBAPI2", "connection", "(", "fallback", "mode", ")", "index_col", ":", "Column", "(", "s", ")", "to", "set", "as", "index", "(", "MultiIndex", ")", ".", "kwargs", ":", "Pass", "into", "pandas", ".", "read_sql", "function", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L718-L763
train
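The per-partition queries are plain LIMIT/OFFSET windows over the original statement; a minimal sketch of that chunking logic (the helper name is hypothetical):

import math

def partition_queries(sql, row_cnt, num_parts):
    # One LIMIT/OFFSET window per partition, mirroring the loop above.
    limit = math.ceil(row_cnt / num_parts)
    return [
        "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, limit, part * limit)
        for part in range(num_parts)
    ]

# 10 rows over 3 partitions -> windows of 4 rows at offsets 0, 4, 8
print(partition_queries("SELECT * FROM users", row_cnt=10, num_parts=3))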
modin-project/modin
modin/pandas/datetimes.py
to_datetime
def to_datetime(
    arg,
    errors="raise",
    dayfirst=False,
    yearfirst=False,
    utc=None,
    box=True,
    format=None,
    exact=True,
    unit=None,
    infer_datetime_format=False,
    origin="unix",
    cache=False,
):
    """Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas.

    Args:
        errors ('raise' or 'ignore'): If 'ignore', errors are silenced.
            Pandas blatantly ignores this argument so we will too.
        dayfirst (bool): Date format is passed in as day first.
        yearfirst (bool): Date format is passed in as year first.
        utc (bool): returns a UTC DatetimeIndex if True.
        box (bool): If True, returns a DatetimeIndex.
        format (string): strftime to parse time, e.g. "%d/%m/%Y".
        exact (bool): If True, require an exact format match.
        unit (string, default 'ns'): unit of the arg.
        infer_datetime_format (bool): Whether or not to infer the format.
        origin (string): Define the reference date.

    Returns:
        Type depends on input:

        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp
    """
    if not isinstance(arg, DataFrame):
        return pandas.to_datetime(
            arg,
            errors=errors,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            utc=utc,
            box=box,
            format=format,
            exact=exact,
            unit=unit,
            infer_datetime_format=infer_datetime_format,
            origin=origin,
            cache=cache,
        )
    # Pandas seems to ignore this kwarg so we will too
    pandas.to_datetime(
        pandas.DataFrame(columns=arg.columns),
        errors=errors,
        dayfirst=dayfirst,
        yearfirst=yearfirst,
        utc=utc,
        box=box,
        format=format,
        exact=exact,
        unit=unit,
        infer_datetime_format=infer_datetime_format,
        origin=origin,
        cache=cache,
    )
    return arg._query_compiler.to_datetime()
python
def to_datetime(
    arg,
    errors="raise",
    dayfirst=False,
    yearfirst=False,
    utc=None,
    box=True,
    format=None,
    exact=True,
    unit=None,
    infer_datetime_format=False,
    origin="unix",
    cache=False,
):
    """Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas.

    Args:
        errors ('raise' or 'ignore'): If 'ignore', errors are silenced.
            Pandas blatantly ignores this argument so we will too.
        dayfirst (bool): Date format is passed in as day first.
        yearfirst (bool): Date format is passed in as year first.
        utc (bool): returns a UTC DatetimeIndex if True.
        box (bool): If True, returns a DatetimeIndex.
        format (string): strftime to parse time, e.g. "%d/%m/%Y".
        exact (bool): If True, require an exact format match.
        unit (string, default 'ns'): unit of the arg.
        infer_datetime_format (bool): Whether or not to infer the format.
        origin (string): Define the reference date.

    Returns:
        Type depends on input:

        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp
    """
    if not isinstance(arg, DataFrame):
        return pandas.to_datetime(
            arg,
            errors=errors,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            utc=utc,
            box=box,
            format=format,
            exact=exact,
            unit=unit,
            infer_datetime_format=infer_datetime_format,
            origin=origin,
            cache=cache,
        )
    # Pandas seems to ignore this kwarg so we will too
    pandas.to_datetime(
        pandas.DataFrame(columns=arg.columns),
        errors=errors,
        dayfirst=dayfirst,
        yearfirst=yearfirst,
        utc=utc,
        box=box,
        format=format,
        exact=exact,
        unit=unit,
        infer_datetime_format=infer_datetime_format,
        origin=origin,
        cache=cache,
    )
    return arg._query_compiler.to_datetime()
[ "def", "to_datetime", "(", "arg", ",", "errors", "=", "\"raise\"", ",", "dayfirst", "=", "False", ",", "yearfirst", "=", "False", ",", "utc", "=", "None", ",", "box", "=", "True", ",", "format", "=", "None", ",", "exact", "=", "True", ",", "unit", "=", "None", ",", "infer_datetime_format", "=", "False", ",", "origin", "=", "\"unix\"", ",", "cache", "=", "False", ",", ")", ":", "if", "not", "isinstance", "(", "arg", ",", "DataFrame", ")", ":", "return", "pandas", ".", "to_datetime", "(", "arg", ",", "errors", "=", "errors", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ",", "utc", "=", "utc", ",", "box", "=", "box", ",", "format", "=", "format", ",", "exact", "=", "exact", ",", "unit", "=", "unit", ",", "infer_datetime_format", "=", "infer_datetime_format", ",", "origin", "=", "origin", ",", "cache", "=", "cache", ",", ")", "# Pandas seems to ignore this kwarg so we will too", "pandas", ".", "to_datetime", "(", "pandas", ".", "DataFrame", "(", "columns", "=", "arg", ".", "columns", ")", ",", "errors", "=", "errors", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ",", "utc", "=", "utc", ",", "box", "=", "box", ",", "format", "=", "format", ",", "exact", "=", "exact", ",", "unit", "=", "unit", ",", "infer_datetime_format", "=", "infer_datetime_format", ",", "origin", "=", "origin", ",", "cache", "=", "cache", ",", ")", "return", "arg", ".", "_query_compiler", ".", "to_datetime", "(", ")" ]
Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas.

Args:
    errors ('raise' or 'ignore'): If 'ignore', errors are silenced.
        Pandas blatantly ignores this argument so we will too.
    dayfirst (bool): Date format is passed in as day first.
    yearfirst (bool): Date format is passed in as year first.
    utc (bool): returns a UTC DatetimeIndex if True.
    box (bool): If True, returns a DatetimeIndex.
    format (string): strftime to parse time, e.g. "%d/%m/%Y".
    exact (bool): If True, require an exact format match.
    unit (string, default 'ns'): unit of the arg.
    infer_datetime_format (bool): Whether or not to infer the format.
    origin (string): Define the reference date.

Returns:
    Type depends on input:

    - list-like: DatetimeIndex
    - Series: Series of datetime64 dtype
    - scalar: Timestamp
[ "Convert", "the", "arg", "to", "datetime", "format", ".", "If", "not", "Ray", "DataFrame", "this", "falls", "back", "on", "pandas", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/datetimes.py#L10-L77
train
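A short usage sketch; only the DataFrame-of-date-parts input takes the distributed path, everything else defers to pandas.

import modin.pandas as mpd

parts = mpd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
mpd.to_datetime(parts)         # distributed: assembled from the date-part columns
mpd.to_datetime("2015-02-04")  # scalar input falls back to pandas, returns a Timestamp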
modin-project/modin
modin/experimental/pandas/io_exp.py
read_sql
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize=None,
    partition_column=None,
    lower_bound=None,
    upper_bound=None,
    max_sessions=None,
):
    """ Read SQL query or database table into a DataFrame.

    Args:
        sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
            executed or a table name.
        con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2
            connection (fallback mode)
        index_col: Column(s) to set as index(MultiIndex).
        coerce_float: Attempts to convert values of non-string, non-numeric objects
            (like decimal.Decimal) to floating point, useful for SQL result sets.
        params: List of parameters to pass to execute method. The syntax used to pass
            parameters is database driver dependent. Check your database driver
            documentation for which of the five syntax styles, described in PEP 249's
            paramstyle, is supported.
        parse_dates:
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is strftime
              compatible in case of parsing string times, or is one of (D, s, ns, ms, us)
              in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the
              keyword arguments of :func:`pandas.to_datetime` Especially useful with
              databases without native Datetime support, such as SQLite.
        columns: List of column names to select from SQL table (only used when reading a
            table).
        chunksize: If specified, return an iterator where `chunksize` is the number of
            rows to include in each chunk.
        partition_column: column used to split the data between the workers
            (MUST be an INTEGER column)
        lower_bound: the minimum value to be requested from the partition_column
        upper_bound: the maximum value to be requested from the partition_column
        max_sessions: the maximum number of simultaneous connections allowed to be used

    Returns:
        Modin DataFrame
    """
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=ExperimentalBaseFactory.read_sql(**kwargs))
python
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize=None,
    partition_column=None,
    lower_bound=None,
    upper_bound=None,
    max_sessions=None,
):
    """ Read SQL query or database table into a DataFrame.

    Args:
        sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
            executed or a table name.
        con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2
            connection (fallback mode)
        index_col: Column(s) to set as index(MultiIndex).
        coerce_float: Attempts to convert values of non-string, non-numeric objects
            (like decimal.Decimal) to floating point, useful for SQL result sets.
        params: List of parameters to pass to execute method. The syntax used to pass
            parameters is database driver dependent. Check your database driver
            documentation for which of the five syntax styles, described in PEP 249's
            paramstyle, is supported.
        parse_dates:
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is strftime
              compatible in case of parsing string times, or is one of (D, s, ns, ms, us)
              in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the
              keyword arguments of :func:`pandas.to_datetime` Especially useful with
              databases without native Datetime support, such as SQLite.
        columns: List of column names to select from SQL table (only used when reading a
            table).
        chunksize: If specified, return an iterator where `chunksize` is the number of
            rows to include in each chunk.
        partition_column: column used to split the data between the workers
            (MUST be an INTEGER column)
        lower_bound: the minimum value to be requested from the partition_column
        upper_bound: the maximum value to be requested from the partition_column
        max_sessions: the maximum number of simultaneous connections allowed to be used

    Returns:
        Modin DataFrame
    """
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=ExperimentalBaseFactory.read_sql(**kwargs))
[ "def", "read_sql", "(", "sql", ",", "con", ",", "index_col", "=", "None", ",", "coerce_float", "=", "True", ",", "params", "=", "None", ",", "parse_dates", "=", "None", ",", "columns", "=", "None", ",", "chunksize", "=", "None", ",", "partition_column", "=", "None", ",", "lower_bound", "=", "None", ",", "upper_bound", "=", "None", ",", "max_sessions", "=", "None", ",", ")", ":", "_", ",", "_", ",", "_", ",", "kwargs", "=", "inspect", ".", "getargvalues", "(", "inspect", ".", "currentframe", "(", ")", ")", "return", "DataFrame", "(", "query_compiler", "=", "ExperimentalBaseFactory", ".", "read_sql", "(", "*", "*", "kwargs", ")", ")" ]
Read SQL query or database table into a DataFrame.

Args:
    sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
        executed or a table name.
    con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2
        connection (fallback mode)
    index_col: Column(s) to set as index(MultiIndex).
    coerce_float: Attempts to convert values of non-string, non-numeric objects
        (like decimal.Decimal) to floating point, useful for SQL result sets.
    params: List of parameters to pass to execute method. The syntax used to pass
        parameters is database driver dependent. Check your database driver
        documentation for which of the five syntax styles, described in PEP 249's
        paramstyle, is supported.
    parse_dates:
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is strftime
          compatible in case of parsing string times, or is one of (D, s, ns, ms, us)
          in case of parsing integer timestamps.
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the
          keyword arguments of :func:`pandas.to_datetime` Especially useful with
          databases without native Datetime support, such as SQLite.
    columns: List of column names to select from SQL table (only used when reading a
        table).
    chunksize: If specified, return an iterator where `chunksize` is the number of
        rows to include in each chunk.
    partition_column: column used to split the data between the workers
        (MUST be an INTEGER column)
    lower_bound: the minimum value to be requested from the partition_column
    upper_bound: the maximum value to be requested from the partition_column
    max_sessions: the maximum number of simultaneous connections allowed to be used

Returns:
    Modin DataFrame
[ "Read", "SQL", "query", "or", "database", "table", "into", "a", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/pandas/io_exp.py#L7-L53
train
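A hedged usage sketch of the experimental reader; the connection string, table, and bounds are illustrative.

import modin.experimental.pandas as pd

df = pd.read_sql(
    "SELECT * FROM events",
    "postgresql://user:pass@localhost/db",  # illustrative URI
    partition_column="id",  # MUST be an INTEGER column
    lower_bound=0,
    upper_bound=1000000,
    max_sessions=10,
)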
modin-project/modin
modin/engines/ray/generic/frame/partition_manager.py
RayFrameManager.block_lengths
def block_lengths(self): """Gets the lengths of the blocks. Note: This works with the property structure `_lengths_cache` to avoid having to recompute these values each time they are needed. """ if self._lengths_cache is None: try: # The first column will have the correct lengths. We have an # invariant that requires that all blocks be the same length in a # row of blocks. self._lengths_cache = np.array( ray.get([obj.length().oid for obj in self._partitions_cache.T[0]]) if len(self._partitions_cache.T) > 0 else [] ) except RayTaskError as e: handle_ray_task_error(e) return self._lengths_cache
python
def block_lengths(self): """Gets the lengths of the blocks. Note: This works with the property structure `_lengths_cache` to avoid having to recompute these values each time they are needed. """ if self._lengths_cache is None: try: # The first column will have the correct lengths. We have an # invariant that requires that all blocks be the same length in a # row of blocks. self._lengths_cache = np.array( ray.get([obj.length().oid for obj in self._partitions_cache.T[0]]) if len(self._partitions_cache.T) > 0 else [] ) except RayTaskError as e: handle_ray_task_error(e) return self._lengths_cache
[ "def", "block_lengths", "(", "self", ")", ":", "if", "self", ".", "_lengths_cache", "is", "None", ":", "try", ":", "# The first column will have the correct lengths. We have an", "# invariant that requires that all blocks be the same length in a", "# row of blocks.", "self", ".", "_lengths_cache", "=", "np", ".", "array", "(", "ray", ".", "get", "(", "[", "obj", ".", "length", "(", ")", ".", "oid", "for", "obj", "in", "self", ".", "_partitions_cache", ".", "T", "[", "0", "]", "]", ")", "if", "len", "(", "self", ".", "_partitions_cache", ".", "T", ")", ">", "0", "else", "[", "]", ")", "except", "RayTaskError", "as", "e", ":", "handle_ray_task_error", "(", "e", ")", "return", "self", ".", "_lengths_cache" ]
Gets the lengths of the blocks. Note: This works with the property structure `_lengths_cache` to avoid having to recompute these values each time they are needed.
[ "Gets", "the", "lengths", "of", "the", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/frame/partition_manager.py#L24-L42
train
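The caching pattern used here (and by block_widths below) boils down to a lazily computed property; a toy sketch under that assumption, with a plain list standing in for the remote length calls:

import numpy as np

class Blocks:
    def __init__(self, lengths):
        self._lengths = lengths
        self._lengths_cache = None  # filled on first access, then reused

    @property
    def block_lengths(self):
        if self._lengths_cache is None:
            # Expensive in the real manager (one remote call per block);
            # here we just materialize the values once.
            self._lengths_cache = np.array(self._lengths)
        return self._lengths_cache

print(Blocks([4, 4, 2]).block_lengths)  # array([4, 4, 2])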
modin-project/modin
modin/engines/ray/generic/frame/partition_manager.py
RayFrameManager.block_widths
def block_widths(self):
    """Gets the widths of the blocks.

    Note: This works with the property structure `_widths_cache` to avoid
        having to recompute these values each time they are needed.
    """
    if self._widths_cache is None:
        try:
            # The first row will have the correct widths. We have an
            # invariant that requires that all blocks be the same width in a
            # column of blocks.
            self._widths_cache = np.array(
                ray.get([obj.width().oid for obj in self._partitions_cache[0]])
                if len(self._partitions_cache) > 0
                else []
            )
        except RayTaskError as e:
            handle_ray_task_error(e)
    return self._widths_cache
python
def block_widths(self):
    """Gets the widths of the blocks.

    Note: This works with the property structure `_widths_cache` to avoid
        having to recompute these values each time they are needed.
    """
    if self._widths_cache is None:
        try:
            # The first row will have the correct widths. We have an
            # invariant that requires that all blocks be the same width in a
            # column of blocks.
            self._widths_cache = np.array(
                ray.get([obj.width().oid for obj in self._partitions_cache[0]])
                if len(self._partitions_cache) > 0
                else []
            )
        except RayTaskError as e:
            handle_ray_task_error(e)
    return self._widths_cache
[ "def", "block_widths", "(", "self", ")", ":", "if", "self", ".", "_widths_cache", "is", "None", ":", "try", ":", "# The first column will have the correct lengths. We have an", "# invariant that requires that all blocks be the same width in a", "# column of blocks.", "self", ".", "_widths_cache", "=", "np", ".", "array", "(", "ray", ".", "get", "(", "[", "obj", ".", "width", "(", ")", ".", "oid", "for", "obj", "in", "self", ".", "_partitions_cache", "[", "0", "]", "]", ")", "if", "len", "(", "self", ".", "_partitions_cache", ")", ">", "0", "else", "[", "]", ")", "except", "RayTaskError", "as", "e", ":", "handle_ray_task_error", "(", "e", ")", "return", "self", ".", "_widths_cache" ]
Gets the widths of the blocks. Note: This works with the property structure `_widths_cache` to avoid having to recompute these values each time they are needed.
[ "Gets", "the", "widths", "of", "the", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/frame/partition_manager.py#L45-L63
train
modin-project/modin
modin/engines/ray/pandas_on_ray/frame/partition.py
deploy_ray_func
def deploy_ray_func(func, partition, kwargs): # pragma: no cover """Deploy a function to a partition in Ray. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: func: The function to apply. partition: The partition to apply the function to. kwargs: A dictionary of keyword arguments for the function. Returns: The result of the function. """ try: return func(partition, **kwargs) # Sometimes Arrow forces us to make a copy of an object before we operate # on it. We don't want the error to propagate to the user, and we want to # avoid copying unless we absolutely have to. except ValueError: return func(partition.copy(), **kwargs)
python
def deploy_ray_func(func, partition, kwargs): # pragma: no cover """Deploy a function to a partition in Ray. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: func: The function to apply. partition: The partition to apply the function to. kwargs: A dictionary of keyword arguments for the function. Returns: The result of the function. """ try: return func(partition, **kwargs) # Sometimes Arrow forces us to make a copy of an object before we operate # on it. We don't want the error to propagate to the user, and we want to # avoid copying unless we absolutely have to. except ValueError: return func(partition.copy(), **kwargs)
[ "def", "deploy_ray_func", "(", "func", ",", "partition", ",", "kwargs", ")", ":", "# pragma: no cover", "try", ":", "return", "func", "(", "partition", ",", "*", "*", "kwargs", ")", "# Sometimes Arrow forces us to make a copy of an object before we operate", "# on it. We don't want the error to propagate to the user, and we want to", "# avoid copying unless we absolutely have to.", "except", "ValueError", ":", "return", "func", "(", "partition", ".", "copy", "(", ")", ",", "*", "*", "kwargs", ")" ]
Deploy a function to a partition in Ray. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: func: The function to apply. partition: The partition to apply the function to. kwargs: A dictionary of keyword arguments for the function. Returns: The result of the function.
[ "Deploy", "a", "function", "to", "a", "partition", "in", "Ray", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/frame/partition.py#L124-L143
train
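A self-contained sketch of the same retry-on-copy idea with Ray; the remote function name is hypothetical.

import pandas
import ray

ray.init(ignore_reinit_error=True)

@ray.remote
def apply_to_partition(func, partition, kwargs):
    # Zero-copy Arrow buffers can be read-only; retry on a mutable copy
    # only when the first attempt raises, as deploy_ray_func does.
    try:
        return func(partition, **kwargs)
    except ValueError:
        return func(partition.copy(), **kwargs)

df = pandas.DataFrame({"a": [1, 2, 3]})
result = ray.get(apply_to_partition.remote(lambda d: d * 2, df, {}))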
modin-project/modin
modin/engines/ray/pandas_on_ray/frame/partition.py
PandasOnRayFramePartition.get
def get(self): """Gets the object out of the plasma store. Returns: The object from the plasma store. """ if len(self.call_queue): return self.apply(lambda x: x).get() try: return ray.get(self.oid) except RayTaskError as e: handle_ray_task_error(e)
python
def get(self): """Gets the object out of the plasma store. Returns: The object from the plasma store. """ if len(self.call_queue): return self.apply(lambda x: x).get() try: return ray.get(self.oid) except RayTaskError as e: handle_ray_task_error(e)
[ "def", "get", "(", "self", ")", ":", "if", "len", "(", "self", ".", "call_queue", ")", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "x", ")", ".", "get", "(", ")", "try", ":", "return", "ray", ".", "get", "(", "self", ".", "oid", ")", "except", "RayTaskError", "as", "e", ":", "handle_ray_task_error", "(", "e", ")" ]
Gets the object out of the plasma store. Returns: The object from the plasma store.
[ "Gets", "the", "object", "out", "of", "the", "plasma", "store", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/frame/partition.py#L21-L32
train
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.block_lengths
def block_lengths(self): """Gets the lengths of the blocks. Note: This works with the property structure `_lengths_cache` to avoid having to recompute these values each time they are needed. """ if self._lengths_cache is None: # The first column will have the correct lengths. We have an # invariant that requires that all blocks be the same length in a # row of blocks. self._lengths_cache = np.array( [obj.length().get() for obj in self._partitions_cache.T[0]] if len(self._partitions_cache.T) > 0 else [] ) return self._lengths_cache
python
def block_lengths(self): """Gets the lengths of the blocks. Note: This works with the property structure `_lengths_cache` to avoid having to recompute these values each time they are needed. """ if self._lengths_cache is None: # The first column will have the correct lengths. We have an # invariant that requires that all blocks be the same length in a # row of blocks. self._lengths_cache = np.array( [obj.length().get() for obj in self._partitions_cache.T[0]] if len(self._partitions_cache.T) > 0 else [] ) return self._lengths_cache
[ "def", "block_lengths", "(", "self", ")", ":", "if", "self", ".", "_lengths_cache", "is", "None", ":", "# The first column will have the correct lengths. We have an", "# invariant that requires that all blocks be the same length in a", "# row of blocks.", "self", ".", "_lengths_cache", "=", "np", ".", "array", "(", "[", "obj", ".", "length", "(", ")", ".", "get", "(", ")", "for", "obj", "in", "self", ".", "_partitions_cache", ".", "T", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ".", "T", ")", ">", "0", "else", "[", "]", ")", "return", "self", ".", "_lengths_cache" ]
Gets the lengths of the blocks. Note: This works with the property structure `_lengths_cache` to avoid having to recompute these values each time they are needed.
[ "Gets", "the", "lengths", "of", "the", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L132-L147
train
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.block_widths
def block_widths(self):
    """Gets the widths of the blocks.

    Note: This works with the property structure `_widths_cache` to avoid
        having to recompute these values each time they are needed.
    """
    if self._widths_cache is None:
        # The first row will have the correct widths. We have an
        # invariant that requires that all blocks be the same width in a
        # column of blocks.
        self._widths_cache = np.array(
            [obj.width().get() for obj in self._partitions_cache[0]]
            if len(self._partitions_cache) > 0
            else []
        )
    return self._widths_cache
python
def block_widths(self):
    """Gets the widths of the blocks.

    Note: This works with the property structure `_widths_cache` to avoid
        having to recompute these values each time they are needed.
    """
    if self._widths_cache is None:
        # The first row will have the correct widths. We have an
        # invariant that requires that all blocks be the same width in a
        # column of blocks.
        self._widths_cache = np.array(
            [obj.width().get() for obj in self._partitions_cache[0]]
            if len(self._partitions_cache) > 0
            else []
        )
    return self._widths_cache
[ "def", "block_widths", "(", "self", ")", ":", "if", "self", ".", "_widths_cache", "is", "None", ":", "# The first column will have the correct lengths. We have an", "# invariant that requires that all blocks be the same width in a", "# column of blocks.", "self", ".", "_widths_cache", "=", "np", ".", "array", "(", "[", "obj", ".", "width", "(", ")", ".", "get", "(", ")", "for", "obj", "in", "self", ".", "_partitions_cache", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ")", ">", "0", "else", "[", "]", ")", "return", "self", ".", "_widths_cache" ]
Gets the widths of the blocks. Note: This works with the property structure `_widths_cache` to avoid having to recompute these values each time they are needed.
[ "Gets", "the", "widths", "of", "the", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L153-L168
train
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.map_across_blocks
def map_across_blocks(self, map_func): """Applies `map_func` to every partition. Args: map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this. """ preprocessed_map_func = self.preprocess_func(map_func) new_partitions = np.array( [ [part.apply(preprocessed_map_func) for part in row_of_parts] for row_of_parts in self.partitions ] ) return self.__constructor__(new_partitions)
python
def map_across_blocks(self, map_func): """Applies `map_func` to every partition. Args: map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this. """ preprocessed_map_func = self.preprocess_func(map_func) new_partitions = np.array( [ [part.apply(preprocessed_map_func) for part in row_of_parts] for row_of_parts in self.partitions ] ) return self.__constructor__(new_partitions)
[ "def", "map_across_blocks", "(", "self", ",", "map_func", ")", ":", "preprocessed_map_func", "=", "self", ".", "preprocess_func", "(", "map_func", ")", "new_partitions", "=", "np", ".", "array", "(", "[", "[", "part", ".", "apply", "(", "preprocessed_map_func", ")", "for", "part", "in", "row_of_parts", "]", "for", "row_of_parts", "in", "self", ".", "partitions", "]", ")", "return", "self", ".", "__constructor__", "(", "new_partitions", ")" ]
Applies `map_func` to every partition. Args: map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Applies", "map_func", "to", "every", "partition", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L200-L216
train
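The traversal itself is just a nested comprehension over a 2-D grid of partitions; a toy sketch with plain arrays standing in for partitions:

import numpy as np

grid = [[np.arange(3), np.arange(3, 6)], [np.arange(6, 9), np.arange(9, 12)]]
# Apply one function to every block, preserving the grid shape.
mapped = [[block * 2 for block in row] for row in grid]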
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.copartition_datasets
def copartition_datasets(self, axis, other, left_func, right_func): """Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right. """ if left_func is None: new_self = self else: new_self = self.map_across_full_axis(axis, left_func) # This block of code will only shuffle if absolutely necessary. If we do need to # shuffle, we use the identity function and then reshuffle. if right_func is None: if axis == 0 and not np.array_equal( other.block_lengths, new_self.block_lengths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_lengths ) elif axis == 1 and not np.array_equal( other.block_widths, new_self.block_widths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_widths ) else: new_other = other # Most of the time, we will be given an operation to do. We perform that with # manual_shuffle. else: new_other = other.manual_shuffle( axis, right_func, new_self.block_lengths if axis == 0 else new_self.block_widths, ) return new_self, new_other
python
def copartition_datasets(self, axis, other, left_func, right_func): """Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right. """ if left_func is None: new_self = self else: new_self = self.map_across_full_axis(axis, left_func) # This block of code will only shuffle if absolutely necessary. If we do need to # shuffle, we use the identity function and then reshuffle. if right_func is None: if axis == 0 and not np.array_equal( other.block_lengths, new_self.block_lengths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_lengths ) elif axis == 1 and not np.array_equal( other.block_widths, new_self.block_widths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_widths ) else: new_other = other # Most of the time, we will be given an operation to do. We perform that with # manual_shuffle. else: new_other = other.manual_shuffle( axis, right_func, new_self.block_lengths if axis == 0 else new_self.block_widths, ) return new_self, new_other
[ "def", "copartition_datasets", "(", "self", ",", "axis", ",", "other", ",", "left_func", ",", "right_func", ")", ":", "if", "left_func", "is", "None", ":", "new_self", "=", "self", "else", ":", "new_self", "=", "self", ".", "map_across_full_axis", "(", "axis", ",", "left_func", ")", "# This block of code will only shuffle if absolutely necessary. If we do need to", "# shuffle, we use the identity function and then reshuffle.", "if", "right_func", "is", "None", ":", "if", "axis", "==", "0", "and", "not", "np", ".", "array_equal", "(", "other", ".", "block_lengths", ",", "new_self", ".", "block_lengths", ")", ":", "new_other", "=", "other", ".", "manual_shuffle", "(", "axis", ",", "lambda", "x", ":", "x", ",", "new_self", ".", "block_lengths", ")", "elif", "axis", "==", "1", "and", "not", "np", ".", "array_equal", "(", "other", ".", "block_widths", ",", "new_self", ".", "block_widths", ")", ":", "new_other", "=", "other", ".", "manual_shuffle", "(", "axis", ",", "lambda", "x", ":", "x", ",", "new_self", ".", "block_widths", ")", "else", ":", "new_other", "=", "other", "# Most of the time, we will be given an operation to do. We perform that with", "# manual_shuffle.", "else", ":", "new_other", "=", "other", ".", "manual_shuffle", "(", "axis", ",", "right_func", ",", "new_self", ".", "block_lengths", "if", "axis", "==", "0", "else", "new_self", ".", "block_widths", ",", ")", "return", "new_self", ",", "new_other" ]
Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right.
[ "Copartition", "two", "BlockPartitions", "objects", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L231-L275
train
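The shuffle-only-if-needed check reduces to comparing per-block shapes; a minimal sketch:

import numpy as np

left_lengths = np.array([4, 4, 2])   # row counts of the left blocks
right_lengths = np.array([5, 5])     # row counts of the right blocks
# Only re-split the right side when the block boundaries disagree.
needs_shuffle = not np.array_equal(left_lengths, right_lengths)  # True here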
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.map_across_full_axis
def map_across_full_axis(self, axis, map_func):
    """Applies `map_func` to every partition.

    Note: This method should be used in the case that `map_func` relies on
        some global information about the axis.

    Args:
        axis: The axis to perform the map across (0 - index, 1 - columns).
        map_func: The function to apply.

    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    # Since we are already splitting the DataFrame back up after an
    # operation, we will just use this time to compute the number of
    # partitions as best we can right now.
    num_splits = self._compute_num_partitions()
    preprocessed_map_func = self.preprocess_func(map_func)
    partitions = self.column_partitions if not axis else self.row_partitions
    # For mapping across the entire axis, we don't maintain partitioning because we
    # may want to line the partitioning up with another BlockPartitions object. Since
    # we don't need to maintain the partitioning, this gives us the opportunity to
    # load-balance the data as well.
    result_blocks = np.array(
        [
            part.apply(preprocessed_map_func, num_splits=num_splits)
            for part in partitions
        ]
    )
    # If we are mapping over columns, they are returned to us the same as
    # rows, so we need to transpose the returned 2D numpy array to return
    # the structure to the correct order.
    return (
        self.__constructor__(result_blocks.T)
        if not axis
        else self.__constructor__(result_blocks)
    )
python
def map_across_full_axis(self, axis, map_func):
    """Applies `map_func` to every partition.

    Note: This method should be used in the case that `map_func` relies on
        some global information about the axis.

    Args:
        axis: The axis to perform the map across (0 - index, 1 - columns).
        map_func: The function to apply.

    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    # Since we are already splitting the DataFrame back up after an
    # operation, we will just use this time to compute the number of
    # partitions as best we can right now.
    num_splits = self._compute_num_partitions()
    preprocessed_map_func = self.preprocess_func(map_func)
    partitions = self.column_partitions if not axis else self.row_partitions
    # For mapping across the entire axis, we don't maintain partitioning because we
    # may want to line the partitioning up with another BlockPartitions object. Since
    # we don't need to maintain the partitioning, this gives us the opportunity to
    # load-balance the data as well.
    result_blocks = np.array(
        [
            part.apply(preprocessed_map_func, num_splits=num_splits)
            for part in partitions
        ]
    )
    # If we are mapping over columns, they are returned to us the same as
    # rows, so we need to transpose the returned 2D numpy array to return
    # the structure to the correct order.
    return (
        self.__constructor__(result_blocks.T)
        if not axis
        else self.__constructor__(result_blocks)
    )
[ "def", "map_across_full_axis", "(", "self", ",", "axis", ",", "map_func", ")", ":", "# Since we are already splitting the DataFrame back up after an", "# operation, we will just use this time to compute the number of", "# partitions as best we can right now.", "num_splits", "=", "self", ".", "_compute_num_partitions", "(", ")", "preprocessed_map_func", "=", "self", ".", "preprocess_func", "(", "map_func", ")", "partitions", "=", "self", ".", "column_partitions", "if", "not", "axis", "else", "self", ".", "row_partitions", "# For mapping across the entire axis, we don't maintain partitioning because we", "# may want to line to partitioning up with another BlockPartitions object. Since", "# we don't need to maintain the partitioning, this gives us the opportunity to", "# load-balance the data as well.", "result_blocks", "=", "np", ".", "array", "(", "[", "part", ".", "apply", "(", "preprocessed_map_func", ",", "num_splits", "=", "num_splits", ")", "for", "part", "in", "partitions", "]", ")", "# If we are mapping over columns, they are returned to use the same as", "# rows, so we need to transpose the returned 2D numpy array to return", "# the structure to the correct order.", "return", "(", "self", ".", "__constructor__", "(", "result_blocks", ".", "T", ")", "if", "not", "axis", "else", "self", ".", "__constructor__", "(", "result_blocks", ")", ")" ]
Applies `map_func` to every partition. Note: This method should be used in the case that `map_func` relies on some global information about the axis. Args: axis: The axis to perform the map across (0 - index, 1 - columns). map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Applies", "map_func", "to", "every", "partition", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L277-L313
train
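The transpose at the end of map_across_full_axis can be seen on shapes alone; a toy sketch:

import numpy as np

# 3 column-partitions, each returning 2 splits -> a result grid of shape (3, 2);
# transposing restores rows-of-blocks order when mapping over axis 0.
result_blocks = np.empty((3, 2), dtype=object)
restored = result_blocks.T  # shape (2, 3)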
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.take
def take(self, axis, n):
    """Take the first (or last) n rows or columns from the blocks.

    Note: Axis = 0 will be equivalent to `head` or `tail`
          Axis = 1 will be equivalent to `front` or `back`

    Args:
        axis: The axis to extract (0 for extracting rows, 1 for extracting columns)
        n: The number of rows or columns to extract; a negative value
            extracts from the bottom of the object.

    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    # These are the partitions that we will extract over
    if not axis:
        partitions = self.partitions
        bin_lengths = self.block_lengths
    else:
        partitions = self.partitions.T
        bin_lengths = self.block_widths
    if n < 0:
        length_bins = np.cumsum(bin_lengths[::-1])
        n *= -1
        idx = int(np.digitize(n, length_bins))
        if idx > 0:
            remaining = int(n - length_bins[idx - 1])
        else:
            remaining = n
        # In this case, we require no remote compute. This is much faster.
        if remaining == 0:
            result = partitions[-idx:]
        else:
            # Reverse for ease of iteration and then re-reverse at the end
            partitions = partitions[::-1]
            # We build this iloc to avoid creating a bunch of helper methods.
            # This code creates slice objects to be passed to `iloc` to grab
            # the last n rows or columns depending on axis.
            slice_obj = (
                slice(-remaining, None)
                if axis == 0
                else (slice(None), slice(-remaining, None))
            )
            func = self.preprocess_func(lambda df: df.iloc[slice_obj])
            # We use idx + 1 here because the loop is not inclusive, and we
            # need to iterate through idx.
            result = np.array(
                [
                    partitions[i]
                    if i != idx
                    else [obj.apply(func) for obj in partitions[i]]
                    for i in range(idx + 1)
                ]
            )[::-1]
    else:
        length_bins = np.cumsum(bin_lengths)
        idx = int(np.digitize(n, length_bins))
        if idx > 0:
            remaining = int(n - length_bins[idx - 1])
        else:
            remaining = n
        # In this case, we require no remote compute. This is much faster.
        if remaining == 0:
            result = partitions[:idx]
        else:
            # We build this iloc to avoid creating a bunch of helper methods.
            # This code creates slice objects to be passed to `iloc` to grab
            # the first n rows or columns depending on axis.
            slice_obj = (
                slice(remaining) if axis == 0 else (slice(None), slice(remaining))
            )
            func = self.preprocess_func(lambda df: df.iloc[slice_obj])
            # See note above about idx + 1
            result = np.array(
                [
                    partitions[i]
                    if i != idx
                    else [obj.apply(func) for obj in partitions[i]]
                    for i in range(idx + 1)
                ]
            )
    return self.__constructor__(result.T) if axis else self.__constructor__(result)
python
def take(self, axis, n): """Take the first (or last) n rows or columns from the blocks Note: Axis = 0 will be equivalent to `head` or `tail` Axis = 1 will be equivalent to `front` or `back` Args: axis: The axis to extract (0 for extracting rows, 1 for extracting columns) n: The number of rows or columns to extract, negative denotes to extract from the bottom of the object Returns: A new BaseFrameManager object, the type of object that called this. """ # These are the partitions that we will extract over if not axis: partitions = self.partitions bin_lengths = self.block_lengths else: partitions = self.partitions.T bin_lengths = self.block_widths if n < 0: length_bins = np.cumsum(bin_lengths[::-1]) n *= -1 idx = int(np.digitize(n, length_bins)) if idx > 0: remaining = int(n - length_bins[idx - 1]) else: remaining = n # In this case, we require no remote compute. This is much faster. if remaining == 0: result = partitions[-idx:] else: # Reverse for ease of iteration and then re-reverse at the end partitions = partitions[::-1] # We build this iloc to avoid creating a bunch of helper methods. # This code creates slice objects to be passed to `iloc` to grab # the last n rows or columns depending on axis. slice_obj = ( slice(-remaining, None) if axis == 0 else (slice(None), slice(-remaining, None)) ) func = self.preprocess_func(lambda df: df.iloc[slice_obj]) # We use idx + 1 here because the loop is not inclusive, and we # need to iterate through idx. result = np.array( [ partitions[i] if i != idx else [obj.apply(func) for obj in partitions[i]] for i in range(idx + 1) ] )[::-1] else: length_bins = np.cumsum(bin_lengths) idx = int(np.digitize(n, length_bins)) if idx > 0: remaining = int(n - length_bins[idx - 1]) else: remaining = n # In this case, we require no remote compute. This is much faster. if remaining == 0: result = partitions[:idx] else: # We build this iloc to avoid creating a bunch of helper methods. # This code creates slice objects to be passed to `iloc` to grab # the first n rows or columns depending on axis. slice_obj = ( slice(remaining) if axis == 0 else (slice(None), slice(remaining)) ) func = self.preprocess_func(lambda df: df.iloc[slice_obj]) # See note above about idx + 1 result = np.array( [ partitions[i] if i != idx else [obj.apply(func) for obj in partitions[i]] for i in range(idx + 1) ] ) return self.__constructor__(result.T) if axis else self.__constructor__(result)
[ "def", "take", "(", "self", ",", "axis", ",", "n", ")", ":", "# These are the partitions that we will extract over", "if", "not", "axis", ":", "partitions", "=", "self", ".", "partitions", "bin_lengths", "=", "self", ".", "block_lengths", "else", ":", "partitions", "=", "self", ".", "partitions", ".", "T", "bin_lengths", "=", "self", ".", "block_widths", "if", "n", "<", "0", ":", "length_bins", "=", "np", ".", "cumsum", "(", "bin_lengths", "[", ":", ":", "-", "1", "]", ")", "n", "*=", "-", "1", "idx", "=", "int", "(", "np", ".", "digitize", "(", "n", ",", "length_bins", ")", ")", "if", "idx", ">", "0", ":", "remaining", "=", "int", "(", "n", "-", "length_bins", "[", "idx", "-", "1", "]", ")", "else", ":", "remaining", "=", "n", "# In this case, we require no remote compute. This is much faster.", "if", "remaining", "==", "0", ":", "result", "=", "partitions", "[", "-", "idx", ":", "]", "else", ":", "# Reverse for ease of iteration and then re-reverse at the end", "partitions", "=", "partitions", "[", ":", ":", "-", "1", "]", "# We build this iloc to avoid creating a bunch of helper methods.", "# This code creates slice objects to be passed to `iloc` to grab", "# the last n rows or columns depending on axis.", "slice_obj", "=", "(", "slice", "(", "-", "remaining", ",", "None", ")", "if", "axis", "==", "0", "else", "(", "slice", "(", "None", ")", ",", "slice", "(", "-", "remaining", ",", "None", ")", ")", ")", "func", "=", "self", ".", "preprocess_func", "(", "lambda", "df", ":", "df", ".", "iloc", "[", "slice_obj", "]", ")", "# We use idx + 1 here because the loop is not inclusive, and we", "# need to iterate through idx.", "result", "=", "np", ".", "array", "(", "[", "partitions", "[", "i", "]", "if", "i", "!=", "idx", "else", "[", "obj", ".", "apply", "(", "func", ")", "for", "obj", "in", "partitions", "[", "i", "]", "]", "for", "i", "in", "range", "(", "idx", "+", "1", ")", "]", ")", "[", ":", ":", "-", "1", "]", "else", ":", "length_bins", "=", "np", ".", "cumsum", "(", "bin_lengths", ")", "idx", "=", "int", "(", "np", ".", "digitize", "(", "n", ",", "length_bins", ")", ")", "if", "idx", ">", "0", ":", "remaining", "=", "int", "(", "n", "-", "length_bins", "[", "idx", "-", "1", "]", ")", "else", ":", "remaining", "=", "n", "# In this case, we require no remote compute. This is much faster.", "if", "remaining", "==", "0", ":", "result", "=", "partitions", "[", ":", "idx", "]", "else", ":", "# We build this iloc to avoid creating a bunch of helper methods.", "# This code creates slice objects to be passed to `iloc` to grab", "# the first n rows or columns depending on axis.", "slice_obj", "=", "(", "slice", "(", "remaining", ")", "if", "axis", "==", "0", "else", "(", "slice", "(", "None", ")", ",", "slice", "(", "remaining", ")", ")", ")", "func", "=", "self", ".", "preprocess_func", "(", "lambda", "df", ":", "df", ".", "iloc", "[", "slice_obj", "]", ")", "# See note above about idx + 1", "result", "=", "np", ".", "array", "(", "[", "partitions", "[", "i", "]", "if", "i", "!=", "idx", "else", "[", "obj", ".", "apply", "(", "func", ")", "for", "obj", "in", "partitions", "[", "i", "]", "]", "for", "i", "in", "range", "(", "idx", "+", "1", ")", "]", ")", "return", "self", ".", "__constructor__", "(", "result", ".", "T", ")", "if", "axis", "else", "self", ".", "__constructor__", "(", "result", ")" ]
Take the first (or last) n rows or columns from the blocks Note: Axis = 0 will be equivalent to `head` or `tail` Axis = 1 will be equivalent to `front` or `back` Args: axis: The axis to extract (0 for extracting rows, 1 for extracting columns) n: The number of rows or columns to extract; a negative value extracts from the bottom of the object Returns: A new BaseFrameManager object, the type of object that called this.
[ "Take", "the", "first", "(", "or", "last", ")", "n", "rows", "or", "columns", "from", "the", "blocks" ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L315-L396
train
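The partition arithmetic in `take` leans entirely on `np.digitize` over cumulative block sizes. A small self-contained sketch of that step, using hypothetical partition lengths of 4, 4, and 2 rows (illustrative only):

import numpy as np

bin_lengths = [4, 4, 2]                   # hypothetical rows per partition
length_bins = np.cumsum(bin_lengths)      # array([ 4,  8, 10])

n = 6                                     # take the first 6 rows
idx = int(np.digitize(n, length_bins))    # 1: the cutoff lands in partition 1
remaining = int(n - length_bins[idx - 1]) if idx > 0 else n
print(idx, remaining)                     # 1 2 -> keep partition 0 whole and
                                          # slice 2 rows out of partition 1

When `remaining` comes out to 0 the cutoff sits exactly on a partition boundary, which is the no-remote-compute fast path the comments in the code refer to.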
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.concat
def concat(self, axis, other_blocks): """Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate to. other_blocks: the other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this. """ if type(other_blocks) is list: other_blocks = [blocks.partitions for blocks in other_blocks] return self.__constructor__( np.concatenate([self.partitions] + other_blocks, axis=axis) ) else: return self.__constructor__( np.append(self.partitions, other_blocks.partitions, axis=axis) )
python
def concat(self, axis, other_blocks): """Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate to. other_blocks: the other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this. """ if type(other_blocks) is list: other_blocks = [blocks.partitions for blocks in other_blocks] return self.__constructor__( np.concatenate([self.partitions] + other_blocks, axis=axis) ) else: return self.__constructor__( np.append(self.partitions, other_blocks.partitions, axis=axis) )
[ "def", "concat", "(", "self", ",", "axis", ",", "other_blocks", ")", ":", "if", "type", "(", "other_blocks", ")", "is", "list", ":", "other_blocks", "=", "[", "blocks", ".", "partitions", "for", "blocks", "in", "other_blocks", "]", "return", "self", ".", "__constructor__", "(", "np", ".", "concatenate", "(", "[", "self", ".", "partitions", "]", "+", "other_blocks", ",", "axis", "=", "axis", ")", ")", "else", ":", "return", "self", ".", "__constructor__", "(", "np", ".", "append", "(", "self", ".", "partitions", ",", "other_blocks", ".", "partitions", ",", "axis", "=", "axis", ")", ")" ]
Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate along. other_blocks: The other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this.
[ "Concatenate", "the", "blocks", "with", "another", "set", "of", "blocks", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L398-L421
train
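Both branches of `concat` reduce to plain numpy stacking of the partition grids. A minimal sketch with empty object arrays standing in for partition grids (hypothetical shapes, illustrative only):

import numpy as np

a = np.empty((2, 2), dtype=object)        # two hypothetical 2x2 partition grids
b = np.empty((2, 2), dtype=object)

stacked = np.append(a, b, axis=0)         # the single-object path
assert stacked.shape == (4, 2)

combined = np.concatenate([a, b, b], axis=0)  # the list path: one call for many grids
assert combined.shape == (6, 2)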
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.to_pandas
def to_pandas(self, is_transposed=False): """Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag for telling this object that the external representation is transposed, but not the internal. Returns: A Pandas DataFrame """ # In the case this is transposed, it is easier to just temporarily # transpose back then transpose after the conversion. The performance # is the same as if we individually transposed the blocks and # concatenated them, but the code is much smaller. if is_transposed: return self.transpose().to_pandas(False).T else: retrieved_objects = [ [obj.to_pandas() for obj in part] for part in self.partitions ] if all( isinstance(part, pandas.Series) for row in retrieved_objects for part in row ): axis = 0 elif all( isinstance(part, pandas.DataFrame) for row in retrieved_objects for part in row ): axis = 1 else: ErrorMessage.catch_bugs_and_request_email(True) df_rows = [ pandas.concat([part for part in row], axis=axis) for row in retrieved_objects if not all(part.empty for part in row) ] if len(df_rows) == 0: return pandas.DataFrame() else: return pandas.concat(df_rows)
python
def to_pandas(self, is_transposed=False): """Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag for telling this object that the external representation is transposed, but not the internal. Returns: A Pandas DataFrame """ # In the case this is transposed, it is easier to just temporarily # transpose back then transpose after the conversion. The performance # is the same as if we individually transposed the blocks and # concatenated them, but the code is much smaller. if is_transposed: return self.transpose().to_pandas(False).T else: retrieved_objects = [ [obj.to_pandas() for obj in part] for part in self.partitions ] if all( isinstance(part, pandas.Series) for row in retrieved_objects for part in row ): axis = 0 elif all( isinstance(part, pandas.DataFrame) for row in retrieved_objects for part in row ): axis = 1 else: ErrorMessage.catch_bugs_and_request_email(True) df_rows = [ pandas.concat([part for part in row], axis=axis) for row in retrieved_objects if not all(part.empty for part in row) ] if len(df_rows) == 0: return pandas.DataFrame() else: return pandas.concat(df_rows)
[ "def", "to_pandas", "(", "self", ",", "is_transposed", "=", "False", ")", ":", "# In the case this is transposed, it is easier to just temporarily", "# transpose back then transpose after the conversion. The performance", "# is the same as if we individually transposed the blocks and", "# concatenated them, but the code is much smaller.", "if", "is_transposed", ":", "return", "self", ".", "transpose", "(", ")", ".", "to_pandas", "(", "False", ")", ".", "T", "else", ":", "retrieved_objects", "=", "[", "[", "obj", ".", "to_pandas", "(", ")", "for", "obj", "in", "part", "]", "for", "part", "in", "self", ".", "partitions", "]", "if", "all", "(", "isinstance", "(", "part", ",", "pandas", ".", "Series", ")", "for", "row", "in", "retrieved_objects", "for", "part", "in", "row", ")", ":", "axis", "=", "0", "elif", "all", "(", "isinstance", "(", "part", ",", "pandas", ".", "DataFrame", ")", "for", "row", "in", "retrieved_objects", "for", "part", "in", "row", ")", ":", "axis", "=", "1", "else", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "True", ")", "df_rows", "=", "[", "pandas", ".", "concat", "(", "[", "part", "for", "part", "in", "row", "]", ",", "axis", "=", "axis", ")", "for", "row", "in", "retrieved_objects", "if", "not", "all", "(", "part", ".", "empty", "for", "part", "in", "row", ")", "]", "if", "len", "(", "df_rows", ")", "==", "0", ":", "return", "pandas", ".", "DataFrame", "(", ")", "else", ":", "return", "pandas", ".", "concat", "(", "df_rows", ")" ]
Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag indicating that the external representation is transposed, but not the internal one. Returns: A Pandas DataFrame
[ "Convert", "this", "object", "into", "a", "Pandas", "DataFrame", "from", "the", "partitions", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L439-L481
train
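The stitching order in `to_pandas` is: concatenate the blocks within each row side by side, then stack the resulting row frames. A tiny runnable sketch with a hypothetical 2x2 grid of DataFrame blocks:

import pandas as pd

blocks = [                                # hypothetical 2x2 grid of DataFrame blocks
    [pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"b": [3, 4]})],
    [pd.DataFrame({"a": [5, 6]}), pd.DataFrame({"b": [7, 8]})],
]
df_rows = [pd.concat(row, axis=1) for row in blocks]  # stitch blocks side by side
full = pd.concat(df_rows)                             # then stack the rows
print(full.shape)                                     # (4, 2)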
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager.get_indices
def get_indices(self, axis=0, index_func=None, old_blocks=None): """This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: This axis to extract the labels. (0 - index, 1 - columns). index_func: The function to be used to extract the function. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object. """ ErrorMessage.catch_bugs_and_request_email(not callable(index_func)) func = self.preprocess_func(index_func) if axis == 0: # We grab the first column of blocks and extract the indices # Note: We use _partitions_cache in the context of this function to make # sure that none of the partitions are modified or filtered out before we # get the index information. # DO NOT CHANGE TO self.partitions under any circumstance. new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else [] ) # This is important because sometimes we have resized the data. The new # sizes will not be valid if we are trying to compute the index on a # new object that has a different length. if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum() else: cumulative_block_lengths = np.array(self.block_lengths).cumsum() else: new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else [] ) if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum() else: cumulative_block_lengths = np.array(self.block_widths).cumsum() full_indices = new_indices[0] if len(new_indices) else new_indices if old_blocks is not None: for i in range(len(new_indices)): # If the length is 0 there is nothing to append. if i == 0 or len(new_indices[i]) == 0: continue # The try-except here is intended to catch issues where we are # trying to get a string index out of the internal index. try: append_val = new_indices[i] + cumulative_block_lengths[i - 1] except TypeError: append_val = new_indices[i] full_indices = full_indices.append(append_val) else: full_indices = full_indices.append(new_indices[1:]) return full_indices
python
def get_indices(self, axis=0, index_func=None, old_blocks=None): """This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: This axis to extract the labels. (0 - index, 1 - columns). index_func: The function to be used to extract the function. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object. """ ErrorMessage.catch_bugs_and_request_email(not callable(index_func)) func = self.preprocess_func(index_func) if axis == 0: # We grab the first column of blocks and extract the indices # Note: We use _partitions_cache in the context of this function to make # sure that none of the partitions are modified or filtered out before we # get the index information. # DO NOT CHANGE TO self.partitions under any circumstance. new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else [] ) # This is important because sometimes we have resized the data. The new # sizes will not be valid if we are trying to compute the index on a # new object that has a different length. if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum() else: cumulative_block_lengths = np.array(self.block_lengths).cumsum() else: new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else [] ) if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum() else: cumulative_block_lengths = np.array(self.block_widths).cumsum() full_indices = new_indices[0] if len(new_indices) else new_indices if old_blocks is not None: for i in range(len(new_indices)): # If the length is 0 there is nothing to append. if i == 0 or len(new_indices[i]) == 0: continue # The try-except here is intended to catch issues where we are # trying to get a string index out of the internal index. try: append_val = new_indices[i] + cumulative_block_lengths[i - 1] except TypeError: append_val = new_indices[i] full_indices = full_indices.append(append_val) else: full_indices = full_indices.append(new_indices[1:]) return full_indices
[ "def", "get_indices", "(", "self", ",", "axis", "=", "0", ",", "index_func", "=", "None", ",", "old_blocks", "=", "None", ")", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "not", "callable", "(", "index_func", ")", ")", "func", "=", "self", ".", "preprocess_func", "(", "index_func", ")", "if", "axis", "==", "0", ":", "# We grab the first column of blocks and extract the indices", "# Note: We use _partitions_cache in the context of this function to make", "# sure that none of the partitions are modified or filtered out before we", "# get the index information.", "# DO NOT CHANGE TO self.partitions under any circumstance.", "new_indices", "=", "(", "[", "idx", ".", "apply", "(", "func", ")", ".", "get", "(", ")", "for", "idx", "in", "self", ".", "_partitions_cache", ".", "T", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ".", "T", ")", "else", "[", "]", ")", "# This is important because sometimes we have resized the data. The new", "# sizes will not be valid if we are trying to compute the index on a", "# new object that has a different length.", "if", "old_blocks", "is", "not", "None", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "old_blocks", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "else", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "self", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "else", ":", "new_indices", "=", "(", "[", "idx", ".", "apply", "(", "func", ")", ".", "get", "(", ")", "for", "idx", "in", "self", ".", "_partitions_cache", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ")", "else", "[", "]", ")", "if", "old_blocks", "is", "not", "None", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "old_blocks", ".", "block_widths", ")", ".", "cumsum", "(", ")", "else", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "self", ".", "block_widths", ")", ".", "cumsum", "(", ")", "full_indices", "=", "new_indices", "[", "0", "]", "if", "len", "(", "new_indices", ")", "else", "new_indices", "if", "old_blocks", "is", "not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "new_indices", ")", ")", ":", "# If the length is 0 there is nothing to append.", "if", "i", "==", "0", "or", "len", "(", "new_indices", "[", "i", "]", ")", "==", "0", ":", "continue", "# The try-except here is intended to catch issues where we are", "# trying to get a string index out of the internal index.", "try", ":", "append_val", "=", "new_indices", "[", "i", "]", "+", "cumulative_block_lengths", "[", "i", "-", "1", "]", "except", "TypeError", ":", "append_val", "=", "new_indices", "[", "i", "]", "full_indices", "=", "full_indices", ".", "append", "(", "append_val", ")", "else", ":", "full_indices", "=", "full_indices", ".", "append", "(", "new_indices", "[", "1", ":", "]", ")", "return", "full_indices" ]
This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: The axis from which to extract the labels (0 - index, 1 - columns). index_func: The function to be used to extract the indices. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object.
[ "This", "gets", "the", "internal", "indices", "stored", "in", "the", "partitions", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L503-L566
train
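The offset logic in `get_indices` shifts each partition's local labels by the cumulative length of everything before it. A minimal sketch with hypothetical local `RangeIndex` objects and block lengths:

import numpy as np
import pandas as pd

new_indices = [pd.RangeIndex(3), pd.RangeIndex(2)]  # local indices per partition
cum_lengths = np.cumsum([3, 2])                     # array([3, 5])

full = new_indices[0]
for i in range(1, len(new_indices)):
    # Shift partition i's local labels past everything that came before it.
    full = full.append(new_indices[i] + cum_lengths[i - 1])
print(full.tolist())                                # [0, 1, 2, 3, 4]

The try/except in the real method exists because this shift only makes sense for numeric labels; string indices are appended unshifted.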
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager._get_blocks_containing_index
def _get_blocks_containing_index(self, axis, index): """Convert a global index to a block index and local index. Note: This method is primarily used to convert a global index into a partition index (along the axis provided) and local index (useful for `iloc` or similar operations. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) index: The global index to convert. Returns: A tuple containing (block index and internal index). """ if not axis: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_widths)) cumulative_column_widths = np.array(self.block_widths).cumsum() block_idx = int(np.digitize(index, cumulative_column_widths)) if block_idx == len(cumulative_column_widths): block_idx -= 1 # Compute the internal index based on the previous lengths. This # is a global index, so we must subtract the lengths first. internal_idx = ( index if not block_idx else index - cumulative_column_widths[block_idx - 1] ) else: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_lengths)) cumulative_row_lengths = np.array(self.block_lengths).cumsum() block_idx = int(np.digitize(index, cumulative_row_lengths)) # See note above about internal index internal_idx = ( index if not block_idx else index - cumulative_row_lengths[block_idx - 1] ) return block_idx, internal_idx
python
def _get_blocks_containing_index(self, axis, index): """Convert a global index to a block index and local index. Note: This method is primarily used to convert a global index into a partition index (along the axis provided) and local index (useful for `iloc` or similar operations. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) index: The global index to convert. Returns: A tuple containing (block index and internal index). """ if not axis: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_widths)) cumulative_column_widths = np.array(self.block_widths).cumsum() block_idx = int(np.digitize(index, cumulative_column_widths)) if block_idx == len(cumulative_column_widths): block_idx -= 1 # Compute the internal index based on the previous lengths. This # is a global index, so we must subtract the lengths first. internal_idx = ( index if not block_idx else index - cumulative_column_widths[block_idx - 1] ) else: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_lengths)) cumulative_row_lengths = np.array(self.block_lengths).cumsum() block_idx = int(np.digitize(index, cumulative_row_lengths)) # See note above about internal index internal_idx = ( index if not block_idx else index - cumulative_row_lengths[block_idx - 1] ) return block_idx, internal_idx
[ "def", "_get_blocks_containing_index", "(", "self", ",", "axis", ",", "index", ")", ":", "if", "not", "axis", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "index", ">", "sum", "(", "self", ".", "block_widths", ")", ")", "cumulative_column_widths", "=", "np", ".", "array", "(", "self", ".", "block_widths", ")", ".", "cumsum", "(", ")", "block_idx", "=", "int", "(", "np", ".", "digitize", "(", "index", ",", "cumulative_column_widths", ")", ")", "if", "block_idx", "==", "len", "(", "cumulative_column_widths", ")", ":", "block_idx", "-=", "1", "# Compute the internal index based on the previous lengths. This", "# is a global index, so we must subtract the lengths first.", "internal_idx", "=", "(", "index", "if", "not", "block_idx", "else", "index", "-", "cumulative_column_widths", "[", "block_idx", "-", "1", "]", ")", "else", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "index", ">", "sum", "(", "self", ".", "block_lengths", ")", ")", "cumulative_row_lengths", "=", "np", ".", "array", "(", "self", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "block_idx", "=", "int", "(", "np", ".", "digitize", "(", "index", ",", "cumulative_row_lengths", ")", ")", "# See note above about internal index", "internal_idx", "=", "(", "index", "if", "not", "block_idx", "else", "index", "-", "cumulative_row_lengths", "[", "block_idx", "-", "1", "]", ")", "return", "block_idx", ",", "internal_idx" ]
Convert a global index to a block index and local index. Note: This method is primarily used to convert a global index into a partition index (along the axis provided) and a local index (useful for `iloc` or similar operations). Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) index: The global index to convert. Returns: A tuple containing (block index and internal index).
[ "Convert", "a", "global", "index", "to", "a", "block", "index", "and", "local", "index", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L580-L618
train
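The global-to-local conversion is again `np.digitize` plus a subtraction. A self-contained sketch using the same hypothetical partition lengths of 4, 4, and 2:

import numpy as np

cum = np.cumsum([4, 4, 2])                # [4, 8, 10]

def to_block_and_local(index):
    # Which partition holds this global index, and where inside it?
    block_idx = int(np.digitize(index, cum))
    internal = index if not block_idx else int(index - cum[block_idx - 1])
    return block_idx, internal

print(to_block_and_local(3))              # (0, 3): still inside partition 0
print(to_block_and_local(5))              # (1, 1): global row 5 is row 1 of partition 1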
modin-project/modin
modin/engines/base/frame/partition_manager.py
BaseFrameManager._get_dict_of_block_index
def _get_dict_of_block_index(self, axis, indices, ordered=False): """Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices. """ # Get the internal index and create a dictionary so we only have to # travel to each partition once. all_partitions_and_idx = [ self._get_blocks_containing_index(axis, i) for i in indices ] # In ordered, we have to maintain the order of the list of indices provided. # This means that we need to return a list instead of a dictionary. if ordered: # In ordered, the partitions dict is a list of tuples partitions_dict = [] # This variable is used to store the most recent partition that we added to # the partitions_dict. This allows us to only visit a partition once when we # have multiple values that will be operated on in that partition. last_part = -1 for part_idx, internal_idx in all_partitions_and_idx: if part_idx == last_part: # We append to the list, which is the value part of the tuple. partitions_dict[-1][-1].append(internal_idx) else: # This is where we add new values. partitions_dict.append((part_idx, [internal_idx])) last_part = part_idx else: # For unordered, we can just return a dictionary mapping partition to the # list of indices being operated on. partitions_dict = {} for part_idx, internal_idx in all_partitions_and_idx: if part_idx not in partitions_dict: partitions_dict[part_idx] = [internal_idx] else: partitions_dict[part_idx].append(internal_idx) return partitions_dict
python
def _get_dict_of_block_index(self, axis, indices, ordered=False): """Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices. """ # Get the internal index and create a dictionary so we only have to # travel to each partition once. all_partitions_and_idx = [ self._get_blocks_containing_index(axis, i) for i in indices ] # In ordered, we have to maintain the order of the list of indices provided. # This means that we need to return a list instead of a dictionary. if ordered: # In ordered, the partitions dict is a list of tuples partitions_dict = [] # This variable is used to store the most recent partition that we added to # the partitions_dict. This allows us to only visit a partition once when we # have multiple values that will be operated on in that partition. last_part = -1 for part_idx, internal_idx in all_partitions_and_idx: if part_idx == last_part: # We append to the list, which is the value part of the tuple. partitions_dict[-1][-1].append(internal_idx) else: # This is where we add new values. partitions_dict.append((part_idx, [internal_idx])) last_part = part_idx else: # For unordered, we can just return a dictionary mapping partition to the # list of indices being operated on. partitions_dict = {} for part_idx, internal_idx in all_partitions_and_idx: if part_idx not in partitions_dict: partitions_dict[part_idx] = [internal_idx] else: partitions_dict[part_idx].append(internal_idx) return partitions_dict
[ "def", "_get_dict_of_block_index", "(", "self", ",", "axis", ",", "indices", ",", "ordered", "=", "False", ")", ":", "# Get the internal index and create a dictionary so we only have to", "# travel to each partition once.", "all_partitions_and_idx", "=", "[", "self", ".", "_get_blocks_containing_index", "(", "axis", ",", "i", ")", "for", "i", "in", "indices", "]", "# In ordered, we have to maintain the order of the list of indices provided.", "# This means that we need to return a list instead of a dictionary.", "if", "ordered", ":", "# In ordered, the partitions dict is a list of tuples", "partitions_dict", "=", "[", "]", "# This variable is used to store the most recent partition that we added to", "# the partitions_dict. This allows us to only visit a partition once when we", "# have multiple values that will be operated on in that partition.", "last_part", "=", "-", "1", "for", "part_idx", ",", "internal_idx", "in", "all_partitions_and_idx", ":", "if", "part_idx", "==", "last_part", ":", "# We append to the list, which is the value part of the tuple.", "partitions_dict", "[", "-", "1", "]", "[", "-", "1", "]", ".", "append", "(", "internal_idx", ")", "else", ":", "# This is where we add new values.", "partitions_dict", ".", "append", "(", "(", "part_idx", ",", "[", "internal_idx", "]", ")", ")", "last_part", "=", "part_idx", "else", ":", "# For unordered, we can just return a dictionary mapping partition to the", "# list of indices being operated on.", "partitions_dict", "=", "{", "}", "for", "part_idx", ",", "internal_idx", "in", "all_partitions_and_idx", ":", "if", "part_idx", "not", "in", "partitions_dict", ":", "partitions_dict", "[", "part_idx", "]", "=", "[", "internal_idx", "]", "else", ":", "partitions_dict", "[", "part_idx", "]", ".", "append", "(", "internal_idx", ")", "return", "partitions_dict" ]
Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns: For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices.
[ "Convert", "indices", "to", "a", "dict", "of", "block", "index", "to", "internal", "index", "mapping", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/partition_manager.py#L620-L668
train
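The ordered branch is the subtle one: consecutive hits on the same block are merged into a single (block, locals) tuple so each partition is visited once per contiguous run, while the caller's ordering is preserved. A pure-Python sketch with hypothetical (block, local) pairs:

pairs = [(0, 1), (0, 3), (1, 0), (0, 2)]    # hypothetical (block, local) hits

ordered, last = [], -1
for block, local in pairs:
    if block == last:
        ordered[-1][1].append(local)        # extend the run on the same block
    else:
        ordered.append((block, [local]))    # start a new (block, locals) tuple
        last = block
print(ordered)                              # [(0, [1, 3]), (1, [0]), (0, [2])]

Note that block 0 appears twice in the output because its hits are not contiguous; the unordered dict variant would merge them into one entry.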