Dataset schema (each record below lists these fields in this order, delimited by `|` lines; lengths are min-max):

| column | type | size |
|---|---|---|
| repo | string | length 7-55 |
| path | string | length 4-223 |
| func_name | string | length 1-134 |
| original_string | string | length 75-104k |
| language | string | 1 class (`python`) |
| code | string | length 75-104k |
| code_tokens | list | length 19-28.4k |
| docstring | string | length 1-46.9k |
| docstring_tokens | list | length 1-1.97k |
| sha | string | length 40 |
| url | string | length 87-315 |
| partition | string | 1 class (`train`) |
modin-project/modin
|
ci/benchmarks/utils.py
|
time_logger
|
def time_logger(name):
    """This logs the time usage of a code block"""
    start_time = time.time()
    yield
    end_time = time.time()
    total_time = end_time - start_time
    logging.info("%s; time: %ss", name, total_time)
|
python
|
def time_logger(name):
    """This logs the time usage of a code block"""
    start_time = time.time()
    yield
    end_time = time.time()
    total_time = end_time - start_time
    logging.info("%s; time: %ss", name, total_time)
|
[
"def",
"time_logger",
"(",
"name",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"yield",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"total_time",
"=",
"end_time",
"-",
"start_time",
"logging",
".",
"info",
"(",
"\"%s; time: %ss\"",
",",
"name",
",",
"total_time",
")"
] |
This logs the time usage of a code block
|
[
"This",
"logs",
"the",
"time",
"usage",
"of",
"a",
"code",
"block"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/ci/benchmarks/utils.py#L12-L19
|
train
|
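The `time_logger` record above extracts only the function body, without the file's imports or any decorator; judging by the bare `yield`, it is meant to be used as a context manager. A minimal runnable sketch, assuming the source file applies `contextlib.contextmanager` (that decorator is an assumption; extraction drops anything outside the `def`):

```python
import logging
import time
from contextlib import contextmanager  # assumed decorator, not shown in the record

logging.basicConfig(level=logging.INFO)

@contextmanager
def time_logger(name):
    """Log the wall-clock time spent inside a `with` block."""
    start_time = time.time()
    yield
    total_time = time.time() - start_time
    logging.info("%s; time: %ss", name, total_time)

# Usage: the block's duration is logged when the `with` exits.
with time_logger("sum of first million ints"):
    sum(range(1_000_000))
```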
modin-project/modin
|
modin/pandas/__init__.py
|
initialize_ray
|
def initialize_ray():
    """Initializes ray based on environment variables and internal defaults."""
    if threading.current_thread().name == "MainThread":
        plasma_directory = None
        object_store_memory = os.environ.get("MODIN_MEMORY", None)
        if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
            from tempfile import gettempdir

            plasma_directory = gettempdir()
            # We may have already set the memory from the environment variable, we don't
            # want to overwrite that value if we have.
            if object_store_memory is None:
                # Round down to the nearest Gigabyte.
                mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
                # Default to 8x memory for out of core
                object_store_memory = 8 * mem_bytes
        # In case anything failed above, we can still improve the memory for Modin.
        if object_store_memory is None:
            # Round down to the nearest Gigabyte.
            object_store_memory = int(
                0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
            )
            # If the memory pool is smaller than 2GB, just use the default in ray.
            if object_store_memory == 0:
                object_store_memory = None
        else:
            object_store_memory = int(object_store_memory)
        ray.init(
            include_webui=False,
            ignore_reinit_error=True,
            plasma_directory=plasma_directory,
            object_store_memory=object_store_memory,
        )
        # Register custom serializer for method objects to avoid warning message.
        # We serialize `MethodType` objects when we use AxisPartition operations.
        ray.register_custom_serializer(types.MethodType, use_pickle=True)
|
python
|
def initialize_ray():
    """Initializes ray based on environment variables and internal defaults."""
    if threading.current_thread().name == "MainThread":
        plasma_directory = None
        object_store_memory = os.environ.get("MODIN_MEMORY", None)
        if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
            from tempfile import gettempdir

            plasma_directory = gettempdir()
            # We may have already set the memory from the environment variable, we don't
            # want to overwrite that value if we have.
            if object_store_memory is None:
                # Round down to the nearest Gigabyte.
                mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
                # Default to 8x memory for out of core
                object_store_memory = 8 * mem_bytes
        # In case anything failed above, we can still improve the memory for Modin.
        if object_store_memory is None:
            # Round down to the nearest Gigabyte.
            object_store_memory = int(
                0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
            )
            # If the memory pool is smaller than 2GB, just use the default in ray.
            if object_store_memory == 0:
                object_store_memory = None
        else:
            object_store_memory = int(object_store_memory)
        ray.init(
            include_webui=False,
            ignore_reinit_error=True,
            plasma_directory=plasma_directory,
            object_store_memory=object_store_memory,
        )
        # Register custom serializer for method objects to avoid warning message.
        # We serialize `MethodType` objects when we use AxisPartition operations.
        ray.register_custom_serializer(types.MethodType, use_pickle=True)
|
[
"def",
"initialize_ray",
"(",
")",
":",
"if",
"threading",
".",
"current_thread",
"(",
")",
".",
"name",
"==",
"\"MainThread\"",
":",
"plasma_directory",
"=",
"None",
"object_store_memory",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"MODIN_MEMORY\"",
",",
"None",
")",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"MODIN_OUT_OF_CORE\"",
",",
"\"False\"",
")",
".",
"title",
"(",
")",
"==",
"\"True\"",
":",
"from",
"tempfile",
"import",
"gettempdir",
"plasma_directory",
"=",
"gettempdir",
"(",
")",
"# We may have already set the memory from the environment variable, we don't",
"# want to overwrite that value if we have.",
"if",
"object_store_memory",
"is",
"None",
":",
"# Round down to the nearest Gigabyte.",
"mem_bytes",
"=",
"ray",
".",
"utils",
".",
"get_system_memory",
"(",
")",
"//",
"10",
"**",
"9",
"*",
"10",
"**",
"9",
"# Default to 8x memory for out of core",
"object_store_memory",
"=",
"8",
"*",
"mem_bytes",
"# In case anything failed above, we can still improve the memory for Modin.",
"if",
"object_store_memory",
"is",
"None",
":",
"# Round down to the nearest Gigabyte.",
"object_store_memory",
"=",
"int",
"(",
"0.6",
"*",
"ray",
".",
"utils",
".",
"get_system_memory",
"(",
")",
"//",
"10",
"**",
"9",
"*",
"10",
"**",
"9",
")",
"# If the memory pool is smaller than 2GB, just use the default in ray.",
"if",
"object_store_memory",
"==",
"0",
":",
"object_store_memory",
"=",
"None",
"else",
":",
"object_store_memory",
"=",
"int",
"(",
"object_store_memory",
")",
"ray",
".",
"init",
"(",
"include_webui",
"=",
"False",
",",
"ignore_reinit_error",
"=",
"True",
",",
"plasma_directory",
"=",
"plasma_directory",
",",
"object_store_memory",
"=",
"object_store_memory",
",",
")",
"# Register custom serializer for method objects to avoid warning message.",
"# We serialize `MethodType` objects when we use AxisPartition operations.",
"ray",
".",
"register_custom_serializer",
"(",
"types",
".",
"MethodType",
",",
"use_pickle",
"=",
"True",
")"
] |
Initializes ray based on environment variables and internal defaults.
|
[
"Initializes",
"ray",
"based",
"on",
"environment",
"variables",
"and",
"internal",
"defaults",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/__init__.py#L133-L168
|
train
|
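The sizing rules in `initialize_ray` above (MODIN_MEMORY wins; out-of-core defaults to 8x system memory; otherwise 60% of system memory, rounded down to a whole gigabyte; a result of 0 falls back to Ray's default) can be isolated into a pure function for testing. This is a sketch of the logic, not Modin's API; `get_system_memory` stands in for `ray.utils.get_system_memory`:

```python
import os

GB = 10 ** 9

def object_store_memory_from_env(get_system_memory, environ=os.environ):
    """Reproduce the memory-sizing logic of initialize_ray without Ray."""
    memory = environ.get("MODIN_MEMORY", None)
    out_of_core = environ.get("MODIN_OUT_OF_CORE", "False").title() == "True"
    if out_of_core and memory is None:
        # Default to 8x memory for out of core, rounded down to a gigabyte.
        memory = 8 * (get_system_memory() // GB * GB)
    if memory is None:
        # 60% of system memory, rounded down to a whole gigabyte.
        memory = int(0.6 * get_system_memory() // GB * GB)
    # A pool that rounds to 0 (machine under ~2GB) means "let Ray pick".
    return None if int(memory) == 0 else int(memory)

# On a 16 GB machine with no overrides this yields 9 GB.
print(object_store_memory_from_env(lambda: 16 * GB, environ={}))  # 9000000000
```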
modin-project/modin
|
modin/engines/dask/pandas_on_dask_delayed/frame/axis_partition.py
|
DaskFrameAxisPartition.apply
|
def apply(
    self,
    func,
    num_splits=None,
    other_axis_partition=None,
    maintain_partitioning=True,
    **kwargs
):
    """Applies func to the object.
    See notes in Parent class about this method.
    Args:
        func: The function to apply.
        num_splits: The number of times to split the result object.
        other_axis_partition: Another `DaskFrameAxisPartition` object to apply to
            func with this one.
    Returns:
        A list of `DaskFramePartition` objects.
    """
    import dask

    if num_splits is None:
        num_splits = len(self.list_of_blocks)
    if other_axis_partition is not None:
        return [
            DaskFramePartition(dask.delayed(obj))
            for obj in deploy_func_between_two_axis_partitions(
                self.axis,
                func,
                num_splits,
                len(self.list_of_blocks),
                kwargs,
                *dask.compute(
                    *tuple(
                        self.list_of_blocks + other_axis_partition.list_of_blocks
                    )
                )
            )
        ]
    args = [self.axis, func, num_splits, kwargs, maintain_partitioning]
    args.extend(dask.compute(*self.list_of_blocks))
    return [
        DaskFramePartition(dask.delayed(obj)) for obj in deploy_axis_func(*args)
    ]
|
python
|
def apply(
    self,
    func,
    num_splits=None,
    other_axis_partition=None,
    maintain_partitioning=True,
    **kwargs
):
    """Applies func to the object.
    See notes in Parent class about this method.
    Args:
        func: The function to apply.
        num_splits: The number of times to split the result object.
        other_axis_partition: Another `DaskFrameAxisPartition` object to apply to
            func with this one.
    Returns:
        A list of `DaskFramePartition` objects.
    """
    import dask

    if num_splits is None:
        num_splits = len(self.list_of_blocks)
    if other_axis_partition is not None:
        return [
            DaskFramePartition(dask.delayed(obj))
            for obj in deploy_func_between_two_axis_partitions(
                self.axis,
                func,
                num_splits,
                len(self.list_of_blocks),
                kwargs,
                *dask.compute(
                    *tuple(
                        self.list_of_blocks + other_axis_partition.list_of_blocks
                    )
                )
            )
        ]
    args = [self.axis, func, num_splits, kwargs, maintain_partitioning]
    args.extend(dask.compute(*self.list_of_blocks))
    return [
        DaskFramePartition(dask.delayed(obj)) for obj in deploy_axis_func(*args)
    ]
|
[
"def",
"apply",
"(",
"self",
",",
"func",
",",
"num_splits",
"=",
"None",
",",
"other_axis_partition",
"=",
"None",
",",
"maintain_partitioning",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"dask",
"if",
"num_splits",
"is",
"None",
":",
"num_splits",
"=",
"len",
"(",
"self",
".",
"list_of_blocks",
")",
"if",
"other_axis_partition",
"is",
"not",
"None",
":",
"return",
"[",
"DaskFramePartition",
"(",
"dask",
".",
"delayed",
"(",
"obj",
")",
")",
"for",
"obj",
"in",
"deploy_func_between_two_axis_partitions",
"(",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"len",
"(",
"self",
".",
"list_of_blocks",
")",
",",
"kwargs",
",",
"*",
"dask",
".",
"compute",
"(",
"*",
"tuple",
"(",
"self",
".",
"list_of_blocks",
"+",
"other_axis_partition",
".",
"list_of_blocks",
")",
")",
")",
"]",
"args",
"=",
"[",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"kwargs",
",",
"maintain_partitioning",
"]",
"args",
".",
"extend",
"(",
"dask",
".",
"compute",
"(",
"*",
"self",
".",
"list_of_blocks",
")",
")",
"return",
"[",
"DaskFramePartition",
"(",
"dask",
".",
"delayed",
"(",
"obj",
")",
")",
"for",
"obj",
"in",
"deploy_axis_func",
"(",
"*",
"args",
")",
"]"
] |
Applies func to the object.
See notes in Parent class about this method.
Args:
func: The function to apply.
num_splits: The number of times to split the result object.
other_axis_partition: Another `DaskFrameAxisPartition` object to apply to
func with this one.
Returns:
A list of `DaskFramePartition` objects.
|
[
"Applies",
"func",
"to",
"the",
"object",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/dask/pandas_on_dask_delayed/frame/axis_partition.py#L15-L63
|
train
|
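The Dask `apply` above first materializes its lazy blocks with `dask.compute` and then re-wraps each output in `dask.delayed` before handing back partitions. A toy reproduction of that round trip (requires `dask` and `pandas`; everything here is illustrative, not Modin's API):

```python
import dask
import pandas as pd

# Lazy "blocks", as DaskFrameAxisPartition would hold them.
blocks = [dask.delayed(pd.DataFrame({"x": [i]})) for i in range(3)]

# dask.compute(*blocks) forces evaluation and returns concrete DataFrames...
materialized = dask.compute(*blocks)

# ...which are then re-wrapped lazily, one partition per result object.
rewrapped = [dask.delayed(df) for df in materialized]

print(len(rewrapped), materialized[0]["x"].tolist())  # 3 [0]
```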
modin-project/modin
|
modin/pandas/reshape.py
|
get_dummies
|
def get_dummies(
    data,
    prefix=None,
    prefix_sep="_",
    dummy_na=False,
    columns=None,
    sparse=False,
    drop_first=False,
    dtype=None,
):
    """Convert categorical variable into indicator variables.
    Args:
        data (array-like, Series, or DataFrame): data to encode.
        prefix (string, [string]): Prefix to apply to each encoded column
            label.
        prefix_sep (string, [string]): Separator between prefix and value.
        dummy_na (bool): Add a column to indicate NaNs.
        columns: Which columns to encode.
        sparse (bool): Not Implemented: If True, returns SparseDataFrame.
        drop_first (bool): Whether to remove the first level of encoded data.
        dtype: The dtype for the get_dummies call.
    Returns:
        DataFrame or one-hot encoded data.
    """
    if sparse:
        raise NotImplementedError(
            "SparseDataFrame is not implemented. "
            "To contribute to Modin, please visit "
            "github.com/modin-project/modin."
        )
    if not isinstance(data, DataFrame):
        ErrorMessage.default_to_pandas("`get_dummies` on non-DataFrame")
        return DataFrame(
            pandas.get_dummies(
                data,
                prefix=prefix,
                prefix_sep=prefix_sep,
                dummy_na=dummy_na,
                columns=columns,
                sparse=sparse,
                drop_first=drop_first,
                dtype=dtype,
            )
        )
    else:
        new_manager = data._query_compiler.get_dummies(
            columns,
            prefix=prefix,
            prefix_sep=prefix_sep,
            dummy_na=dummy_na,
            drop_first=drop_first,
            dtype=dtype,
        )
        return DataFrame(query_compiler=new_manager)
|
python
|
def get_dummies(
    data,
    prefix=None,
    prefix_sep="_",
    dummy_na=False,
    columns=None,
    sparse=False,
    drop_first=False,
    dtype=None,
):
    """Convert categorical variable into indicator variables.
    Args:
        data (array-like, Series, or DataFrame): data to encode.
        prefix (string, [string]): Prefix to apply to each encoded column
            label.
        prefix_sep (string, [string]): Separator between prefix and value.
        dummy_na (bool): Add a column to indicate NaNs.
        columns: Which columns to encode.
        sparse (bool): Not Implemented: If True, returns SparseDataFrame.
        drop_first (bool): Whether to remove the first level of encoded data.
        dtype: The dtype for the get_dummies call.
    Returns:
        DataFrame or one-hot encoded data.
    """
    if sparse:
        raise NotImplementedError(
            "SparseDataFrame is not implemented. "
            "To contribute to Modin, please visit "
            "github.com/modin-project/modin."
        )
    if not isinstance(data, DataFrame):
        ErrorMessage.default_to_pandas("`get_dummies` on non-DataFrame")
        return DataFrame(
            pandas.get_dummies(
                data,
                prefix=prefix,
                prefix_sep=prefix_sep,
                dummy_na=dummy_na,
                columns=columns,
                sparse=sparse,
                drop_first=drop_first,
                dtype=dtype,
            )
        )
    else:
        new_manager = data._query_compiler.get_dummies(
            columns,
            prefix=prefix,
            prefix_sep=prefix_sep,
            dummy_na=dummy_na,
            drop_first=drop_first,
            dtype=dtype,
        )
        return DataFrame(query_compiler=new_manager)
|
[
"def",
"get_dummies",
"(",
"data",
",",
"prefix",
"=",
"None",
",",
"prefix_sep",
"=",
"\"_\"",
",",
"dummy_na",
"=",
"False",
",",
"columns",
"=",
"None",
",",
"sparse",
"=",
"False",
",",
"drop_first",
"=",
"False",
",",
"dtype",
"=",
"None",
",",
")",
":",
"if",
"sparse",
":",
"raise",
"NotImplementedError",
"(",
"\"SparseDataFrame is not implemented. \"",
"\"To contribute to Modin, please visit \"",
"\"github.com/modin-project/modin.\"",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"DataFrame",
")",
":",
"ErrorMessage",
".",
"default_to_pandas",
"(",
"\"`get_dummies` on non-DataFrame\"",
")",
"return",
"DataFrame",
"(",
"pandas",
".",
"get_dummies",
"(",
"data",
",",
"prefix",
"=",
"prefix",
",",
"prefix_sep",
"=",
"prefix_sep",
",",
"dummy_na",
"=",
"dummy_na",
",",
"columns",
"=",
"columns",
",",
"sparse",
"=",
"sparse",
",",
"drop_first",
"=",
"drop_first",
",",
"dtype",
"=",
"dtype",
",",
")",
")",
"else",
":",
"new_manager",
"=",
"data",
".",
"_query_compiler",
".",
"get_dummies",
"(",
"columns",
",",
"prefix",
"=",
"prefix",
",",
"prefix_sep",
"=",
"prefix_sep",
",",
"dummy_na",
"=",
"dummy_na",
",",
"drop_first",
"=",
"drop_first",
",",
"dtype",
"=",
"dtype",
",",
")",
"return",
"DataFrame",
"(",
"query_compiler",
"=",
"new_manager",
")"
] |
Convert categorical variable into indicator variables.
Args:
data (array-like, Series, or DataFrame): data to encode.
prefix (string, [string]): Prefix to apply to each encoded column
label.
prefix_sep (string, [string]): Separator between prefix and value.
dummy_na (bool): Add a column to indicate NaNs.
columns: Which columns to encode.
sparse (bool): Not Implemented: If True, returns SparseDataFrame.
drop_first (bool): Whether to remove the first level of encoded data.
dtype: The dtype for the get_dummies call.
Returns:
DataFrame or one-hot encoded data.
|
[
"Convert",
"categorical",
"variable",
"into",
"indicator",
"variables",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/reshape.py#L12-L67
|
train
|
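For non-Modin inputs the wrapper above defaults to `pandas.get_dummies`, forwarding its keyword arguments unchanged. A pandas-only illustration of the forwarded parameters (no Modin needed):

```python
import pandas as pd

s = pd.Series(["a", "b", None, "a"])

# prefix/prefix_sep name the indicator columns; dummy_na adds a NaN column;
# drop_first would remove the first level to avoid collinearity.
dummies = pd.get_dummies(s, prefix="col", prefix_sep="_", dummy_na=True)
print(list(dummies.columns))  # ['col_a', 'col_b', 'col_nan']
```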
modin-project/modin
|
modin/engines/base/frame/axis_partition.py
|
PandasFrameAxisPartition.apply
|
def apply(
    self,
    func,
    num_splits=None,
    other_axis_partition=None,
    maintain_partitioning=True,
    **kwargs
):
    """Applies func to the object in the plasma store.
    See notes in Parent class about this method.
    Args:
        func: The function to apply.
        num_splits: The number of times to split the result object.
        other_axis_partition: Another `PandasOnRayFrameAxisPartition` object to apply to
            func with this one.
        maintain_partitioning: Whether or not to keep the partitioning in the same
            orientation as it was previously. This is important because we may be
            operating on an individual AxisPartition and not touching the rest.
            In this case, we have to return the partitioning to its previous
            orientation (the lengths will remain the same). This is ignored between
            two axis partitions.
    Returns:
        A list of `RayRemotePartition` objects.
    """
    if num_splits is None:
        num_splits = len(self.list_of_blocks)
    if other_axis_partition is not None:
        return self._wrap_partitions(
            self.deploy_func_between_two_axis_partitions(
                self.axis,
                func,
                num_splits,
                len(self.list_of_blocks),
                kwargs,
                *tuple(self.list_of_blocks + other_axis_partition.list_of_blocks)
            )
        )
    args = [self.axis, func, num_splits, kwargs, maintain_partitioning]
    args.extend(self.list_of_blocks)
    return self._wrap_partitions(self.deploy_axis_func(*args))
|
python
|
def apply(
    self,
    func,
    num_splits=None,
    other_axis_partition=None,
    maintain_partitioning=True,
    **kwargs
):
    """Applies func to the object in the plasma store.
    See notes in Parent class about this method.
    Args:
        func: The function to apply.
        num_splits: The number of times to split the result object.
        other_axis_partition: Another `PandasOnRayFrameAxisPartition` object to apply to
            func with this one.
        maintain_partitioning: Whether or not to keep the partitioning in the same
            orientation as it was previously. This is important because we may be
            operating on an individual AxisPartition and not touching the rest.
            In this case, we have to return the partitioning to its previous
            orientation (the lengths will remain the same). This is ignored between
            two axis partitions.
    Returns:
        A list of `RayRemotePartition` objects.
    """
    if num_splits is None:
        num_splits = len(self.list_of_blocks)
    if other_axis_partition is not None:
        return self._wrap_partitions(
            self.deploy_func_between_two_axis_partitions(
                self.axis,
                func,
                num_splits,
                len(self.list_of_blocks),
                kwargs,
                *tuple(self.list_of_blocks + other_axis_partition.list_of_blocks)
            )
        )
    args = [self.axis, func, num_splits, kwargs, maintain_partitioning]
    args.extend(self.list_of_blocks)
    return self._wrap_partitions(self.deploy_axis_func(*args))
|
[
"def",
"apply",
"(",
"self",
",",
"func",
",",
"num_splits",
"=",
"None",
",",
"other_axis_partition",
"=",
"None",
",",
"maintain_partitioning",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"num_splits",
"is",
"None",
":",
"num_splits",
"=",
"len",
"(",
"self",
".",
"list_of_blocks",
")",
"if",
"other_axis_partition",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_wrap_partitions",
"(",
"self",
".",
"deploy_func_between_two_axis_partitions",
"(",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"len",
"(",
"self",
".",
"list_of_blocks",
")",
",",
"kwargs",
",",
"*",
"tuple",
"(",
"self",
".",
"list_of_blocks",
"+",
"other_axis_partition",
".",
"list_of_blocks",
")",
")",
")",
"args",
"=",
"[",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"kwargs",
",",
"maintain_partitioning",
"]",
"args",
".",
"extend",
"(",
"self",
".",
"list_of_blocks",
")",
"return",
"self",
".",
"_wrap_partitions",
"(",
"self",
".",
"deploy_axis_func",
"(",
"*",
"args",
")",
")"
] |
Applies func to the object in the plasma store.
See notes in Parent class about this method.
Args:
func: The function to apply.
num_splits: The number of times to split the result object.
other_axis_partition: Another `PandasOnRayFrameAxisPartition` object to apply to
func with this one.
maintain_partitioning: Whether or not to keep the partitioning in the same
orientation as it was previously. This is important because we may be
operating on an individual AxisPartition and not touching the rest.
In this case, we have to return the partitioning to its previous
orientation (the lengths will remain the same). This is ignored between
two axis partitions.
Returns:
A list of `RayRemotePartition` objects.
|
[
"Applies",
"func",
"to",
"the",
"object",
"in",
"the",
"plasma",
"store",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/axis_partition.py#L98-L141
|
train
|
modin-project/modin
|
modin/engines/base/frame/axis_partition.py
|
PandasFrameAxisPartition.shuffle
|
def shuffle(self, func, lengths, **kwargs):
    """Shuffle the order of the data in this axis based on the `lengths`.
    Extends `BaseFrameAxisPartition.shuffle`.
    Args:
        func: The function to apply before splitting.
        lengths: The list of partition lengths to split the result into.
    Returns:
        A list of RemotePartition objects split by `lengths`.
    """
    num_splits = len(lengths)
    # We add these to kwargs and will pop them off before performing the operation.
    kwargs["manual_partition"] = True
    kwargs["_lengths"] = lengths
    args = [self.axis, func, num_splits, kwargs, False]
    args.extend(self.list_of_blocks)
    return self._wrap_partitions(self.deploy_axis_func(*args))
|
python
|
def shuffle(self, func, lengths, **kwargs):
    """Shuffle the order of the data in this axis based on the `lengths`.
    Extends `BaseFrameAxisPartition.shuffle`.
    Args:
        func: The function to apply before splitting.
        lengths: The list of partition lengths to split the result into.
    Returns:
        A list of RemotePartition objects split by `lengths`.
    """
    num_splits = len(lengths)
    # We add these to kwargs and will pop them off before performing the operation.
    kwargs["manual_partition"] = True
    kwargs["_lengths"] = lengths
    args = [self.axis, func, num_splits, kwargs, False]
    args.extend(self.list_of_blocks)
    return self._wrap_partitions(self.deploy_axis_func(*args))
|
[
"def",
"shuffle",
"(",
"self",
",",
"func",
",",
"lengths",
",",
"*",
"*",
"kwargs",
")",
":",
"num_splits",
"=",
"len",
"(",
"lengths",
")",
"# We add these to kwargs and will pop them off before performing the operation.",
"kwargs",
"[",
"\"manual_partition\"",
"]",
"=",
"True",
"kwargs",
"[",
"\"_lengths\"",
"]",
"=",
"lengths",
"args",
"=",
"[",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"kwargs",
",",
"False",
"]",
"args",
".",
"extend",
"(",
"self",
".",
"list_of_blocks",
")",
"return",
"self",
".",
"_wrap_partitions",
"(",
"self",
".",
"deploy_axis_func",
"(",
"*",
"args",
")",
")"
] |
Shuffle the order of the data in this axis based on the `lengths`.
Extends `BaseFrameAxisPartition.shuffle`.
Args:
func: The function to apply before splitting.
lengths: The list of partition lengths to split the result into.
Returns:
A list of RemotePartition objects split by `lengths`.
|
[
"Shuffle",
"the",
"order",
"of",
"the",
"data",
"in",
"this",
"axis",
"based",
"on",
"the",
"lengths",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/axis_partition.py#L143-L161
|
train
|
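`shuffle` above smuggles the target partition sizes to `deploy_axis_func` through the `"_lengths"` kwarg; the deployed function then cuts the concatenated result back into chunks of those sizes. A toy stand-in for that splitting step (`split_by_lengths` is mine, not Modin's `split_result_of_axis_func_pandas`):

```python
import pandas as pd

def split_by_lengths(df, lengths):
    """Split df row-wise into consecutive chunks of the given lengths."""
    chunks, start = [], 0
    for length in lengths:
        chunks.append(df.iloc[start:start + length])
        start += length
    return chunks

df = pd.DataFrame({"x": range(10)})
parts = split_by_lengths(df, [4, 3, 3])
print([len(p) for p in parts])  # [4, 3, 3]
```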
modin-project/modin
|
modin/engines/base/frame/axis_partition.py
|
PandasFrameAxisPartition.deploy_axis_func
|
def deploy_axis_func(
    cls, axis, func, num_splits, kwargs, maintain_partitioning, *partitions
):
    """Deploy a function along a full axis in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`)
        kwargs: A dictionary of keyword arguments.
        maintain_partitioning: If True, keep the old partitioning if possible.
            If False, create a new partition layout.
        partitions: All partitions that make up the full axis (row or column)
    Returns:
        A list of Pandas DataFrames.
    """
    # Pop these off first because they aren't expected by the function.
    manual_partition = kwargs.pop("manual_partition", False)
    lengths = kwargs.pop("_lengths", None)
    dataframe = pandas.concat(partitions, axis=axis, copy=False)
    result = func(dataframe, **kwargs)
    if isinstance(result, pandas.Series):
        if num_splits == 1:
            return result
        return [result] + [pandas.Series([]) for _ in range(num_splits - 1)]
    if manual_partition:
        # The split function is expecting a list
        lengths = list(lengths)
    # We set lengths to None so we don't use the old lengths for the resulting partition
    # layout. This is done if the number of splits is changing or we are told not to
    # keep the old partitioning.
    elif num_splits != len(partitions) or not maintain_partitioning:
        lengths = None
    else:
        if axis == 0:
            lengths = [len(part) for part in partitions]
            if sum(lengths) != len(result):
                lengths = None
        else:
            lengths = [len(part.columns) for part in partitions]
            if sum(lengths) != len(result.columns):
                lengths = None
    return split_result_of_axis_func_pandas(axis, num_splits, result, lengths)
|
python
|
def deploy_axis_func(
    cls, axis, func, num_splits, kwargs, maintain_partitioning, *partitions
):
    """Deploy a function along a full axis in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`)
        kwargs: A dictionary of keyword arguments.
        maintain_partitioning: If True, keep the old partitioning if possible.
            If False, create a new partition layout.
        partitions: All partitions that make up the full axis (row or column)
    Returns:
        A list of Pandas DataFrames.
    """
    # Pop these off first because they aren't expected by the function.
    manual_partition = kwargs.pop("manual_partition", False)
    lengths = kwargs.pop("_lengths", None)
    dataframe = pandas.concat(partitions, axis=axis, copy=False)
    result = func(dataframe, **kwargs)
    if isinstance(result, pandas.Series):
        if num_splits == 1:
            return result
        return [result] + [pandas.Series([]) for _ in range(num_splits - 1)]
    if manual_partition:
        # The split function is expecting a list
        lengths = list(lengths)
    # We set lengths to None so we don't use the old lengths for the resulting partition
    # layout. This is done if the number of splits is changing or we are told not to
    # keep the old partitioning.
    elif num_splits != len(partitions) or not maintain_partitioning:
        lengths = None
    else:
        if axis == 0:
            lengths = [len(part) for part in partitions]
            if sum(lengths) != len(result):
                lengths = None
        else:
            lengths = [len(part.columns) for part in partitions]
            if sum(lengths) != len(result.columns):
                lengths = None
    return split_result_of_axis_func_pandas(axis, num_splits, result, lengths)
|
[
"def",
"deploy_axis_func",
"(",
"cls",
",",
"axis",
",",
"func",
",",
"num_splits",
",",
"kwargs",
",",
"maintain_partitioning",
",",
"*",
"partitions",
")",
":",
"# Pop these off first because they aren't expected by the function.",
"manual_partition",
"=",
"kwargs",
".",
"pop",
"(",
"\"manual_partition\"",
",",
"False",
")",
"lengths",
"=",
"kwargs",
".",
"pop",
"(",
"\"_lengths\"",
",",
"None",
")",
"dataframe",
"=",
"pandas",
".",
"concat",
"(",
"partitions",
",",
"axis",
"=",
"axis",
",",
"copy",
"=",
"False",
")",
"result",
"=",
"func",
"(",
"dataframe",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"result",
",",
"pandas",
".",
"Series",
")",
":",
"if",
"num_splits",
"==",
"1",
":",
"return",
"result",
"return",
"[",
"result",
"]",
"+",
"[",
"pandas",
".",
"Series",
"(",
"[",
"]",
")",
"for",
"_",
"in",
"range",
"(",
"num_splits",
"-",
"1",
")",
"]",
"if",
"manual_partition",
":",
"# The split function is expecting a list",
"lengths",
"=",
"list",
"(",
"lengths",
")",
"# We set lengths to None so we don't use the old lengths for the resulting partition",
"# layout. This is done if the number of splits is changing or we are told not to",
"# keep the old partitioning.",
"elif",
"num_splits",
"!=",
"len",
"(",
"partitions",
")",
"or",
"not",
"maintain_partitioning",
":",
"lengths",
"=",
"None",
"else",
":",
"if",
"axis",
"==",
"0",
":",
"lengths",
"=",
"[",
"len",
"(",
"part",
")",
"for",
"part",
"in",
"partitions",
"]",
"if",
"sum",
"(",
"lengths",
")",
"!=",
"len",
"(",
"result",
")",
":",
"lengths",
"=",
"None",
"else",
":",
"lengths",
"=",
"[",
"len",
"(",
"part",
".",
"columns",
")",
"for",
"part",
"in",
"partitions",
"]",
"if",
"sum",
"(",
"lengths",
")",
"!=",
"len",
"(",
"result",
".",
"columns",
")",
":",
"lengths",
"=",
"None",
"return",
"split_result_of_axis_func_pandas",
"(",
"axis",
",",
"num_splits",
",",
"result",
",",
"lengths",
")"
] |
Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
maintain_partitioning: If True, keep the old partitioning if possible.
If False, create a new partition layout.
partitions: All partitions that make up the full axis (row or column)
Returns:
A list of Pandas DataFrames.
|
[
"Deploy",
"a",
"function",
"along",
"a",
"full",
"axis",
"in",
"Ray",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/axis_partition.py#L164-L210
|
train
|
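`deploy_axis_func` above reuses the incoming partition lengths only when they still sum to the size of the new result; otherwise it hands `lengths=None` downstream and lets the splitter repartition from scratch. The check in isolation (pandas only, illustrative):

```python
import pandas as pd

partitions = [pd.DataFrame({"x": range(3)}), pd.DataFrame({"x": range(4)})]
result = pd.concat(partitions, axis=0)  # stand-in for the axis-wide func output

lengths = [len(part) for part in partitions]  # [3, 4]
if sum(lengths) != len(result):
    lengths = None  # row count changed: the old layout cannot be maintained
print(lengths)  # [3, 4] -- row count unchanged, keep the old partitioning
```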
modin-project/modin
|
modin/engines/base/frame/axis_partition.py
|
PandasFrameAxisPartition.deploy_func_between_two_axis_partitions
|
def deploy_func_between_two_axis_partitions(
    cls, axis, func, num_splits, len_of_left, kwargs, *partitions
):
    """Deploy a function along a full axis between two data sets in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`).
        len_of_left: The number of values in `partitions` that belong to the
            left data set.
        kwargs: A dictionary of keyword arguments.
        partitions: All partitions that make up the full axis (row or column)
            for both data sets.
    Returns:
        A list of Pandas DataFrames.
    """
    lt_frame = pandas.concat(list(partitions[:len_of_left]), axis=axis, copy=False)
    rt_frame = pandas.concat(list(partitions[len_of_left:]), axis=axis, copy=False)
    result = func(lt_frame, rt_frame, **kwargs)
    return split_result_of_axis_func_pandas(axis, num_splits, result)
|
python
|
def deploy_func_between_two_axis_partitions(
    cls, axis, func, num_splits, len_of_left, kwargs, *partitions
):
    """Deploy a function along a full axis between two data sets in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`).
        len_of_left: The number of values in `partitions` that belong to the
            left data set.
        kwargs: A dictionary of keyword arguments.
        partitions: All partitions that make up the full axis (row or column)
            for both data sets.
    Returns:
        A list of Pandas DataFrames.
    """
    lt_frame = pandas.concat(list(partitions[:len_of_left]), axis=axis, copy=False)
    rt_frame = pandas.concat(list(partitions[len_of_left:]), axis=axis, copy=False)
    result = func(lt_frame, rt_frame, **kwargs)
    return split_result_of_axis_func_pandas(axis, num_splits, result)
|
[
"def",
"deploy_func_between_two_axis_partitions",
"(",
"cls",
",",
"axis",
",",
"func",
",",
"num_splits",
",",
"len_of_left",
",",
"kwargs",
",",
"*",
"partitions",
")",
":",
"lt_frame",
"=",
"pandas",
".",
"concat",
"(",
"list",
"(",
"partitions",
"[",
":",
"len_of_left",
"]",
")",
",",
"axis",
"=",
"axis",
",",
"copy",
"=",
"False",
")",
"rt_frame",
"=",
"pandas",
".",
"concat",
"(",
"list",
"(",
"partitions",
"[",
"len_of_left",
":",
"]",
")",
",",
"axis",
"=",
"axis",
",",
"copy",
"=",
"False",
")",
"result",
"=",
"func",
"(",
"lt_frame",
",",
"rt_frame",
",",
"*",
"*",
"kwargs",
")",
"return",
"split_result_of_axis_func_pandas",
"(",
"axis",
",",
"num_splits",
",",
"result",
")"
] |
Deploy a function along a full axis between two data sets in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`).
len_of_left: The number of values in `partitions` that belong to the
left data set.
kwargs: A dictionary of keyword arguments.
partitions: All partitions that make up the full axis (row or column)
for both data sets.
Returns:
A list of Pandas DataFrames.
|
[
"Deploy",
"a",
"function",
"along",
"a",
"full",
"axis",
"between",
"two",
"data",
"sets",
"in",
"Ray",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/axis_partition.py#L213-L236
|
train
|
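In `deploy_func_between_two_axis_partitions` above, the first `len_of_left` partitions are glued into the left frame and the rest into the right frame, so the binary `func` sees two complete axes. The same slicing in miniature (pandas only; the sample `func` is mine):

```python
import pandas as pd

partitions = [pd.DataFrame({"x": [i]}) for i in range(4)]
len_of_left = 2

lt_frame = pd.concat(list(partitions[:len_of_left]), axis=0, copy=False)
rt_frame = pd.concat(list(partitions[len_of_left:]), axis=0, copy=False)

# Any two-frame function works here; element-wise addition as a stand-in.
result = lt_frame.reset_index(drop=True) + rt_frame.reset_index(drop=True)
print(result["x"].tolist())  # [2, 4]
```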
modin-project/modin
|
modin/backends/pyarrow/query_compiler.py
|
PyarrowQueryCompiler.query
|
def query(self, expr, **kwargs):
    """Query columns of the DataManager with a boolean expression.
    Args:
        expr: Boolean expression to query the columns with.
    Returns:
        DataManager containing the rows where the boolean expression is satisfied.
    """

    def gen_table_expr(table, expr):
        resolver = {
            name: FakeSeries(dtype.to_pandas_dtype())
            for name, dtype in zip(table.schema.names, table.schema.types)
        }
        scope = Scope(level=0, resolvers=(resolver,))
        return Expr(expr=expr, env=scope)

    import pyarrow.gandiva as gandiva

    unary_ops = {"~": "not"}
    math_calls = {"log": "log", "exp": "exp", "log10": "log10", "cbrt": "cbrt"}
    bin_ops = {
        "+": "add",
        "-": "subtract",
        "*": "multiply",
        "/": "divide",
        "**": "power",
    }
    cmp_ops = {
        "==": "equal",
        "!=": "not_equal",
        ">": "greater_than",
        "<": "less_than",
        "<=": "less_than_or_equal_to",
        ">=": "greater_than_or_equal_to",
        "like": "like",
    }

    def build_node(table, terms, builder):
        if isinstance(terms, Constant):
            return builder.make_literal(
                terms.value, (pa.from_numpy_dtype(terms.return_type))
            )
        if isinstance(terms, Term):
            return builder.make_field(table.schema.field_by_name(terms.name))
        if isinstance(terms, BinOp):
            lnode = build_node(table, terms.lhs, builder)
            rnode = build_node(table, terms.rhs, builder)
            return_type = pa.from_numpy_dtype(terms.return_type)
            if terms.op == "&":
                return builder.make_and([lnode, rnode])
            if terms.op == "|":
                return builder.make_or([lnode, rnode])
            if terms.op in cmp_ops:
                assert return_type == pa.bool_()
                return builder.make_function(
                    cmp_ops[terms.op], [lnode, rnode], return_type
                )
            if terms.op in bin_ops:
                return builder.make_function(
                    bin_ops[terms.op], [lnode, rnode], return_type
                )
        if isinstance(terms, UnaryOp):
            return_type = pa.from_numpy_dtype(terms.return_type)
            return builder.make_function(
                unary_ops[terms.op],
                [build_node(table, terms.operand, builder)],
                return_type,
            )
        if isinstance(terms, MathCall):
            return_type = pa.from_numpy_dtype(terms.return_type)
            children = [
                build_node(table, child, builder) for child in terms.operands
            ]
            return builder.make_function(
                math_calls[terms.op], children, return_type
            )
        raise TypeError("Unsupported term type: %s" % terms)

    def can_be_condition(expr):
        if isinstance(expr.terms, BinOp):
            if expr.terms.op in cmp_ops or expr.terms.op in ("&", "|"):
                return True
        elif isinstance(expr.terms, UnaryOp):
            if expr.terms.op == "~":
                return True
        return False

    def filter_with_selection_vector(table, s):
        record_batch = table.to_batches()[0]
        indices = s.to_array()  # .to_numpy()
        new_columns = [
            pa.array(c.to_numpy()[indices]) for c in record_batch.columns
        ]
        return pa.Table.from_arrays(new_columns, record_batch.schema.names)

    def gandiva_query(table, query):
        expr = gen_table_expr(table, query)
        if not can_be_condition(expr):
            raise ValueError("Root operation should be a filter.")
        builder = gandiva.TreeExprBuilder()
        root = build_node(table, expr.terms, builder)
        cond = builder.make_condition(root)
        filt = gandiva.make_filter(table.schema, cond)
        sel_vec = filt.evaluate(table.to_batches()[0], pa.default_memory_pool())
        result = filter_with_selection_vector(table, sel_vec)
        return result

    def gandiva_query2(table, query):
        expr = gen_table_expr(table, query)
        if not can_be_condition(expr):
            raise ValueError("Root operation should be a filter.")
        builder = gandiva.TreeExprBuilder()
        root = build_node(table, expr.terms, builder)
        cond = builder.make_condition(root)
        filt = gandiva.make_filter(table.schema, cond)
        return filt

    def query_builder(arrow_table, **kwargs):
        return gandiva_query(arrow_table, kwargs.get("expr", ""))

    kwargs["expr"] = expr
    func = self._prepare_method(query_builder, **kwargs)
    new_data = self._map_across_full_axis(1, func)
    # Query removes rows, so we need to update the index
    new_index = self.compute_index(0, new_data, False)
    return self.__constructor__(
        new_data, new_index, self.columns, self._dtype_cache
    )
|
python
|
def query(self, expr, **kwargs):
    """Query columns of the DataManager with a boolean expression.
    Args:
        expr: Boolean expression to query the columns with.
    Returns:
        DataManager containing the rows where the boolean expression is satisfied.
    """

    def gen_table_expr(table, expr):
        resolver = {
            name: FakeSeries(dtype.to_pandas_dtype())
            for name, dtype in zip(table.schema.names, table.schema.types)
        }
        scope = Scope(level=0, resolvers=(resolver,))
        return Expr(expr=expr, env=scope)

    import pyarrow.gandiva as gandiva

    unary_ops = {"~": "not"}
    math_calls = {"log": "log", "exp": "exp", "log10": "log10", "cbrt": "cbrt"}
    bin_ops = {
        "+": "add",
        "-": "subtract",
        "*": "multiply",
        "/": "divide",
        "**": "power",
    }
    cmp_ops = {
        "==": "equal",
        "!=": "not_equal",
        ">": "greater_than",
        "<": "less_than",
        "<=": "less_than_or_equal_to",
        ">=": "greater_than_or_equal_to",
        "like": "like",
    }

    def build_node(table, terms, builder):
        if isinstance(terms, Constant):
            return builder.make_literal(
                terms.value, (pa.from_numpy_dtype(terms.return_type))
            )
        if isinstance(terms, Term):
            return builder.make_field(table.schema.field_by_name(terms.name))
        if isinstance(terms, BinOp):
            lnode = build_node(table, terms.lhs, builder)
            rnode = build_node(table, terms.rhs, builder)
            return_type = pa.from_numpy_dtype(terms.return_type)
            if terms.op == "&":
                return builder.make_and([lnode, rnode])
            if terms.op == "|":
                return builder.make_or([lnode, rnode])
            if terms.op in cmp_ops:
                assert return_type == pa.bool_()
                return builder.make_function(
                    cmp_ops[terms.op], [lnode, rnode], return_type
                )
            if terms.op in bin_ops:
                return builder.make_function(
                    bin_ops[terms.op], [lnode, rnode], return_type
                )
        if isinstance(terms, UnaryOp):
            return_type = pa.from_numpy_dtype(terms.return_type)
            return builder.make_function(
                unary_ops[terms.op],
                [build_node(table, terms.operand, builder)],
                return_type,
            )
        if isinstance(terms, MathCall):
            return_type = pa.from_numpy_dtype(terms.return_type)
            children = [
                build_node(table, child, builder) for child in terms.operands
            ]
            return builder.make_function(
                math_calls[terms.op], children, return_type
            )
        raise TypeError("Unsupported term type: %s" % terms)

    def can_be_condition(expr):
        if isinstance(expr.terms, BinOp):
            if expr.terms.op in cmp_ops or expr.terms.op in ("&", "|"):
                return True
        elif isinstance(expr.terms, UnaryOp):
            if expr.terms.op == "~":
                return True
        return False

    def filter_with_selection_vector(table, s):
        record_batch = table.to_batches()[0]
        indices = s.to_array()  # .to_numpy()
        new_columns = [
            pa.array(c.to_numpy()[indices]) for c in record_batch.columns
        ]
        return pa.Table.from_arrays(new_columns, record_batch.schema.names)

    def gandiva_query(table, query):
        expr = gen_table_expr(table, query)
        if not can_be_condition(expr):
            raise ValueError("Root operation should be a filter.")
        builder = gandiva.TreeExprBuilder()
        root = build_node(table, expr.terms, builder)
        cond = builder.make_condition(root)
        filt = gandiva.make_filter(table.schema, cond)
        sel_vec = filt.evaluate(table.to_batches()[0], pa.default_memory_pool())
        result = filter_with_selection_vector(table, sel_vec)
        return result

    def gandiva_query2(table, query):
        expr = gen_table_expr(table, query)
        if not can_be_condition(expr):
            raise ValueError("Root operation should be a filter.")
        builder = gandiva.TreeExprBuilder()
        root = build_node(table, expr.terms, builder)
        cond = builder.make_condition(root)
        filt = gandiva.make_filter(table.schema, cond)
        return filt

    def query_builder(arrow_table, **kwargs):
        return gandiva_query(arrow_table, kwargs.get("expr", ""))

    kwargs["expr"] = expr
    func = self._prepare_method(query_builder, **kwargs)
    new_data = self._map_across_full_axis(1, func)
    # Query removes rows, so we need to update the index
    new_index = self.compute_index(0, new_data, False)
    return self.__constructor__(
        new_data, new_index, self.columns, self._dtype_cache
    )
|
[
"def",
"query",
"(",
"self",
",",
"expr",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"gen_table_expr",
"(",
"table",
",",
"expr",
")",
":",
"resolver",
"=",
"{",
"name",
":",
"FakeSeries",
"(",
"dtype",
".",
"to_pandas_dtype",
"(",
")",
")",
"for",
"name",
",",
"dtype",
"in",
"zip",
"(",
"table",
".",
"schema",
".",
"names",
",",
"table",
".",
"schema",
".",
"types",
")",
"}",
"scope",
"=",
"Scope",
"(",
"level",
"=",
"0",
",",
"resolvers",
"=",
"(",
"resolver",
",",
")",
")",
"return",
"Expr",
"(",
"expr",
"=",
"expr",
",",
"env",
"=",
"scope",
")",
"import",
"pyarrow",
".",
"gandiva",
"as",
"gandiva",
"unary_ops",
"=",
"{",
"\"~\"",
":",
"\"not\"",
"}",
"math_calls",
"=",
"{",
"\"log\"",
":",
"\"log\"",
",",
"\"exp\"",
":",
"\"exp\"",
",",
"\"log10\"",
":",
"\"log10\"",
",",
"\"cbrt\"",
":",
"\"cbrt\"",
"}",
"bin_ops",
"=",
"{",
"\"+\"",
":",
"\"add\"",
",",
"\"-\"",
":",
"\"subtract\"",
",",
"\"*\"",
":",
"\"multiply\"",
",",
"\"/\"",
":",
"\"divide\"",
",",
"\"**\"",
":",
"\"power\"",
",",
"}",
"cmp_ops",
"=",
"{",
"\"==\"",
":",
"\"equal\"",
",",
"\"!=\"",
":",
"\"not_equal\"",
",",
"\">\"",
":",
"\"greater_than\"",
",",
"\"<\"",
":",
"\"less_than\"",
",",
"\"<=\"",
":",
"\"less_than_or_equal_to\"",
",",
"\">\"",
":",
"\"greater_than\"",
",",
"\">=\"",
":",
"\"greater_than_or_equal_to\"",
",",
"\"like\"",
":",
"\"like\"",
",",
"}",
"def",
"build_node",
"(",
"table",
",",
"terms",
",",
"builder",
")",
":",
"if",
"isinstance",
"(",
"terms",
",",
"Constant",
")",
":",
"return",
"builder",
".",
"make_literal",
"(",
"terms",
".",
"value",
",",
"(",
"pa",
".",
"from_numpy_dtype",
"(",
"terms",
".",
"return_type",
")",
")",
")",
"if",
"isinstance",
"(",
"terms",
",",
"Term",
")",
":",
"return",
"builder",
".",
"make_field",
"(",
"table",
".",
"schema",
".",
"field_by_name",
"(",
"terms",
".",
"name",
")",
")",
"if",
"isinstance",
"(",
"terms",
",",
"BinOp",
")",
":",
"lnode",
"=",
"build_node",
"(",
"table",
",",
"terms",
".",
"lhs",
",",
"builder",
")",
"rnode",
"=",
"build_node",
"(",
"table",
",",
"terms",
".",
"rhs",
",",
"builder",
")",
"return_type",
"=",
"pa",
".",
"from_numpy_dtype",
"(",
"terms",
".",
"return_type",
")",
"if",
"terms",
".",
"op",
"==",
"\"&\"",
":",
"return",
"builder",
".",
"make_and",
"(",
"[",
"lnode",
",",
"rnode",
"]",
")",
"if",
"terms",
".",
"op",
"==",
"\"|\"",
":",
"return",
"builder",
".",
"make_or",
"(",
"[",
"lnode",
",",
"rnode",
"]",
")",
"if",
"terms",
".",
"op",
"in",
"cmp_ops",
":",
"assert",
"return_type",
"==",
"pa",
".",
"bool_",
"(",
")",
"return",
"builder",
".",
"make_function",
"(",
"cmp_ops",
"[",
"terms",
".",
"op",
"]",
",",
"[",
"lnode",
",",
"rnode",
"]",
",",
"return_type",
")",
"if",
"terms",
".",
"op",
"in",
"bin_ops",
":",
"return",
"builder",
".",
"make_function",
"(",
"bin_ops",
"[",
"terms",
".",
"op",
"]",
",",
"[",
"lnode",
",",
"rnode",
"]",
",",
"return_type",
")",
"if",
"isinstance",
"(",
"terms",
",",
"UnaryOp",
")",
":",
"return_type",
"=",
"pa",
".",
"from_numpy_dtype",
"(",
"terms",
".",
"return_type",
")",
"return",
"builder",
".",
"make_function",
"(",
"unary_ops",
"[",
"terms",
".",
"op",
"]",
",",
"[",
"build_node",
"(",
"table",
",",
"terms",
".",
"operand",
",",
"builder",
")",
"]",
",",
"return_type",
",",
")",
"if",
"isinstance",
"(",
"terms",
",",
"MathCall",
")",
":",
"return_type",
"=",
"pa",
".",
"from_numpy_dtype",
"(",
"terms",
".",
"return_type",
")",
"childern",
"=",
"[",
"build_node",
"(",
"table",
",",
"child",
",",
"builder",
")",
"for",
"child",
"in",
"terms",
".",
"operands",
"]",
"return",
"builder",
".",
"make_function",
"(",
"math_calls",
"[",
"terms",
".",
"op",
"]",
",",
"childern",
",",
"return_type",
")",
"raise",
"TypeError",
"(",
"\"Unsupported term type: %s\"",
"%",
"terms",
")",
"def",
"can_be_condition",
"(",
"expr",
")",
":",
"if",
"isinstance",
"(",
"expr",
".",
"terms",
",",
"BinOp",
")",
":",
"if",
"expr",
".",
"terms",
".",
"op",
"in",
"cmp_ops",
"or",
"expr",
".",
"terms",
".",
"op",
"in",
"(",
"\"&\"",
",",
"\"|\"",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"expr",
".",
"terms",
",",
"UnaryOp",
")",
":",
"if",
"expr",
".",
"terms",
".",
"op",
"==",
"\"~\"",
":",
"return",
"True",
"return",
"False",
"def",
"filter_with_selection_vector",
"(",
"table",
",",
"s",
")",
":",
"record_batch",
"=",
"table",
".",
"to_batches",
"(",
")",
"[",
"0",
"]",
"indices",
"=",
"s",
".",
"to_array",
"(",
")",
"# .to_numpy()\r",
"new_columns",
"=",
"[",
"pa",
".",
"array",
"(",
"c",
".",
"to_numpy",
"(",
")",
"[",
"indices",
"]",
")",
"for",
"c",
"in",
"record_batch",
".",
"columns",
"]",
"return",
"pa",
".",
"Table",
".",
"from_arrays",
"(",
"new_columns",
",",
"record_batch",
".",
"schema",
".",
"names",
")",
"def",
"gandiva_query",
"(",
"table",
",",
"query",
")",
":",
"expr",
"=",
"gen_table_expr",
"(",
"table",
",",
"query",
")",
"if",
"not",
"can_be_condition",
"(",
"expr",
")",
":",
"raise",
"ValueError",
"(",
"\"Root operation should be a filter.\"",
")",
"builder",
"=",
"gandiva",
".",
"TreeExprBuilder",
"(",
")",
"root",
"=",
"build_node",
"(",
"table",
",",
"expr",
".",
"terms",
",",
"builder",
")",
"cond",
"=",
"builder",
".",
"make_condition",
"(",
"root",
")",
"filt",
"=",
"gandiva",
".",
"make_filter",
"(",
"table",
".",
"schema",
",",
"cond",
")",
"sel_vec",
"=",
"filt",
".",
"evaluate",
"(",
"table",
".",
"to_batches",
"(",
")",
"[",
"0",
"]",
",",
"pa",
".",
"default_memory_pool",
"(",
")",
")",
"result",
"=",
"filter_with_selection_vector",
"(",
"table",
",",
"sel_vec",
")",
"return",
"result",
"def",
"gandiva_query2",
"(",
"table",
",",
"query",
")",
":",
"expr",
"=",
"gen_table_expr",
"(",
"table",
",",
"query",
")",
"if",
"not",
"can_be_condition",
"(",
"expr",
")",
":",
"raise",
"ValueError",
"(",
"\"Root operation should be a filter.\"",
")",
"builder",
"=",
"gandiva",
".",
"TreeExprBuilder",
"(",
")",
"root",
"=",
"build_node",
"(",
"table",
",",
"expr",
".",
"terms",
",",
"builder",
")",
"cond",
"=",
"builder",
".",
"make_condition",
"(",
"root",
")",
"filt",
"=",
"gandiva",
".",
"make_filter",
"(",
"table",
".",
"schema",
",",
"cond",
")",
"return",
"filt",
"def",
"query_builder",
"(",
"arrow_table",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"gandiva_query",
"(",
"arrow_table",
",",
"kwargs",
".",
"get",
"(",
"\"expr\"",
",",
"\"\"",
")",
")",
"kwargs",
"[",
"\"expr\"",
"]",
"=",
"expr",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"query_builder",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"1",
",",
"func",
")",
"# Query removes rows, so we need to update the index\r",
"new_index",
"=",
"self",
".",
"compute_index",
"(",
"0",
",",
"new_data",
",",
"False",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"self",
".",
"columns",
",",
"self",
".",
"_dtype_cache",
")"
] |
Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
|
[
"Query",
"columns",
"of",
"the",
"DataManager",
"with",
"a",
"boolean",
"expression",
".",
"Args",
":",
"expr",
":",
"Boolean",
"expression",
"to",
"query",
"the",
"columns",
"with",
".",
"Returns",
":",
"DataManager",
"containing",
"the",
"rows",
"where",
"the",
"boolean",
"expression",
"is",
"satisfied",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pyarrow/query_compiler.py#L17-L153
|
train
|
modin-project/modin
|
modin/backends/pyarrow/query_compiler.py
|
PyarrowQueryCompiler.to_pandas
|
def to_pandas(self):
    """Converts Modin DataFrame to Pandas DataFrame.
    Returns:
        Pandas DataFrame of the DataManager.
    """
    df = self.data.to_pandas(is_transposed=self._is_transposed)
    if df.empty:
        dtype_dict = {
            col_name: pandas.Series(dtype=self.dtypes[col_name])
            for col_name in self.columns
        }
        df = pandas.DataFrame(dtype_dict, self.index)
    else:
        ErrorMessage.catch_bugs_and_request_email(
            len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
        )
        df.index = self.index
        df.columns = self.columns
    return df
|
python
|
def to_pandas(self):
    """Converts Modin DataFrame to Pandas DataFrame.
    Returns:
        Pandas DataFrame of the DataManager.
    """
    df = self.data.to_pandas(is_transposed=self._is_transposed)
    if df.empty:
        dtype_dict = {
            col_name: pandas.Series(dtype=self.dtypes[col_name])
            for col_name in self.columns
        }
        df = pandas.DataFrame(dtype_dict, self.index)
    else:
        ErrorMessage.catch_bugs_and_request_email(
            len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
        )
        df.index = self.index
        df.columns = self.columns
    return df
|
[
"def",
"to_pandas",
"(",
"self",
")",
":",
"df",
"=",
"self",
".",
"data",
".",
"to_pandas",
"(",
"is_transposed",
"=",
"self",
".",
"_is_transposed",
")",
"if",
"df",
".",
"empty",
":",
"dtype_dict",
"=",
"{",
"col_name",
":",
"pandas",
".",
"Series",
"(",
"dtype",
"=",
"self",
".",
"dtypes",
"[",
"col_name",
"]",
")",
"for",
"col_name",
"in",
"self",
".",
"columns",
"}",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"dtype_dict",
",",
"self",
".",
"index",
")",
"else",
":",
"ErrorMessage",
".",
"catch_bugs_and_request_email",
"(",
"len",
"(",
"df",
".",
"index",
")",
"!=",
"len",
"(",
"self",
".",
"index",
")",
"or",
"len",
"(",
"df",
".",
"columns",
")",
"!=",
"len",
"(",
"self",
".",
"columns",
")",
")",
"df",
".",
"index",
"=",
"self",
".",
"index",
"df",
".",
"columns",
"=",
"self",
".",
"columns",
"return",
"df"
] |
Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
|
[
"Converts",
"Modin",
"DataFrame",
"to",
"Pandas",
"DataFrame",
".",
"Returns",
":",
"Pandas",
"DataFrame",
"of",
"the",
"DataManager",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pyarrow/query_compiler.py#L174-L193
|
train
|
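The empty-frame branch of `to_pandas` above rebuilds a zero-row frame that still carries per-column dtypes by constructing it from empty, explicitly typed Series. The same trick standalone (pandas only):

```python
import pandas as pd

dtypes = {"a": "int64", "b": "float64"}

# A dict of empty typed Series yields a 0-row frame whose columns keep
# their dtypes instead of all defaulting to object.
empty = pd.DataFrame({col: pd.Series(dtype=t) for col, t in dtypes.items()})
print(len(empty), dict(empty.dtypes))  # 0 {'a': dtype('int64'), 'b': dtype('float64')}
```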
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py
|
deploy_ray_axis_func
|
def deploy_ray_axis_func(axis, func, num_splits, kwargs, *partitions):
    """Deploy a function along a full axis in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`)
        kwargs: A dictionary of keyword arguments.
        partitions: All partitions that make up the full axis (row or column)
    Returns:
        A list of Pandas DataFrames.
    """
    table = concat_arrow_table_partitions(axis, partitions)
    try:
        result = func(table, **kwargs)
    except Exception:
        result = pyarrow.Table.from_pandas(func(table.to_pandas(), **kwargs))
    return split_arrow_table_result(
        axis, result, len(partitions), num_splits, table.schema.metadata
    )
|
python
|
def deploy_ray_axis_func(axis, func, num_splits, kwargs, *partitions):
    """Deploy a function along a full axis in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`)
        kwargs: A dictionary of keyword arguments.
        partitions: All partitions that make up the full axis (row or column)
    Returns:
        A list of Pandas DataFrames.
    """
    table = concat_arrow_table_partitions(axis, partitions)
    try:
        result = func(table, **kwargs)
    except Exception:
        result = pyarrow.Table.from_pandas(func(table.to_pandas(), **kwargs))
    return split_arrow_table_result(
        axis, result, len(partitions), num_splits, table.schema.metadata
    )
|
[
"def",
"deploy_ray_axis_func",
"(",
"axis",
",",
"func",
",",
"num_splits",
",",
"kwargs",
",",
"*",
"partitions",
")",
":",
"table",
"=",
"concat_arrow_table_partitions",
"(",
"axis",
",",
"partitions",
")",
"try",
":",
"result",
"=",
"func",
"(",
"table",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
":",
"result",
"=",
"pyarrow",
".",
"Table",
".",
"from_pandas",
"(",
"func",
"(",
"table",
".",
"to_pandas",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"split_arrow_table_result",
"(",
"axis",
",",
"result",
",",
"len",
"(",
"partitions",
")",
",",
"num_splits",
",",
"table",
".",
"schema",
".",
"metadata",
")"
] |
Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
partitions: All partitions that make up the full axis (row or column)
Returns:
A list of Pandas DataFrames.
|
[
"Deploy",
"a",
"function",
"along",
"a",
"full",
"axis",
"in",
"Ray",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py#L140-L161
|
train
|
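`deploy_ray_axis_func` above first offers `func` the raw Arrow table and, only if that raises, round-trips through pandas. A minimal reproduction of the fallback pattern (requires `pyarrow`; the wrapper name is mine, not Modin's):

```python
import pyarrow as pa

def apply_with_pandas_fallback(func, table, **kwargs):
    try:
        return func(table, **kwargs)  # func may accept pyarrow.Table directly
    except Exception:
        # Assume func wants a pandas.DataFrame: convert, apply, re-wrap.
        return pa.Table.from_pandas(func(table.to_pandas(), **kwargs))

table = pa.table({"x": [1, 2, 3]})
doubled = apply_with_pandas_fallback(lambda df: df.assign(x=df["x"] * 2), table)
print(doubled.to_pandas()["x"].tolist())  # [2, 4, 6]
```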
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py
|
deploy_ray_func_between_two_axis_partitions
|
def deploy_ray_func_between_two_axis_partitions(
    axis, func, num_splits, len_of_left, kwargs, *partitions
):
    """Deploy a function along a full axis between two data sets in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`).
        len_of_left: The number of values in `partitions` that belong to the
            left data set.
        kwargs: A dictionary of keyword arguments.
        partitions: All partitions that make up the full axis (row or column)
            for both data sets.
    Returns:
        A list of Pandas DataFrames.
    """
    lt_table = concat_arrow_table_partitions(axis, partitions[:len_of_left])
    rt_table = concat_arrow_table_partitions(axis, partitions[len_of_left:])
    try:
        result = func(lt_table, rt_table, **kwargs)
    except Exception:
        lt_frame = lt_table.to_pandas()
        rt_frame = rt_table.to_pandas()
        result = pyarrow.Table.from_pandas(func(lt_frame, rt_frame, **kwargs))
    return split_arrow_table_result(
        axis, result, len(partitions), num_splits, result.schema.metadata
    )
|
python
|
def deploy_ray_func_between_two_axis_partitions(
    axis, func, num_splits, len_of_left, kwargs, *partitions
):
    """Deploy a function along a full axis between two data sets in Ray.
    Args:
        axis: The axis to perform the function along.
        func: The function to perform.
        num_splits: The number of splits to return
            (see `split_result_of_axis_func_pandas`).
        len_of_left: The number of values in `partitions` that belong to the
            left data set.
        kwargs: A dictionary of keyword arguments.
        partitions: All partitions that make up the full axis (row or column)
            for both data sets.
    Returns:
        A list of Pandas DataFrames.
    """
    lt_table = concat_arrow_table_partitions(axis, partitions[:len_of_left])
    rt_table = concat_arrow_table_partitions(axis, partitions[len_of_left:])
    try:
        result = func(lt_table, rt_table, **kwargs)
    except Exception:
        lt_frame = lt_table.to_pandas()
        rt_frame = rt_table.to_pandas()
        result = pyarrow.Table.from_pandas(func(lt_frame, rt_frame, **kwargs))
    return split_arrow_table_result(
        axis, result, len(partitions), num_splits, result.schema.metadata
    )
|
[
"def",
"deploy_ray_func_between_two_axis_partitions",
"(",
"axis",
",",
"func",
",",
"num_splits",
",",
"len_of_left",
",",
"kwargs",
",",
"*",
"partitions",
")",
":",
"lt_table",
"=",
"concat_arrow_table_partitions",
"(",
"axis",
",",
"partitions",
"[",
":",
"len_of_left",
"]",
")",
"rt_table",
"=",
"concat_arrow_table_partitions",
"(",
"axis",
",",
"partitions",
"[",
"len_of_left",
":",
"]",
")",
"try",
":",
"result",
"=",
"func",
"(",
"lt_table",
",",
"rt_table",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
":",
"lt_frame",
"=",
"lt_table",
".",
"from_pandas",
"(",
")",
"rt_frame",
"=",
"rt_table",
".",
"from_pandas",
"(",
")",
"result",
"=",
"pyarrow",
".",
"Table",
".",
"from_pandas",
"(",
"func",
"(",
"lt_frame",
",",
"rt_frame",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"split_arrow_table_result",
"(",
"axis",
",",
"result",
",",
"len",
"(",
"result",
".",
"num_rows",
")",
",",
"num_splits",
",",
"result",
".",
"schema",
".",
"metadata",
")"
] |
Deploy a function along a full axis between two data sets in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`).
len_of_left: The number of values in `partitions` that belong to the
left data set.
kwargs: A dictionary of keyword arguments.
partitions: All partitions that make up the full axis (row or column)
for both data sets.
Returns:
A list of Pandas DataFrames.
|
[
"Deploy",
"a",
"function",
"along",
"a",
"full",
"axis",
"between",
"two",
"data",
"sets",
"in",
"Ray",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py#L165-L194
|
train
|
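Editor's note: the record above leans on an Arrow-first, pandas-fallback pattern: try the operation on the pyarrow.Table, and only convert to pandas (and back) when Arrow cannot handle it. A minimal standalone sketch of that pattern follows; the `rename` operation is a made-up stand-in (pyarrow.Table has no such method, which is exactly what triggers the fallback), and no Ray is involved.

import pandas
import pyarrow

table = pyarrow.Table.from_pandas(pandas.DataFrame({"a": [1, 2, 3]}))
try:
    # pyarrow.Table has no `rename` method, so this raises AttributeError...
    result = table.rename(columns={"a": "b"})
except Exception:
    # ...and we fall back to a pandas copy, then convert back to Arrow.
    frame = table.to_pandas()
    result = pyarrow.Table.from_pandas(frame.rename(columns={"a": "b"}))
print(result.num_rows, result.column_names)  # 3 ['b']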
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py
|
PyarrowOnRayFrameAxisPartition.apply
|
def apply(self, func, num_splits=None, other_axis_partition=None, **kwargs):
"""Applies func to the object in the plasma store.
See notes in Parent class about this method.
Args:
func: The function to apply.
num_splits: The number of times to split the result object.
            other_axis_partition: Another `PyarrowOnRayFrameAxisPartition` object
                to apply func to, together with this one.
Returns:
            A list of `PyarrowOnRayFramePartition` objects.
"""
if num_splits is None:
num_splits = len(self.list_of_blocks)
if other_axis_partition is not None:
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_func_between_two_axis_partitions._remote(
args=(self.axis, func, num_splits, len(self.list_of_blocks), kwargs)
+ tuple(self.list_of_blocks + other_axis_partition.list_of_blocks),
num_return_vals=num_splits,
)
]
args = [self.axis, func, num_splits, kwargs]
args.extend(self.list_of_blocks)
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits)
]
|
python
|
def apply(self, func, num_splits=None, other_axis_partition=None, **kwargs):
"""Applies func to the object in the plasma store.
See notes in Parent class about this method.
Args:
func: The function to apply.
num_splits: The number of times to split the result object.
            other_axis_partition: Another `PyarrowOnRayFrameAxisPartition` object
                to apply func to, together with this one.
Returns:
            A list of `PyarrowOnRayFramePartition` objects.
"""
if num_splits is None:
num_splits = len(self.list_of_blocks)
if other_axis_partition is not None:
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_func_between_two_axis_partitions._remote(
args=(self.axis, func, num_splits, len(self.list_of_blocks), kwargs)
+ tuple(self.list_of_blocks + other_axis_partition.list_of_blocks),
num_return_vals=num_splits,
)
]
args = [self.axis, func, num_splits, kwargs]
args.extend(self.list_of_blocks)
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits)
]
|
[
"def",
"apply",
"(",
"self",
",",
"func",
",",
"num_splits",
"=",
"None",
",",
"other_axis_partition",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"num_splits",
"is",
"None",
":",
"num_splits",
"=",
"len",
"(",
"self",
".",
"list_of_blocks",
")",
"if",
"other_axis_partition",
"is",
"not",
"None",
":",
"return",
"[",
"PyarrowOnRayFramePartition",
"(",
"obj",
")",
"for",
"obj",
"in",
"deploy_ray_func_between_two_axis_partitions",
".",
"_remote",
"(",
"args",
"=",
"(",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"len",
"(",
"self",
".",
"list_of_blocks",
")",
",",
"kwargs",
")",
"+",
"tuple",
"(",
"self",
".",
"list_of_blocks",
"+",
"other_axis_partition",
".",
"list_of_blocks",
")",
",",
"num_return_vals",
"=",
"num_splits",
",",
")",
"]",
"args",
"=",
"[",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"kwargs",
"]",
"args",
".",
"extend",
"(",
"self",
".",
"list_of_blocks",
")",
"return",
"[",
"PyarrowOnRayFramePartition",
"(",
"obj",
")",
"for",
"obj",
"in",
"deploy_ray_axis_func",
".",
"_remote",
"(",
"args",
",",
"num_return_vals",
"=",
"num_splits",
")",
"]"
] |
Applies func to the object in the plasma store.
See notes in Parent class about this method.
Args:
func: The function to apply.
num_splits: The number of times to split the result object.
            other_axis_partition: Another `PyarrowOnRayFrameAxisPartition` object
                to apply func to, together with this one.
Returns:
            A list of `PyarrowOnRayFramePartition` objects.
|
[
"Applies",
"func",
"to",
"the",
"object",
"in",
"the",
"plasma",
"store",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py#L16-L48
|
train
|
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py
|
PyarrowOnRayFrameAxisPartition.shuffle
|
def shuffle(self, func, num_splits=None, **kwargs):
"""Shuffle the order of the data in this axis based on the `func`.
Extends `BaseFrameAxisPartition.shuffle`.
:param func:
:param num_splits:
:param kwargs:
:return:
"""
if num_splits is None:
num_splits = len(self.list_of_blocks)
args = [self.axis, func, num_splits, kwargs]
args.extend(self.list_of_blocks)
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits)
]
|
python
|
def shuffle(self, func, num_splits=None, **kwargs):
"""Shuffle the order of the data in this axis based on the `func`.
Extends `BaseFrameAxisPartition.shuffle`.
:param func:
:param num_splits:
:param kwargs:
:return:
"""
if num_splits is None:
num_splits = len(self.list_of_blocks)
args = [self.axis, func, num_splits, kwargs]
args.extend(self.list_of_blocks)
return [
PyarrowOnRayFramePartition(obj)
for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits)
]
|
[
"def",
"shuffle",
"(",
"self",
",",
"func",
",",
"num_splits",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"num_splits",
"is",
"None",
":",
"num_splits",
"=",
"len",
"(",
"self",
".",
"list_of_blocks",
")",
"args",
"=",
"[",
"self",
".",
"axis",
",",
"func",
",",
"num_splits",
",",
"kwargs",
"]",
"args",
".",
"extend",
"(",
"self",
".",
"list_of_blocks",
")",
"return",
"[",
"PyarrowOnRayFramePartition",
"(",
"obj",
")",
"for",
"obj",
"in",
"deploy_ray_axis_func",
".",
"_remote",
"(",
"args",
",",
"num_return_vals",
"=",
"num_splits",
")",
"]"
] |
Shuffle the order of the data in this axis based on the `func`.
Extends `BaseFrameAxisPartition.shuffle`.
:param func:
:param num_splits:
:param kwargs:
:return:
|
[
"Shuffle",
"the",
"order",
"of",
"the",
"data",
"in",
"this",
"axis",
"based",
"on",
"the",
"func",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/axis_partition.py#L50-L68
|
train
|
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/partition.py
|
deploy_ray_func
|
def deploy_ray_func(func, partition, kwargs):
"""Deploy a function to a partition in Ray.
Args:
func: The function to apply.
partition: The partition to apply the function to.
kwargs: A dictionary of keyword arguments for the function.
Returns:
The result of the function.
"""
try:
result = func(partition, **kwargs)
# Sometimes Arrow forces us to make a copy of an object before we operate
# on it. We don't want the error to propagate to the user, and we want to
# avoid copying unless we absolutely have to.
except Exception:
result = func(partition.to_pandas(), **kwargs)
if isinstance(result, pandas.Series):
result = pandas.DataFrame(result).T
if isinstance(result, pandas.DataFrame):
return pyarrow.Table.from_pandas(result)
return result
|
python
|
def deploy_ray_func(func, partition, kwargs):
"""Deploy a function to a partition in Ray.
Args:
func: The function to apply.
partition: The partition to apply the function to.
kwargs: A dictionary of keyword arguments for the function.
Returns:
The result of the function.
"""
try:
result = func(partition, **kwargs)
# Sometimes Arrow forces us to make a copy of an object before we operate
# on it. We don't want the error to propagate to the user, and we want to
# avoid copying unless we absolutely have to.
except Exception:
result = func(partition.to_pandas(), **kwargs)
if isinstance(result, pandas.Series):
result = pandas.DataFrame(result).T
if isinstance(result, pandas.DataFrame):
return pyarrow.Table.from_pandas(result)
return result
|
[
"def",
"deploy_ray_func",
"(",
"func",
",",
"partition",
",",
"kwargs",
")",
":",
"try",
":",
"result",
"=",
"func",
"(",
"partition",
",",
"*",
"*",
"kwargs",
")",
"# Sometimes Arrow forces us to make a copy of an object before we operate",
"# on it. We don't want the error to propagate to the user, and we want to",
"# avoid copying unless we absolutely have to.",
"except",
"Exception",
":",
"result",
"=",
"func",
"(",
"partition",
".",
"to_pandas",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"result",
",",
"pandas",
".",
"Series",
")",
":",
"result",
"=",
"pandas",
".",
"DataFrame",
"(",
"result",
")",
".",
"T",
"if",
"isinstance",
"(",
"result",
",",
"pandas",
".",
"DataFrame",
")",
":",
"return",
"pyarrow",
".",
"Table",
".",
"from_pandas",
"(",
"result",
")",
"return",
"result"
] |
Deploy a function to a partition in Ray.
Args:
func: The function to apply.
partition: The partition to apply the function to.
kwargs: A dictionary of keyword arguments for the function.
Returns:
The result of the function.
|
[
"Deploy",
"a",
"function",
"to",
"a",
"partition",
"in",
"Ray",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/partition.py#L120-L142
|
train
|
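Editor's note: a minimal sketch, without Ray, of the result normalization in `deploy_ray_func` above: a pandas Series is promoted to a one-row DataFrame via `.T` before being stored as a pyarrow Table. The sample Series is made up for the demo.

import pandas
import pyarrow

result = pandas.Series([1, 2, 3], index=["a", "b", "c"])  # e.g. a row-wise reduction
if isinstance(result, pandas.Series):
    result = pandas.DataFrame(result).T  # one row, columns a/b/c
if isinstance(result, pandas.DataFrame):
    result = pyarrow.Table.from_pandas(result)
print(result.num_rows, result.column_names)  # 1 ['a', 'b', 'c']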
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/partition.py
|
PyarrowOnRayFramePartition.get
|
def get(self):
"""Gets the object out of the plasma store.
Returns:
The object from the plasma store.
"""
if len(self.call_queue):
return self.apply(lambda x: x).get()
return ray.get(self.oid)
|
python
|
def get(self):
"""Gets the object out of the plasma store.
Returns:
The object from the plasma store.
"""
if len(self.call_queue):
return self.apply(lambda x: x).get()
return ray.get(self.oid)
|
[
"def",
"get",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"call_queue",
")",
":",
"return",
"self",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
")",
".",
"get",
"(",
")",
"return",
"ray",
".",
"get",
"(",
"self",
".",
"oid",
")"
] |
Gets the object out of the plasma store.
Returns:
The object from the plasma store.
|
[
"Gets",
"the",
"object",
"out",
"of",
"the",
"plasma",
"store",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/partition.py#L19-L28
|
train
|
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/partition.py
|
PyarrowOnRayFramePartition.apply
|
def apply(self, func, **kwargs):
"""Apply a function to the object stored in this partition.
Note: It does not matter if func is callable or an ObjectID. Ray will
handle it correctly either way. The keyword arguments are sent as a
dictionary.
Args:
func: The function to apply.
Returns:
            A PyarrowOnRayFramePartition object.
"""
oid = self.oid
self.call_queue.append((func, kwargs))
def call_queue_closure(oid_obj, call_queues):
for func, kwargs in call_queues:
if isinstance(func, ray.ObjectID):
func = ray.get(func)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
oid_obj = func(oid_obj, **kwargs)
return oid_obj
oid = deploy_ray_func.remote(
call_queue_closure, oid, kwargs={"call_queues": self.call_queue}
)
self.call_queue = []
return PyarrowOnRayFramePartition(oid)
|
python
|
def apply(self, func, **kwargs):
"""Apply a function to the object stored in this partition.
Note: It does not matter if func is callable or an ObjectID. Ray will
handle it correctly either way. The keyword arguments are sent as a
dictionary.
Args:
func: The function to apply.
Returns:
            A PyarrowOnRayFramePartition object.
"""
oid = self.oid
self.call_queue.append((func, kwargs))
def call_queue_closure(oid_obj, call_queues):
for func, kwargs in call_queues:
if isinstance(func, ray.ObjectID):
func = ray.get(func)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
oid_obj = func(oid_obj, **kwargs)
return oid_obj
oid = deploy_ray_func.remote(
call_queue_closure, oid, kwargs={"call_queues": self.call_queue}
)
self.call_queue = []
return PyarrowOnRayFramePartition(oid)
|
[
"def",
"apply",
"(",
"self",
",",
"func",
",",
"*",
"*",
"kwargs",
")",
":",
"oid",
"=",
"self",
".",
"oid",
"self",
".",
"call_queue",
".",
"append",
"(",
"(",
"func",
",",
"kwargs",
")",
")",
"def",
"call_queue_closure",
"(",
"oid_obj",
",",
"call_queues",
")",
":",
"for",
"func",
",",
"kwargs",
"in",
"call_queues",
":",
"if",
"isinstance",
"(",
"func",
",",
"ray",
".",
"ObjectID",
")",
":",
"func",
"=",
"ray",
".",
"get",
"(",
"func",
")",
"if",
"isinstance",
"(",
"kwargs",
",",
"ray",
".",
"ObjectID",
")",
":",
"kwargs",
"=",
"ray",
".",
"get",
"(",
"kwargs",
")",
"oid_obj",
"=",
"func",
"(",
"oid_obj",
",",
"*",
"*",
"kwargs",
")",
"return",
"oid_obj",
"oid",
"=",
"deploy_ray_func",
".",
"remote",
"(",
"call_queue_closure",
",",
"oid",
",",
"kwargs",
"=",
"{",
"\"call_queues\"",
":",
"self",
".",
"call_queue",
"}",
")",
"self",
".",
"call_queue",
"=",
"[",
"]",
"return",
"PyarrowOnRayFramePartition",
"(",
"oid",
")"
] |
Apply a function to the object stored in this partition.
Note: It does not matter if func is callable or an ObjectID. Ray will
handle it correctly either way. The keyword arguments are sent as a
dictionary.
Args:
func: The function to apply.
Returns:
            A PyarrowOnRayFramePartition object.
|
[
"Apply",
"a",
"function",
"to",
"the",
"object",
"stored",
"in",
"this",
"partition",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/partition.py#L30-L62
|
train
|
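Editor's note: the call-queue mechanics of `PyarrowOnRayFramePartition.apply` above reduce to draining a list of (func, kwargs) pairs in order. A sketch with plain Python objects standing in for Ray ObjectIDs, so no `ray.get` is needed:

def drain_call_queue(obj, call_queue):
    # Apply each queued (func, kwargs) pair in order to the stored object.
    for func, kwargs in call_queue:
        obj = func(obj, **kwargs)
    return obj

queue = [(lambda x, n: x + n, {"n": 1}), (lambda x, n: x * n, {"n": 10})]
print(drain_call_queue(5, queue))  # (5 + 1) * 10 == 60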
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/partition.py
|
PyarrowOnRayFramePartition.to_pandas
|
def to_pandas(self):
"""Convert the object stored in this partition to a Pandas DataFrame.
Returns:
A Pandas DataFrame.
"""
dataframe = self.get().to_pandas()
assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
return dataframe
|
python
|
def to_pandas(self):
"""Convert the object stored in this partition to a Pandas DataFrame.
Returns:
A Pandas DataFrame.
"""
dataframe = self.get().to_pandas()
assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
return dataframe
|
[
"def",
"to_pandas",
"(",
"self",
")",
":",
"dataframe",
"=",
"self",
".",
"get",
"(",
")",
".",
"to_pandas",
"(",
")",
"assert",
"type",
"(",
"dataframe",
")",
"is",
"pandas",
".",
"DataFrame",
"or",
"type",
"(",
"dataframe",
")",
"is",
"pandas",
".",
"Series",
"return",
"dataframe"
] |
Convert the object stored in this partition to a Pandas DataFrame.
Returns:
A Pandas DataFrame.
|
[
"Convert",
"the",
"object",
"stored",
"in",
"this",
"partition",
"to",
"a",
"Pandas",
"DataFrame",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/partition.py#L71-L80
|
train
|
modin-project/modin
|
modin/experimental/engines/pyarrow_on_ray/frame/partition.py
|
PyarrowOnRayFramePartition.put
|
def put(cls, obj):
"""Put an object in the Plasma store and wrap it in this object.
Args:
obj: The object to be put.
Returns:
            A `PyarrowOnRayFramePartition` object.
"""
return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))
|
python
|
def put(cls, obj):
"""Put an object in the Plasma store and wrap it in this object.
Args:
obj: The object to be put.
Returns:
            A `PyarrowOnRayFramePartition` object.
"""
return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))
|
[
"def",
"put",
"(",
"cls",
",",
"obj",
")",
":",
"return",
"PyarrowOnRayFramePartition",
"(",
"ray",
".",
"put",
"(",
"pyarrow",
".",
"Table",
".",
"from_pandas",
"(",
"obj",
")",
")",
")"
] |
Put an object in the Plasma store and wrap it in this object.
Args:
obj: The object to be put.
Returns:
            A `PyarrowOnRayFramePartition` object.
|
[
"Put",
"an",
"object",
"in",
"the",
"Plasma",
"store",
"and",
"wrap",
"it",
"in",
"this",
"object",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pyarrow_on_ray/frame/partition.py#L83-L92
|
train
|
modin-project/modin
|
modin/pandas/general.py
|
isna
|
def isna(obj):
"""
Detect missing values for an array-like object.
Args:
obj: Object to check for null or missing values.
Returns:
bool or array-like of bool
"""
if isinstance(obj, BasePandasDataset):
return obj.isna()
else:
return pandas.isna(obj)
|
python
|
def isna(obj):
"""
Detect missing values for an array-like object.
Args:
obj: Object to check for null or missing values.
Returns:
bool or array-like of bool
"""
if isinstance(obj, BasePandasDataset):
return obj.isna()
else:
return pandas.isna(obj)
|
[
"def",
"isna",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"BasePandasDataset",
")",
":",
"return",
"obj",
".",
"isna",
"(",
")",
"else",
":",
"return",
"pandas",
".",
"isna",
"(",
"obj",
")"
] |
Detect missing values for an array-like object.
Args:
obj: Object to check for null or missing values.
Returns:
bool or array-like of bool
|
[
"Detect",
"missing",
"values",
"for",
"an",
"array",
"-",
"like",
"object",
".",
"Args",
":",
"obj",
":",
"Object",
"to",
"check",
"for",
"null",
"or",
"missing",
"values",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/general.py#L13-L25
|
train
|
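Editor's note: a usage sketch for the dispatching `isna` above. This local re-implementation uses `hasattr` instead of the `BasePandasDataset` check so it runs without modin installed; the dispatch shape is the same.

import numpy as np
import pandas

def isna(obj):
    # Method path for pandas/modin objects, free-function path for everything else.
    return obj.isna() if hasattr(obj, "isna") else pandas.isna(obj)

print(list(isna(pandas.Series([1.0, np.nan]))))  # [False, True]  (method path)
print(list(isna([1.0, np.nan])))                 # [False, True]  (pandas.isna path)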
modin-project/modin
|
modin/pandas/general.py
|
merge
|
def merge(
left,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""Database style join, where common columns in "on" are merged.
Args:
left: DataFrame.
right: DataFrame.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
        A merged DataFrame
"""
if not isinstance(left, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type {}".format(type(right))
)
return left.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
|
python
|
def merge(
left,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""Database style join, where common columns in "on" are merged.
Args:
left: DataFrame.
right: DataFrame.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
        A merged DataFrame
"""
if not isinstance(left, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type {}".format(type(right))
)
return left.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
|
[
"def",
"merge",
"(",
"left",
",",
"right",
",",
"how",
"=",
"\"inner\"",
",",
"on",
"=",
"None",
",",
"left_on",
"=",
"None",
",",
"right_on",
"=",
"None",
",",
"left_index",
"=",
"False",
",",
"right_index",
"=",
"False",
",",
"sort",
"=",
"False",
",",
"suffixes",
"=",
"(",
"\"_x\"",
",",
"\"_y\"",
")",
",",
"copy",
"=",
"True",
",",
"indicator",
"=",
"False",
",",
"validate",
"=",
"None",
",",
")",
":",
"if",
"not",
"isinstance",
"(",
"left",
",",
"DataFrame",
")",
":",
"raise",
"ValueError",
"(",
"\"can not merge DataFrame with instance of type {}\"",
".",
"format",
"(",
"type",
"(",
"right",
")",
")",
")",
"return",
"left",
".",
"merge",
"(",
"right",
",",
"how",
"=",
"how",
",",
"on",
"=",
"on",
",",
"left_on",
"=",
"left_on",
",",
"right_on",
"=",
"right_on",
",",
"left_index",
"=",
"left_index",
",",
"right_index",
"=",
"right_index",
",",
"sort",
"=",
"sort",
",",
"suffixes",
"=",
"suffixes",
",",
"copy",
"=",
"copy",
",",
"indicator",
"=",
"indicator",
",",
"validate",
"=",
"validate",
",",
")"
] |
Database style join, where common columns in "on" are merged.
Args:
left: DataFrame.
right: DataFrame.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
        A merged DataFrame
|
[
"Database",
"style",
"join",
"where",
"common",
"columns",
"in",
"on",
"are",
"merged",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/general.py#L41-L97
|
train
|
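Editor's note: a usage sketch for the `merge` wrapper above, run here with plain pandas; `modin.pandas.merge` takes the same arguments for this common case.

import pandas

left = pandas.DataFrame({"key": [1, 2], "lval": ["a", "b"]})
right = pandas.DataFrame({"key": [2, 3], "rval": ["x", "y"]})
# Inner join on "key" keeps only the overlapping key, 2.
print(pandas.merge(left, right, how="inner", on="key"))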
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
is_distributed
|
def is_distributed(partition_column, lower_bound, upper_bound):
""" Check if is possible distribute a query given that args
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not
"""
if (
(partition_column is not None)
and (lower_bound is not None)
and (upper_bound is not None)
):
if upper_bound > lower_bound:
return True
else:
raise InvalidArguments("upper_bound must be greater than lower_bound.")
elif (partition_column is None) and (lower_bound is None) and (upper_bound is None):
return False
else:
raise InvalidArguments(
"Invalid combination of partition_column, lower_bound, upper_bound."
"All these arguments should be passed (distributed) or none of them (standard pandas)."
)
|
python
|
def is_distributed(partition_column, lower_bound, upper_bound):
""" Check if is possible distribute a query given that args
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not
"""
if (
(partition_column is not None)
and (lower_bound is not None)
and (upper_bound is not None)
):
if upper_bound > lower_bound:
return True
else:
raise InvalidArguments("upper_bound must be greater than lower_bound.")
elif (partition_column is None) and (lower_bound is None) and (upper_bound is None):
return False
else:
raise InvalidArguments(
"Invalid combination of partition_column, lower_bound, upper_bound."
"All these arguments should be passed (distributed) or none of them (standard pandas)."
)
|
[
"def",
"is_distributed",
"(",
"partition_column",
",",
"lower_bound",
",",
"upper_bound",
")",
":",
"if",
"(",
"(",
"partition_column",
"is",
"not",
"None",
")",
"and",
"(",
"lower_bound",
"is",
"not",
"None",
")",
"and",
"(",
"upper_bound",
"is",
"not",
"None",
")",
")",
":",
"if",
"upper_bound",
">",
"lower_bound",
":",
"return",
"True",
"else",
":",
"raise",
"InvalidArguments",
"(",
"\"upper_bound must be greater than lower_bound.\"",
")",
"elif",
"(",
"partition_column",
"is",
"None",
")",
"and",
"(",
"lower_bound",
"is",
"None",
")",
"and",
"(",
"upper_bound",
"is",
"None",
")",
":",
"return",
"False",
"else",
":",
"raise",
"InvalidArguments",
"(",
"\"Invalid combination of partition_column, lower_bound, upper_bound.\"",
"\"All these arguments should be passed (distributed) or none of them (standard pandas).\"",
")"
] |
 Check if it is possible to distribute a query given these args
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not
|
[
"Check",
"if",
"is",
"possible",
"distribute",
"a",
"query",
"given",
"that",
"args"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L5-L31
|
train
|
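Editor's note: a runnable sketch of the three argument combinations `is_distributed` above distinguishes, re-implemented locally (with a local `InvalidArguments`) so it works without modin installed.

class InvalidArguments(Exception):
    pass

def is_distributed(partition_column, lower_bound, upper_bound):
    args = (partition_column, lower_bound, upper_bound)
    if all(a is not None for a in args):
        if upper_bound > lower_bound:
            return True
        raise InvalidArguments("upper_bound must be greater than lower_bound.")
    if all(a is None for a in args):
        return False
    raise InvalidArguments("Pass all three arguments or none of them.")

print(is_distributed("id", 0, 100))      # True  -> distributed read
print(is_distributed(None, None, None))  # False -> plain pandas read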
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
is_table
|
def is_table(engine, sql):
""" Check with the given sql arg is query or table
Args:
engine: SQLAlchemy connection engine
sql: SQL query or table name
Returns:
True for table or False if not
"""
if engine.dialect.has_table(engine, sql):
return True
return False
|
python
|
def is_table(engine, sql):
""" Check with the given sql arg is query or table
Args:
engine: SQLAlchemy connection engine
sql: SQL query or table name
Returns:
True for table or False if not
"""
if engine.dialect.has_table(engine, sql):
return True
return False
|
[
"def",
"is_table",
"(",
"engine",
",",
"sql",
")",
":",
"if",
"engine",
".",
"dialect",
".",
"has_table",
"(",
"engine",
",",
"sql",
")",
":",
"return",
"True",
"return",
"False"
] |
 Check whether the given sql arg is a query or a table
Args:
engine: SQLAlchemy connection engine
sql: SQL query or table name
Returns:
True for table or False if not
|
[
"Check",
"with",
"the",
"given",
"sql",
"arg",
"is",
"query",
"or",
"table"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L34-L46
|
train
|
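Editor's note: a usage sketch for the `has_table` check that `is_table` above builds on, against an in-memory SQLite database. Assumes SQLAlchemy 1.4+; the `users` table is made up for the demo, and the dialect-level call is given a connection rather than the engine, which is the more portable form.

from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")
with engine.connect() as con:
    con.execute(text("CREATE TABLE users (id INTEGER)"))
    print(engine.dialect.has_table(con, "users"))    # True  -> treat as table
    print(engine.dialect.has_table(con, "no_such"))  # False -> treat as query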
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
get_table_metadata
|
def get_table_metadata(engine, table):
""" Extract all useful infos from the given table
Args:
engine: SQLAlchemy connection engine
table: table name
Returns:
        Table metadata object
"""
metadata = MetaData()
metadata.reflect(bind=engine, only=[table])
table_metadata = Table(table, metadata, autoload=True)
return table_metadata
|
python
|
def get_table_metadata(engine, table):
""" Extract all useful infos from the given table
Args:
engine: SQLAlchemy connection engine
table: table name
Returns:
        Table metadata object
"""
metadata = MetaData()
metadata.reflect(bind=engine, only=[table])
table_metadata = Table(table, metadata, autoload=True)
return table_metadata
|
[
"def",
"get_table_metadata",
"(",
"engine",
",",
"table",
")",
":",
"metadata",
"=",
"MetaData",
"(",
")",
"metadata",
".",
"reflect",
"(",
"bind",
"=",
"engine",
",",
"only",
"=",
"[",
"table",
"]",
")",
"table_metadata",
"=",
"Table",
"(",
"table",
",",
"metadata",
",",
"autoload",
"=",
"True",
")",
"return",
"table_metadata"
] |
 Extract all useful info from the given table
Args:
engine: SQLAlchemy connection engine
table: table name
Returns:
        Table metadata object
|
[
"Extract",
"all",
"useful",
"infos",
"from",
"the",
"given",
"table"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L49-L62
|
train
|
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
get_table_columns
|
def get_table_columns(metadata):
""" Extract columns names and python typos from metadata
Args:
metadata: Table metadata
Returns:
dict with columns names and python types
"""
cols = OrderedDict()
for col in metadata.c:
name = str(col).rpartition(".")[2]
cols[name] = col.type.python_type.__name__
return cols
|
python
|
def get_table_columns(metadata):
""" Extract columns names and python typos from metadata
Args:
metadata: Table metadata
Returns:
dict with columns names and python types
"""
cols = OrderedDict()
for col in metadata.c:
name = str(col).rpartition(".")[2]
cols[name] = col.type.python_type.__name__
return cols
|
[
"def",
"get_table_columns",
"(",
"metadata",
")",
":",
"cols",
"=",
"OrderedDict",
"(",
")",
"for",
"col",
"in",
"metadata",
".",
"c",
":",
"name",
"=",
"str",
"(",
"col",
")",
".",
"rpartition",
"(",
"\".\"",
")",
"[",
"2",
"]",
"cols",
"[",
"name",
"]",
"=",
"col",
".",
"type",
".",
"python_type",
".",
"__name__",
"return",
"cols"
] |
 Extract column names and python types from metadata
Args:
metadata: Table metadata
Returns:
dict with columns names and python types
|
[
"Extract",
"columns",
"names",
"and",
"python",
"typos",
"from",
"metadata"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L65-L78
|
train
|
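Editor's note: a standalone sketch of pulling python types out of SQLAlchemy column metadata, the same idea as `get_table_columns` above; the `users` table is constructed in memory here rather than reflected from a database.

from collections import OrderedDict
from sqlalchemy import Column, Integer, MetaData, String, Table

metadata = MetaData()
users = Table("users", metadata, Column("id", Integer), Column("name", String))
cols = OrderedDict()
for col in users.c:
    name = str(col).rpartition(".")[2]           # "users.id" -> "id"
    cols[name] = col.type.python_type.__name__   # Integer -> "int", String -> "str"
print(cols)  # OrderedDict([('id', 'int'), ('name', 'str')])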
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
check_query
|
def check_query(query):
""" Check query sanity
Args:
query: query string
Returns:
None
"""
q = query.lower()
if "select " not in q:
raise InvalidQuery("SELECT word not found in the query: {0}".format(query))
if " from " not in q:
raise InvalidQuery("FROM word not found in the query: {0}".format(query))
|
python
|
def check_query(query):
""" Check query sanity
Args:
query: query string
Returns:
None
"""
q = query.lower()
if "select " not in q:
raise InvalidQuery("SELECT word not found in the query: {0}".format(query))
if " from " not in q:
raise InvalidQuery("FROM word not found in the query: {0}".format(query))
|
[
"def",
"check_query",
"(",
"query",
")",
":",
"q",
"=",
"query",
".",
"lower",
"(",
")",
"if",
"\"select \"",
"not",
"in",
"q",
":",
"raise",
"InvalidQuery",
"(",
"\"SELECT word not found in the query: {0}\"",
".",
"format",
"(",
"query",
")",
")",
"if",
"\" from \"",
"not",
"in",
"q",
":",
"raise",
"InvalidQuery",
"(",
"\"FROM word not found in the query: {0}\"",
".",
"format",
"(",
"query",
")",
")"
] |
Check query sanity
Args:
query: query string
Returns:
None
|
[
"Check",
"query",
"sanity"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L93-L106
|
train
|
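Editor's note: a quick behavior sketch for `check_query` above. The check is a plain substring test on the lowercased query, so both keywords must appear with the surrounding spacing shown.

q = "SELECT id FROM users".lower()
print("select " in q, " from " in q)  # True True  -> passes the check
q = "DELETE FROM users".lower()
print("select " in q)                 # False -> check_query would raise InvalidQuery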
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
get_query_columns
|
def get_query_columns(engine, query):
""" Extract columns names and python typos from query
Args:
engine: SQLAlchemy connection engine
query: SQL query
Returns:
dict with columns names and python types
"""
con = engine.connect()
result = con.execute(query).fetchone()
values = list(result)
cols_names = result.keys()
cols = OrderedDict()
for i in range(len(cols_names)):
cols[cols_names[i]] = type(values[i]).__name__
return cols
|
python
|
def get_query_columns(engine, query):
""" Extract columns names and python typos from query
Args:
engine: SQLAlchemy connection engine
query: SQL query
Returns:
dict with columns names and python types
"""
con = engine.connect()
result = con.execute(query).fetchone()
values = list(result)
cols_names = result.keys()
cols = OrderedDict()
for i in range(len(cols_names)):
cols[cols_names[i]] = type(values[i]).__name__
return cols
|
[
"def",
"get_query_columns",
"(",
"engine",
",",
"query",
")",
":",
"con",
"=",
"engine",
".",
"connect",
"(",
")",
"result",
"=",
"con",
".",
"execute",
"(",
"query",
")",
".",
"fetchone",
"(",
")",
"values",
"=",
"list",
"(",
"result",
")",
"cols_names",
"=",
"result",
".",
"keys",
"(",
")",
"cols",
"=",
"OrderedDict",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"cols_names",
")",
")",
":",
"cols",
"[",
"cols_names",
"[",
"i",
"]",
"]",
"=",
"type",
"(",
"values",
"[",
"i",
"]",
")",
".",
"__name__",
"return",
"cols"
] |
 Extract column names and python types from query
Args:
engine: SQLAlchemy connection engine
query: SQL query
Returns:
dict with columns names and python types
|
[
"Extract",
"columns",
"names",
"and",
"python",
"typos",
"from",
"query"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L109-L126
|
train
|
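Editor's note: a sketch of the first-row type inference used by `get_query_columns` above, rewritten with `zip` instead of an index loop. The sample row stands in for `con.execute(query).fetchone()`; note that a NULL in the sampled row comes back as "NoneType", a limitation of inferring types from a single row.

from collections import OrderedDict

cols_names = ["id", "name", "score"]
values = [1, "alice", None]  # stand-in for con.execute(query).fetchone()
cols = OrderedDict(
    (name, type(value).__name__) for name, value in zip(cols_names, values)
)
print(cols)  # OrderedDict([('id', 'int'), ('name', 'str'), ('score', 'NoneType')])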
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
check_partition_column
|
def check_partition_column(partition_column, cols):
""" Check partition_column existence and type
Args:
partition_column: partition_column name
cols: dict with columns names and python types
Returns:
None
"""
for k, v in cols.items():
if k == partition_column:
if v == "int":
return
else:
raise InvalidPartitionColumn(
"partition_column must be int, and not {0}".format(v)
)
raise InvalidPartitionColumn(
"partition_column {0} not found in the query".format(partition_column)
)
|
python
|
def check_partition_column(partition_column, cols):
""" Check partition_column existence and type
Args:
partition_column: partition_column name
cols: dict with columns names and python types
Returns:
None
"""
for k, v in cols.items():
if k == partition_column:
if v == "int":
return
else:
raise InvalidPartitionColumn(
"partition_column must be int, and not {0}".format(v)
)
raise InvalidPartitionColumn(
"partition_column {0} not found in the query".format(partition_column)
)
|
[
"def",
"check_partition_column",
"(",
"partition_column",
",",
"cols",
")",
":",
"for",
"k",
",",
"v",
"in",
"cols",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"partition_column",
":",
"if",
"v",
"==",
"\"int\"",
":",
"return",
"else",
":",
"raise",
"InvalidPartitionColumn",
"(",
"\"partition_column must be int, and not {0}\"",
".",
"format",
"(",
"v",
")",
")",
"raise",
"InvalidPartitionColumn",
"(",
"\"partition_column {0} not found in the query\"",
".",
"format",
"(",
"partition_column",
")",
")"
] |
Check partition_column existence and type
Args:
partition_column: partition_column name
cols: dict with columns names and python types
Returns:
None
|
[
"Check",
"partition_column",
"existence",
"and",
"type"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L129-L149
|
train
|
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
get_query_info
|
def get_query_info(sql, con, partition_column):
""" Return a columns name list and the query string
Args:
sql: SQL query or table name
con: database connection or url string
partition_column: column used to share the data between the workers
Returns:
Columns name list and query string
"""
engine = create_engine(con)
if is_table(engine, sql):
table_metadata = get_table_metadata(engine, sql)
query = build_query_from_table(sql)
cols = get_table_columns(table_metadata)
else:
check_query(sql)
query = sql.replace(";", "")
cols = get_query_columns(engine, query)
# TODO allow validation that takes into account edge cases of pandas e.g. "[index]"
# check_partition_column(partition_column, cols)
cols_names = list(cols.keys())
return cols_names, query
|
python
|
def get_query_info(sql, con, partition_column):
""" Return a columns name list and the query string
Args:
sql: SQL query or table name
con: database connection or url string
partition_column: column used to share the data between the workers
Returns:
Columns name list and query string
"""
engine = create_engine(con)
if is_table(engine, sql):
table_metadata = get_table_metadata(engine, sql)
query = build_query_from_table(sql)
cols = get_table_columns(table_metadata)
else:
check_query(sql)
query = sql.replace(";", "")
cols = get_query_columns(engine, query)
# TODO allow validation that takes into account edge cases of pandas e.g. "[index]"
# check_partition_column(partition_column, cols)
cols_names = list(cols.keys())
return cols_names, query
|
[
"def",
"get_query_info",
"(",
"sql",
",",
"con",
",",
"partition_column",
")",
":",
"engine",
"=",
"create_engine",
"(",
"con",
")",
"if",
"is_table",
"(",
"engine",
",",
"sql",
")",
":",
"table_metadata",
"=",
"get_table_metadata",
"(",
"engine",
",",
"sql",
")",
"query",
"=",
"build_query_from_table",
"(",
"sql",
")",
"cols",
"=",
"get_table_columns",
"(",
"table_metadata",
")",
"else",
":",
"check_query",
"(",
"sql",
")",
"query",
"=",
"sql",
".",
"replace",
"(",
"\";\"",
",",
"\"\"",
")",
"cols",
"=",
"get_query_columns",
"(",
"engine",
",",
"query",
")",
"# TODO allow validation that takes into account edge cases of pandas e.g. \"[index]\"",
"# check_partition_column(partition_column, cols)",
"cols_names",
"=",
"list",
"(",
"cols",
".",
"keys",
"(",
")",
")",
"return",
"cols_names",
",",
"query"
] |
 Return a list of column names and the query string
Args:
sql: SQL query or table name
con: database connection or url string
partition_column: column used to share the data between the workers
Returns:
Columns name list and query string
|
[
"Return",
"a",
"columns",
"name",
"list",
"and",
"the",
"query",
"string"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L152-L175
|
train
|
modin-project/modin
|
modin/experimental/engines/pandas_on_ray/sql.py
|
query_put_bounders
|
def query_put_bounders(query, partition_column, start, end):
""" Put bounders in the query
Args:
query: SQL query string
partition_column: partition_column name
start: lower_bound
end: upper_bound
Returns:
        Query with bounds
"""
where = " WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}".format(
partition_column, start, end
)
query_with_bounders = "SELECT * FROM ({0}) AS TMP_TABLE {1}".format(query, where)
return query_with_bounders
|
python
|
def query_put_bounders(query, partition_column, start, end):
""" Put bounders in the query
Args:
query: SQL query string
partition_column: partition_column name
start: lower_bound
end: upper_bound
Returns:
        Query with bounds
"""
where = " WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}".format(
partition_column, start, end
)
query_with_bounders = "SELECT * FROM ({0}) AS TMP_TABLE {1}".format(query, where)
return query_with_bounders
|
[
"def",
"query_put_bounders",
"(",
"query",
",",
"partition_column",
",",
"start",
",",
"end",
")",
":",
"where",
"=",
"\" WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}\"",
".",
"format",
"(",
"partition_column",
",",
"start",
",",
"end",
")",
"query_with_bounders",
"=",
"\"SELECT * FROM ({0}) AS TMP_TABLE {1}\"",
".",
"format",
"(",
"query",
",",
"where",
")",
"return",
"query_with_bounders"
] |
 Put bounds in the query
Args:
query: SQL query string
partition_column: partition_column name
start: lower_bound
end: upper_bound
Returns:
        Query with bounds
|
[
"Put",
"bounders",
"in",
"the",
"query"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L178-L194
|
train
|
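Editor's note: an output sketch for `query_put_bounders` above; the format strings mirror the originals, and the `users` query is made up for the demo.

query = "SELECT * FROM users"
where = " WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}".format("id", 0, 99)
# `where` already starts with a space, so the original template yields a doubled
# space before WHERE; harmless to SQL parsers.
print("SELECT * FROM ({0}) AS TMP_TABLE {1}".format(query, where))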
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.compute_index
|
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
|
python
|
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
|
[
"def",
"compute_index",
"(",
"self",
",",
"axis",
",",
"data_object",
",",
"compute_diff",
"=",
"True",
")",
":",
"def",
"pandas_index_extraction",
"(",
"df",
",",
"axis",
")",
":",
"if",
"not",
"axis",
":",
"return",
"df",
".",
"index",
"else",
":",
"try",
":",
"return",
"df",
".",
"columns",
"except",
"AttributeError",
":",
"return",
"pandas",
".",
"Index",
"(",
"[",
"]",
")",
"index_obj",
"=",
"self",
".",
"index",
"if",
"not",
"axis",
"else",
"self",
".",
"columns",
"old_blocks",
"=",
"self",
".",
"data",
"if",
"compute_diff",
"else",
"None",
"new_indices",
"=",
"data_object",
".",
"get_indices",
"(",
"axis",
"=",
"axis",
",",
"index_func",
"=",
"lambda",
"df",
":",
"pandas_index_extraction",
"(",
"df",
",",
"axis",
")",
",",
"old_blocks",
"=",
"old_blocks",
",",
")",
"return",
"index_obj",
"[",
"new_indices",
"]",
"if",
"compute_diff",
"else",
"new_indices"
] |
Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
|
[
"Computes",
"the",
"index",
"after",
"a",
"number",
"of",
"rows",
"have",
"been",
"removed",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L64-L98
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._prepare_method
|
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
        Returns:
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
|
python
|
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
        Returns:
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
|
[
"def",
"_prepare_method",
"(",
"self",
",",
"pandas_func",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"def",
"helper",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"if",
"len",
"(",
"internal_indices",
")",
">",
"0",
":",
"return",
"pandas_func",
"(",
"df",
".",
"T",
",",
"internal_indices",
"=",
"internal_indices",
",",
"*",
"*",
"kwargs",
")",
"return",
"pandas_func",
"(",
"df",
".",
"T",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"def",
"helper",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"if",
"len",
"(",
"internal_indices",
")",
">",
"0",
":",
"return",
"pandas_func",
"(",
"df",
",",
"internal_indices",
"=",
"internal_indices",
",",
"*",
"*",
"kwargs",
")",
"return",
"pandas_func",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
"return",
"helper"
] |
Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
        Returns:
Helper function which handles potential transpose.
|
[
"Prepares",
"methods",
"given",
"various",
"metadata",
".",
"Args",
":",
"pandas_func",
":",
"The",
"function",
"to",
"prepare",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L141-L165
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.numeric_columns
|
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
            List of column names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
|
python
|
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
            List of column names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
|
[
"def",
"numeric_columns",
"(",
"self",
",",
"include_bool",
"=",
"True",
")",
":",
"columns",
"=",
"[",
"]",
"for",
"col",
",",
"dtype",
"in",
"zip",
"(",
"self",
".",
"columns",
",",
"self",
".",
"dtypes",
")",
":",
"if",
"is_numeric_dtype",
"(",
"dtype",
")",
"and",
"(",
"include_bool",
"or",
"(",
"not",
"include_bool",
"and",
"dtype",
"!=",
"np",
".",
"bool_",
")",
")",
":",
"columns",
".",
"append",
"(",
"col",
")",
"return",
"columns"
] |
Returns the numeric columns of the Manager.
Returns:
            List of column names.
|
[
"Returns",
"the",
"numeric",
"columns",
"of",
"the",
"Manager",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L167-L179
|
train
|
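Editor's note: a standalone sketch of the dtype filter in `numeric_columns` above, using a plain pandas DataFrame in place of the query compiler's metadata; the boolean condition is simplified to the equivalent `include_bool or dtype != np.bool_`.

import numpy as np
import pandas
from pandas.api.types import is_numeric_dtype

df = pandas.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [True, False]})
include_bool = False
numeric = [
    col
    for col, dtype in zip(df.columns, df.dtypes)
    if is_numeric_dtype(dtype) and (include_bool or dtype != np.bool_)
]
print(numeric)  # ['a'] -- the bool column 'c' is excluded when include_bool=False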
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.numeric_function_clean_dataframe
|
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
            Tuple with return value (if any), indices to apply func to, and cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
|
python
|
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
            Tuple with return value (if any), indices to apply func to, and cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
|
[
"def",
"numeric_function_clean_dataframe",
"(",
"self",
",",
"axis",
")",
":",
"result",
"=",
"None",
"query_compiler",
"=",
"self",
"# If no numeric columns and over columns, then return empty Series",
"if",
"not",
"axis",
"and",
"len",
"(",
"self",
".",
"index",
")",
"==",
"0",
":",
"result",
"=",
"pandas",
".",
"Series",
"(",
"dtype",
"=",
"np",
".",
"int64",
")",
"nonnumeric",
"=",
"[",
"col",
"for",
"col",
",",
"dtype",
"in",
"zip",
"(",
"self",
".",
"columns",
",",
"self",
".",
"dtypes",
")",
"if",
"not",
"is_numeric_dtype",
"(",
"dtype",
")",
"]",
"if",
"len",
"(",
"nonnumeric",
")",
"==",
"len",
"(",
"self",
".",
"columns",
")",
":",
"# If over rows and no numeric columns, return this",
"if",
"axis",
":",
"result",
"=",
"pandas",
".",
"Series",
"(",
"[",
"np",
".",
"nan",
"for",
"_",
"in",
"self",
".",
"index",
"]",
")",
"else",
":",
"result",
"=",
"pandas",
".",
"Series",
"(",
"[",
"0",
"for",
"_",
"in",
"self",
".",
"index",
"]",
")",
"else",
":",
"query_compiler",
"=",
"self",
".",
"drop",
"(",
"columns",
"=",
"nonnumeric",
")",
"return",
"result",
",",
"query_compiler"
] |
Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
            Tuple with return value (if any), indices to apply func to, and cleaned Manager.
|
[
"Preprocesses",
"numeric",
"functions",
"to",
"clean",
"dataframe",
"and",
"pick",
"numeric",
"indices",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L181-L209
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._join_index_objects
|
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
            how: The type of join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
|
python
|
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
            how: The type of join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
|
[
"def",
"_join_index_objects",
"(",
"self",
",",
"axis",
",",
"other_index",
",",
"how",
",",
"sort",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"other_index",
",",
"list",
")",
":",
"joined_obj",
"=",
"self",
".",
"columns",
"if",
"not",
"axis",
"else",
"self",
".",
"index",
"# TODO: revisit for performance",
"for",
"obj",
"in",
"other_index",
":",
"joined_obj",
"=",
"joined_obj",
".",
"join",
"(",
"obj",
",",
"how",
"=",
"how",
")",
"return",
"joined_obj",
"if",
"not",
"axis",
":",
"return",
"self",
".",
"columns",
".",
"join",
"(",
"other_index",
",",
"how",
"=",
"how",
",",
"sort",
"=",
"sort",
")",
"else",
":",
"return",
"self",
".",
"index",
".",
"join",
"(",
"other_index",
",",
"how",
"=",
"how",
",",
"sort",
"=",
"sort",
")"
] |
Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
            how: The type of join to make (e.g. right, left).
Returns:
Joined indices.
|
[
"Joins",
"a",
"pair",
"of",
"index",
"objects",
"(",
"columns",
"or",
"rows",
")",
"by",
"a",
"given",
"strategy",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L268-L289
|
train
|
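Editor's note: a behavior sketch for the pandas `Index.join` calls that `_join_index_objects` above delegates to.

import pandas

left = pandas.Index(["a", "b", "c"])
right = pandas.Index(["b", "c", "d"])
print(left.join(right, how="outer"))  # Index(['a', 'b', 'c', 'd'], dtype='object')
print(left.join(right, how="inner"))  # Index(['b', 'c'], dtype='object')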
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.join
|
def join(self, other, **kwargs):
"""Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
|
python
|
def join(self, other, **kwargs):
"""Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
|
[
"def",
"join",
"(",
"self",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"list",
")",
":",
"other",
"=",
"[",
"other",
"]",
"return",
"self",
".",
"_join_list_of_managers",
"(",
"other",
",",
"*",
"*",
"kwargs",
")"
] |
Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
|
[
"Joins",
"a",
"list",
"or",
"two",
"objects",
"together",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L291-L302
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.concat
|
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
            other: The other object to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
|
python
|
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
|
[
"def",
"concat",
"(",
"self",
",",
"axis",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_append_list_of_managers",
"(",
"other",
",",
"axis",
",",
"*",
"*",
"kwargs",
")"
] |
Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
            other: The other object to concat with.
Returns:
Concatenated objects.
|
[
"Concatenates",
"two",
"objects",
"together",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L304-L314
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.copartition
|
def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
"""Copartition two QueryCompiler objects.
Args:
axis: The axis to copartition along.
other: The other Query Compiler(s) to copartition against.
how_to_join: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left query compiler, right query compiler list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_obj = (
[o.index for o in other] if axis == 0 else [o.columns for o in other]
)
joined_index = self._join_index_objects(
axis ^ 1, index_obj, how_to_join, sort=sort
)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.index if axis == 0 else self.columns
right_old_idxes = index_obj
# Start with this and we'll repartition the first time, and then not again.
reindexed_self = self.data
reindexed_other_list = []
def compute_reindex(old_idx):
"""Create a function based on the old index and axis.
Args:
old_idx: The old index/columns
Returns:
A function that will be run in each partition.
"""
def reindex_partition(df):
if axis == 0:
df.index = old_idx
new_df = df.reindex(index=joined_index)
new_df.index = pandas.RangeIndex(len(new_df.index))
else:
df.columns = old_idx
new_df = df.reindex(columns=joined_index)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
return reindex_partition
for i in range(len(other)):
# If the indices are equal we can skip partitioning so long as we are not
# forced to repartition. See note above about `force_repartition`.
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
reindex_left = None
else:
reindex_left = self._prepare_method(compute_reindex(left_old_idx))
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindex_right = None
else:
reindex_right = other[i]._prepare_method(
compute_reindex(right_old_idxes[i])
)
reindexed_self, reindexed_other = reindexed_self.copartition_datasets(
axis, other[i].data, reindex_left, reindex_right
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
python
|
def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
"""Copartition two QueryCompiler objects.
Args:
axis: The axis to copartition along.
other: The other Query Compiler(s) to copartition against.
how_to_join: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left query compiler, right query compiler list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_obj = (
[o.index for o in other] if axis == 0 else [o.columns for o in other]
)
joined_index = self._join_index_objects(
axis ^ 1, index_obj, how_to_join, sort=sort
)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.index if axis == 0 else self.columns
right_old_idxes = index_obj
# Start with this and we'll repartition the first time, and then not again.
reindexed_self = self.data
reindexed_other_list = []
def compute_reindex(old_idx):
"""Create a function based on the old index and axis.
Args:
old_idx: The old index/columns
Returns:
A function that will be run in each partition.
"""
def reindex_partition(df):
if axis == 0:
df.index = old_idx
new_df = df.reindex(index=joined_index)
new_df.index = pandas.RangeIndex(len(new_df.index))
else:
df.columns = old_idx
new_df = df.reindex(columns=joined_index)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
return reindex_partition
for i in range(len(other)):
# If the indices are equal we can skip partitioning so long as we are not
# forced to repartition. See note above about `force_repartition`.
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
reindex_left = None
else:
reindex_left = self._prepare_method(compute_reindex(left_old_idx))
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindex_right = None
else:
reindex_right = other[i]._prepare_method(
compute_reindex(right_old_idxes[i])
)
reindexed_self, reindexed_other = reindexed_self.copartition_datasets(
axis, other[i].data, reindex_left, reindex_right
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
[
"def",
"copartition",
"(",
"self",
",",
"axis",
",",
"other",
",",
"how_to_join",
",",
"sort",
",",
"force_repartition",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"type",
"(",
"self",
")",
")",
":",
"other",
"=",
"[",
"other",
"]",
"index_obj",
"=",
"(",
"[",
"o",
".",
"index",
"for",
"o",
"in",
"other",
"]",
"if",
"axis",
"==",
"0",
"else",
"[",
"o",
".",
"columns",
"for",
"o",
"in",
"other",
"]",
")",
"joined_index",
"=",
"self",
".",
"_join_index_objects",
"(",
"axis",
"^",
"1",
",",
"index_obj",
",",
"how_to_join",
",",
"sort",
"=",
"sort",
")",
"# We have to set these because otherwise when we perform the functions it may",
"# end up serializing this entire object.",
"left_old_idx",
"=",
"self",
".",
"index",
"if",
"axis",
"==",
"0",
"else",
"self",
".",
"columns",
"right_old_idxes",
"=",
"index_obj",
"# Start with this and we'll repartition the first time, and then not again.",
"reindexed_self",
"=",
"self",
".",
"data",
"reindexed_other_list",
"=",
"[",
"]",
"def",
"compute_reindex",
"(",
"old_idx",
")",
":",
"\"\"\"Create a function based on the old index and axis.\n\n Args:\n old_idx: The old index/columns\n\n Returns:\n A function that will be run in each partition.\n \"\"\"",
"def",
"reindex_partition",
"(",
"df",
")",
":",
"if",
"axis",
"==",
"0",
":",
"df",
".",
"index",
"=",
"old_idx",
"new_df",
"=",
"df",
".",
"reindex",
"(",
"index",
"=",
"joined_index",
")",
"new_df",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"new_df",
".",
"index",
")",
")",
"else",
":",
"df",
".",
"columns",
"=",
"old_idx",
"new_df",
"=",
"df",
".",
"reindex",
"(",
"columns",
"=",
"joined_index",
")",
"new_df",
".",
"columns",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"new_df",
".",
"columns",
")",
")",
"return",
"new_df",
"return",
"reindex_partition",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"other",
")",
")",
":",
"# If the indices are equal we can skip partitioning so long as we are not",
"# forced to repartition. See note above about `force_repartition`.",
"if",
"i",
"!=",
"0",
"or",
"(",
"left_old_idx",
".",
"equals",
"(",
"joined_index",
")",
"and",
"not",
"force_repartition",
")",
":",
"reindex_left",
"=",
"None",
"else",
":",
"reindex_left",
"=",
"self",
".",
"_prepare_method",
"(",
"compute_reindex",
"(",
"left_old_idx",
")",
")",
"if",
"right_old_idxes",
"[",
"i",
"]",
".",
"equals",
"(",
"joined_index",
")",
"and",
"not",
"force_repartition",
":",
"reindex_right",
"=",
"None",
"else",
":",
"reindex_right",
"=",
"other",
"[",
"i",
"]",
".",
"_prepare_method",
"(",
"compute_reindex",
"(",
"right_old_idxes",
"[",
"i",
"]",
")",
")",
"reindexed_self",
",",
"reindexed_other",
"=",
"reindexed_self",
".",
"copartition_datasets",
"(",
"axis",
",",
"other",
"[",
"i",
"]",
".",
"data",
",",
"reindex_left",
",",
"reindex_right",
")",
"reindexed_other_list",
".",
"append",
"(",
"reindexed_other",
")",
"return",
"reindexed_self",
",",
"reindexed_other_list",
",",
"joined_index"
] |
Copartition two QueryCompiler objects.
Args:
axis: The axis to copartition along.
other: The other Query Compiler(s) to copartition against.
how_to_join: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left query compiler, right query compiler list, joined index).
|
[
"Copartition",
"two",
"QueryCompiler",
"objects",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L380-L456
|
train
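The logical effect of `copartition` can be sketched with plain pandas: join the axis labels with the requested strategy, then reindex every operand to the joined result. This is only an analogue of the partition-level work above, with illustrative data:

import pandas as pd

a = pd.DataFrame({"x": [1, 2]}, index=[0, 1])
b = pd.DataFrame({"y": [3, 4]}, index=[1, 2])

# Index.join plays the role of _join_index_objects here.
joined = a.index.join(b.index, how="outer", sort=False)
a_aligned = a.reindex(index=joined)
b_aligned = b.reindex(index=joined)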
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.to_pandas
|
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
|
python
|
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
|
[
"def",
"to_pandas",
"(",
"self",
")",
":",
"df",
"=",
"self",
".",
"data",
".",
"to_pandas",
"(",
"is_transposed",
"=",
"self",
".",
"_is_transposed",
")",
"if",
"df",
".",
"empty",
":",
"if",
"len",
"(",
"self",
".",
"columns",
")",
"!=",
"0",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"self",
".",
"columns",
")",
".",
"astype",
"(",
"self",
".",
"dtypes",
")",
"else",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"self",
".",
"columns",
",",
"index",
"=",
"self",
".",
"index",
")",
"else",
":",
"ErrorMessage",
".",
"catch_bugs_and_request_email",
"(",
"len",
"(",
"df",
".",
"index",
")",
"!=",
"len",
"(",
"self",
".",
"index",
")",
"or",
"len",
"(",
"df",
".",
"columns",
")",
"!=",
"len",
"(",
"self",
".",
"columns",
")",
")",
"df",
".",
"index",
"=",
"self",
".",
"index",
"df",
".",
"columns",
"=",
"self",
".",
"columns",
"return",
"df"
] |
Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
|
[
"Converts",
"Modin",
"DataFrame",
"to",
"Pandas",
"DataFrame",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L468-L486
|
train
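The empty-frame branch above exists to keep column dtypes on a zero-row result. A small pandas illustration of why the astype call matters (column names and dtypes are illustrative):

import pandas as pd

empty = pd.DataFrame(columns=["a", "b"]).astype({"a": "int64", "b": "float64"})
# Even with no rows, the dtypes survive.
print(empty.dtypes)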
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.from_pandas
|
def from_pandas(cls, df, block_partitions_cls):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManager object to convert the DataFrame to.
df: Pandas DataFrame object.
block_partitions_cls: BlockPartitions object to store partitions
Returns:
Returns DataManager containing data from the Pandas DataFrame.
"""
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
|
python
|
def from_pandas(cls, df, block_partitions_cls):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManager object to convert the DataFrame to.
df: Pandas DataFrame object.
block_partitions_cls: BlockPartitions object to store partitions
Returns:
Returns DataManager containing data from the Pandas DataFrame.
"""
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
|
[
"def",
"from_pandas",
"(",
"cls",
",",
"df",
",",
"block_partitions_cls",
")",
":",
"new_index",
"=",
"df",
".",
"index",
"new_columns",
"=",
"df",
".",
"columns",
"new_dtypes",
"=",
"df",
".",
"dtypes",
"new_data",
"=",
"block_partitions_cls",
".",
"from_pandas",
"(",
"df",
")",
"return",
"cls",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
",",
"dtypes",
"=",
"new_dtypes",
")"
] |
Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManager object to convert the DataFrame to.
df: Pandas DataFrame object.
block_partitions_cls: BlockPartitions object to store partitions
Returns:
Returns DataManager containing data from the Pandas DataFrame.
|
[
"Improve",
"simple",
"Pandas",
"DataFrame",
"to",
"an",
"advanced",
"and",
"superior",
"Modin",
"DataFrame",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L489-L504
|
train
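A usage sketch: constructing a Modin DataFrame from a pandas one funnels through this kind of from_pandas conversion internally. The import alias is illustrative; modin.pandas mirrors the pandas API:

import pandas
import modin.pandas as pd

pandas_df = pandas.DataFrame({"a": [1, 2, 3]})
modin_df = pd.DataFrame(pandas_df)  # partitions the data under the hood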
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._inter_manager_operations
|
def _inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to make (e.g. right, outer).
Returns:
New DataManager with new data and index.
"""
reindexed_self, reindexed_other_list, joined_index = self.copartition(
0, other, how_to_join, False
)
# unwrap list returned by `copartition`.
reindexed_other = reindexed_other_list[0]
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
# There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, func):
left.columns = self_cols
right.columns = other_cols
# We reset here to make sure that the internal indexes match. We aligned
# them in the previous step, so this step is to prevent mismatches.
left.index = pandas.RangeIndex(len(left.index))
right.index = pandas.RangeIndex(len(right.index))
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other
)
return self.__constructor__(new_data, joined_index, new_columns)
|
python
|
def _inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to make (e.g. right, outer).
Returns:
New DataManager with new data and index.
"""
reindexed_self, reindexed_other_list, joined_index = self.copartition(
0, other, how_to_join, False
)
# unwrap list returned by `copartition`.
reindexed_other = reindexed_other_list[0]
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
# There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, func):
left.columns = self_cols
right.columns = other_cols
# We reset here to make sure that the internal indexes match. We aligned
# them in the previous step, so this step is to prevent mismatches.
left.index = pandas.RangeIndex(len(left.index))
right.index = pandas.RangeIndex(len(right.index))
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other
)
return self.__constructor__(new_data, joined_index, new_columns)
|
[
"def",
"_inter_manager_operations",
"(",
"self",
",",
"other",
",",
"how_to_join",
",",
"func",
")",
":",
"reindexed_self",
",",
"reindexed_other_list",
",",
"joined_index",
"=",
"self",
".",
"copartition",
"(",
"0",
",",
"other",
",",
"how_to_join",
",",
"False",
")",
"# unwrap list returned by `copartition`.",
"reindexed_other",
"=",
"reindexed_other_list",
"[",
"0",
"]",
"new_columns",
"=",
"self",
".",
"_join_index_objects",
"(",
"0",
",",
"other",
".",
"columns",
",",
"how_to_join",
",",
"sort",
"=",
"False",
")",
"# THere is an interesting serialization anomaly that happens if we do",
"# not use the columns in `inter_data_op_builder` from here (e.g. if we",
"# pass them in). Passing them in can cause problems, so we will just",
"# use them from here.",
"self_cols",
"=",
"self",
".",
"columns",
"other_cols",
"=",
"other",
".",
"columns",
"def",
"inter_data_op_builder",
"(",
"left",
",",
"right",
",",
"func",
")",
":",
"left",
".",
"columns",
"=",
"self_cols",
"right",
".",
"columns",
"=",
"other_cols",
"# We reset here to make sure that the internal indexes match. We aligned",
"# them in the previous step, so this step is to prevent mismatches.",
"left",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"left",
".",
"index",
")",
")",
"right",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"right",
".",
"index",
")",
")",
"result",
"=",
"func",
"(",
"left",
",",
"right",
")",
"result",
".",
"columns",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"result",
".",
"columns",
")",
")",
"return",
"result",
"new_data",
"=",
"reindexed_self",
".",
"inter_data_operation",
"(",
"1",
",",
"lambda",
"l",
",",
"r",
":",
"inter_data_op_builder",
"(",
"l",
",",
"r",
",",
"func",
")",
",",
"reindexed_other",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"joined_index",
",",
"new_columns",
")"
] |
Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to make (e.g. right, outer).
Returns:
New DataManager with new data and index.
|
[
"Inter",
"-",
"data",
"operations",
"(",
"e",
".",
"g",
".",
"add",
"sub",
")",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L513-L552
|
train
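A pure-pandas analogue of the alignment this method reproduces blockwise: binary operations first align both operands on an outer join of their labels, so non-overlapping labels yield NaN (data is illustrative):

import pandas as pd

a = pd.DataFrame({"x": [1, 2]}, index=[0, 1])
b = pd.DataFrame({"x": [10, 20]}, index=[1, 2])

# Rows 0 and 2 appear in only one operand, so they become NaN.
print(a.add(b))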
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._inter_df_op_handler
|
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index.
"""
axis = kwargs.get("axis", 0)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(other, type(self)):
return self._inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self._scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
|
python
|
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index.
"""
axis = kwargs.get("axis", 0)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(other, type(self)):
return self._inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self._scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
|
[
"def",
"_inter_df_op_handler",
"(",
"self",
",",
"func",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"axis",
"=",
"pandas",
".",
"DataFrame",
"(",
")",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"axis",
"is",
"not",
"None",
"else",
"0",
"if",
"isinstance",
"(",
"other",
",",
"type",
"(",
"self",
")",
")",
":",
"return",
"self",
".",
"_inter_manager_operations",
"(",
"other",
",",
"\"outer\"",
",",
"lambda",
"x",
",",
"y",
":",
"func",
"(",
"x",
",",
"y",
",",
"*",
"*",
"kwargs",
")",
")",
"else",
":",
"return",
"self",
".",
"_scalar_operations",
"(",
"axis",
",",
"other",
",",
"lambda",
"df",
":",
"func",
"(",
"df",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index.
|
[
"Helper",
"method",
"for",
"inter",
"-",
"manager",
"and",
"scalar",
"operations",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L554-L573
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.binary_op
|
def binary_op(self, op, other, **kwargs):
"""Perform an operation between two objects.
Note: The list of operations is as follows:
- add
- eq
- floordiv
- ge
- gt
- le
- lt
- mod
- mul
- ne
- pow
- rfloordiv
- rmod
- rpow
- rsub
- rtruediv
- sub
- truediv
- __and__
- __or__
- __xor__
Args:
op: The operation. See list of operations above
other: The object to operate against.
Returns:
A new QueryCompiler object.
"""
func = getattr(pandas.DataFrame, op)
return self._inter_df_op_handler(func, other, **kwargs)
|
python
|
def binary_op(self, op, other, **kwargs):
"""Perform an operation between two objects.
Note: The list of operations is as follows:
- add
- eq
- floordiv
- ge
- gt
- le
- lt
- mod
- mul
- ne
- pow
- rfloordiv
- rmod
- rpow
- rsub
- rtruediv
- sub
- truediv
- __and__
- __or__
- __xor__
Args:
op: The operation. See list of operations above
other: The object to operate against.
Returns:
A new QueryCompiler object.
"""
func = getattr(pandas.DataFrame, op)
return self._inter_df_op_handler(func, other, **kwargs)
|
[
"def",
"binary_op",
"(",
"self",
",",
"op",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"func",
"=",
"getattr",
"(",
"pandas",
".",
"DataFrame",
",",
"op",
")",
"return",
"self",
".",
"_inter_df_op_handler",
"(",
"func",
",",
"other",
",",
"*",
"*",
"kwargs",
")"
] |
Perform an operation between two objects.
Note: The list of operations is as follows:
- add
- eq
- floordiv
- ge
- gt
- le
- lt
- mod
- mul
- ne
- pow
- rfloordiv
- rmod
- rpow
- rsub
- rtruediv
- sub
- truediv
- __and__
- __or__
- __xor__
Args:
op: The operation. See list of operations above
other: The object to operate against.
Returns:
A new QueryCompiler object.
|
[
"Perform",
"an",
"operation",
"between",
"two",
"objects",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L575-L608
|
train
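Because binary_op resolves the operation by name on pandas.DataFrame, the per-partition call is equivalent to the ordinary method call; a small sketch with illustrative data:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

via_getattr = getattr(pd.DataFrame, "add")(df, 10)  # what binary_op does
direct = df.add(10)
assert via_getattr.equals(direct)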
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.update
|
def update(self, other, **kwargs):
"""Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
|
python
|
def update(self, other, **kwargs):
"""Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
|
[
"def",
"update",
"(",
"self",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"other",
",",
"type",
"(",
"self",
")",
")",
",",
"\"Must have the same DataManager subclass to perform this operation\"",
"def",
"update_builder",
"(",
"df",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"# This is because of a requirement in Arrow",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"df",
".",
"update",
"(",
"other",
",",
"*",
"*",
"kwargs",
")",
"return",
"df",
"return",
"self",
".",
"_inter_df_op_handler",
"(",
"update_builder",
",",
"other",
",",
"*",
"*",
"kwargs",
")"
] |
Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New DataManager with updated data and index.
|
[
"Uses",
"other",
"manager",
"to",
"update",
"corresponding",
"values",
"in",
"this",
"manager",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L620-L639
|
train
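pandas.DataFrame.update mutates in place and only overwrites with non-NA values, which is why the builder above copies first; an illustrative sketch:

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
other = pd.DataFrame({"a": [None, 20.0, None]})

df = df.copy()   # mirrors the copy made for the Arrow requirement
df.update(other)
print(df)        # only row 1 is overwritten; NaNs in `other` are ignored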
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.where
|
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
other: Replacement values to use where cond does not hold.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same DataManager subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
first_pass = cond._inter_manager_operations(
other, "left", where_builder_first_pass
)
final_pass = self._inter_manager_operations(
first_pass, "left", where_builder_second_pass
)
return self.__constructor__(final_pass.data, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = pandas.RangeIndex(len(other.index))
def where_builder_series(df, cond):
if axis == 0:
df.index = pandas.RangeIndex(len(df.index))
cond.index = pandas.RangeIndex(len(cond.index))
else:
df.columns = pandas.RangeIndex(len(df.columns))
cond.columns = pandas.RangeIndex(len(cond.columns))
return df.where(cond, other, **kwargs)
reindexed_self, reindexed_cond, a = self.copartition(
axis, cond, "left", False
)
# Unwrap from list given by `copartition`
reindexed_cond = reindexed_cond[0]
new_data = reindexed_self.inter_data_operation(
axis, lambda l, r: where_builder_series(l, r), reindexed_cond
)
return self.__constructor__(new_data, self.index, self.columns)
|
python
|
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
other: Replacement values to use where cond does not hold.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same DataManager subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
first_pass = cond._inter_manager_operations(
other, "left", where_builder_first_pass
)
final_pass = self._inter_manager_operations(
first_pass, "left", where_builder_second_pass
)
return self.__constructor__(final_pass.data, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = pandas.RangeIndex(len(other.index))
def where_builder_series(df, cond):
if axis == 0:
df.index = pandas.RangeIndex(len(df.index))
cond.index = pandas.RangeIndex(len(cond.index))
else:
df.columns = pandas.RangeIndex(len(df.columns))
cond.columns = pandas.RangeIndex(len(cond.columns))
return df.where(cond, other, **kwargs)
reindexed_self, reindexed_cond, a = self.copartition(
axis, cond, "left", False
)
# Unwrap from list given by `copartition`
reindexed_cond = reindexed_cond[0]
new_data = reindexed_self.inter_data_operation(
axis, lambda l, r: where_builder_series(l, r), reindexed_cond
)
return self.__constructor__(new_data, self.index, self.columns)
|
[
"def",
"where",
"(",
"self",
",",
"cond",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"cond",
",",
"type",
"(",
"self",
")",
")",
",",
"\"Must have the same DataManager subclass to perform this operation\"",
"if",
"isinstance",
"(",
"other",
",",
"type",
"(",
"self",
")",
")",
":",
"# Note: Currently we are doing this with two maps across the entire",
"# data. This can be done with a single map, but it will take a",
"# modification in the `BlockPartition` class.",
"# If this were in one pass it would be ~2x faster.",
"# TODO (devin-petersohn) rewrite this to take one pass.",
"def",
"where_builder_first_pass",
"(",
"cond",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"cond",
".",
"where",
"(",
"cond",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
"def",
"where_builder_second_pass",
"(",
"df",
",",
"new_other",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"df",
".",
"where",
"(",
"new_other",
".",
"eq",
"(",
"True",
")",
",",
"new_other",
",",
"*",
"*",
"kwargs",
")",
"first_pass",
"=",
"cond",
".",
"_inter_manager_operations",
"(",
"other",
",",
"\"left\"",
",",
"where_builder_first_pass",
")",
"final_pass",
"=",
"self",
".",
"_inter_manager_operations",
"(",
"first_pass",
",",
"\"left\"",
",",
"where_builder_second_pass",
")",
"return",
"self",
".",
"__constructor__",
"(",
"final_pass",
".",
"data",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
")",
"else",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"# Rather than serializing and passing in the index/columns, we will",
"# just change this index to match the internal index.",
"if",
"isinstance",
"(",
"other",
",",
"pandas",
".",
"Series",
")",
":",
"other",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"other",
".",
"index",
")",
")",
"def",
"where_builder_series",
"(",
"df",
",",
"cond",
")",
":",
"if",
"axis",
"==",
"0",
":",
"df",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
".",
"index",
")",
")",
"cond",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"cond",
".",
"index",
")",
")",
"else",
":",
"df",
".",
"columns",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
".",
"columns",
")",
")",
"cond",
".",
"columns",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"cond",
".",
"columns",
")",
")",
"return",
"df",
".",
"where",
"(",
"cond",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
"reindexed_self",
",",
"reindexed_cond",
",",
"a",
"=",
"self",
".",
"copartition",
"(",
"axis",
",",
"cond",
",",
"\"left\"",
",",
"False",
")",
"# Unwrap from list given by `copartition`",
"reindexed_cond",
"=",
"reindexed_cond",
"[",
"0",
"]",
"new_data",
"=",
"reindexed_self",
".",
"inter_data_operation",
"(",
"axis",
",",
"lambda",
"l",
",",
"r",
":",
"where_builder_series",
"(",
"l",
",",
"r",
")",
",",
"reindexed_cond",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
")"
] |
Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
other: Replacement values to use where cond does not hold.
Returns:
New DataManager with updated data and index.
|
[
"Gets",
"values",
"from",
"this",
"manager",
"where",
"cond",
"is",
"true",
"else",
"from",
"other",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L641-L697
|
train
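The user-facing contract being implemented: keep values where the condition holds, substitute `other` elsewhere. A pure-pandas sketch with illustrative data:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4]})
cond = df % 2 == 0  # boolean frame with the same shape

print(df.where(cond, -1))  # odd values are replaced by -1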
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._scalar_operations
|
def _scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
A new QueryCompiler with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_index = self.index if axis == 0 else self.columns
def list_like_op(df):
if axis == 0:
df.index = new_index
else:
df.columns = new_index
return func(df)
new_data = self._map_across_full_axis(
axis, self._prepare_method(list_like_op)
)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self._map_partitions(self._prepare_method(func))
|
python
|
def _scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
A new QueryCompiler with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_index = self.index if axis == 0 else self.columns
def list_like_op(df):
if axis == 0:
df.index = new_index
else:
df.columns = new_index
return func(df)
new_data = self._map_across_full_axis(
axis, self._prepare_method(list_like_op)
)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self._map_partitions(self._prepare_method(func))
|
[
"def",
"_scalar_operations",
"(",
"self",
",",
"axis",
",",
"scalar",
",",
"func",
")",
":",
"if",
"isinstance",
"(",
"scalar",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
",",
"pandas",
".",
"Series",
")",
")",
":",
"new_index",
"=",
"self",
".",
"index",
"if",
"axis",
"==",
"0",
"else",
"self",
".",
"columns",
"def",
"list_like_op",
"(",
"df",
")",
":",
"if",
"axis",
"==",
"0",
":",
"df",
".",
"index",
"=",
"new_index",
"else",
":",
"df",
".",
"columns",
"=",
"new_index",
"return",
"func",
"(",
"df",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"self",
".",
"_prepare_method",
"(",
"list_like_op",
")",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
")",
"else",
":",
"return",
"self",
".",
"_map_partitions",
"(",
"self",
".",
"_prepare_method",
"(",
"func",
")",
")"
] |
Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
A new QueryCompiler with updated data and new index.
|
[
"Handler",
"for",
"mapping",
"scalar",
"operations",
"across",
"a",
"Manager",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L702-L728
|
train
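The distinction drawn in the code above: a true scalar can be mapped over partitions independently, while a list-like must see a whole axis at once so each element lands on the right label. An illustrative pandas sketch:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

print(df + 10)                     # scalar: safe per-partition
print(df.add([100, 200], axis=1))  # list-like: one addend per column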
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.reindex
|
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
# To reindex, we need a function that will be shipped to each of the
# partitions.
def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
|
python
|
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
# To reindex, we need a function that will be shipped to each of the
# partitions.
def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
|
[
"def",
"reindex",
"(",
"self",
",",
"axis",
",",
"labels",
",",
"*",
"*",
"kwargs",
")",
":",
"# To reindex, we need a function that will be shipped to each of the",
"# partitions.",
"def",
"reindex_builer",
"(",
"df",
",",
"axis",
",",
"old_labels",
",",
"new_labels",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"axis",
":",
"while",
"len",
"(",
"df",
".",
"columns",
")",
"<",
"len",
"(",
"old_labels",
")",
":",
"df",
"[",
"len",
"(",
"df",
".",
"columns",
")",
"]",
"=",
"np",
".",
"nan",
"df",
".",
"columns",
"=",
"old_labels",
"new_df",
"=",
"df",
".",
"reindex",
"(",
"columns",
"=",
"new_labels",
",",
"*",
"*",
"kwargs",
")",
"# reset the internal columns back to a RangeIndex",
"new_df",
".",
"columns",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"new_df",
".",
"columns",
")",
")",
"return",
"new_df",
"else",
":",
"while",
"len",
"(",
"df",
".",
"index",
")",
"<",
"len",
"(",
"old_labels",
")",
":",
"df",
".",
"loc",
"[",
"len",
"(",
"df",
".",
"index",
")",
"]",
"=",
"np",
".",
"nan",
"df",
".",
"index",
"=",
"old_labels",
"new_df",
"=",
"df",
".",
"reindex",
"(",
"index",
"=",
"new_labels",
",",
"*",
"*",
"kwargs",
")",
"# reset the internal index back to a RangeIndex",
"new_df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"return",
"new_df",
"old_labels",
"=",
"self",
".",
"columns",
"if",
"axis",
"else",
"self",
".",
"index",
"new_index",
"=",
"self",
".",
"index",
"if",
"axis",
"else",
"labels",
"new_columns",
"=",
"labels",
"if",
"axis",
"else",
"self",
".",
"columns",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"lambda",
"df",
":",
"reindex_builer",
"(",
"df",
",",
"axis",
",",
"old_labels",
",",
"labels",
",",
"*",
"*",
"kwargs",
")",
")",
"# The reindex can just be mapped over the axis we are modifying. This",
"# is for simplicity in implementation. We specify num_splits here",
"# because if we are repartitioning we should (in the future).",
"# Additionally this operation is often followed by an operation that",
"# assumes identical partitioning. Internally, we *may* change the",
"# partitioning during a map across a full axis.",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
")"
] |
Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
|
[
"Fits",
"a",
"new",
"index",
"for",
"this",
"Manger",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L733-L777
|
train
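At the frame level, reindex conforms the data to new labels, inserting NaN for labels that were not present before; a minimal pandas sketch with illustrative labels:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])

print(df.reindex(index=["x", "y", "z"]))  # "z" row is all NaN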
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.reset_index
|
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
new_index = pandas.RangeIndex(len(self.index))
if not drop:
if isinstance(self.index, pandas.MultiIndex):
# TODO (devin-petersohn) ensure partitioning is properly aligned
new_column_names = pandas.Index(self.index.names)
new_columns = new_column_names.append(self.columns)
index_data = pandas.DataFrame(list(zip(*self.index))).T
result = self.data.from_pandas(index_data).concat(1, self.data)
return self.__constructor__(result, new_index, new_columns)
else:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_columns = self.columns.insert(0, new_column_name)
result = self.insert(0, new_column_name, self.index)
return self.__constructor__(result.data, new_index, new_columns)
else:
# The copies here are to ensure that we do not give references to
# this object for the purposes of updates.
return self.__constructor__(
self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
)
|
python
|
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
new_index = pandas.RangeIndex(len(self.index))
if not drop:
if isinstance(self.index, pandas.MultiIndex):
# TODO (devin-petersohn) ensure partitioning is properly aligned
new_column_names = pandas.Index(self.index.names)
new_columns = new_column_names.append(self.columns)
index_data = pandas.DataFrame(list(zip(*self.index))).T
result = self.data.from_pandas(index_data).concat(1, self.data)
return self.__constructor__(result, new_index, new_columns)
else:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_columns = self.columns.insert(0, new_column_name)
result = self.insert(0, new_column_name, self.index)
return self.__constructor__(result.data, new_index, new_columns)
else:
# The copies here are to ensure that we do not give references to
# this object for the purposes of updates.
return self.__constructor__(
self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
)
|
[
"def",
"reset_index",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"drop",
"=",
"kwargs",
".",
"get",
"(",
"\"drop\"",
",",
"False",
")",
"new_index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"self",
".",
"index",
")",
")",
"if",
"not",
"drop",
":",
"if",
"isinstance",
"(",
"self",
".",
"index",
",",
"pandas",
".",
"MultiIndex",
")",
":",
"# TODO (devin-petersohn) ensure partitioning is properly aligned",
"new_column_names",
"=",
"pandas",
".",
"Index",
"(",
"self",
".",
"index",
".",
"names",
")",
"new_columns",
"=",
"new_column_names",
".",
"append",
"(",
"self",
".",
"columns",
")",
"index_data",
"=",
"pandas",
".",
"DataFrame",
"(",
"list",
"(",
"zip",
"(",
"*",
"self",
".",
"index",
")",
")",
")",
".",
"T",
"result",
"=",
"self",
".",
"data",
".",
"from_pandas",
"(",
"index_data",
")",
".",
"concat",
"(",
"1",
",",
"self",
".",
"data",
")",
"return",
"self",
".",
"__constructor__",
"(",
"result",
",",
"new_index",
",",
"new_columns",
")",
"else",
":",
"new_column_name",
"=",
"(",
"self",
".",
"index",
".",
"name",
"if",
"self",
".",
"index",
".",
"name",
"is",
"not",
"None",
"else",
"\"index\"",
"if",
"\"index\"",
"not",
"in",
"self",
".",
"columns",
"else",
"\"level_0\"",
")",
"new_columns",
"=",
"self",
".",
"columns",
".",
"insert",
"(",
"0",
",",
"new_column_name",
")",
"result",
"=",
"self",
".",
"insert",
"(",
"0",
",",
"new_column_name",
",",
"self",
".",
"index",
")",
"return",
"self",
".",
"__constructor__",
"(",
"result",
".",
"data",
",",
"new_index",
",",
"new_columns",
")",
"else",
":",
"# The copies here are to ensure that we do not give references to",
"# this object for the purposes of updates.",
"return",
"self",
".",
"__constructor__",
"(",
"self",
".",
"data",
".",
"copy",
"(",
")",
",",
"new_index",
",",
"self",
".",
"columns",
".",
"copy",
"(",
")",
",",
"self",
".",
"_dtype_cache",
")"
] |
Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
|
[
"Removes",
"all",
"levels",
"from",
"index",
"and",
"sets",
"a",
"default",
"level_0",
"index",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L779-L811
|
train
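An illustrative pandas sketch of the two branches above: with drop=False the old index becomes a leading column (named via the same fallback logic), and with drop=True it is discarded:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=pd.Index(["x", "y"], name="letter"))

print(df.reset_index())           # gains a "letter" column
print(df.reset_index(drop=True))  # old index is thrown away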
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.transpose
|
def transpose(self, *args, **kwargs):
"""Transposes this DataManager.
Returns:
Transposed new DataManager.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the data.
new_manager = self.__constructor__(new_data, self.columns, self.index)
# It is possible that this is already transposed
new_manager._is_transposed = self._is_transposed ^ 1
return new_manager
|
python
|
def transpose(self, *args, **kwargs):
"""Transposes this DataManager.
Returns:
Transposed new DataManager.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the data.
new_manager = self.__constructor__(new_data, self.columns, self.index)
# It is possible that this is already transposed
new_manager._is_transposed = self._is_transposed ^ 1
return new_manager
|
[
"def",
"transpose",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"new_data",
"=",
"self",
".",
"data",
".",
"transpose",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Switch the index and columns and transpose the",
"new_manager",
"=",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"columns",
",",
"self",
".",
"index",
")",
"# It is possible that this is already transposed",
"new_manager",
".",
"_is_transposed",
"=",
"self",
".",
"_is_transposed",
"^",
"1",
"return",
"new_manager"
] |
Transposes this DataManager.
Returns:
Transposed new DataManager.
|
[
"Transposes",
"this",
"DataManager",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L828-L839
|
train
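The `_is_transposed ^ 1` trick makes transposition lazy: the flag flips on each call, so two transposes cancel without touching the data. A tiny sketch of the toggle:

is_transposed = 0
is_transposed ^= 1  # after .T    -> 1
is_transposed ^= 1  # after .T.T  -> 0
assert is_transposed == 0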
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._full_reduce
|
def _full_reduce(self, axis, map_func, reduce_func=None):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If None,
map_func is used for the reduce step as well.
Return:
A new QueryCompiler object containing the results from map_func and
reduce_func.
"""
if reduce_func is None:
reduce_func = map_func
mapped_parts = self.data.map_across_blocks(map_func)
full_frame = mapped_parts.map_across_full_axis(axis, reduce_func)
if axis == 0:
columns = self.columns
return self.__constructor__(
full_frame, index=["__reduced__"], columns=columns
)
else:
index = self.index
return self.__constructor__(
full_frame, index=index, columns=["__reduced__"]
)
|
python
|
def _full_reduce(self, axis, map_func, reduce_func=None):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If None,
map_func is used for the reduce step as well.
Return:
A new QueryCompiler object containing the results from map_func and
reduce_func.
"""
if reduce_func is None:
reduce_func = map_func
mapped_parts = self.data.map_across_blocks(map_func)
full_frame = mapped_parts.map_across_full_axis(axis, reduce_func)
if axis == 0:
columns = self.columns
return self.__constructor__(
full_frame, index=["__reduced__"], columns=columns
)
else:
index = self.index
return self.__constructor__(
full_frame, index=index, columns=["__reduced__"]
)
|
[
"def",
"_full_reduce",
"(",
"self",
",",
"axis",
",",
"map_func",
",",
"reduce_func",
"=",
"None",
")",
":",
"if",
"reduce_func",
"is",
"None",
":",
"reduce_func",
"=",
"map_func",
"mapped_parts",
"=",
"self",
".",
"data",
".",
"map_across_blocks",
"(",
"map_func",
")",
"full_frame",
"=",
"mapped_parts",
".",
"map_across_full_axis",
"(",
"axis",
",",
"reduce_func",
")",
"if",
"axis",
"==",
"0",
":",
"columns",
"=",
"self",
".",
"columns",
"return",
"self",
".",
"__constructor__",
"(",
"full_frame",
",",
"index",
"=",
"[",
"\"__reduced__\"",
"]",
",",
"columns",
"=",
"columns",
")",
"else",
":",
"index",
"=",
"self",
".",
"index",
"return",
"self",
".",
"__constructor__",
"(",
"full_frame",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"[",
"\"__reduced__\"",
"]",
")"
] |
Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If None,
map_func is used for the reduce step as well.
Return:
A new QueryCompiler object containing the results from map_func and
reduce_func.
|
[
"Apply",
"function",
"that",
"will",
"reduce",
"the",
"data",
"to",
"a",
"Pandas",
"Series",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L847-L874
|
train
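The map/reduce split relies on per-block partial results composing associatively; count/sum (the pairing used by count() in the next record) is the canonical example. An illustrative sketch over two hand-made blocks:

import pandas as pd

block1 = pd.DataFrame({"a": [1, None]})
block2 = pd.DataFrame({"a": [3, 4]})

partials = [b.count() for b in (block1, block2)]  # map step, per block
total = sum(partials)                             # reduce step across blocks
assert int(total["a"]) == 3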
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.count
|
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().count(**kwargs)
axis = kwargs.get("axis", 0)
map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs)
reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs)
return self._full_reduce(axis, map_func, reduce_func)
|
python
|
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().count(**kwargs)
axis = kwargs.get("axis", 0)
map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs)
reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs)
return self._full_reduce(axis, map_func, reduce_func)
|
[
"def",
"count",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"count",
"(",
"*",
"*",
"kwargs",
")",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"map_func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"pandas",
".",
"DataFrame",
".",
"count",
",",
"*",
"*",
"kwargs",
")",
"reduce_func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"pandas",
".",
"DataFrame",
".",
"sum",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_full_reduce",
"(",
"axis",
",",
"map_func",
",",
"reduce_func",
")"
] |
Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row.
|
[
"Counts",
"the",
"number",
"of",
"non",
"-",
"NaN",
"objects",
"for",
"each",
"column",
"or",
"row",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L893-L906
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.mean
|
def mean(self, **kwargs):
"""Returns the mean for each numerical column or row.
Return:
A new QueryCompiler object containing the mean from each numerical column or
row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().mean(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
sums = self.sum(**kwargs)
counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None))
if sums._is_transposed and counts._is_transposed:
sums = sums.transpose()
counts = counts.transpose()
result = sums.binary_op("truediv", counts, axis=axis)
return result.transpose() if axis == 0 else result
|
python
|
def mean(self, **kwargs):
"""Returns the mean for each numerical column or row.
Return:
A new QueryCompiler object containing the mean from each numerical column or
row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().mean(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
sums = self.sum(**kwargs)
counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None))
if sums._is_transposed and counts._is_transposed:
sums = sums.transpose()
counts = counts.transpose()
result = sums.binary_op("truediv", counts, axis=axis)
return result.transpose() if axis == 0 else result
|
[
"def",
"mean",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"mean",
"(",
"*",
"*",
"kwargs",
")",
"# Pandas default is 0 (though not mentioned in docs)",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"sums",
"=",
"self",
".",
"sum",
"(",
"*",
"*",
"kwargs",
")",
"counts",
"=",
"self",
".",
"count",
"(",
"axis",
"=",
"axis",
",",
"numeric_only",
"=",
"kwargs",
".",
"get",
"(",
"\"numeric_only\"",
",",
"None",
")",
")",
"if",
"sums",
".",
"_is_transposed",
"and",
"counts",
".",
"_is_transposed",
":",
"sums",
"=",
"sums",
".",
"transpose",
"(",
")",
"counts",
"=",
"counts",
".",
"transpose",
"(",
")",
"result",
"=",
"sums",
".",
"binary_op",
"(",
"\"truediv\"",
",",
"counts",
",",
"axis",
"=",
"axis",
")",
"return",
"result",
".",
"transpose",
"(",
")",
"if",
"axis",
"==",
"0",
"else",
"result"
] |
Returns the mean for each numerical column or row.
Return:
A new QueryCompiler object containing the mean from each numerical column or
row.
|
[
"Returns",
"the",
"mean",
"for",
"each",
"numerical",
"column",
"or",
"row",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L920-L938
|
train
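mean is composed from the two reductions defined above, sum divided by count, so NaNs shrink the denominator as well as the numerator; an illustrative check:

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, None, 4.0]})

print(df.sum() / df.count())  # 7.0 / 3
print(df.mean())              # same value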
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.min
|
def min(self, **kwargs):
"""Returns the minimum from each column or row.
Return:
A new QueryCompiler object with the minimum value from each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().min(**kwargs)
mapreduce_func = self._build_mapreduce_func(pandas.DataFrame.min, **kwargs)
return self._full_reduce(kwargs.get("axis", 0), mapreduce_func)
|
python
|
def min(self, **kwargs):
"""Returns the minimum from each column or row.
Return:
A new QueryCompiler object with the minimum value from each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().min(**kwargs)
mapreduce_func = self._build_mapreduce_func(pandas.DataFrame.min, **kwargs)
return self._full_reduce(kwargs.get("axis", 0), mapreduce_func)
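min is a pure map-reduce: the minimum of per-partition minima equals the global minimum, which is why a single _full_reduce pass suffices. A small sketch with made-up data:

import numpy as np

values = np.array([7, 3, 9, 1, 5, 8])
parts = np.array_split(values, 3)      # simulate row partitions
partial = [p.min() for p in parts]     # map: reduce each partition locally
assert min(partial) == values.min()    # reduce: combine the partial minima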
|
[
"def",
"min",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"min",
"(",
"*",
"*",
"kwargs",
")",
"mapreduce_func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"pandas",
".",
"DataFrame",
".",
"min",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_full_reduce",
"(",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
",",
"mapreduce_func",
")"
] |
Returns the minimum from each column or row.
Return:
A new QueryCompiler object with the minimum value from each column or row.
|
[
"Returns",
"the",
"minimum",
"from",
"each",
"column",
"or",
"row",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L940-L950
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._process_sum_prod
|
def _process_sum_prod(self, func, **kwargs):
"""Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
Return:
A new QueryCompiler object with sum or prod of the object.
"""
axis = kwargs.get("axis", 0)
min_count = kwargs.get("min_count", 0)
def sum_prod_builder(df, **kwargs):
return func(df, **kwargs)
if min_count <= 1:
return self._full_reduce(axis, sum_prod_builder)
else:
return self._full_axis_reduce(axis, sum_prod_builder)
|
python
|
def _process_sum_prod(self, func, **kwargs):
"""Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
Return:
A new QueryCompiler object with sum or prod of the object.
"""
axis = kwargs.get("axis", 0)
min_count = kwargs.get("min_count", 0)
def sum_prod_builder(df, **kwargs):
return func(df, **kwargs)
if min_count <= 1:
return self._full_reduce(axis, sum_prod_builder)
else:
return self._full_axis_reduce(axis, sum_prod_builder)
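The min_count branch exists because a per-partition sum cannot tell whether the whole axis has enough valid values; that decision needs a full-axis pass. A hedged sketch of the underlying pandas semantics (made-up data):

import pandas as pd

s = pd.Series([1.0, None, None])
print(s.sum(min_count=1))  # 1.0 -- at least one valid value on the axis
print(s.sum(min_count=2))  # nan -- fewer than two valid values exist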
|
[
"def",
"_process_sum_prod",
"(",
"self",
",",
"func",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"min_count",
"=",
"kwargs",
".",
"get",
"(",
"\"min_count\"",
",",
"0",
")",
"def",
"sum_prod_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"func",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
"if",
"min_count",
"<=",
"1",
":",
"return",
"self",
".",
"_full_reduce",
"(",
"axis",
",",
"sum_prod_builder",
")",
"else",
":",
"return",
"self",
".",
"_full_axis_reduce",
"(",
"axis",
",",
"sum_prod_builder",
")"
] |
Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
Return:
A new QueryCompiler object with sum or prod of the object.
|
[
"Calculates",
"the",
"sum",
"or",
"product",
"of",
"the",
"DataFrame",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L952-L970
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.prod
|
def prod(self, **kwargs):
"""Returns the product of each numerical column or row.
Return:
A new QueryCompiler object with the product of each numerical column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().prod(**kwargs)
return self._process_sum_prod(
self._build_mapreduce_func(pandas.DataFrame.prod, **kwargs), **kwargs
)
|
python
|
def prod(self, **kwargs):
"""Returns the product of each numerical column or row.
Return:
A new QueryCompiler object with the product of each numerical column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().prod(**kwargs)
return self._process_sum_prod(
self._build_mapreduce_func(pandas.DataFrame.prod, **kwargs), **kwargs
)
|
[
"def",
"prod",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"prod",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_process_sum_prod",
"(",
"self",
".",
"_build_mapreduce_func",
"(",
"pandas",
".",
"DataFrame",
".",
"prod",
",",
"*",
"*",
"kwargs",
")",
",",
"*",
"*",
"kwargs",
")"
] |
Returns the product of each numerical column or row.
Return:
A new QueryCompiler object with the product of each numerical column or row.
|
[
"Returns",
"the",
"product",
"of",
"each",
"numerical",
"column",
"or",
"row",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L972-L983
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._process_all_any
|
def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis = 0 if axis is None else axis
kwargs["axis"] = axis
builder_func = self._build_mapreduce_func(func, **kwargs)
return self._full_reduce(axis, builder_func)
|
python
|
def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis = 0 if axis is None else axis
kwargs["axis"] = axis
builder_func = self._build_mapreduce_func(func, **kwargs)
return self._full_reduce(axis, builder_func)
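all and any distribute over partitions the same way min does, so a plain _full_reduce is enough here. A quick sketch with made-up data:

import numpy as np

flags = np.array([True, True, False, True])
parts = np.array_split(flags, 2)           # simulate partitions
assert all(p.all() for p in parts) == flags.all()
assert any(p.any() for p in parts) == flags.any()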
|
[
"def",
"_process_all_any",
"(",
"self",
",",
"func",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"axis",
"=",
"0",
"if",
"axis",
"is",
"None",
"else",
"axis",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"axis",
"builder_func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"func",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_full_reduce",
"(",
"axis",
",",
"builder_func",
")"
] |
Calculates if any or all of the values are true.
Return:
A new QueryCompiler object containing boolean values or boolean.
|
[
"Calculates",
"if",
"any",
"or",
"all",
"the",
"values",
"are",
"true",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L998-L1008
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.all
|
def all(self, **kwargs):
"""Returns whether all the elements are true, potentially over an axis.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
if self._is_transposed:
# Pandas ignores bool_only on axis=1
kwargs["bool_only"] = False
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().all(**kwargs)
return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
|
python
|
def all(self, **kwargs):
"""Returns whether all the elements are true, potentially over an axis.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
if self._is_transposed:
# Pandas ignores bool_only on axis=1
kwargs["bool_only"] = False
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().all(**kwargs)
return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
|
[
"def",
"all",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"# Pandas ignores on axis=1",
"kwargs",
"[",
"\"bool_only\"",
"]",
"=",
"False",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"all",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_process_all_any",
"(",
"lambda",
"df",
",",
"*",
"*",
"kwargs",
":",
"df",
".",
"all",
"(",
"*",
"*",
"kwargs",
")",
",",
"*",
"*",
"kwargs",
")"
] |
Returns whether all the elements are true, potentially over an axis.
Return:
A new QueryCompiler object containing boolean values or boolean.
|
[
"Returns",
"whether",
"all",
"the",
"elements",
"are",
"true",
"potentially",
"over",
"an",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1010-L1021
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.astype
|
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
# Group indices to update by dtype for less map operations
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
# Only add dtype if different
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
# Update the new dtype series to the proper pandas dtype
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
# Update partitions for each dtype that is updated
new_data = self.data
for dtype in dtype_indices.keys():
def astype(df, internal_indices=[]):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
|
python
|
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
# Group indices to update by dtype for less map operations
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
# Only add dtype if different
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
# Update the new dtype series to the proper pandas dtype
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
# Update partitions for each dtype that is updated
new_data = self.data
for dtype in dtype_indices.keys():
def astype(df, internal_indices=[]):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
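The grouping step above batches the requested columns by target dtype so each distinct dtype costs only one pass over the partitions. A minimal plain-pandas sketch of the same grouping (hypothetical column names):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5.0, 6.0]})
col_dtypes = {"a": "float64", "b": "float64"}
by_dtype = {}
for col, dt in col_dtypes.items():
    by_dtype.setdefault(dt, []).append(col)  # group columns by target dtype
for dt, cols in by_dtype.items():
    df[cols] = df[cols].astype(dt)           # one astype call per dtype
print(df.dtypes)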
|
[
"def",
"astype",
"(",
"self",
",",
"col_dtypes",
",",
"*",
"*",
"kwargs",
")",
":",
"# Group indices to update by dtype for less map operations",
"dtype_indices",
"=",
"{",
"}",
"columns",
"=",
"col_dtypes",
".",
"keys",
"(",
")",
"numeric_indices",
"=",
"list",
"(",
"self",
".",
"columns",
".",
"get_indexer_for",
"(",
"columns",
")",
")",
"# Create Series for the updated dtypes",
"new_dtypes",
"=",
"self",
".",
"dtypes",
".",
"copy",
"(",
")",
"for",
"i",
",",
"column",
"in",
"enumerate",
"(",
"columns",
")",
":",
"dtype",
"=",
"col_dtypes",
"[",
"column",
"]",
"if",
"(",
"not",
"isinstance",
"(",
"dtype",
",",
"type",
"(",
"self",
".",
"dtypes",
"[",
"column",
"]",
")",
")",
"or",
"dtype",
"!=",
"self",
".",
"dtypes",
"[",
"column",
"]",
")",
":",
"# Only add dtype only if different",
"if",
"dtype",
"in",
"dtype_indices",
".",
"keys",
"(",
")",
":",
"dtype_indices",
"[",
"dtype",
"]",
".",
"append",
"(",
"numeric_indices",
"[",
"i",
"]",
")",
"else",
":",
"dtype_indices",
"[",
"dtype",
"]",
"=",
"[",
"numeric_indices",
"[",
"i",
"]",
"]",
"# Update the new dtype series to the proper pandas dtype",
"try",
":",
"new_dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"except",
"TypeError",
":",
"new_dtype",
"=",
"dtype",
"if",
"dtype",
"!=",
"np",
".",
"int32",
"and",
"new_dtype",
"==",
"np",
".",
"int32",
":",
"new_dtype",
"=",
"np",
".",
"dtype",
"(",
"\"int64\"",
")",
"elif",
"dtype",
"!=",
"np",
".",
"float32",
"and",
"new_dtype",
"==",
"np",
".",
"float32",
":",
"new_dtype",
"=",
"np",
".",
"dtype",
"(",
"\"float64\"",
")",
"new_dtypes",
"[",
"column",
"]",
"=",
"new_dtype",
"# Update partitions for each dtype that is updated",
"new_data",
"=",
"self",
".",
"data",
"for",
"dtype",
"in",
"dtype_indices",
".",
"keys",
"(",
")",
":",
"def",
"astype",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"block_dtypes",
"=",
"{",
"}",
"for",
"ind",
"in",
"internal_indices",
":",
"block_dtypes",
"[",
"df",
".",
"columns",
"[",
"ind",
"]",
"]",
"=",
"dtype",
"return",
"df",
".",
"astype",
"(",
"block_dtypes",
")",
"new_data",
"=",
"new_data",
".",
"apply_func_to_select_indices",
"(",
"0",
",",
"astype",
",",
"dtype_indices",
"[",
"dtype",
"]",
",",
"keep_remaining",
"=",
"True",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
",",
"new_dtypes",
")"
] |
Converts column dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
|
[
"Converts",
"columns",
"dtypes",
"to",
"given",
"dtypes",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1090-L1140
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._full_axis_reduce
|
def _full_axis_reduce(self, axis, func, alternate_index=None):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
axis: Axis to apply the function to.
func: Function to reduce the Manager by. This function takes in a Manager.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
Pandas series containing the reduced data.
"""
result = self.data.map_across_full_axis(axis, func)
if axis == 0:
columns = alternate_index if alternate_index is not None else self.columns
return self.__constructor__(result, index=["__reduced__"], columns=columns)
else:
index = alternate_index if alternate_index is not None else self.index
return self.__constructor__(result, index=index, columns=["__reduced__"])
|
python
|
def _full_axis_reduce(self, axis, func, alternate_index=None):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
axis: Axis to apply the function to.
func: Function to reduce the Manager by. This function takes in a Manager.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
Pandas series containing the reduced data.
"""
result = self.data.map_across_full_axis(axis, func)
if axis == 0:
columns = alternate_index if alternate_index is not None else self.columns
return self.__constructor__(result, index=["__reduced__"], columns=columns)
else:
index = alternate_index if alternate_index is not None else self.index
return self.__constructor__(result, index=index, columns=["__reduced__"])
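Unlike sum or min, reductions such as median and quantile cannot be combined from partial results, which is what forces this full-axis variant. A short demonstration with made-up data:

import numpy as np

values = np.array([1, 2, 3, 100, 101])
parts = np.array_split(values, 2)
partial = [np.median(p) for p in parts]
# combining per-partition medians gives the wrong answer
assert np.median(partial) != np.median(values)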
|
[
"def",
"_full_axis_reduce",
"(",
"self",
",",
"axis",
",",
"func",
",",
"alternate_index",
"=",
"None",
")",
":",
"result",
"=",
"self",
".",
"data",
".",
"map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"if",
"axis",
"==",
"0",
":",
"columns",
"=",
"alternate_index",
"if",
"alternate_index",
"is",
"not",
"None",
"else",
"self",
".",
"columns",
"return",
"self",
".",
"__constructor__",
"(",
"result",
",",
"index",
"=",
"[",
"\"__reduced__\"",
"]",
",",
"columns",
"=",
"columns",
")",
"else",
":",
"index",
"=",
"alternate_index",
"if",
"alternate_index",
"is",
"not",
"None",
"else",
"self",
".",
"index",
"return",
"self",
".",
"__constructor__",
"(",
"result",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"[",
"\"__reduced__\"",
"]",
")"
] |
Applies map that reduces Manager to series but requires knowledge of full axis.
Args:
axis: Axis to apply the function to.
func: Function to reduce the Manager by. This function takes in a Manager.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
Pandas series containing the reduced data.
|
[
"Applies",
"map",
"that",
"reduce",
"Manager",
"to",
"series",
"but",
"require",
"knowledge",
"of",
"full",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1148-L1166
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.first_valid_index
|
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._build_mapreduce_func(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).min(axis=1).to_pandas().squeeze()
return self.index[first_result]
|
python
|
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._build_mapreduce_func(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).min(axis=1).to_pandas().squeeze()
return self.index[first_result]
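A plain-pandas sketch of the reduction above: take each column's first valid position, then the minimum across columns (made-up data, not the internal API):

import pandas as pd

df = pd.DataFrame({"a": [None, 1.0, 2.0], "b": [None, None, 3.0]})
# positional first-valid index per column, as in first_valid_index_builder
positions = df.reset_index(drop=True).apply(lambda s: s.first_valid_index())
assert df.index[positions.min()] == df.first_valid_index()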
|
[
"def",
"first_valid_index",
"(",
"self",
")",
":",
"# It may be possible to incrementally check each partition, but this",
"# computation is fairly cheap.",
"def",
"first_valid_index_builder",
"(",
"df",
")",
":",
"df",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
".",
"index",
")",
")",
"return",
"df",
".",
"apply",
"(",
"lambda",
"df",
":",
"df",
".",
"first_valid_index",
"(",
")",
")",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"first_valid_index_builder",
")",
"# We get the minimum from each column, then take the min of that to get",
"# first_valid_index. The `to_pandas()` here is just for a single value and",
"# `squeeze` will convert it to a scalar.",
"first_result",
"=",
"self",
".",
"_full_axis_reduce",
"(",
"0",
",",
"func",
")",
".",
"min",
"(",
"axis",
"=",
"1",
")",
".",
"to_pandas",
"(",
")",
".",
"squeeze",
"(",
")",
"return",
"self",
".",
"index",
"[",
"first_result",
"]"
] |
Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
|
[
"Returns",
"index",
"of",
"first",
"non",
"-",
"NaN",
"/",
"NULL",
"value",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1168-L1185
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.idxmax
|
def idxmax(self, **kwargs):
"""Returns the first occurrence of the maximum over requested axis.
Returns:
A new QueryCompiler object containing the maximum of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().idxmax(**kwargs)
axis = kwargs.get("axis", 0)
index = self.index if axis == 0 else self.columns
def idxmax_builder(df, **kwargs):
if axis == 0:
df.index = index
else:
df.columns = index
return df.idxmax(**kwargs)
func = self._build_mapreduce_func(idxmax_builder, **kwargs)
return self._full_axis_reduce(axis, func)
|
python
|
def idxmax(self, **kwargs):
"""Returns the first occurrence of the maximum over requested axis.
Returns:
A new QueryCompiler object containing the maximum of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().idxmax(**kwargs)
axis = kwargs.get("axis", 0)
index = self.index if axis == 0 else self.columns
def idxmax_builder(df, **kwargs):
if axis == 0:
df.index = index
else:
df.columns = index
return df.idxmax(**kwargs)
func = self._build_mapreduce_func(idxmax_builder, **kwargs)
return self._full_axis_reduce(axis, func)
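The builder reassigns the real labels to each partition because idxmax returns labels rather than positions; without that step each partition would report its local positional index. A small sketch of the label-returning behavior (made-up data):

import pandas as pd

df = pd.DataFrame({"a": [3, 9, 1]}, index=["x", "y", "z"])
print(df.idxmax())  # a -> "y", the label of the maximum, not position 1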
|
[
"def",
"idxmax",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"idxmax",
"(",
"*",
"*",
"kwargs",
")",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"index",
"=",
"self",
".",
"index",
"if",
"axis",
"==",
"0",
"else",
"self",
".",
"columns",
"def",
"idxmax_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"axis",
"==",
"0",
":",
"df",
".",
"index",
"=",
"index",
"else",
":",
"df",
".",
"columns",
"=",
"index",
"return",
"df",
".",
"idxmax",
"(",
"*",
"*",
"kwargs",
")",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"idxmax_builder",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_full_axis_reduce",
"(",
"axis",
",",
"func",
")"
] |
Returns the first occurrence of the maximum over requested axis.
Returns:
A new QueryCompiler object containing the maximum of each column or row.
|
[
"Returns",
"the",
"first",
"occurrence",
"of",
"the",
"maximum",
"over",
"requested",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1187-L1208
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.idxmin
|
def idxmin(self, **kwargs):
"""Returns the first occurrence of the minimum over requested axis.
Returns:
A new QueryCompiler object containing the minimum of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().idxmin(**kwargs)
axis = kwargs.get("axis", 0)
index = self.index if axis == 0 else self.columns
def idxmin_builder(df, **kwargs):
if axis == 0:
df.index = index
else:
df.columns = index
return df.idxmin(**kwargs)
func = self._build_mapreduce_func(idxmin_builder, **kwargs)
return self._full_axis_reduce(axis, func)
|
python
|
def idxmin(self, **kwargs):
"""Returns the first occurrence of the minimum over requested axis.
Returns:
A new QueryCompiler object containing the minimum of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().idxmin(**kwargs)
axis = kwargs.get("axis", 0)
index = self.index if axis == 0 else self.columns
def idxmin_builder(df, **kwargs):
if axis == 0:
df.index = index
else:
df.columns = index
return df.idxmin(**kwargs)
func = self._build_mapreduce_func(idxmin_builder, **kwargs)
return self._full_axis_reduce(axis, func)
|
[
"def",
"idxmin",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"idxmin",
"(",
"*",
"*",
"kwargs",
")",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"index",
"=",
"self",
".",
"index",
"if",
"axis",
"==",
"0",
"else",
"self",
".",
"columns",
"def",
"idxmin_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"axis",
"==",
"0",
":",
"df",
".",
"index",
"=",
"index",
"else",
":",
"df",
".",
"columns",
"=",
"index",
"return",
"df",
".",
"idxmin",
"(",
"*",
"*",
"kwargs",
")",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"idxmin_builder",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_full_axis_reduce",
"(",
"axis",
",",
"func",
")"
] |
Returns the first occurrence of the minimum over requested axis.
Returns:
A new QueryCompiler object containing the minimum of each column or row.
|
[
"Returns",
"the",
"first",
"occurrence",
"of",
"the",
"minimum",
"over",
"requested",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1210-L1231
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.last_valid_index
|
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._build_mapreduce_func(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
last_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
return self.index[last_result]
|
python
|
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._build_mapreduce_func(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
last_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
return self.index[last_result]
|
[
"def",
"last_valid_index",
"(",
"self",
")",
":",
"def",
"last_valid_index_builder",
"(",
"df",
")",
":",
"df",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
".",
"index",
")",
")",
"return",
"df",
".",
"apply",
"(",
"lambda",
"df",
":",
"df",
".",
"last_valid_index",
"(",
")",
")",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"last_valid_index_builder",
")",
"# We get the maximum from each column, then take the max of that to get",
"# last_valid_index. The `to_pandas()` here is just for a single value and",
"# `squeeze` will convert it to a scalar.",
"first_result",
"=",
"self",
".",
"_full_axis_reduce",
"(",
"0",
",",
"func",
")",
".",
"max",
"(",
"axis",
"=",
"1",
")",
".",
"to_pandas",
"(",
")",
".",
"squeeze",
"(",
")",
"return",
"self",
".",
"index",
"[",
"first_result",
"]"
] |
Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
|
[
"Returns",
"index",
"of",
"last",
"non",
"-",
"NaN",
"/",
"NULL",
"value",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1233-L1249
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.median
|
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
A new QueryCompiler object containing the median of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().median(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
return self._full_axis_reduce(axis, func)
|
python
|
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
A new QueryCompiler object containing the median of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().median(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
return self._full_axis_reduce(axis, func)
|
[
"def",
"median",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"median",
"(",
"*",
"*",
"kwargs",
")",
"# Pandas default is 0 (though not mentioned in docs)",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"pandas",
".",
"DataFrame",
".",
"median",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_full_axis_reduce",
"(",
"axis",
",",
"func",
")"
] |
Returns median of each column or row.
Returns:
A new QueryCompiler object containing the median of each column or row.
|
[
"Returns",
"median",
"of",
"each",
"column",
"or",
"row",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1251-L1263
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.memory_usage
|
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
A new QueryCompiler object containing the memory usage of each column.
"""
def memory_usage_builder(df, **kwargs):
return df.memory_usage(**kwargs)
func = self._build_mapreduce_func(memory_usage_builder, **kwargs)
return self._full_axis_reduce(0, func)
|
python
|
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
A new QueryCompiler object containing the memory usage of each column.
"""
def memory_usage_builder(df, **kwargs):
return df.memory_usage(**kwargs)
func = self._build_mapreduce_func(memory_usage_builder, **kwargs)
return self._full_axis_reduce(0, func)
|
[
"def",
"memory_usage",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"memory_usage_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"df",
".",
"memory_usage",
"(",
"*",
"*",
"kwargs",
")",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"memory_usage_builder",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_full_axis_reduce",
"(",
"0",
",",
"func",
")"
] |
Returns the memory usage of each column.
Returns:
A new QueryCompiler object containing the memory usage of each column.
|
[
"Returns",
"the",
"memory",
"usage",
"of",
"each",
"column",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1265-L1276
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.quantile_for_single_value
|
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
A new QueryCompiler object containing the quantile of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_single_value(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
assert type(q) is float
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._build_mapreduce_func(quantile_builder, **kwargs)
result = self._full_axis_reduce(axis, func)
if axis == 0:
result.index = [q]
else:
result.columns = [q]
return result
|
python
|
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
A new QueryCompiler object containing the quantile of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_single_value(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
assert type(q) is float
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._build_mapreduce_func(quantile_builder, **kwargs)
result = self._full_axis_reduce(axis, func)
if axis == 0:
result.index = [q]
else:
result.columns = [q]
return result
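After the full-axis reduce, the single reduced label is replaced with the requested q, matching how pandas labels the result. A brief sketch (made-up data):

import pandas as pd

df = pd.DataFrame({"num": [1.0, 2.0, 3.0, 4.0]})
result = df.quantile(q=0.5)
print(result.name)    # 0.5 -- pandas names the reduced axis after q
print(result["num"])  # 2.5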
|
[
"def",
"quantile_for_single_value",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"quantile_for_single_value",
"(",
"*",
"*",
"kwargs",
")",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"q",
"=",
"kwargs",
".",
"get",
"(",
"\"q\"",
",",
"0.5",
")",
"assert",
"type",
"(",
"q",
")",
"is",
"float",
"def",
"quantile_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"pandas",
".",
"DataFrame",
".",
"quantile",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
"except",
"ValueError",
":",
"return",
"pandas",
".",
"Series",
"(",
")",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"quantile_builder",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"self",
".",
"_full_axis_reduce",
"(",
"axis",
",",
"func",
")",
"if",
"axis",
"==",
"0",
":",
"result",
".",
"index",
"=",
"[",
"q",
"]",
"else",
":",
"result",
".",
"columns",
"=",
"[",
"q",
"]",
"return",
"result"
] |
Returns quantile of each column or row.
Returns:
A new QueryCompiler object containing the quantile of each column or row.
|
[
"Returns",
"quantile",
"of",
"each",
"column",
"or",
"row",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1291-L1316
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._full_axis_reduce_along_select_indices
|
def _full_axis_reduce_along_select_indices(self, func, axis, index):
"""Reduce Manger along select indices using function that needs full axis.
Args:
func: Callable that reduces the dimension of the object and requires full
knowledge of the entire axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting QueryCompiler.
Returns:
A new QueryCompiler object with index, or a BaseFrameManager object.
"""
# Convert indices to numeric indices
old_index = self.index if axis else self.columns
numeric_indices = [i for i, name in enumerate(old_index) if name in index]
result = self.data.apply_func_to_select_indices_along_full_axis(
axis, func, numeric_indices
)
return result
|
python
|
def _full_axis_reduce_along_select_indices(self, func, axis, index):
"""Reduce Manger along select indices using function that needs full axis.
Args:
func: Callable that reduces the dimension of the object and requires full
knowledge of the entire axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting QueryCompiler.
Returns:
A new QueryCompiler object with index, or a BaseFrameManager object.
"""
# Convert indices to numeric indices
old_index = self.index if axis else self.columns
numeric_indices = [i for i, name in enumerate(old_index) if name in index]
result = self.data.apply_func_to_select_indices_along_full_axis(
axis, func, numeric_indices
)
return result
|
[
"def",
"_full_axis_reduce_along_select_indices",
"(",
"self",
",",
"func",
",",
"axis",
",",
"index",
")",
":",
"# Convert indices to numeric indices",
"old_index",
"=",
"self",
".",
"index",
"if",
"axis",
"else",
"self",
".",
"columns",
"numeric_indices",
"=",
"[",
"i",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"old_index",
")",
"if",
"name",
"in",
"index",
"]",
"result",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices_along_full_axis",
"(",
"axis",
",",
"func",
",",
"numeric_indices",
")",
"return",
"result"
] |
Reduce Manager along select indices using function that needs full axis.
Args:
func: Callable that reduces the dimension of the object and requires full
knowledge of the entire axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting QueryCompiler.
Returns:
A new QueryCompiler object with index, or a BaseFrameManager object.
|
[
"Reduce",
"Manger",
"along",
"select",
"indices",
"using",
"function",
"that",
"needs",
"full",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1367-L1385
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.describe
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
new_columns = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
.columns
)
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
|
python
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
new_columns = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
.columns
)
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
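The empty-frame probe above lets pandas decide which columns survive describe (numeric-only when dtypes are mixed) without touching any data. A sketch of the probe, assuming typical pandas behavior (made-up data):

import pandas as pd

df = pd.DataFrame({"n": [1, 2], "s": ["a", "b"]})
probe = pd.DataFrame(columns=df.columns).astype(df.dtypes)
print(probe.describe().columns)  # only "n" survives with mixed dtypes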
|
[
"def",
"describe",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Use pandas to calculate the correct columns",
"new_columns",
"=",
"(",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"self",
".",
"columns",
")",
".",
"astype",
"(",
"self",
".",
"dtypes",
")",
".",
"describe",
"(",
"*",
"*",
"kwargs",
")",
".",
"columns",
")",
"def",
"describe_builder",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"df",
".",
"iloc",
"[",
":",
",",
"internal_indices",
"]",
".",
"describe",
"(",
"*",
"*",
"kwargs",
")",
"# Apply describe and update indices, columns, and dtypes",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"describe_builder",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_full_axis_reduce_along_select_indices",
"(",
"func",
",",
"0",
",",
"new_columns",
")",
"new_index",
"=",
"self",
".",
"compute_index",
"(",
"0",
",",
"new_data",
",",
"False",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
")"
] |
Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
|
[
"Generates",
"descriptive",
"statistics",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1387-L1408
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.dropna
|
def dropna(self, **kwargs):
"""Returns a new QueryCompiler with null values dropped along given axis.
Return:
A new QueryCompiler
"""
axis = kwargs.get("axis", 0)
subset = kwargs.get("subset", None)
thresh = kwargs.get("thresh", None)
how = kwargs.get("how", "any")
# We need to subset the axis that we care about with `subset`. This
# will be used to determine the number of values that are NA.
if subset is not None:
if not axis:
compute_na = self.getitem_column_array(subset)
else:
compute_na = self.getitem_row_array(self.index.get_indexer_for(subset))
else:
compute_na = self
if not isinstance(axis, list):
axis = [axis]
# We are building this dictionary first to determine which columns
# and rows to drop. This way we do not drop some columns before we
# know which rows need to be dropped.
if thresh is not None:
# Count the number of NA values and specify which are higher than
# thresh.
drop_values = {
ax ^ 1: compute_na.isna().sum(axis=ax ^ 1).to_pandas().squeeze()
> thresh
for ax in axis
}
else:
drop_values = {
ax
^ 1: getattr(compute_na.isna(), how)(axis=ax ^ 1).to_pandas().squeeze()
for ax in axis
}
if 0 not in drop_values:
drop_values[0] = None
if 1 not in drop_values:
drop_values[1] = None
rm_from_index = (
[obj for obj in compute_na.index[drop_values[1]]]
if drop_values[1] is not None
else None
)
rm_from_columns = (
[obj for obj in compute_na.columns[drop_values[0]]]
if drop_values[0] is not None
else None
)
else:
rm_from_index = (
compute_na.index[drop_values[1]] if drop_values[1] is not None else None
)
rm_from_columns = (
compute_na.columns[drop_values[0]]
if drop_values[0] is not None
else None
)
return self.drop(index=rm_from_index, columns=rm_from_columns)
|
python
|
def dropna(self, **kwargs):
"""Returns a new QueryCompiler with null values dropped along given axis.
Return:
A new QueryCompiler
"""
axis = kwargs.get("axis", 0)
subset = kwargs.get("subset", None)
thresh = kwargs.get("thresh", None)
how = kwargs.get("how", "any")
# We need to subset the axis that we care about with `subset`. This
# will be used to determine the number of values that are NA.
if subset is not None:
if not axis:
compute_na = self.getitem_column_array(subset)
else:
compute_na = self.getitem_row_array(self.index.get_indexer_for(subset))
else:
compute_na = self
if not isinstance(axis, list):
axis = [axis]
# We are building this dictionary first to determine which columns
# and rows to drop. This way we do not drop some columns before we
# know which rows need to be dropped.
if thresh is not None:
# Count the number of NA values and specify which are higher than
# thresh.
drop_values = {
ax ^ 1: compute_na.isna().sum(axis=ax ^ 1).to_pandas().squeeze()
> thresh
for ax in axis
}
else:
drop_values = {
ax
^ 1: getattr(compute_na.isna(), how)(axis=ax ^ 1).to_pandas().squeeze()
for ax in axis
}
if 0 not in drop_values:
drop_values[0] = None
if 1 not in drop_values:
drop_values[1] = None
rm_from_index = (
[obj for obj in compute_na.index[drop_values[1]]]
if drop_values[1] is not None
else None
)
rm_from_columns = (
[obj for obj in compute_na.columns[drop_values[0]]]
if drop_values[0] is not None
else None
)
else:
rm_from_index = (
compute_na.index[drop_values[1]] if drop_values[1] is not None else None
)
rm_from_columns = (
compute_na.columns[drop_values[0]]
if drop_values[0] is not None
else None
)
return self.drop(index=rm_from_index, columns=rm_from_columns)
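The labels to drop are computed for both axes before any drop happens, so removing rows cannot change which columns get removed. For thresh, a per-label NA count decides; a plain-pandas sketch of the thresh semantics (made-up data):

import pandas as pd

df = pd.DataFrame({"a": [1.0, None, None], "b": [1.0, 2.0, None]})
# keep rows that have at least 2 non-NA values
print(df.dropna(thresh=2))  # only the first row survives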
|
[
"def",
"dropna",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"subset",
"=",
"kwargs",
".",
"get",
"(",
"\"subset\"",
",",
"None",
")",
"thresh",
"=",
"kwargs",
".",
"get",
"(",
"\"thresh\"",
",",
"None",
")",
"how",
"=",
"kwargs",
".",
"get",
"(",
"\"how\"",
",",
"\"any\"",
")",
"# We need to subset the axis that we care about with `subset`. This",
"# will be used to determine the number of values that are NA.",
"if",
"subset",
"is",
"not",
"None",
":",
"if",
"not",
"axis",
":",
"compute_na",
"=",
"self",
".",
"getitem_column_array",
"(",
"subset",
")",
"else",
":",
"compute_na",
"=",
"self",
".",
"getitem_row_array",
"(",
"self",
".",
"index",
".",
"get_indexer_for",
"(",
"subset",
")",
")",
"else",
":",
"compute_na",
"=",
"self",
"if",
"not",
"isinstance",
"(",
"axis",
",",
"list",
")",
":",
"axis",
"=",
"[",
"axis",
"]",
"# We are building this dictionary first to determine which columns",
"# and rows to drop. This way we do not drop some columns before we",
"# know which rows need to be dropped.",
"if",
"thresh",
"is",
"not",
"None",
":",
"# Count the number of NA values and specify which are higher than",
"# thresh.",
"drop_values",
"=",
"{",
"ax",
"^",
"1",
":",
"compute_na",
".",
"isna",
"(",
")",
".",
"sum",
"(",
"axis",
"=",
"ax",
"^",
"1",
")",
".",
"to_pandas",
"(",
")",
".",
"squeeze",
"(",
")",
">",
"thresh",
"for",
"ax",
"in",
"axis",
"}",
"else",
":",
"drop_values",
"=",
"{",
"ax",
"^",
"1",
":",
"getattr",
"(",
"compute_na",
".",
"isna",
"(",
")",
",",
"how",
")",
"(",
"axis",
"=",
"ax",
"^",
"1",
")",
".",
"to_pandas",
"(",
")",
".",
"squeeze",
"(",
")",
"for",
"ax",
"in",
"axis",
"}",
"if",
"0",
"not",
"in",
"drop_values",
":",
"drop_values",
"[",
"0",
"]",
"=",
"None",
"if",
"1",
"not",
"in",
"drop_values",
":",
"drop_values",
"[",
"1",
"]",
"=",
"None",
"rm_from_index",
"=",
"(",
"[",
"obj",
"for",
"obj",
"in",
"compute_na",
".",
"index",
"[",
"drop_values",
"[",
"1",
"]",
"]",
"]",
"if",
"drop_values",
"[",
"1",
"]",
"is",
"not",
"None",
"else",
"None",
")",
"rm_from_columns",
"=",
"(",
"[",
"obj",
"for",
"obj",
"in",
"compute_na",
".",
"columns",
"[",
"drop_values",
"[",
"0",
"]",
"]",
"]",
"if",
"drop_values",
"[",
"0",
"]",
"is",
"not",
"None",
"else",
"None",
")",
"else",
":",
"rm_from_index",
"=",
"(",
"compute_na",
".",
"index",
"[",
"drop_values",
"[",
"1",
"]",
"]",
"if",
"drop_values",
"[",
"1",
"]",
"is",
"not",
"None",
"else",
"None",
")",
"rm_from_columns",
"=",
"(",
"compute_na",
".",
"columns",
"[",
"drop_values",
"[",
"0",
"]",
"]",
"if",
"drop_values",
"[",
"0",
"]",
"is",
"not",
"None",
"else",
"None",
")",
"return",
"self",
".",
"drop",
"(",
"index",
"=",
"rm_from_index",
",",
"columns",
"=",
"rm_from_columns",
")"
] |
Returns a new QueryCompiler with null values dropped along given axis.
Return:
A new QueryCompiler
|
[
"Returns",
"a",
"new",
"QueryCompiler",
"with",
"null",
"values",
"dropped",
"along",
"given",
"axis",
".",
"Return",
":",
"a",
"new",
"DataManager"
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1460-L1525
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.eval
|
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
"""
columns = self.index if self._is_transposed else self.columns
index = self.columns if self._is_transposed else self.index
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
columns_copy = pandas.DataFrame(columns=self.columns)
columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
expect_series = isinstance(columns_copy, pandas.Series)
def eval_builder(df, **kwargs):
# pop the `axis` parameter because it was needed to build the mapreduce
# function but it is not a parameter used by `eval`.
kwargs.pop("axis", None)
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
return result
func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)
new_data = self._map_across_full_axis(1, func)
if expect_series:
new_columns = [columns_copy.name]
new_index = index
else:
new_columns = columns_copy.columns
new_index = self.index
return self.__constructor__(new_data, new_index, new_columns)
|
python
|
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
"""
columns = self.index if self._is_transposed else self.columns
index = self.columns if self._is_transposed else self.index
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
columns_copy = pandas.DataFrame(columns=self.columns)
columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
expect_series = isinstance(columns_copy, pandas.Series)
def eval_builder(df, **kwargs):
# pop the `axis` parameter because it was needed to build the mapreduce
# function but it is not a parameter used by `eval`.
kwargs.pop("axis", None)
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
return result
func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)
new_data = self._map_across_full_axis(1, func)
if expect_series:
new_columns = [columns_copy.name]
new_index = index
else:
new_columns = columns_copy.columns
new_index = self.index
return self.__constructor__(new_data, new_index, new_columns)
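The empty columns_copy probe determines the result shape up front: a bare expression yields a Series while an assignment yields a DataFrame. A quick sketch of that distinction (made-up data):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(type(df.eval("a + b")))      # pandas Series -- bare expression
print(type(df.eval("c = a + b")))  # pandas DataFrame -- assignment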
|
[
"def",
"eval",
"(",
"self",
",",
"expr",
",",
"*",
"*",
"kwargs",
")",
":",
"columns",
"=",
"self",
".",
"index",
"if",
"self",
".",
"_is_transposed",
"else",
"self",
".",
"columns",
"index",
"=",
"self",
".",
"columns",
"if",
"self",
".",
"_is_transposed",
"else",
"self",
".",
"index",
"# Make a copy of columns and eval on the copy to determine if result type is",
"# series or not",
"columns_copy",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"self",
".",
"columns",
")",
"columns_copy",
"=",
"columns_copy",
".",
"eval",
"(",
"expr",
",",
"inplace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"expect_series",
"=",
"isinstance",
"(",
"columns_copy",
",",
"pandas",
".",
"Series",
")",
"def",
"eval_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"# pop the `axis` parameter because it was needed to build the mapreduce",
"# function but it is not a parameter used by `eval`.",
"kwargs",
".",
"pop",
"(",
"\"axis\"",
",",
"None",
")",
"df",
".",
"columns",
"=",
"columns",
"result",
"=",
"df",
".",
"eval",
"(",
"expr",
",",
"inplace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"return",
"result",
"func",
"=",
"self",
".",
"_build_mapreduce_func",
"(",
"eval_builder",
",",
"axis",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"1",
",",
"func",
")",
"if",
"expect_series",
":",
"new_columns",
"=",
"[",
"columns_copy",
".",
"name",
"]",
"new_index",
"=",
"index",
"else",
":",
"new_columns",
"=",
"columns_copy",
".",
"columns",
"new_index",
"=",
"self",
".",
"index",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
")"
] |
Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
|
[
"Returns",
"a",
"new",
"QueryCompiler",
"with",
"expr",
"evaluated",
"on",
"columns",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1527-L1562
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.mode
|
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = pandas.concat([result, append_values], ignore_index=True)
elif axis and len(df.columns) != len(result.columns):
# Pad rows
append_vals = pandas.DataFrame(
columns=range(len(result.columns), len(df.columns)),
index=result.index,
)
result = pandas.concat([result, append_vals], axis=1)
return pandas.DataFrame(result)
func = self._prepare_method(mode_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
new_dtypes = self._dtype_cache
if new_dtypes is not None:
new_dtypes.index = new_columns
return self.__constructor__(
new_data, new_index, new_columns, new_dtypes
).dropna(axis=axis, how="all")
|
python
|
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = pandas.concat([result, append_values], ignore_index=True)
elif axis and len(df.columns) != len(result.columns):
# Pad rows
append_vals = pandas.DataFrame(
columns=range(len(result.columns), len(df.columns)),
index=result.index,
)
result = pandas.concat([result, append_vals], axis=1)
return pandas.DataFrame(result)
func = self._prepare_method(mode_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
new_dtypes = self._dtype_cache
if new_dtypes is not None:
new_dtypes.index = new_columns
return self.__constructor__(
new_data, new_index, new_columns, new_dtypes
).dropna(axis=axis, how="all")
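mode_builder pads partitions back to a common shape because pandas itself pads shorter columns with NaN when columns have different numbers of modes. A small sketch (made-up data):

import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
print(df.mode())
# "a" has one mode, "b" has three, so "a" is padded with NaN to 3 rows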
|
[
"def",
"mode",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"def",
"mode_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"df",
".",
"mode",
"(",
"*",
"*",
"kwargs",
")",
"# We return a dataframe with the same shape as the input to ensure",
"# that all the partitions will be the same shape",
"if",
"not",
"axis",
"and",
"len",
"(",
"df",
")",
"!=",
"len",
"(",
"result",
")",
":",
"# Pad columns",
"append_values",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"result",
".",
"columns",
",",
"index",
"=",
"range",
"(",
"len",
"(",
"result",
")",
",",
"len",
"(",
"df",
")",
")",
")",
"result",
"=",
"pandas",
".",
"concat",
"(",
"[",
"result",
",",
"append_values",
"]",
",",
"ignore_index",
"=",
"True",
")",
"elif",
"axis",
"and",
"len",
"(",
"df",
".",
"columns",
")",
"!=",
"len",
"(",
"result",
".",
"columns",
")",
":",
"# Pad rows",
"append_vals",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"range",
"(",
"len",
"(",
"result",
".",
"columns",
")",
",",
"len",
"(",
"df",
".",
"columns",
")",
")",
",",
"index",
"=",
"result",
".",
"index",
",",
")",
"result",
"=",
"pandas",
".",
"concat",
"(",
"[",
"result",
",",
"append_vals",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"pandas",
".",
"DataFrame",
"(",
"result",
")",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"mode_builder",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"new_index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"self",
".",
"index",
")",
")",
"if",
"not",
"axis",
"else",
"self",
".",
"index",
"new_columns",
"=",
"self",
".",
"columns",
"if",
"not",
"axis",
"else",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"self",
".",
"columns",
")",
")",
"new_dtypes",
"=",
"self",
".",
"_dtype_cache",
"if",
"new_dtypes",
"is",
"not",
"None",
":",
"new_dtypes",
".",
"index",
"=",
"new_columns",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
",",
"new_dtypes",
")",
".",
"dropna",
"(",
"axis",
"=",
"axis",
",",
"how",
"=",
"\"all\"",
")"
] |
Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
|
[
"Returns",
"a",
"new",
"QueryCompiler",
"with",
"modes",
"calculated",
"for",
"each",
"label",
"along",
"given",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1564-L1601
|
train
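To see why mode_builder pads its output, compare a plain pandas mode call; this is a minimal sketch with no Modin internals, mirroring the padding in the record above:
import pandas as pd
df = pd.DataFrame({"a": [1, 1, 2, 3], "b": [4, 4, 5, 5]})
modes = df.mode()  # "a" has one mode, "b" has two, so the frame is NaN-padded to 2 rows
# Pad to the input's length so differently shaped partition results align;
# trailing all-NaN rows are dropped later with dropna(how="all").
pad = pd.DataFrame(columns=modes.columns, index=range(len(modes), len(df)))
print(pd.concat([modes, pad], ignore_index=True))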
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.fillna
|
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
axis = kwargs.get("axis", 0)
value = kwargs.get("value")
if isinstance(value, dict):
value = kwargs.pop("value")
if axis == 0:
index = self.columns
else:
index = self.index
value = {
idx: value[key] for key in value for idx in index.get_indexer_for([key])
}
def fillna_dict_builder(df, func_dict={}):
# We do this to ensure that no matter the state of the columns we get
# the correct ones.
func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict}
return df.fillna(value=func_dict, **kwargs)
new_data = self.data.apply_func_to_select_indices(
axis, fillna_dict_builder, value, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
else:
func = self._prepare_method(pandas.DataFrame.fillna, **kwargs)
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
|
python
|
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
axis = kwargs.get("axis", 0)
value = kwargs.get("value")
if isinstance(value, dict):
value = kwargs.pop("value")
if axis == 0:
index = self.columns
else:
index = self.index
value = {
idx: value[key] for key in value for idx in index.get_indexer_for([key])
}
def fillna_dict_builder(df, func_dict={}):
# We do this to ensure that no matter the state of the columns we get
# the correct ones.
func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict}
return df.fillna(value=func_dict, **kwargs)
new_data = self.data.apply_func_to_select_indices(
axis, fillna_dict_builder, value, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
else:
func = self._prepare_method(pandas.DataFrame.fillna, **kwargs)
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
|
[
"def",
"fillna",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"value",
"=",
"kwargs",
".",
"get",
"(",
"\"value\"",
")",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"value",
"=",
"kwargs",
".",
"pop",
"(",
"\"value\"",
")",
"if",
"axis",
"==",
"0",
":",
"index",
"=",
"self",
".",
"columns",
"else",
":",
"index",
"=",
"self",
".",
"index",
"value",
"=",
"{",
"idx",
":",
"value",
"[",
"key",
"]",
"for",
"key",
"in",
"value",
"for",
"idx",
"in",
"index",
".",
"get_indexer_for",
"(",
"[",
"key",
"]",
")",
"}",
"def",
"fillna_dict_builder",
"(",
"df",
",",
"func_dict",
"=",
"{",
"}",
")",
":",
"# We do this to ensure that no matter the state of the columns we get",
"# the correct ones.",
"func_dict",
"=",
"{",
"df",
".",
"columns",
"[",
"idx",
"]",
":",
"func_dict",
"[",
"idx",
"]",
"for",
"idx",
"in",
"func_dict",
"}",
"return",
"df",
".",
"fillna",
"(",
"value",
"=",
"func_dict",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices",
"(",
"axis",
",",
"fillna_dict_builder",
",",
"value",
",",
"keep_remaining",
"=",
"True",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
")",
"else",
":",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"pandas",
".",
"DataFrame",
".",
"fillna",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
")"
] |
Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
|
[
"Replaces",
"NaN",
"values",
"with",
"the",
"method",
"provided",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1603-L1635
|
train
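The dict branch of fillna translates column labels to positional indices and back; a minimal plain-pandas sketch of that round trip (the frame and fill values are illustrative):
import pandas as pd
df = pd.DataFrame({"a": [1.0, None], "b": [None, 2.0]})
value = {"b": 0.0}
# Labels -> positions (as the dict branch does), then positions -> the
# partition's own labels inside fillna_dict_builder.
pos = {i: value[k] for k in value for i in df.columns.get_indexer_for([k])}
func_dict = {df.columns[i]: pos[i] for i in pos}
print(df.fillna(value=func_dict))  # only "b" is filled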
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.query
|
def query(self, expr, **kwargs):
"""Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
|
python
|
def query(self, expr, **kwargs):
"""Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
|
[
"def",
"query",
"(",
"self",
",",
"expr",
",",
"*",
"*",
"kwargs",
")",
":",
"columns",
"=",
"self",
".",
"columns",
"def",
"query_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"# This is required because of an Arrow limitation",
"# TODO revisit for Arrow error",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"df",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
")",
")",
"df",
".",
"columns",
"=",
"columns",
"df",
".",
"query",
"(",
"expr",
",",
"inplace",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"df",
".",
"columns",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
".",
"columns",
")",
")",
"return",
"df",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"query_builder",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"1",
",",
"func",
")",
"# Query removes rows, so we need to update the index",
"new_index",
"=",
"self",
".",
"compute_index",
"(",
"0",
",",
"new_data",
",",
"True",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"self",
".",
"columns",
",",
"self",
".",
"dtypes",
")"
] |
Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
|
[
"Query",
"columns",
"of",
"the",
"DataManager",
"with",
"a",
"boolean",
"expression",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1637-L1663
|
train
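A sketch of what query_builder does on a single partition, assuming the partition carries positional column labels and the full frame's labels are ["x", "y"] (both assumptions for illustration):
import pandas as pd
part = pd.DataFrame({0: [1, 2, 3], 1: [10, 20, 30]})
part = part.copy()
part.index = pd.RangeIndex(len(part))
part.columns = pd.Index(["x", "y"])   # restore real labels so the expression can use them
part.query("x > 1", inplace=True)
part.columns = pd.RangeIndex(len(part.columns))  # strip labels again
print(part)  # rows where x > 1, columns back to 0..n-1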
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.rank
|
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
DataManager containing the ranks of the values along an axis.
"""
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
new_data = self._map_across_full_axis(axis, func)
# Since we assume no knowledge of internal state, we get the columns
# from the internal partitions.
if numeric_only:
new_columns = self.compute_index(1, new_data, True)
else:
new_columns = self.columns
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
|
python
|
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
DataManager containing the ranks of the values along an axis.
"""
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
new_data = self._map_across_full_axis(axis, func)
# Since we assume no knowledge of internal state, we get the columns
# from the internal partitions.
if numeric_only:
new_columns = self.compute_index(1, new_data, True)
else:
new_columns = self.columns
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
|
[
"def",
"rank",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"numeric_only",
"=",
"True",
"if",
"axis",
"else",
"kwargs",
".",
"get",
"(",
"\"numeric_only\"",
",",
"False",
")",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"pandas",
".",
"DataFrame",
".",
"rank",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"# Since we assume no knowledge of internal state, we get the columns",
"# from the internal partitions.",
"if",
"numeric_only",
":",
"new_columns",
"=",
"self",
".",
"compute_index",
"(",
"1",
",",
"new_data",
",",
"True",
")",
"else",
":",
"new_columns",
"=",
"self",
".",
"columns",
"new_dtypes",
"=",
"pandas",
".",
"Series",
"(",
"[",
"np",
".",
"float64",
"for",
"_",
"in",
"new_columns",
"]",
",",
"index",
"=",
"new_columns",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"new_columns",
",",
"new_dtypes",
")"
] |
Computes numerical rank along axis. Equal values are set to the average.
Returns:
DataManager containing the ranks of the values along an axis.
|
[
"Computes",
"numerical",
"rank",
"along",
"axis",
".",
"Equal",
"values",
"are",
"set",
"to",
"the",
"average",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1665-L1682
|
train
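A small plain-pandas sketch of why rank prunes columns: row-wise ranking forces numeric_only, so non-numeric columns drop out and the result is float64 (behavior may vary slightly across pandas versions):
import pandas as pd
df = pd.DataFrame({"a": [3, 1, 2], "b": ["x", "y", "z"]})
axis = 1
numeric_only = True if axis else False  # mirrors the line in the method above
print(df.rank(axis=axis, numeric_only=numeric_only))  # only "a" survives
print(df.rank(numeric_only=True))  # column-wise: same column pruning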
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.sort_index
|
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
DataManager containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
index = self.columns if axis else self.index
# sort_index can have ascending be None and behaves as if it is False.
# sort_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
def sort_index_builder(df, **kwargs):
if axis:
df.columns = index
else:
df.index = index
return df.sort_index(axis=axis, **kwargs)
func = self._prepare_method(sort_index_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
return self.__constructor__(
new_data, new_index, new_columns, self.dtypes.copy()
)
|
python
|
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
DataManager containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
index = self.columns if axis else self.index
# sort_index can have ascending be None and behaves as if it is False.
# sort_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
def sort_index_builder(df, **kwargs):
if axis:
df.columns = index
else:
df.index = index
return df.sort_index(axis=axis, **kwargs)
func = self._prepare_method(sort_index_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
return self.__constructor__(
new_data, new_index, new_columns, self.dtypes.copy()
)
|
[
"def",
"sort_index",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"pop",
"(",
"\"axis\"",
",",
"0",
")",
"index",
"=",
"self",
".",
"columns",
"if",
"axis",
"else",
"self",
".",
"index",
"# sort_index can have ascending be None and behaves as if it is False.",
"# sort_values cannot have ascending be None. Thus, the following logic is to",
"# convert the ascending argument to one that works with sort_values",
"ascending",
"=",
"kwargs",
".",
"pop",
"(",
"\"ascending\"",
",",
"True",
")",
"if",
"ascending",
"is",
"None",
":",
"ascending",
"=",
"False",
"kwargs",
"[",
"\"ascending\"",
"]",
"=",
"ascending",
"def",
"sort_index_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"axis",
":",
"df",
".",
"columns",
"=",
"index",
"else",
":",
"df",
".",
"index",
"=",
"index",
"return",
"df",
".",
"sort_index",
"(",
"axis",
"=",
"axis",
",",
"*",
"*",
"kwargs",
")",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"sort_index_builder",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"if",
"axis",
":",
"new_columns",
"=",
"pandas",
".",
"Series",
"(",
"self",
".",
"columns",
")",
".",
"sort_values",
"(",
"*",
"*",
"kwargs",
")",
"new_index",
"=",
"self",
".",
"index",
"else",
":",
"new_index",
"=",
"pandas",
".",
"Series",
"(",
"self",
".",
"index",
")",
".",
"sort_values",
"(",
"*",
"*",
"kwargs",
")",
"new_columns",
"=",
"self",
".",
"columns",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
",",
"self",
".",
"dtypes",
".",
"copy",
"(",
")",
")"
] |
Sorts the data with respect to either the columns or the indices.
Returns:
DataManager containing the data sorted by columns or indices.
|
[
"Sorts",
"the",
"data",
"with",
"respect",
"to",
"either",
"the",
"columns",
"or",
"the",
"indices",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1684-L1718
|
train
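A minimal sketch of the ascending normalization, relying on the source comment that sort_index treats ascending=None as descending while sort_values rejects None:
import pandas as pd
idx = pd.Index(["b", "c", "a"])
ascending = None
if ascending is None:  # normalize exactly as sort_index above does
    ascending = False
print(pd.Series(idx).sort_values(ascending=ascending))  # c, b, a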
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._map_across_full_axis_select_indices
|
def _map_across_full_axis_select_indices(
self, axis, func, indices, keep_remaining=False
):
"""Maps function to select indices along full axis.
Args:
axis: 0 for columns and 1 for rows.
func: Callable mapping function over the BlockPartitions.
indices: indices along axis to map over.
keep_remaining: True to keep indices where the function was not applied.
Returns:
BaseFrameManager containing the result of mapping func over axis on indices.
"""
return self.data.apply_func_to_select_indices_along_full_axis(
axis, func, indices, keep_remaining
)
|
python
|
def _map_across_full_axis_select_indices(
self, axis, func, indices, keep_remaining=False
):
"""Maps function to select indices along full axis.
Args:
axis: 0 for columns and 1 for rows.
func: Callable mapping function over the BlockPartitions.
indices: indices along axis to map over.
keep_remaining: True to keep indices where the function was not applied.
Returns:
BaseFrameManager containing the result of mapping func over axis on indices.
"""
return self.data.apply_func_to_select_indices_along_full_axis(
axis, func, indices, keep_remaining
)
|
[
"def",
"_map_across_full_axis_select_indices",
"(",
"self",
",",
"axis",
",",
"func",
",",
"indices",
",",
"keep_remaining",
"=",
"False",
")",
":",
"return",
"self",
".",
"data",
".",
"apply_func_to_select_indices_along_full_axis",
"(",
"axis",
",",
"func",
",",
"indices",
",",
"keep_remaining",
")"
] |
Maps function to select indices along full axis.
Args:
axis: 0 for columns and 1 for rows.
func: Callable mapping function over the BlockPartitions.
indices: indices along axis to map over.
keep_remaining: True to keep indices where the function was not applied.
Returns:
BaseFrameManager containing the result of mapping func over axis on indices.
|
[
"Maps",
"function",
"to",
"select",
"indices",
"along",
"full",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1726-L1742
|
train
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.quantile_for_list_of_values
|
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
DataManager containing quantiles of original DataManager along an axis.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_list_of_values(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q")
numeric_only = kwargs.get("numeric_only", True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self.numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
if axis:
# If along rows, then drop the nonnumeric columns, record the index, and
# take transpose. We have to do this because if we don't, the result is all
# in one column for some reason.
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
query_compiler = self.drop(columns=nonnumeric)
new_columns = query_compiler.index
else:
query_compiler = self
def quantile_builder(df, **kwargs):
result = df.quantile(**kwargs)
return result.T if axis == 1 else result
func = query_compiler._prepare_method(quantile_builder, **kwargs)
q_index = pandas.Float64Index(q)
new_data = query_compiler._map_across_full_axis(axis, func)
# This took a long time to debug, so here is the rundown of why this is needed.
# Previously, we were operating on select indices, but that was broken. We were
# not correctly setting the columns/index. Because of how we compute `to_pandas`
# and because of the static nature of the index for `axis=1` it is easier to
# just handle this as the transpose (see `quantile_builder` above for the
# transpose within the partition) than it is to completely rework other
# internal methods. Basically we are returning the transpose of the object for
# correctness and cleanliness of the code.
if axis == 1:
q_index = new_columns
new_columns = pandas.Float64Index(q)
result = self.__constructor__(new_data, q_index, new_columns)
return result.transpose() if axis == 1 else result
|
python
|
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
DataManager containing quantiles of original DataManager along an axis.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_list_of_values(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q")
numeric_only = kwargs.get("numeric_only", True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self.numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
if axis:
# If along rows, then drop the nonnumeric columns, record the index, and
# take transpose. We have to do this because if we don't, the result is all
# in one column for some reason.
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
query_compiler = self.drop(columns=nonnumeric)
new_columns = query_compiler.index
else:
query_compiler = self
def quantile_builder(df, **kwargs):
result = df.quantile(**kwargs)
return result.T if axis == 1 else result
func = query_compiler._prepare_method(quantile_builder, **kwargs)
q_index = pandas.Float64Index(q)
new_data = query_compiler._map_across_full_axis(axis, func)
# This took a long time to debug, so here is the rundown of why this is needed.
# Previously, we were operating on select indices, but that was broken. We were
# not correctly setting the columns/index. Because of how we compute `to_pandas`
# and because of the static nature of the index for `axis=1` it is easier to
# just handle this as the transpose (see `quantile_builder` above for the
# transpose within the partition) than it is to completely rework other
# internal methods. Basically we are returning the transpose of the object for
# correctness and cleanliness of the code.
if axis == 1:
q_index = new_columns
new_columns = pandas.Float64Index(q)
result = self.__constructor__(new_data, q_index, new_columns)
return result.transpose() if axis == 1 else result
|
[
"def",
"quantile_for_list_of_values",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"kwargs",
"[",
"\"axis\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"^",
"1",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"quantile_for_list_of_values",
"(",
"*",
"*",
"kwargs",
")",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"q",
"=",
"kwargs",
".",
"get",
"(",
"\"q\"",
")",
"numeric_only",
"=",
"kwargs",
".",
"get",
"(",
"\"numeric_only\"",
",",
"True",
")",
"assert",
"isinstance",
"(",
"q",
",",
"(",
"pandas",
".",
"Series",
",",
"np",
".",
"ndarray",
",",
"pandas",
".",
"Index",
",",
"list",
")",
")",
"if",
"numeric_only",
":",
"new_columns",
"=",
"self",
".",
"numeric_columns",
"(",
")",
"else",
":",
"new_columns",
"=",
"[",
"col",
"for",
"col",
",",
"dtype",
"in",
"zip",
"(",
"self",
".",
"columns",
",",
"self",
".",
"dtypes",
")",
"if",
"(",
"is_numeric_dtype",
"(",
"dtype",
")",
"or",
"is_datetime_or_timedelta_dtype",
"(",
"dtype",
")",
")",
"]",
"if",
"axis",
":",
"# If along rows, then drop the nonnumeric columns, record the index, and",
"# take transpose. We have to do this because if we don't, the result is all",
"# in one column for some reason.",
"nonnumeric",
"=",
"[",
"col",
"for",
"col",
",",
"dtype",
"in",
"zip",
"(",
"self",
".",
"columns",
",",
"self",
".",
"dtypes",
")",
"if",
"not",
"is_numeric_dtype",
"(",
"dtype",
")",
"]",
"query_compiler",
"=",
"self",
".",
"drop",
"(",
"columns",
"=",
"nonnumeric",
")",
"new_columns",
"=",
"query_compiler",
".",
"index",
"else",
":",
"query_compiler",
"=",
"self",
"def",
"quantile_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"df",
".",
"quantile",
"(",
"*",
"*",
"kwargs",
")",
"return",
"result",
".",
"T",
"if",
"axis",
"==",
"1",
"else",
"result",
"func",
"=",
"query_compiler",
".",
"_prepare_method",
"(",
"quantile_builder",
",",
"*",
"*",
"kwargs",
")",
"q_index",
"=",
"pandas",
".",
"Float64Index",
"(",
"q",
")",
"new_data",
"=",
"query_compiler",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"# This took a long time to debug, so here is the rundown of why this is needed.",
"# Previously, we were operating on select indices, but that was broken. We were",
"# not correctly setting the columns/index. Because of how we compute `to_pandas`",
"# and because of the static nature of the index for `axis=1` it is easier to",
"# just handle this as the transpose (see `quantile_builder` above for the",
"# transpose within the partition) than it is to completely rework other",
"# internal methods. Basically we are returning the transpose of the object for",
"# correctness and cleanliness of the code.",
"if",
"axis",
"==",
"1",
":",
"q_index",
"=",
"new_columns",
"new_columns",
"=",
"pandas",
".",
"Float64Index",
"(",
"q",
")",
"result",
"=",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"q_index",
",",
"new_columns",
")",
"return",
"result",
".",
"transpose",
"(",
")",
"if",
"axis",
"==",
"1",
"else",
"result"
] |
Returns Manager containing quantiles along an axis for numeric columns.
Returns:
DataManager containing quantiles of original DataManager along an axis.
|
[
"Returns",
"Manager",
"containing",
"quantiles",
"along",
"an",
"axis",
"for",
"numeric",
"columns",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1744-L1800
|
train
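A plain-pandas sketch of the shapes quantile_for_list_of_values reconciles: a list-valued q puts the quantiles on the index, and the axis=1 case is handled via the transpose trick from quantile_builder (data values are illustrative):
import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [10, 20, 30, 40]})
q = [0.25, 0.75]
print(df.quantile(q))            # index = quantiles, columns = original columns
print(df.quantile(q, axis=1).T)  # per-row quantiles, transposed as quantile_builder does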
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.tail
|
def tail(self, n):
"""Returns the last n rows.
Args:
n: Integer containing the number of rows to return.
Returns:
DataManager containing the last n rows of the original DataManager.
"""
# See head for an explanation of the transposed behavior
if n < 0:
n = max(0, len(self.index) + n)
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(1, -n).transpose(),
self.index[-n:],
self.columns,
self._dtype_cache,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache
)
return result
|
python
|
def tail(self, n):
"""Returns the last n rows.
Args:
n: Integer containing the number of rows to return.
Returns:
DataManager containing the last n rows of the original DataManager.
"""
# See head for an explanation of the transposed behavior
if n < 0:
n = max(0, len(self.index) + n)
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(1, -n).transpose(),
self.index[-n:],
self.columns,
self._dtype_cache,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache
)
return result
|
[
"def",
"tail",
"(",
"self",
",",
"n",
")",
":",
"# See head for an explanation of the transposed behavior",
"if",
"n",
"<",
"0",
":",
"n",
"=",
"max",
"(",
"0",
",",
"len",
"(",
"self",
".",
"index",
")",
"+",
"n",
")",
"if",
"self",
".",
"_is_transposed",
":",
"result",
"=",
"self",
".",
"__constructor__",
"(",
"self",
".",
"data",
".",
"transpose",
"(",
")",
".",
"take",
"(",
"1",
",",
"-",
"n",
")",
".",
"transpose",
"(",
")",
",",
"self",
".",
"index",
"[",
"-",
"n",
":",
"]",
",",
"self",
".",
"columns",
",",
"self",
".",
"_dtype_cache",
",",
")",
"result",
".",
"_is_transposed",
"=",
"True",
"else",
":",
"result",
"=",
"self",
".",
"__constructor__",
"(",
"self",
".",
"data",
".",
"take",
"(",
"0",
",",
"-",
"n",
")",
",",
"self",
".",
"index",
"[",
"-",
"n",
":",
"]",
",",
"self",
".",
"columns",
",",
"self",
".",
"_dtype_cache",
")",
"return",
"result"
] |
Returns the last n rows.
Args:
n: Integer containing the number of rows to return.
Returns:
DataManager containing the last n rows of the original DataManager.
|
[
"Returns",
"the",
"last",
"n",
"rows",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1837-L1861
|
train
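A sketch of the negative-n normalization in tail, using a RangeIndex as a stand-in for self.index:
import pandas as pd
index = pd.RangeIndex(5)
n = -2  # tail(-2) means "all but the first two rows"
if n < 0:
    n = max(0, len(index) + n)
print(index[-n:])  # the last 3 labels, matching data.take(0, -n)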
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.front
|
def front(self, n):
"""Returns the first n columns.
Args:
n: Integer containing the number of columns to return.
Returns:
DataManager containing the first n columns of the original DataManager.
"""
new_dtypes = (
self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n]
)
# See head for an explanation of the transposed behavior
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(0, n).transpose(),
self.index,
self.columns[:n],
new_dtypes,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(1, n), self.index, self.columns[:n], new_dtypes
)
return result
|
python
|
def front(self, n):
"""Returns the first n columns.
Args:
n: Integer containing the number of columns to return.
Returns:
DataManager containing the first n columns of the original DataManager.
"""
new_dtypes = (
self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n]
)
# See head for an explanation of the transposed behavior
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(0, n).transpose(),
self.index,
self.columns[:n],
new_dtypes,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(1, n), self.index, self.columns[:n], new_dtypes
)
return result
|
[
"def",
"front",
"(",
"self",
",",
"n",
")",
":",
"new_dtypes",
"=",
"(",
"self",
".",
"_dtype_cache",
"if",
"self",
".",
"_dtype_cache",
"is",
"None",
"else",
"self",
".",
"_dtype_cache",
"[",
":",
"n",
"]",
")",
"# See head for an explanation of the transposed behavior",
"if",
"self",
".",
"_is_transposed",
":",
"result",
"=",
"self",
".",
"__constructor__",
"(",
"self",
".",
"data",
".",
"transpose",
"(",
")",
".",
"take",
"(",
"0",
",",
"n",
")",
".",
"transpose",
"(",
")",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
"[",
":",
"n",
"]",
",",
"new_dtypes",
",",
")",
"result",
".",
"_is_transposed",
"=",
"True",
"else",
":",
"result",
"=",
"self",
".",
"__constructor__",
"(",
"self",
".",
"data",
".",
"take",
"(",
"1",
",",
"n",
")",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
"[",
":",
"n",
"]",
",",
"new_dtypes",
")",
"return",
"result"
] |
Returns the first n columns.
Args:
n: Integer containing the number of columns to return.
Returns:
DataManager containing the first n columns of the original DataManager.
|
[
"Returns",
"the",
"first",
"n",
"columns",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1863-L1888
|
train
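A minimal eager analogue of front, with df.dtypes standing in for self._dtype_cache (which may be None in the real class) and iloc standing in for data.take(1, n):
import pandas as pd
df = pd.DataFrame({"a": [1], "b": [2.0], "c": ["x"]})
n = 2
dtype_cache = df.dtypes
new_dtypes = dtype_cache if dtype_cache is None else dtype_cache[:n]
print(df.iloc[:, :n])  # first n columns
print(new_dtypes)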
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.getitem_column_array
|
def getitem_column_array(self, key):
"""Get column data for target labels.
Args:
key: Target labels by which to retrieve data.
Returns:
A new QueryCompiler.
"""
# Convert to list for type checking
numeric_indices = list(self.columns.get_indexer_for(key))
# Internal indices is left blank and the internal
# `apply_func_to_select_indices` will do the conversion and pass it in.
def getitem(df, internal_indices=[]):
return df.iloc[:, internal_indices]
result = self.data.apply_func_to_select_indices(
0, getitem, numeric_indices, keep_remaining=False
)
# We can't just set the columns to key here because there may be
# multiple instances of a key.
new_columns = self.columns[numeric_indices]
new_dtypes = self.dtypes[numeric_indices]
return self.__constructor__(result, self.index, new_columns, new_dtypes)
|
python
|
def getitem_column_array(self, key):
"""Get column data for target labels.
Args:
key: Target labels by which to retrieve data.
Returns:
A new QueryCompiler.
"""
# Convert to list for type checking
numeric_indices = list(self.columns.get_indexer_for(key))
# Internal indices is left blank and the internal
# `apply_func_to_select_indices` will do the conversion and pass it in.
def getitem(df, internal_indices=[]):
return df.iloc[:, internal_indices]
result = self.data.apply_func_to_select_indices(
0, getitem, numeric_indices, keep_remaining=False
)
# We can't just set the columns to key here because there may be
# multiple instances of a key.
new_columns = self.columns[numeric_indices]
new_dtypes = self.dtypes[numeric_indices]
return self.__constructor__(result, self.index, new_columns, new_dtypes)
|
[
"def",
"getitem_column_array",
"(",
"self",
",",
"key",
")",
":",
"# Convert to list for type checking",
"numeric_indices",
"=",
"list",
"(",
"self",
".",
"columns",
".",
"get_indexer_for",
"(",
"key",
")",
")",
"# Internal indices is left blank and the internal",
"# `apply_func_to_select_indices` will do the conversion and pass it in.",
"def",
"getitem",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"return",
"df",
".",
"iloc",
"[",
":",
",",
"internal_indices",
"]",
"result",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices",
"(",
"0",
",",
"getitem",
",",
"numeric_indices",
",",
"keep_remaining",
"=",
"False",
")",
"# We can't just set the columns to key here because there may be",
"# multiple instances of a key.",
"new_columns",
"=",
"self",
".",
"columns",
"[",
"numeric_indices",
"]",
"new_dtypes",
"=",
"self",
".",
"dtypes",
"[",
"numeric_indices",
"]",
"return",
"self",
".",
"__constructor__",
"(",
"result",
",",
"self",
".",
"index",
",",
"new_columns",
",",
"new_dtypes",
")"
] |
Get column data for target labels.
Args:
key: Target labels by which to retrieve data.
Returns:
A new QueryCompiler.
|
[
"Get",
"column",
"data",
"for",
"target",
"labels",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1920-L1944
|
train
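Why getitem_column_array derives the result columns from positions rather than from key: duplicate labels match more than one position. A minimal sketch:
import pandas as pd
df = pd.DataFrame([[1, 2, 3]], columns=["a", "b", "a"])
numeric_indices = list(df.columns.get_indexer_for(["a"]))
print(numeric_indices)              # [0, 2]: both "a" columns are matched
print(df.iloc[:, numeric_indices])  # hence columns come from positions, not from `key`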
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.getitem_row_array
|
def getitem_row_array(self, key):
"""Get row data for target labels.
Args:
key: Target numeric indices by which to retrieve data.
Returns:
A new QueryCompiler.
"""
# Convert to list for type checking
key = list(key)
def getitem(df, internal_indices=[]):
return df.iloc[internal_indices]
result = self.data.apply_func_to_select_indices(
1, getitem, key, keep_remaining=False
)
# We can't just set the index to key here because there may be multiple
# instances of a key.
new_index = self.index[key]
return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
|
python
|
def getitem_row_array(self, key):
"""Get row data for target labels.
Args:
key: Target numeric indices by which to retrieve data.
Returns:
A new QueryCompiler.
"""
# Convert to list for type checking
key = list(key)
def getitem(df, internal_indices=[]):
return df.iloc[internal_indices]
result = self.data.apply_func_to_select_indices(
1, getitem, key, keep_remaining=False
)
# We can't just set the index to key here because there may be multiple
# instances of a key.
new_index = self.index[key]
return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
|
[
"def",
"getitem_row_array",
"(",
"self",
",",
"key",
")",
":",
"# Convert to list for type checking",
"key",
"=",
"list",
"(",
"key",
")",
"def",
"getitem",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"return",
"df",
".",
"iloc",
"[",
"internal_indices",
"]",
"result",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices",
"(",
"1",
",",
"getitem",
",",
"key",
",",
"keep_remaining",
"=",
"False",
")",
"# We can't just set the index to key here because there may be multiple",
"# instances of a key.",
"new_index",
"=",
"self",
".",
"index",
"[",
"key",
"]",
"return",
"self",
".",
"__constructor__",
"(",
"result",
",",
"new_index",
",",
"self",
".",
"columns",
",",
"self",
".",
"_dtype_cache",
")"
] |
Get row data for target labels.
Args:
key: Target numeric indices by which to retrieve data.
Returns:
A new QueryCompiler.
|
[
"Get",
"row",
"data",
"for",
"target",
"labels",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1946-L1967
|
train
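The same positional logic for rows: getitem_row_array selects with iloc and rebuilds the index from positions, which keeps duplicate labels intact. A minimal sketch:
import pandas as pd
df = pd.DataFrame({"a": [10, 20, 30]}, index=["x", "y", "x"])
key = [0, 2]
print(df.iloc[key])   # positional selection, as the getitem closure does
print(df.index[key])  # rebuilt index preserves the duplicate "x"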
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.setitem
|
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
def setitem(df, internal_indices=[]):
def _setitem():
if len(internal_indices) == 1:
if axis == 0:
df[df.columns[internal_indices[0]]] = value
else:
df.iloc[internal_indices[0]] = value
else:
if axis == 0:
df[df.columns[internal_indices]] = value
else:
df.iloc[internal_indices] = value
try:
_setitem()
except ValueError:
# TODO: This is a workaround for a pyarrow serialization issue
df = df.copy()
_setitem()
return df
if axis == 0:
numeric_indices = list(self.columns.get_indexer_for([key]))
else:
numeric_indices = list(self.index.get_indexer_for([key]))
prepared_func = self._prepare_method(setitem)
if is_list_like(value):
new_data = self.data.apply_func_to_select_indices_along_full_axis(
axis, prepared_func, numeric_indices, keep_remaining=True
)
else:
new_data = self.data.apply_func_to_select_indices(
axis, prepared_func, numeric_indices, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
|
python
|
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
def setitem(df, internal_indices=[]):
def _setitem():
if len(internal_indices) == 1:
if axis == 0:
df[df.columns[internal_indices[0]]] = value
else:
df.iloc[internal_indices[0]] = value
else:
if axis == 0:
df[df.columns[internal_indices]] = value
else:
df.iloc[internal_indices] = value
try:
_setitem()
except ValueError:
# TODO: This is a workaround for a pyarrow serialization issue
df = df.copy()
_setitem()
return df
if axis == 0:
numeric_indices = list(self.columns.get_indexer_for([key]))
else:
numeric_indices = list(self.index.get_indexer_for([key]))
prepared_func = self._prepare_method(setitem)
if is_list_like(value):
new_data = self.data.apply_func_to_select_indices_along_full_axis(
axis, prepared_func, numeric_indices, keep_remaining=True
)
else:
new_data = self.data.apply_func_to_select_indices(
axis, prepared_func, numeric_indices, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
|
[
"def",
"setitem",
"(",
"self",
",",
"axis",
",",
"key",
",",
"value",
")",
":",
"def",
"setitem",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"def",
"_setitem",
"(",
")",
":",
"if",
"len",
"(",
"internal_indices",
")",
"==",
"1",
":",
"if",
"axis",
"==",
"0",
":",
"df",
"[",
"df",
".",
"columns",
"[",
"internal_indices",
"[",
"0",
"]",
"]",
"]",
"=",
"value",
"else",
":",
"df",
".",
"iloc",
"[",
"internal_indices",
"[",
"0",
"]",
"]",
"=",
"value",
"else",
":",
"if",
"axis",
"==",
"0",
":",
"df",
"[",
"df",
".",
"columns",
"[",
"internal_indices",
"]",
"]",
"=",
"value",
"else",
":",
"df",
".",
"iloc",
"[",
"internal_indices",
"]",
"=",
"value",
"try",
":",
"_setitem",
"(",
")",
"except",
"ValueError",
":",
"# TODO: This is a workaround for a pyarrow serialization issue",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"_setitem",
"(",
")",
"return",
"df",
"if",
"axis",
"==",
"0",
":",
"numeric_indices",
"=",
"list",
"(",
"self",
".",
"columns",
".",
"get_indexer_for",
"(",
"[",
"key",
"]",
")",
")",
"else",
":",
"numeric_indices",
"=",
"list",
"(",
"self",
".",
"index",
".",
"get_indexer_for",
"(",
"[",
"key",
"]",
")",
")",
"prepared_func",
"=",
"self",
".",
"_prepare_method",
"(",
"setitem",
")",
"if",
"is_list_like",
"(",
"value",
")",
":",
"new_data",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices_along_full_axis",
"(",
"axis",
",",
"prepared_func",
",",
"numeric_indices",
",",
"keep_remaining",
"=",
"True",
")",
"else",
":",
"new_data",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices",
"(",
"axis",
",",
"prepared_func",
",",
"numeric_indices",
",",
"keep_remaining",
"=",
"True",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"self",
".",
"columns",
")"
] |
Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
|
[
"Set",
"the",
"column",
"defined",
"by",
"key",
"to",
"the",
"value",
"provided",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1969-L2014
|
train
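A minimal sketch of the axis == 0, single-index branch of the setitem closure, on a plain frame (key and value are illustrative):
import pandas as pd
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
key, value = "b", [9, 9]
internal_indices = list(df.columns.get_indexer_for([key]))
if len(internal_indices) == 1:
    df[df.columns[internal_indices[0]]] = value  # assign by resolved position
print(df)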
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.drop
|
def drop(self, index=None, columns=None):
"""Remove row data for target index and columns.
Args:
index: Target index to drop.
columns: Target columns to drop.
Returns:
A new QueryCompiler.
"""
if self._is_transposed:
return self.transpose().drop(index=columns, columns=index).transpose()
if index is None:
new_data = self.data
new_index = self.index
else:
def delitem(df, internal_indices=[]):
return df.drop(index=df.index[internal_indices])
numeric_indices = list(self.index.get_indexer_for(index))
new_data = self.data.apply_func_to_select_indices(
1, delitem, numeric_indices, keep_remaining=True
)
# We can't use self.index.drop with duplicate keys because in Pandas
# it throws an error.
new_index = self.index[~self.index.isin(index)]
if columns is None:
new_columns = self.columns
new_dtypes = self.dtypes
else:
def delitem(df, internal_indices=[]):
return df.drop(columns=df.columns[internal_indices])
numeric_indices = list(self.columns.get_indexer_for(columns))
new_data = new_data.apply_func_to_select_indices(
0, delitem, numeric_indices, keep_remaining=True
)
new_columns = self.columns[~self.columns.isin(columns)]
new_dtypes = self.dtypes.drop(columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
python
|
def drop(self, index=None, columns=None):
"""Remove row data for target index and columns.
Args:
index: Target index to drop.
columns: Target columns to drop.
Returns:
A new QueryCompiler.
"""
if self._is_transposed:
return self.transpose().drop(index=columns, columns=index).transpose()
if index is None:
new_data = self.data
new_index = self.index
else:
def delitem(df, internal_indices=[]):
return df.drop(index=df.index[internal_indices])
numeric_indices = list(self.index.get_indexer_for(index))
new_data = self.data.apply_func_to_select_indices(
1, delitem, numeric_indices, keep_remaining=True
)
# We can't use self.index.drop with duplicate keys because in Pandas
# it throws an error.
new_index = self.index[~self.index.isin(index)]
if columns is None:
new_columns = self.columns
new_dtypes = self.dtypes
else:
def delitem(df, internal_indices=[]):
return df.drop(columns=df.columns[internal_indices])
numeric_indices = list(self.columns.get_indexer_for(columns))
new_data = new_data.apply_func_to_select_indices(
0, delitem, numeric_indices, keep_remaining=True
)
new_columns = self.columns[~self.columns.isin(columns)]
new_dtypes = self.dtypes.drop(columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
[
"def",
"drop",
"(",
"self",
",",
"index",
"=",
"None",
",",
"columns",
"=",
"None",
")",
":",
"if",
"self",
".",
"_is_transposed",
":",
"return",
"self",
".",
"transpose",
"(",
")",
".",
"drop",
"(",
"index",
"=",
"columns",
",",
"columns",
"=",
"index",
")",
".",
"transpose",
"(",
")",
"if",
"index",
"is",
"None",
":",
"new_data",
"=",
"self",
".",
"data",
"new_index",
"=",
"self",
".",
"index",
"else",
":",
"def",
"delitem",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"return",
"df",
".",
"drop",
"(",
"index",
"=",
"df",
".",
"index",
"[",
"internal_indices",
"]",
")",
"numeric_indices",
"=",
"list",
"(",
"self",
".",
"index",
".",
"get_indexer_for",
"(",
"index",
")",
")",
"new_data",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices",
"(",
"1",
",",
"delitem",
",",
"numeric_indices",
",",
"keep_remaining",
"=",
"True",
")",
"# We can't use self.index.drop with duplicate keys because in Pandas",
"# it throws an error.",
"new_index",
"=",
"self",
".",
"index",
"[",
"~",
"self",
".",
"index",
".",
"isin",
"(",
"index",
")",
"]",
"if",
"columns",
"is",
"None",
":",
"new_columns",
"=",
"self",
".",
"columns",
"new_dtypes",
"=",
"self",
".",
"dtypes",
"else",
":",
"def",
"delitem",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"return",
"df",
".",
"drop",
"(",
"columns",
"=",
"df",
".",
"columns",
"[",
"internal_indices",
"]",
")",
"numeric_indices",
"=",
"list",
"(",
"self",
".",
"columns",
".",
"get_indexer_for",
"(",
"columns",
")",
")",
"new_data",
"=",
"new_data",
".",
"apply_func_to_select_indices",
"(",
"0",
",",
"delitem",
",",
"numeric_indices",
",",
"keep_remaining",
"=",
"True",
")",
"new_columns",
"=",
"self",
".",
"columns",
"[",
"~",
"self",
".",
"columns",
".",
"isin",
"(",
"columns",
")",
"]",
"new_dtypes",
"=",
"self",
".",
"dtypes",
".",
"drop",
"(",
"columns",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
",",
"new_dtypes",
")"
] |
Remove row data for target index and columns.
Args:
index: Target index to drop.
columns: Target columns to drop.
Returns:
A new QueryCompiler.
|
[
"Remove",
"row",
"data",
"for",
"target",
"index",
"and",
"columns",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2020-L2062
|
train
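Why drop builds the new index with a boolean mask rather than Index.drop: per the comment in the source, Index.drop misbehaves with duplicate keys. A minimal sketch of the mask approach:
import pandas as pd
idx = pd.Index(["a", "b", "a", "c"])
to_drop = ["a"]
print(idx[~idx.isin(to_drop)])  # Index(['b', 'c']); all occurrences removed safely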
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.insert
|
def insert(self, loc, column, value):
"""Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new PandasQueryCompiler with new data inserted.
"""
if is_list_like(value):
# TODO make work with another querycompiler object as `value`.
# This will require aligning the indices with a `reindex` and ensuring that
# the data is partitioned identically.
if isinstance(value, pandas.Series):
value = value.reindex(self.index)
value = list(value)
def insert(df, internal_indices=[]):
internal_idx = int(internal_indices[0])
old_index = df.index
df.index = pandas.RangeIndex(len(df.index))
df.insert(internal_idx, internal_idx, value, allow_duplicates=True)
df.columns = pandas.RangeIndex(len(df.columns))
df.index = old_index
return df
new_data = self.data.apply_func_to_select_indices_along_full_axis(
0, insert, loc, keep_remaining=True
)
new_columns = self.columns.insert(loc, column)
return self.__constructor__(new_data, self.index, new_columns)
|
python
|
def insert(self, loc, column, value):
"""Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new PandasQueryCompiler with new data inserted.
"""
if is_list_like(value):
# TODO make work with another querycompiler object as `value`.
# This will require aligning the indices with a `reindex` and ensuring that
# the data is partitioned identically.
if isinstance(value, pandas.Series):
value = value.reindex(self.index)
value = list(value)
def insert(df, internal_indices=[]):
internal_idx = int(internal_indices[0])
old_index = df.index
df.index = pandas.RangeIndex(len(df.index))
df.insert(internal_idx, internal_idx, value, allow_duplicates=True)
df.columns = pandas.RangeIndex(len(df.columns))
df.index = old_index
return df
new_data = self.data.apply_func_to_select_indices_along_full_axis(
0, insert, loc, keep_remaining=True
)
new_columns = self.columns.insert(loc, column)
return self.__constructor__(new_data, self.index, new_columns)
|
[
"def",
"insert",
"(",
"self",
",",
"loc",
",",
"column",
",",
"value",
")",
":",
"if",
"is_list_like",
"(",
"value",
")",
":",
"# TODO make work with another querycompiler object as `value`.",
"# This will require aligning the indices with a `reindex` and ensuring that",
"# the data is partitioned identically.",
"if",
"isinstance",
"(",
"value",
",",
"pandas",
".",
"Series",
")",
":",
"value",
"=",
"value",
".",
"reindex",
"(",
"self",
".",
"index",
")",
"value",
"=",
"list",
"(",
"value",
")",
"def",
"insert",
"(",
"df",
",",
"internal_indices",
"=",
"[",
"]",
")",
":",
"internal_idx",
"=",
"int",
"(",
"internal_indices",
"[",
"0",
"]",
")",
"old_index",
"=",
"df",
".",
"index",
"df",
".",
"index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
".",
"index",
")",
")",
"df",
".",
"insert",
"(",
"internal_idx",
",",
"internal_idx",
",",
"value",
",",
"allow_duplicates",
"=",
"True",
")",
"df",
".",
"columns",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"df",
".",
"columns",
")",
")",
"df",
".",
"index",
"=",
"old_index",
"return",
"df",
"new_data",
"=",
"self",
".",
"data",
".",
"apply_func_to_select_indices_along_full_axis",
"(",
"0",
",",
"insert",
",",
"loc",
",",
"keep_remaining",
"=",
"True",
")",
"new_columns",
"=",
"self",
".",
"columns",
".",
"insert",
"(",
"loc",
",",
"column",
")",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"self",
".",
"index",
",",
"new_columns",
")"
] |
Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new PandasQueryCompiler with new data inserted.
|
[
"Insert",
"new",
"column",
"data",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2071-L2103
|
train
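A sketch of the insert closure on one partition, assuming positional column labels inside the partition and real labels ["a", "b"] outside (both illustrative):
import pandas as pd
part = pd.DataFrame({0: [1, 2], 1: [3, 4]})
loc, column, value = 1, "new", [9, 9]
part.insert(loc, loc, value, allow_duplicates=True)  # positional name may collide
part.columns = pd.RangeIndex(len(part.columns))      # renumber, as the closure does
new_columns = pd.Index(["a", "b"]).insert(loc, column)  # real labels fixed up outside
print(part)
print(new_columns)  # Index(['a', 'new', 'b'])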
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.apply
|
def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if callable(func):
return self._callable_func(func, axis, *args, **kwargs)
elif isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs)
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs)
else:
pass
|
python
|
def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if callable(func):
return self._callable_func(func, axis, *args, **kwargs)
elif isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs)
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs)
else:
pass
|
[
"def",
"apply",
"(",
"self",
",",
"func",
",",
"axis",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"callable",
"(",
"func",
")",
":",
"return",
"self",
".",
"_callable_func",
"(",
"func",
",",
"axis",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"func",
",",
"dict",
")",
":",
"return",
"self",
".",
"_dict_func",
"(",
"func",
",",
"axis",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"is_list_like",
"(",
"func",
")",
":",
"return",
"self",
".",
"_list_like_func",
"(",
"func",
",",
"axis",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"pass"
] |
Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
|
[
"Apply",
"func",
"across",
"given",
"axis",
"."
] |
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2110-L2127
|
train
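A plain-pandas sketch of the three dispatch paths in apply; df.agg stands in for the dict and list-like branches here, since the real _dict_func/_list_like_func helpers are internal:
import pandas as pd
from pandas.api.types import is_list_like
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
for func in (sum, {"a": "sum"}, ["min", "max"]):
    if callable(func):
        print(df.apply(func, axis=0))  # _callable_func path
    elif isinstance(func, dict):
        print(df.agg(func))            # _dict_func path
    elif is_list_like(func):
        print(df.agg(func))            # _list_like_func path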
|
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._post_process_apply
|
def _post_process_apply(self, result_data, axis, try_scale=True):
"""Recompute the index after applying function.
Args:
result_data: a BaseFrameManager object.
axis: Target axis along which function was applied.
Returns:
A new PandasQueryCompiler.
"""
if try_scale:
try:
internal_index = self.compute_index(0, result_data, True)
except IndexError:
internal_index = self.compute_index(0, result_data, False)
try:
internal_columns = self.compute_index(1, result_data, True)
except IndexError:
internal_columns = self.compute_index(1, result_data, False)
else:
internal_index = self.compute_index(0, result_data, False)
internal_columns = self.compute_index(1, result_data, False)
if not axis:
index = internal_index
# We check if the two columns are the same length because if
# they are the same length, `self.columns` is the correct index.
# However, if the operation resulted in a different number of columns,
# we must use the derived columns from `self.compute_index()`.
if len(internal_columns) != len(self.columns):
columns = internal_columns
else:
columns = self.columns
else:
columns = internal_columns
# See above explanation for checking the lengths of columns
if len(internal_index) != len(self.index):
index = internal_index
else:
index = self.index
return self.__constructor__(result_data, index, columns)
|
python
|
Recompute the index after applying a function.
Args:
result_data: a BaseFrameManager object.
axis: Target axis along which function was applied.
Returns:
A new PandasQueryCompiler.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2129-L2168
|
train
|
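The length comparison in `_post_process_apply` is the whole decision rule: cached labels survive only if the result kept the same extent along that axis. A minimal plain-pandas sketch of that rule on hypothetical data (pandas stands in for Modin's partition layer here):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
result = df.apply(lambda col: col.sum())   # one value per column

derived = result.index                     # labels recomputed from the result
original = df.columns                      # labels cached before the apply
# Same length -> the cached labels are still valid; otherwise trust the result.
labels = original if len(derived) == len(original) else derived
print(list(labels))                        # ['a', 'b']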
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._dict_func
|
def _dict_func(self, func, axis, *args, **kwargs):
"""Apply function to certain indices across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if "axis" not in kwargs:
kwargs["axis"] = axis
if axis == 0:
index = self.columns
else:
index = self.index
func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}
def dict_apply_builder(df, func_dict={}):
# Sometimes `apply` can return a `Series`, but we require that internally
# all objects are `DataFrame`s.
return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))
result_data = self.data.apply_func_to_select_indices_along_full_axis(
axis, dict_apply_builder, func, keep_remaining=False
)
full_result = self._post_process_apply(result_data, axis)
return full_result
|
python
|
Apply function to certain indices across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2170-L2198
|
train
|
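The dict comprehension in `_dict_func` rewrites label keys into positional keys so each partition can look up its own slice without knowing the global labels. The same remap on plain pandas with hypothetical data (`Index.get_indexer_for` is the pandas method used above):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
func = {"b": "sum"}
# Label key 'b' becomes positional key 1, as in the method above.
positional = {
    int(idx): func[key] for key in func for idx in df.columns.get_indexer_for([key])
}
print(positional)  # {1: 'sum'}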
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._list_like_func
|
def _list_like_func(self, func, axis, *args, **kwargs):
"""Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
func_prepared = self._prepare_method(
lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))
)
new_data = self._map_across_full_axis(axis, func_prepared)
# When the function is list-like, the function names become the index/columns
new_index = (
[f if isinstance(f, string_types) else f.__name__ for f in func]
if axis == 0
else self.index
)
new_columns = (
[f if isinstance(f, string_types) else f.__name__ for f in func]
if axis == 1
else self.columns
)
return self.__constructor__(new_data, new_index, new_columns)
|
python
|
Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2200-L2225
|
train
|
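Why the function names become the new labels is easiest to see on plain pandas with hypothetical data (`string_types` above is the six compatibility alias for `str`):

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
funcs = [np.sum, "mean"]
out = df.apply(funcs, axis=0)
# The result's row labels are derived from the functions, as in the list above.
labels = [f if isinstance(f, str) else f.__name__ for f in funcs]
print(labels)           # ['sum', 'mean']
print(list(out.index))  # ['sum', 'mean']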
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._callable_func
|
def _callable_func(self, func, axis, *args, **kwargs):
"""Apply callable functions across given axis.
Args:
func: The functions to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
def callable_apply_builder(df, axis=0):
if not axis:
df.index = index
df.columns = pandas.RangeIndex(len(df.columns))
else:
df.columns = index
df.index = pandas.RangeIndex(len(df.index))
result = df.apply(func, axis=axis, *args, **kwargs)
return result
index = self.index if not axis else self.columns
func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)
result_data = self._map_across_full_axis(axis, func_prepared)
return self._post_process_apply(result_data, axis)
|
python
|
Apply callable functions across given axis.
Args:
func: The functions to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2227-L2251
|
train
|
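The builder in `_callable_func` gives each block its true labels along the axis being applied over and throwaway positional labels on the other axis, since a block only knows its own slice of the frame. The trick in miniature, on a hypothetical block:

import pandas as pd

block = pd.DataFrame([[1, 2], [3, 4]])
block.index = pd.Index(["x", "y"])                 # real labels for this axis
block.columns = pd.RangeIndex(len(block.columns))  # positional stand-ins
print(block.apply(lambda row: row.sum(), axis=1))  # x: 3, y: 7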
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler._manual_repartition
|
def _manual_repartition(self, axis, repartition_func, **kwargs):
"""This method applies all manual partitioning functions.
Args:
axis: The axis to shuffle data along.
repartition_func: The function used to repartition data.
Returns:
A `BaseFrameManager` object.
"""
func = self._prepare_method(repartition_func, **kwargs)
return self.data.manual_shuffle(axis, func)
|
python
|
This method applies all manual partitioning functions.
Args:
axis: The axis to shuffle data along.
repartition_func: The function used to repartition data.
Returns:
A `BaseFrameManager` object.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2259-L2270
|
train
|
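A toy analogue of what a repartition ultimately does, with plain pandas row slices standing in for `BaseFrameManager` partitions (hypothetical sizes; this is not Modin's API): split into blocks, then rebuild with a different blocking.

import pandas as pd

df = pd.DataFrame({"a": range(6)})
blocks = [df.iloc[i:i + 2] for i in range(0, len(df), 2)]           # 3 row blocks
whole = pd.concat(blocks)                                           # stitch back
reblocked = [whole.iloc[i:i + 3] for i in range(0, len(whole), 3)]  # 2 row blocks
print([len(b) for b in reblocked])  # [3, 3]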
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.get_dummies
|
def get_dummies(self, columns, **kwargs):
"""Convert categorical variables to dummy variables for certain columns.
Args:
columns: The columns to convert.
Returns:
A new QueryCompiler.
"""
cls = type(self)
# `columns` as None does not mean all columns, by default it means only
# non-numeric columns.
if columns is None:
columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]
# If we aren't computing any dummies, there is no need for any
# remote compute.
if len(columns) == 0:
return self.copy()
elif not is_list_like(columns):
columns = [columns]
# We have to do one of two things in order to ensure the final columns
# are correct. Our first option is to map over the data and assign the
# columns in a separate pass. That is what we have chosen to do here.
# This is not as efficient, but it requires less information from the
# lower layers and does not break any of our internal requirements. The
# second option is that we assign the columns as a part of the
# `get_dummies` call. This requires knowledge of the length of each
# partition, and breaks some of our assumptions and separation of
# concerns.
def set_columns(df, columns):
df.columns = columns
return df
set_cols = self.columns
columns_applied = self._map_across_full_axis(
1, lambda df: set_columns(df, set_cols)
)
# In some cases, we are mapping across all of the data. It is more
# efficient if we are mapping over all of the data to do it this way
# than it would be to reuse the code for specific columns.
if len(columns) == len(self.columns):
def get_dummies_builder(df):
if df is not None:
if not df.empty:
return pandas.get_dummies(df, **kwargs)
else:
return pandas.DataFrame([])
func = self._prepare_method(lambda df: get_dummies_builder(df))
new_data = columns_applied.map_across_full_axis(0, func)
untouched_data = None
else:
def get_dummies_builder(df, internal_indices=[]):
return pandas.get_dummies(
df.iloc[:, internal_indices], columns=None, **kwargs
)
numeric_indices = list(self.columns.get_indexer_for(columns))
new_data = columns_applied.apply_func_to_select_indices_along_full_axis(
0, get_dummies_builder, numeric_indices, keep_remaining=False
)
untouched_data = self.drop(columns=columns)
# Since we set the columns in the beginning, we can just extract them
        # here. Fortunately, no extra steps are required for a correct
# column index.
final_columns = self.compute_index(1, new_data, False)
# If we mapped over all the data we are done. If not, we need to
# prepend the `new_data` with the raw data from the columns that were
# not selected.
if len(columns) != len(self.columns):
new_data = untouched_data.data.concat(1, new_data)
final_columns = untouched_data.columns.append(pandas.Index(final_columns))
return cls(new_data, self.index, final_columns)
|
python
|
Convert categorical variables to dummy variables for certain columns.
Args:
columns: The columns to convert.
Returns:
A new QueryCompiler.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2327-L2402
|
train
|
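The partial-columns branch above boils down to this plain-pandas flow on hypothetical data: encode only the selected columns, then put the untouched columns back in front.

import pandas as pd

df = pd.DataFrame({"num": [1, 2], "cat": ["x", "y"]})
columns = ["cat"]
encoded = pd.get_dummies(df[columns])      # only the selected columns
untouched = df.drop(columns=columns)       # the rest, unchanged
result = pd.concat([untouched, encoded], axis=1)
print(list(result.columns))                # ['num', 'cat_x', 'cat_y']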
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompiler.global_idx_to_numeric_idx
|
def global_idx_to_numeric_idx(self, axis, indices):
"""
Note: this function involves making copies of the index in memory.
Args:
axis: Axis to extract indices.
indices: Indices to convert to numerical.
Returns:
An Index object.
"""
assert axis in ["row", "col", "columns"]
if axis == "row":
return pandas.Index(
pandas.Series(np.arange(len(self.index)), index=self.index)
.loc[indices]
.values
)
elif axis in ["col", "columns"]:
return pandas.Index(
pandas.Series(np.arange(len(self.columns)), index=self.columns)
.loc[indices]
.values
)
|
python
|
Note: this function involves making copies of the index in memory.
Args:
axis: Axis to extract indices.
indices: Indices to convert to numerical.
Returns:
An Index object.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2442-L2465
|
train
|
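The conversion itself, run directly on a hypothetical index; the Series of positions below is also the in-memory copy the docstring's note warns about:

import numpy as np
import pandas as pd

index = pd.Index(["a", "b", "c"])
# Positions 0..n-1 keyed by label; .loc then picks out the requested labels.
positions = pd.Index(
    pd.Series(np.arange(len(index)), index=index).loc[["c", "a"]].values
)
print(list(positions))  # [2, 0]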
modin-project/modin
|
modin/backends/pandas/query_compiler.py
|
PandasQueryCompilerView._get_data
|
def _get_data(self) -> BaseFrameManager:
"""Perform the map step
Returns:
A BaseFrameManager object.
"""
def iloc(partition, row_internal_indices, col_internal_indices):
return partition.iloc[row_internal_indices, col_internal_indices]
masked_data = self.parent_data.apply_func_to_indices_both_axis(
func=iloc,
row_indices=self.index_map.values,
col_indices=self.columns_map.values,
lazy=False,
keep_remaining=False,
)
return masked_data
|
python
|
Perform the map step
Returns:
A BaseFrameManager object.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L2528-L2545
|
train
|
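Per partition, the mask is just a two-axis positional `iloc`, as on this hypothetical block:

import pandas as pd

partition = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
row_internal_indices, col_internal_indices = [0, 2], [1]
print(partition.iloc[row_internal_indices, col_internal_indices])
#    1
# 0  2
# 2  8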
modin-project/modin
|
modin/engines/python/pandas_on_python/frame/partition_manager.py
|
PythonFrameManager.block_lengths
|
def block_lengths(self):
"""Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed.
"""
if self._lengths_cache is None:
# The first column will have the correct lengths. We have an
# invariant that requires that all blocks be the same length in a
# row of blocks.
self._lengths_cache = (
[obj.length() for obj in self._partitions_cache.T[0]]
if len(self._partitions_cache.T) > 0
else []
)
return self._lengths_cache
|
python
|
Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/python/pandas_on_python/frame/partition_manager.py#L29-L44
|
train
|
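A toy version of the invariant the comment relies on, with a hypothetical 2x2 grid of pandas blocks standing in for the partition cache: every block in a row of blocks has the same height, so the first column of blocks already carries all the lengths.

import numpy as np
import pandas as pd

grid = np.empty((2, 2), dtype=object)  # 2x2 grid of block partitions
for i, rows in enumerate((4, 1)):
    for j, cols in enumerate((2, 3)):
        grid[i, j] = pd.DataFrame(np.zeros((rows, cols)))
lengths = [len(block) for block in grid.T[0]] if len(grid.T) > 0 else []
print(lengths)  # [4, 1]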
modin-project/modin
|
modin/engines/python/pandas_on_python/frame/partition_manager.py
|
PythonFrameManager.block_widths
|
def block_widths(self):
"""Gets the widths of the blocks.
Note: This works with the property structure `_widths_cache` to avoid
having to recompute these values each time they are needed.
"""
if self._widths_cache is None:
# The first column will have the correct lengths. We have an
# invariant that requires that all blocks be the same width in a
# column of blocks.
self._widths_cache = (
[obj.width() for obj in self._partitions_cache[0]]
if len(self._partitions_cache) > 0
else []
)
return self._widths_cache
|
python
|
Gets the widths of the blocks.
Note: This works with the property structure `_widths_cache` to avoid
having to recompute these values each time they are needed.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/python/pandas_on_python/frame/partition_manager.py#L47-L62
|
train
|
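The width cache is the transpose of the same idea: every block in a column of blocks shares a width, so the first row of blocks suffices (same hypothetical grid construction as the sketch above):

import numpy as np
import pandas as pd

grid = np.empty((2, 2), dtype=object)
for i, rows in enumerate((4, 1)):
    for j, cols in enumerate((2, 3)):
        grid[i, j] = pd.DataFrame(np.zeros((rows, cols)))
widths = [block.shape[1] for block in grid[0]] if len(grid) > 0 else []
print(widths)  # [2, 3]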
modin-project/modin
|
modin/pandas/base.py
|
BasePandasDataset._update_inplace
|
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
|
python
|
Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L74-L82
|
train
|
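The ordering is the point: swap the new compiler in first, then free the old one, so the object never holds a reference to released data. The same pattern on a hypothetical resource class (not Modin's API):

class Resource:
    def free(self):
        print("released")

class Holder:
    def __init__(self, resource):
        self._resource = resource

    def update_inplace(self, new_resource):
        old = self._resource
        self._resource = new_resource  # swap first
        old.free()                     # then release what was replaced

h = Holder(Resource())
h.update_inplace(Resource())  # prints "released"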
modin-project/modin
|
modin/pandas/base.py
|
BasePandasDataset._validate_other
|
def _validate_other(
self,
other,
axis,
numeric_only=False,
numeric_or_time_only=False,
numeric_or_object_only=False,
comparison_dtypes_only=False,
):
"""Helper method to check validity of other in inter-df operations"""
axis = self._get_axis_number(axis) if axis is not None else 1
result = other
if isinstance(other, BasePandasDataset):
return other._query_compiler
elif is_list_like(other):
if axis == 0:
if len(other) != len(self._query_compiler.index):
raise ValueError(
"Unable to coerce to Series, length must be {0}: "
"given {1}".format(len(self._query_compiler.index), len(other))
)
else:
if len(other) != len(self._query_compiler.columns):
raise ValueError(
"Unable to coerce to Series, length must be {0}: "
"given {1}".format(
len(self._query_compiler.columns), len(other)
)
)
if hasattr(other, "dtype"):
other_dtypes = [other.dtype] * len(other)
else:
other_dtypes = [type(x) for x in other]
else:
other_dtypes = [
type(other)
for _ in range(
len(self._query_compiler.index)
if axis
else len(self._query_compiler.columns)
)
]
# Do dtype checking
if numeric_only:
if not all(
is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation on non-numeric dtypes")
elif numeric_or_object_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype))
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation non-numeric dtypes")
elif comparison_dtypes_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
or is_dtype_equal(self_dtype, other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
elif numeric_or_time_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
return result
|
python
|
Helper method to check validity of other in inter-df operations
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L84-L165
|
train
|
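The `numeric_only` branch reduces to a pairwise dtype check between the frame and the coerced `other`. A plain-pandas sketch on hypothetical data (`is_numeric_dtype` is the pandas helper used above):

import pandas as pd
from pandas.api.types import is_numeric_dtype

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
other_dtypes = [type(x) for x in [10, 20]]  # list-like 'other', one per column
ok = all(
    is_numeric_dtype(d) and is_numeric_dtype(o)
    for d, o in zip(df.dtypes, other_dtypes)
)
print(ok)  # False: column 'b' is object, so the operation would be rejected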
modin-project/modin
|
modin/pandas/base.py
|
BasePandasDataset._default_to_pandas
|
def _default_to_pandas(self, op, *args, **kwargs):
"""Helper method to use default pandas function"""
empty_self_str = "" if not self.empty else " for empty DataFrame"
ErrorMessage.default_to_pandas(
"`{}.{}`{}".format(
self.__name__,
op if isinstance(op, str) else op.__name__,
empty_self_str,
)
)
if callable(op):
result = op(self._to_pandas(), *args, **kwargs)
elif isinstance(op, str):
# The inner `getattr` is ensuring that we are treating this object (whether
# it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr`
# will get the operation (`op`) from the pandas version of the class and run
# it on the object after we have converted it to pandas.
result = getattr(getattr(pandas, self.__name__), op)(
self._to_pandas(), *args, **kwargs
)
        # SparseDataFrames cannot be serialized by Arrow and cause problems for Modin.
# For now we will use pandas.
if isinstance(result, type(self)) and not isinstance(
result, (pandas.SparseDataFrame, pandas.SparseSeries)
):
return self._create_or_update_from_compiler(
result, inplace=kwargs.get("inplace", False)
)
elif isinstance(result, pandas.DataFrame):
from .dataframe import DataFrame
return DataFrame(result)
elif isinstance(result, pandas.Series):
from .series import Series
return Series(result)
else:
try:
if (
isinstance(result, (list, tuple))
and len(result) == 2
and isinstance(result[0], pandas.DataFrame)
):
# Some operations split the DataFrame into two (e.g. align). We need to wrap
# both of the returned results
if isinstance(result[1], pandas.DataFrame):
second = self.__constructor__(result[1])
else:
second = result[1]
return self.__constructor__(result[0]), second
else:
return result
except TypeError:
return result
|
python
|
Helper method to use default pandas function
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L181-L234
|
train
|
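The core of the fallback is the nested `getattr`: the inner call resolves the pandas class by name, the outer one pulls the requested method off it. Demonstrated directly with hypothetical names:

import pandas as pd

class_name, op = "DataFrame", "head"
method = getattr(getattr(pd, class_name), op)  # pandas.DataFrame.head, unbound
df = pd.DataFrame({"a": range(5)})
print(method(df, 2).equals(df.head(2)))        # True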
modin-project/modin
|
modin/pandas/base.py
|
BasePandasDataset.abs
|
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return self.__constructor__(query_compiler=self._query_compiler.abs())
|
python
|
Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L246-L253
|
train
|
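The delegated behavior, checked on plain pandas with hypothetical data; the dtype validation above is what turns a mixed-dtype frame into a TypeError instead:

import pandas as pd

df = pd.DataFrame({"a": [-1, 2], "b": [3.5, -4.0]})
print(df.abs())  # all-numeric, so abs maps over every column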
modin-project/modin
|
modin/pandas/base.py
|
BasePandasDataset.add
|
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
            other: What to add to this DataFrame.
            axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
return self._binary_op(
"add", other, axis=axis, level=level, fill_value=fill_value
)
|
python
|
Add this DataFrame to another or a scalar/list.
Args:
other: What to add to this DataFrame.
axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
|
5b77d242596560c646b8405340c9ce64acb183cb
|
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L273-L288
|
train
|
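As the docstring says, `axis` only matters for a list-like `other`; it selects which axis the list is aligned against. On plain pandas with hypothetical data:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(df.add([10, 20], axis="columns"))  # 10 -> column 'a', 20 -> column 'b'
print(df.add([10, 20], axis="index"))    # 10 -> row 0, 20 -> row 1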