after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(result.index[j : i + 1], reverse=not ascending):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index, columns=["__reduced__"])
|
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(result.index[j : i + 1], reverse=not ascending):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def applyier(df, **kwargs):
result = df.apply(func, **applyier_kwargs)
return result.set_axis(df.axes[axis ^ 1], axis=0)
|
def applyier(df, other):
concated = pandas.concat([df, other], axis=1, copy=False)
result = concated.pivot_table(
index=index,
values=values if len(values) > 0 else None,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
# in that case Pandas transposes the result of `pivot_table`,
# transposing it back to be consistent with column axis values along
# different partitions
if len(index) == 0 and len(columns) > 0:
result = result.T
return result
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def caller(self, *args, **kwargs):
# If `numeric_only` is None then we don't know what columns/indices will
# be dropped at the result of reduction function, and so can't preserve labels
preserve_index = kwargs.get("numeric_only", None) is not None
return applier.register(*funcs, preserve_index=preserve_index)(
self, *args, **kwargs
)
|
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def call(cls, fold_function, **call_kwds):
def caller(query_compiler, *args, **kwargs):
axis = call_kwds.get("axis", kwargs.get("axis"))
return query_compiler.__constructor__(
query_compiler._modin_frame._fold(
cls.validate_axis(axis),
lambda x: fold_function(x, *args, **kwargs),
)
)
return caller
|
def call(cls, fold_function, **call_kwds):
def caller(query_compiler, *args, **kwargs):
return query_compiler.__constructor__(
query_compiler._modin_frame._fold(
call_kwds.get("axis") if "axis" in call_kwds else kwargs.get("axis"),
lambda x: fold_function(x, *args, **kwargs),
)
)
return caller
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def caller(query_compiler, *args, **kwargs):
axis = call_kwds.get("axis", kwargs.get("axis"))
return query_compiler.__constructor__(
query_compiler._modin_frame._fold(
cls.validate_axis(axis),
lambda x: fold_function(x, *args, **kwargs),
)
)
|
def caller(query_compiler, *args, **kwargs):
return query_compiler.__constructor__(
query_compiler._modin_frame._fold(
call_kwds.get("axis") if "axis" in call_kwds else kwargs.get("axis"),
lambda x: fold_function(x, *args, **kwargs),
)
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def call(cls, map_function, reduce_function, **call_kwds):
def caller(query_compiler, *args, **kwargs):
preserve_index = call_kwds.pop("preserve_index", True)
axis = call_kwds.get("axis", kwargs.get("axis"))
return query_compiler.__constructor__(
query_compiler._modin_frame._map_reduce(
cls.validate_axis(axis),
lambda x: map_function(x, *args, **kwargs),
lambda y: reduce_function(y, *args, **kwargs),
preserve_index=preserve_index,
)
)
return caller
|
def call(cls, map_function, reduce_function, **call_kwds):
def caller(query_compiler, *args, **kwargs):
preserve_index = call_kwds.pop("preserve_index", True)
return query_compiler.__constructor__(
query_compiler._modin_frame._map_reduce(
call_kwds.get("axis") if "axis" in call_kwds else kwargs.get("axis"),
lambda x: map_function(x, *args, **kwargs),
lambda y: reduce_function(y, *args, **kwargs),
preserve_index=preserve_index,
)
)
return caller
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def caller(query_compiler, *args, **kwargs):
preserve_index = call_kwds.pop("preserve_index", True)
axis = call_kwds.get("axis", kwargs.get("axis"))
return query_compiler.__constructor__(
query_compiler._modin_frame._map_reduce(
cls.validate_axis(axis),
lambda x: map_function(x, *args, **kwargs),
lambda y: reduce_function(y, *args, **kwargs),
preserve_index=preserve_index,
)
)
|
def caller(query_compiler, *args, **kwargs):
preserve_index = call_kwds.pop("preserve_index", True)
return query_compiler.__constructor__(
query_compiler._modin_frame._map_reduce(
call_kwds.get("axis") if "axis" in call_kwds else kwargs.get("axis"),
lambda x: map_function(x, *args, **kwargs),
lambda y: reduce_function(y, *args, **kwargs),
preserve_index=preserve_index,
)
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def register(cls, map_function, reduce_function=None, **kwargs):
if reduce_function is None:
reduce_function = map_function
return cls.call(map_function, reduce_function, **kwargs)
|
def register(cls, map_function, reduce_function, **kwargs):
return cls.call(map_function, reduce_function, **kwargs)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def call(cls, reduction_function, **call_kwds):
def caller(query_compiler, *args, **kwargs):
preserve_index = call_kwds.pop("preserve_index", True)
axis = call_kwds.get("axis", kwargs.get("axis"))
return query_compiler.__constructor__(
query_compiler._modin_frame._fold_reduce(
cls.validate_axis(axis),
lambda x: reduction_function(x, *args, **kwargs),
preserve_index=preserve_index,
)
)
return caller
|
def call(cls, reduction_function, **call_kwds):
def caller(query_compiler, *args, **kwargs):
return query_compiler.__constructor__(
query_compiler._modin_frame._fold_reduce(
call_kwds.get("axis") if "axis" in call_kwds else kwargs.get("axis"),
lambda x: reduction_function(x, *args, **kwargs),
)
)
return caller
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def caller(query_compiler, *args, **kwargs):
preserve_index = call_kwds.pop("preserve_index", True)
axis = call_kwds.get("axis", kwargs.get("axis"))
return query_compiler.__constructor__(
query_compiler._modin_frame._fold_reduce(
cls.validate_axis(axis),
lambda x: reduction_function(x, *args, **kwargs),
preserve_index=preserve_index,
)
)
|
def caller(query_compiler, *args, **kwargs):
return query_compiler.__constructor__(
query_compiler._modin_frame._fold_reduce(
call_kwds.get("axis") if "axis" in call_kwds else kwargs.get("axis"),
lambda x: reduction_function(x, *args, **kwargs),
)
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _validate_axis_equality(self, axis: int, force: bool = False):
"""
Validates internal and external indices of modin_frame at the specified axis.
Parameters
----------
axis : 0 or 1
Axis to validate indices along (0 - index, 1 - columns).
force : boolean, default False
Whether to update external indices with internal if their lengths
do not match or raise an exception in that case.
"""
internal_axis = self._compute_axis_labels(axis)
self_axis = self.axes[axis]
is_equals = self_axis.equals(internal_axis)
if (
isinstance(self_axis, DatetimeIndex)
and isinstance(internal_axis, DatetimeIndex)
and is_equals
):
if getattr(self_axis, "freq") != getattr(internal_axis, "freq"):
is_equals = False
force = True
is_lenghts_matches = len(self_axis) == len(internal_axis)
if not is_equals:
if not is_lenghts_matches:
if axis:
self._column_widths_cache = None
else:
self._row_lengths_cache = None
new_axis = self_axis if is_lenghts_matches and not force else internal_axis
self._set_axis(axis, new_axis, cache_only=not is_lenghts_matches)
|
def _validate_axis_equality(self, axis: int, force: bool = False):
"""
Validates internal and external indices of modin_frame at the specified axis.
Parameters
----------
axis : 0 or 1
Axis to validate indices along (0 - index, 1 - columns).
force : boolean, default False
Whether to update external indices with internal if their lengths
do not match or raise an exception in that case.
"""
internal_axis = self._frame_mgr_cls.get_indices(
axis, self._partitions, lambda df: df.axes[axis]
)
self_axis = self.axes[axis]
is_equals = self_axis.equals(internal_axis)
if (
isinstance(self_axis, DatetimeIndex)
and isinstance(internal_axis, DatetimeIndex)
and is_equals
):
if getattr(self_axis, "freq") != getattr(internal_axis, "freq"):
is_equals = False
force = True
is_lenghts_matches = len(self_axis) == len(internal_axis)
if not is_equals:
if not is_lenghts_matches:
if axis:
self._column_widths_cache = None
else:
self._row_lengths_cache = None
new_axis = self_axis if is_lenghts_matches and not force else internal_axis
self._set_axis(axis, new_axis, cache_only=not is_lenghts_matches)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _compute_map_reduce_metadata(self, axis, new_parts, preserve_index=True):
"""
Computes metadata for the result of reduce function.
Parameters
----------
axis: int,
The axis on which reduce function was applied
new_parts: numpy 2D array
Partitions with the result of applied function
preserve_index: boolean
The flag to preserve labels for the reduced axis.
Returns
-------
BasePandasFrame
Pandas series containing the reduced data.
"""
new_axes, new_axes_lengths = [0, 0], [0, 0]
new_axes[axis] = ["__reduced__"]
new_axes[axis ^ 1] = (
self.axes[axis ^ 1]
if preserve_index
else self._compute_axis_labels(axis ^ 1, new_parts)
)
new_axes_lengths[axis] = [1]
new_axes_lengths[axis ^ 1] = (
self._axes_lengths[axis ^ 1] if preserve_index else None
)
if (axis == 0 or self._dtypes is None) and preserve_index:
new_dtypes = self._dtypes
elif preserve_index:
new_dtypes = pandas.Series(
[find_common_type(self.dtypes.values)], index=new_axes[axis]
)
else:
new_dtypes = None
return self.__constructor__(
new_parts,
*new_axes,
*new_axes_lengths,
new_dtypes,
validate_axes="reduced",
)
|
def _compute_map_reduce_metadata(self, axis, new_parts):
if axis == 0:
columns = self.columns
index = ["__reduced__"]
new_lengths = [1]
new_widths = self._column_widths
new_dtypes = self._dtypes
else:
columns = ["__reduced__"]
index = self.index
new_lengths = self._row_lengths
new_widths = [1]
if self._dtypes is not None:
new_dtypes = pandas.Series(
np.full(1, find_common_type(self.dtypes.values)),
index=["__reduced__"],
)
else:
new_dtypes = self._dtypes
return self.__constructor__(
new_parts,
index,
columns,
new_lengths,
new_widths,
new_dtypes,
validate_axes="reduced",
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _fold_reduce(self, axis, func, preserve_index=True):
"""
Apply function that reduce Manager to series but require knowledge of full axis.
Parameters
----------
axis : 0 or 1
The axis to apply the function to (0 - index, 1 - columns).
func : callable
The function to reduce the Manager by. This function takes in a Manager.
preserve_index : boolean
The flag to preserve labels for the reduced axis.
Returns
-------
BasePandasFrame
Pandas series containing the reduced data.
"""
func = self._build_mapreduce_func(axis, func)
new_parts = self._frame_mgr_cls.map_axis_partitions(axis, self._partitions, func)
return self._compute_map_reduce_metadata(
axis, new_parts, preserve_index=preserve_index
)
|
def _fold_reduce(self, axis, func):
"""
Apply function that reduce Manager to series but require knowledge of full axis.
Parameters
----------
axis : 0 or 1
The axis to apply the function to (0 - index, 1 - columns).
func : callable
The function to reduce the Manager by. This function takes in a Manager.
Returns
-------
BasePandasFrame
Pandas series containing the reduced data.
"""
func = self._build_mapreduce_func(axis, func)
new_parts = self._frame_mgr_cls.map_axis_partitions(axis, self._partitions, func)
return self._compute_map_reduce_metadata(axis, new_parts)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _map_reduce(self, axis, map_func, reduce_func=None, preserve_index=True):
"""
Apply function that will reduce the data to a Pandas Series.
Parameters
----------
axis : 0 or 1
0 for columns and 1 for rows.
map_func : callable
Callable function to map the dataframe.
reduce_func : callable
Callable function to reduce the dataframe.
If none, then apply map_func twice. Default is None.
preserve_index : boolean
The flag to preserve index for default behavior
map and reduce operations. Default is True.
Returns
-------
BasePandasFrame
A new dataframe.
"""
map_func = self._build_mapreduce_func(axis, map_func)
if reduce_func is None:
reduce_func = map_func
else:
reduce_func = self._build_mapreduce_func(axis, reduce_func)
map_parts = self._frame_mgr_cls.lazy_map_partitions(self._partitions, map_func)
reduce_parts = self._frame_mgr_cls.map_axis_partitions(axis, map_parts, reduce_func)
return self._compute_map_reduce_metadata(
axis, reduce_parts, preserve_index=preserve_index
)
|
def _map_reduce(self, axis, map_func, reduce_func=None, preserve_index=True):
"""
Apply function that will reduce the data to a Pandas Series.
Parameters
----------
axis : 0 or 1
0 for columns and 1 for rows.
map_func : callable
Callable function to map the dataframe.
reduce_func : callable
Callable function to reduce the dataframe.
If none, then apply map_func twice. Default is None.
preserve_index : boolean
The flag to preserve index for default behavior
map and reduce operations. Default is True.
Returns
-------
BasePandasFrame
A new dataframe.
"""
map_func = self._build_mapreduce_func(axis, map_func)
if reduce_func is None:
reduce_func = map_func
else:
reduce_func = self._build_mapreduce_func(axis, reduce_func)
map_parts = self._frame_mgr_cls.lazy_map_partitions(self._partitions, map_func)
reduce_parts = self._frame_mgr_cls.map_axis_partitions(axis, map_parts, reduce_func)
if preserve_index:
return self._compute_map_reduce_metadata(axis, reduce_parts)
else:
if axis == 0:
new_index = ["__reduced__"]
new_columns = self._frame_mgr_cls.get_indices(
1, reduce_parts, lambda df: df.columns
)
else:
new_index = self._frame_mgr_cls.get_indices(
0, reduce_parts, lambda df: df.index
)
new_columns = ["__reduced__"]
return self.__constructor__(
reduce_parts, new_index, new_columns, validate_axes="reduced"
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _map(self, func, dtypes=None, validate_index=False, validate_columns=False):
"""Perform a function that maps across the entire dataset.
Pamareters
----------
func : callable
The function to apply.
dtypes :
(optional) The data types for the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
validate_index : bool, (default False)
Is index validation required after performing `func` on partitions.
Returns
-------
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.lazy_map_partitions(self._partitions, func)
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series(
[np.dtype(dtypes)] * len(self.columns), index=self.columns
)
axis_validate_mask = [validate_index, validate_columns]
new_axes = [
self._compute_axis_labels(axis, new_partitions)
if should_validate
else self.axes[axis]
for axis, should_validate in enumerate(axis_validate_mask)
]
new_lengths = [
self._axes_lengths[axis]
if len(new_axes[axis]) == len(self.axes[axis])
else None
for axis in [0, 1]
]
return self.__constructor__(
new_partitions,
*new_axes,
*new_lengths,
dtypes=dtypes,
)
|
def _map(self, func, dtypes=None, validate_index=False, validate_columns=False):
"""Perform a function that maps across the entire dataset.
Pamareters
----------
func : callable
The function to apply.
dtypes :
(optional) The data types for the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
validate_index : bool, (default False)
Is index validation required after performing `func` on partitions.
Returns
-------
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.lazy_map_partitions(self._partitions, func)
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series(
[np.dtype(dtypes)] * len(self.columns), index=self.columns
)
if validate_index:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
else:
new_index = self.index
if len(new_index) != len(self.index):
new_row_lengths = None
else:
new_row_lengths = self._row_lengths
if validate_columns:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
else:
new_columns = self.columns
if len(new_columns) != len(self.columns):
new_column_widths = None
else:
new_column_widths = self._column_widths
return self.__constructor__(
new_partitions,
new_index,
new_columns,
new_row_lengths,
new_column_widths,
dtypes=dtypes,
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def filter_full_axis(self, axis, func):
"""Filter data based on the function provided along an entire axis.
Args:
axis: The axis to filter over.
func: The function to use for the filter. This function should filter the
data itself.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, func, keep_partitioning=True
)
new_axes, new_lengths = [0, 0], [0, 0]
new_axes[axis] = self.axes[axis]
new_axes[axis ^ 1] = self._compute_axis_labels(axis ^ 1, new_partitions)
new_lengths[axis] = self._axes_lengths[axis]
new_lengths[axis ^ 1] = None # We do not know what the resulting widths will be
return self.__constructor__(
new_partitions,
*new_axes,
*new_lengths,
self.dtypes if axis == 0 else None,
)
|
def filter_full_axis(self, axis, func):
"""Filter data based on the function provided along an entire axis.
Args:
axis: The axis to filter over.
func: The function to use for the filter. This function should filter the
data itself.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, func, keep_partitioning=True
)
if axis == 0:
new_index = self.index
new_lengths = self._row_lengths
new_widths = None # We do not know what the resulting widths will be
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
else:
new_columns = self.columns
new_lengths = None # We do not know what the resulting lengths will be
new_widths = self._column_widths
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
return self.__constructor__(
new_partitions,
new_index,
new_columns,
new_lengths,
new_widths,
self.dtypes if axis == 0 else None,
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def broadcast_apply_select_indices(
self,
axis,
func,
other,
apply_indices=None,
numeric_indices=None,
keep_remaining=False,
broadcast_all=True,
new_index=None,
new_columns=None,
):
"""
Applyies `func` to select indices at specified axis and broadcasts
partitions of `other` frame.
Parameters
----------
axis : int,
Axis to apply function along
func : callable,
Function to apply
other : BasePandasFrame,
Partitions of which should be broadcasted
apply_indices : list,
List of labels to apply (if `numeric_indices` are not specified)
numeric_indices : list,
Numeric indices to apply (if `apply_indices` are not specified)
keep_remaining : Whether or not to drop the data that is not computed over.
broadcast_all : Whether broadcast the whole axis of right frame to every
partition or just a subset of it.
new_index : Index, (optional)
The index of the result. We may know this in advance,
and if not provided it must be computed
new_columns : Index, (optional)
The columns of the result. We may know this in advance,
and if not provided it must be computed.
Returns
-------
BasePandasFrame
"""
assert apply_indices is not None or numeric_indices is not None, (
"Indices to apply must be specified!"
)
if other is None:
if apply_indices is None:
apply_indices = self.axes[axis][numeric_indices]
return self._apply_select_indices(
axis=axis,
func=func,
apply_indices=apply_indices,
keep_remaining=keep_remaining,
new_index=new_index,
new_columns=new_columns,
)
if numeric_indices is None:
old_index = self.index if axis else self.columns
numeric_indices = old_index.get_indexer_for(apply_indices)
dict_indices = self._get_dict_of_block_index(axis ^ 1, numeric_indices)
broadcasted_dict = other._prepare_frame_to_broadcast(
axis, dict_indices, broadcast_all=broadcast_all
)
new_partitions = self._frame_mgr_cls.broadcast_apply_select_indices(
axis,
func,
self._partitions,
other._partitions,
dict_indices,
broadcasted_dict,
keep_remaining,
)
new_axes = [
self._compute_axis_labels(i, new_partitions) if new_axis is None else new_axis
for i, new_axis in enumerate([new_index, new_columns])
]
return self.__constructor__(new_partitions, *new_axes)
|
def broadcast_apply_select_indices(
self,
axis,
func,
other,
apply_indices=None,
numeric_indices=None,
keep_remaining=False,
broadcast_all=True,
new_index=None,
new_columns=None,
):
"""
Applyies `func` to select indices at specified axis and broadcasts
partitions of `other` frame.
Parameters
----------
axis : int,
Axis to apply function along
func : callable,
Function to apply
other : BasePandasFrame,
Partitions of which should be broadcasted
apply_indices : list,
List of labels to apply (if `numeric_indices` are not specified)
numeric_indices : list,
Numeric indices to apply (if `apply_indices` are not specified)
keep_remaining : Whether or not to drop the data that is not computed over.
broadcast_all : Whether broadcast the whole axis of right frame to every
partition or just a subset of it.
new_index : Index, (optional)
The index of the result. We may know this in advance,
and if not provided it must be computed
new_columns : Index, (optional)
The columns of the result. We may know this in advance,
and if not provided it must be computed.
Returns
-------
BasePandasFrame
"""
assert apply_indices is not None or numeric_indices is not None, (
"Indices to apply must be specified!"
)
if other is None:
if apply_indices is None:
apply_indices = self.axes[axis][numeric_indices]
return self._apply_select_indices(
axis=axis,
func=func,
apply_indices=apply_indices,
keep_remaining=keep_remaining,
new_index=new_index,
new_columns=new_columns,
)
if numeric_indices is None:
old_index = self.index if axis else self.columns
numeric_indices = old_index.get_indexer_for(apply_indices)
dict_indices = self._get_dict_of_block_index(axis ^ 1, numeric_indices)
broadcasted_dict = other._prepare_frame_to_broadcast(
axis, dict_indices, broadcast_all=broadcast_all
)
new_partitions = self._frame_mgr_cls.broadcast_apply_select_indices(
axis,
func,
self._partitions,
other._partitions,
dict_indices,
broadcasted_dict,
keep_remaining,
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
return self.__constructor__(new_partitions, new_index, new_columns)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def broadcast_apply_full_axis(
self,
axis,
func,
other,
new_index=None,
new_columns=None,
dtypes=None,
):
"""Broadcast partitions of other dataframe partitions and apply a function along full axis.
Parameters
----------
axis : 0 or 1
The axis to apply over (0 - rows, 1 - columns).
func : callable
The function to apply.
other : other Modin frame to broadcast
new_index : list-like (optional)
The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns : list-like (optional)
The columns of the result. We may know this in
advance, and if not provided it must be computed.
dtypes : list-like (optional)
The data types of the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
Returns
-------
A new Modin DataFrame
"""
new_partitions = self._frame_mgr_cls.broadcast_axis_partitions(
axis=axis,
left=self._partitions,
right=other if other is None else other._partitions,
apply_func=self._build_mapreduce_func(axis, func),
keep_partitioning=True,
)
# Index objects for new object creation. This is shorter than if..else
new_axes = [
self._compute_axis_labels(i, new_partitions) if new_axis is None else new_axis
for i, new_axis in enumerate([new_index, new_columns])
]
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series([np.dtype(dtypes)] * len(new_axes[1]), index=new_axes[1])
return self.__constructor__(
new_partitions,
*new_axes,
None,
None,
dtypes,
validate_axes="all" if new_partitions.size != 0 else False,
)
|
def broadcast_apply_full_axis(
self,
axis,
func,
other,
new_index=None,
new_columns=None,
dtypes=None,
):
"""Broadcast partitions of other dataframe partitions and apply a function along full axis.
Parameters
----------
axis : 0 or 1
The axis to apply over (0 - rows, 1 - columns).
func : callable
The function to apply.
other : other Modin frame to broadcast
new_index : list-like (optional)
The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns : list-like (optional)
The columns of the result. We may know this in
advance, and if not provided it must be computed.
dtypes : list-like (optional)
The data types of the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
Returns
-------
A new Modin DataFrame
"""
new_partitions = self._frame_mgr_cls.broadcast_axis_partitions(
axis=axis,
left=self._partitions,
right=other if other is None else other._partitions,
apply_func=self._build_mapreduce_func(axis, func),
keep_partitioning=True,
)
# Index objects for new object creation. This is shorter than if..else
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series([np.dtype(dtypes)] * len(new_columns), index=new_columns)
return self.__constructor__(
new_partitions,
new_index,
new_columns,
None,
None,
dtypes,
validate_axes="all" if new_partitions.size != 0 else False,
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def groupby_reduce(
self, axis, by, map_func, reduce_func, new_index=None, new_columns=None
):
"""Groupby another dataframe and aggregate the result.
Args:
axis: The axis to groupby and aggregate over.
by: The dataframe to group by.
map_func: The map component of the aggregation.
reduce_func: The reduce component of the aggregation.
new_index: (optional) The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns: (optional) The columns of the result. We may know this in
advance, and if not provided it must be computed.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.groupby_reduce(
axis, self._partitions, by._partitions, map_func, reduce_func
)
new_axes = [
self._compute_axis_labels(i, new_partitions) if new_axis is None else new_axis
for i, new_axis in enumerate([new_index, new_columns])
]
return self.__constructor__(new_partitions, *new_axes)
|
def groupby_reduce(
self, axis, by, map_func, reduce_func, new_index=None, new_columns=None
):
"""Groupby another dataframe and aggregate the result.
Args:
axis: The axis to groupby and aggregate over.
by: The dataframe to group by.
map_func: The map component of the aggregation.
reduce_func: The reduce component of the aggregation.
new_index: (optional) The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns: (optional) The columns of the result. We may know this in
advance, and if not provided it must be computed.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.groupby_reduce(
axis, self._partitions, by._partitions, map_func, reduce_func
)
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
return self.__constructor__(new_partitions, new_index, new_columns)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
axis = self._get_axis_number(axis)
if level is not None:
func_kwargs = {
"skipna": skipna,
"level": level,
"numeric_only": numeric_only,
}
return self.__constructor__(
query_compiler=self._query_compiler.apply("kurt", axis, **func_kwargs)
)
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
data = self._get_numeric_data(axis) if numeric_only else self
return self._reduce_dimension(
data._query_compiler.kurt(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
)
|
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
axis = self._get_axis_number(axis)
if level is not None:
func_kwargs = {
"skipna": skipna,
"level": level,
"numeric_only": numeric_only,
}
return self.__constructor__(
query_compiler=self._query_compiler.apply("kurt", axis, **func_kwargs)
)
if numeric_only:
self._validate_dtypes(numeric_only=True)
return self._reduce_dimension(
self._query_compiler.kurt(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_operation("mean", axis, skipna, level, numeric_only, **kwargs)
|
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
axis = self._get_axis_number(axis)
data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return data._reduce_dimension(
data._query_compiler.mean(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _validate_dtypes_min_max(self, axis, numeric_only):
# If our DataFrame has both numeric and non-numeric dtypes then
# comparisons between these types do not make sense and we must raise a
# TypeError. The exception to this rule is when there are datetime and
# timedelta objects, in which case we proceed with the comparison
# without ignoring any non-numeric types. We must check explicitly if
# numeric_only is False because if it is None, it will default to True
# if the operation fails with mixed dtypes.
if (
axis
and numeric_only is False
and np.unique([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2
):
# check if there are columns with dtypes datetime or timedelta
if all(
dtype != np.dtype("datetime64[ns]") and dtype != np.dtype("timedelta64[ns]")
for dtype in self.dtypes
):
raise TypeError("Cannot compare Numeric and Non-Numeric Types")
return self._get_numeric_data(axis) if numeric_only else self
|
def _validate_dtypes_min_max(self, axis, numeric_only):
# If our DataFrame has both numeric and non-numeric dtypes then
# comparisons between these types do not make sense and we must raise a
# TypeError. The exception to this rule is when there are datetime and
# timedelta objects, in which case we proceed with the comparison
# without ignoring any non-numeric types. We must check explicitly if
# numeric_only is False because if it is None, it will default to True
# if the operation fails with mixed dtypes.
if (
axis
and numeric_only is False
and np.unique([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2
):
# check if there are columns with dtypes datetime or timedelta
if all(
dtype != np.dtype("datetime64[ns]") and dtype != np.dtype("timedelta64[ns]")
for dtype in self.dtypes
):
raise TypeError("Cannot compare Numeric and Non-Numeric Types")
# Pandas ignores `numeric_only` if `axis` is 1, but we do have to drop
# non-numeric columns if `axis` is 0.
if numeric_only and axis == 0:
return self.drop(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
else:
return self
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _validate_dtypes_sum_prod_mean(self, axis, numeric_only, ignore_axis=False):
"""
Raise TypeErrors for sum, prod, and mean where necessary.
TODO: Add more details for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if anything)
"""
# We cannot add datetime types, so if we are summing a column with
# dtype datetime64 and cannot ignore non-numeric types, we must throw a
# TypeError.
if (
not axis
and numeric_only is False
and any(dtype == np.dtype("datetime64[ns]") for dtype in self.dtypes)
):
raise TypeError("Cannot add Timestamp Types")
# If our DataFrame has both numeric and non-numeric dtypes then
# operations between these types do not make sense and we must raise a
# TypeError. The exception to this rule is when there are datetime and
# timedelta objects, in which case we proceed with the comparison
# without ignoring any non-numeric types. We must check explicitly if
# numeric_only is False because if it is None, it will default to True
# if the operation fails with mixed dtypes.
if (
(axis or ignore_axis)
and numeric_only is False
and np.unique([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2
):
# check if there are columns with dtypes datetime or timedelta
if all(
dtype != np.dtype("datetime64[ns]") and dtype != np.dtype("timedelta64[ns]")
for dtype in self.dtypes
):
raise TypeError("Cannot operate on Numeric and Non-Numeric Types")
return self._get_numeric_data(axis) if numeric_only else self
|
def _validate_dtypes_sum_prod_mean(self, axis, numeric_only, ignore_axis=False):
"""
Raise TypeErrors for sum, prod, and mean where necessary.
TODO: Add more details for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if anything)
"""
# We cannot add datetime types, so if we are summing a column with
# dtype datetime64 and cannot ignore non-numeric types, we must throw a
# TypeError.
if (
not axis
and numeric_only is False
and any(dtype == np.dtype("datetime64[ns]") for dtype in self.dtypes)
):
raise TypeError("Cannot add Timestamp Types")
# If our DataFrame has both numeric and non-numeric dtypes then
# operations between these types do not make sense and we must raise a
# TypeError. The exception to this rule is when there are datetime and
# timedelta objects, in which case we proceed with the comparison
# without ignoring any non-numeric types. We must check explicitly if
# numeric_only is False because if it is None, it will default to True
# if the operation fails with mixed dtypes.
if (
(axis or ignore_axis)
and numeric_only is False
and np.unique([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2
):
# check if there are columns with dtypes datetime or timedelta
if all(
dtype != np.dtype("datetime64[ns]") and dtype != np.dtype("timedelta64[ns]")
for dtype in self.dtypes
):
raise TypeError("Cannot operate on Numeric and Non-Numeric Types")
# Pandas ignores `numeric_only` if `axis` is 1, but we do have to drop
# non-numeric columns if `axis` is 0.
if numeric_only and axis == 0:
return self.drop(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
else:
return self
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
empty_df = (
pandas.DataFrame(columns=self.columns).astype(self.dtypes).describe(**kwargs)
)
new_index = empty_df.index
# Note: `describe` convert timestamp type to object type
# which results in the loss of two values in index: `first` and `last`
# for empty DataFrame.
datetime_is_numeric = kwargs.get("datetime_is_numeric") or False
if not any(map(is_numeric_dtype, empty_df.dtypes)) and not datetime_is_numeric:
for col_name in empty_df.dtypes.index:
# if previosly type of `col_name` was datetime or timedelta
if is_datetime_or_timedelta_dtype(self.dtypes[col_name]):
new_index = pandas.Index(
empty_df.index.to_list() + ["first"] + ["last"]
)
break
def describe_builder(df, internal_indices=[]):
return df.iloc[:, internal_indices].describe(**kwargs)
return self.__constructor__(
self._modin_frame._apply_full_axis_select_indices(
0,
describe_builder,
empty_df.columns,
new_index=new_index,
new_columns=empty_df.columns,
)
)
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
empty_df = (
pandas.DataFrame(columns=self.columns).astype(self.dtypes).describe(**kwargs)
)
def describe_builder(df, internal_indices=[]):
return df.iloc[:, internal_indices].describe(**kwargs)
return self.__constructor__(
self._modin_frame._apply_full_axis_select_indices(
0,
describe_builder,
empty_df.columns,
new_index=empty_df.index,
new_columns=empty_df.columns,
)
)
|
https://github.com/modin-project/modin/issues/2195
|
df.describe()
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
/usr/lib/python3.8/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
/usr/lib/python3.8/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
/usr/lib/python3.8/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
698 """A pprint that just redirects to the normal repr function."""
699 # Find newlines and replace them with p.break_()
--> 700 output = repr(obj)
701 lines = output.splitlines()
702 with p.group():
~/.local/lib/python3.8/site-packages/modin/pandas/dataframe.py in __repr__(self)
163
164 num_cols += len(self.columns) - i
--> 165 result = repr(self._build_repr_df(num_rows, num_cols))
166 if len(self.index) > num_rows or len(self.columns) > num_cols:
167 # The split here is so that we don't repr pandas row lengths.
~/.local/lib/python3.8/site-packages/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
106 else:
107 indexer = row_indexer
--> 108 return self.iloc[indexer]._query_compiler.to_pandas()
109
110 def _update_inplace(self, new_query_compiler):
~/.local/lib/python3.8/site-packages/modin/pandas/indexing.py in __getitem__(self, key)
377
378 row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
--> 379 result = super(_iLocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
380 if isinstance(result, Series):
381 result._parent = self.df
~/.local/lib/python3.8/site-packages/modin/pandas/indexing.py in __getitem__(self, row_lookup, col_lookup, ndim)
128
129 def __getitem__(self, row_lookup, col_lookup, ndim):
--> 130 qc_view = self.qc.view(row_lookup, col_lookup)
131 if ndim == 2:
132 return self.df.__constructor__(query_compiler=qc_view)
~/.local/lib/python3.8/site-packages/modin/backends/pandas/query_compiler.py in view(self, index, columns)
2144 def view(self, index=None, columns=None):
2145 return self.__constructor__(
-> 2146 self._modin_frame.mask(row_numeric_idx=index, col_numeric_idx=columns)
2147 )
2148
~/.local/lib/python3.8/site-packages/modin/engines/base/frame/data.py in mask(self, row_indices, row_numeric_idx, col_indices, col_numeric_idx)
464 and col_numeric_idx is None
465 ):
--> 466 return self.copy()
467 if row_indices is not None:
468 row_numeric_idx = self.index.get_indexer_for(row_indices)
~/.local/lib/python3.8/site-packages/modin/engines/base/frame/data.py in copy(self)
635 A copied version of this object.
636 """
--> 637 return self.__constructor__(
638 self._partitions,
639 self.index.copy(),
~/.local/lib/python3.8/site-packages/modin/engines/base/frame/data.py in __init__(self, partitions, index, columns, row_lengths, column_widths, dtypes, validate_axes)
63 self._columns_cache = ensure_index(columns)
64 if row_lengths is not None and len(self.index) > 0:
---> 65 ErrorMessage.catch_bugs_and_request_email(
66 sum(row_lengths) != len(self._index_cache),
67 "Row lengths: {} != {}".format(
~/.local/lib/python3.8/site-packages/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition, extra_log)
49 def catch_bugs_and_request_email(cls, failure_condition, extra_log=""):
50 if failure_condition:
---> 51 raise Exception(
52 "Internal Error. "
53 "Please email bug_reports@modin.org with the traceback and command that"
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Row lengths: 6 != 4
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
/usr/lib/python3.8/site-packages/IPython/core/formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~/.local/lib/python3.8/site-packages/modin/pandas/dataframe.py in _repr_html_(self)
184 # We use pandas _repr_html_ to get a string of the HTML representation
185 # of the dataframe.
--> 186 result = self._build_repr_df(num_rows, num_cols)._repr_html_()
187 if len(self.index) > num_rows or len(self.columns) > num_cols:
188 # We split so that we insert our correct dataframe dimensions.
~/.local/lib/python3.8/site-packages/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
106 else:
107 indexer = row_indexer
--> 108 return self.iloc[indexer]._query_compiler.to_pandas()
109
110 def _update_inplace(self, new_query_compiler):
~/.local/lib/python3.8/site-packages/modin/pandas/indexing.py in __getitem__(self, key)
377
378 row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
--> 379 result = super(_iLocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
380 if isinstance(result, Series):
381 result._parent = self.df
~/.local/lib/python3.8/site-packages/modin/pandas/indexing.py in __getitem__(self, row_lookup, col_lookup, ndim)
128
129 def __getitem__(self, row_lookup, col_lookup, ndim):
--> 130 qc_view = self.qc.view(row_lookup, col_lookup)
131 if ndim == 2:
132 return self.df.__constructor__(query_compiler=qc_view)
~/.local/lib/python3.8/site-packages/modin/backends/pandas/query_compiler.py in view(self, index, columns)
2144 def view(self, index=None, columns=None):
2145 return self.__constructor__(
-> 2146 self._modin_frame.mask(row_numeric_idx=index, col_numeric_idx=columns)
2147 )
2148
~/.local/lib/python3.8/site-packages/modin/engines/base/frame/data.py in mask(self, row_indices, row_numeric_idx, col_indices, col_numeric_idx)
464 and col_numeric_idx is None
465 ):
--> 466 return self.copy()
467 if row_indices is not None:
468 row_numeric_idx = self.index.get_indexer_for(row_indices)
~/.local/lib/python3.8/site-packages/modin/engines/base/frame/data.py in copy(self)
635 A copied version of this object.
636 """
--> 637 return self.__constructor__(
638 self._partitions,
639 self.index.copy(),
~/.local/lib/python3.8/site-packages/modin/engines/base/frame/data.py in __init__(self, partitions, index, columns, row_lengths, column_widths, dtypes, validate_axes)
63 self._columns_cache = ensure_index(columns)
64 if row_lengths is not None and len(self.index) > 0:
---> 65 ErrorMessage.catch_bugs_and_request_email(
66 sum(row_lengths) != len(self._index_cache),
67 "Row lengths: {} != {}".format(
~/.local/lib/python3.8/site-packages/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition, extra_log)
49 def catch_bugs_and_request_email(cls, failure_condition, extra_log=""):
50 if failure_condition:
---> 51 raise Exception(
52 "Internal Error. "
53 "Please email bug_reports@modin.org with the traceback and command that"
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Row lengths: 6 != 4
|
Exception
|
def _read(cls, filepath_or_buffer, **kwargs):
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
nrows = kwargs.pop("nrows", None)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
usecols = kwargs.get("usecols", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer,
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
).columns
elif index_col is None and not usecols:
# When names is set to some list that is smaller than the number of columns
# in the file, the first columns are built as a hierarchical index.
empty_pd_df = pandas.read_csv(filepath_or_buffer, nrows=0)
num_cols = len(empty_pd_df.columns)
if num_cols > len(names):
index_col = list(range(num_cols - len(names)))
if len(index_col) == 1:
index_col = index_col[0]
kwargs["index_col"] = index_col
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols_md = _validate_usecols_arg(usecols)
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
cls.file_open(filepath_or_buffer, "rb"),
**dict(kwargs, nrows=0, skipfooter=0),
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
cls.offset(
f,
nrows=skiprows,
quotechar=quotechar,
is_quoting=is_quoting,
)
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
# Max number of partitions available
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
splits = cls.partitioned_file(
f,
nrows=nrows,
num_partitions=num_partitions,
quotechar=quotechar,
is_quoting=is_quoting,
)
for start, end in splits:
args.update({"start": start, "end": end})
partition_id = cls.deploy(cls.parse, num_splits + 2, args)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
# pandas has a really weird edge case here.
if kwargs.get("names", None) is not None and skiprows > 1:
new_index = pandas.RangeIndex(skiprows - 1, new_index.stop + skiprows - 1)
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
def _read(cls, filepath_or_buffer, **kwargs):
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
nrows = kwargs.pop("nrows", None)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer,
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
).columns
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols = kwargs.get("usecols", None)
usecols_md = _validate_usecols_arg(usecols)
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
cls.file_open(filepath_or_buffer, "rb"),
**dict(kwargs, nrows=0, skipfooter=0),
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
cls.offset(
f,
nrows=skiprows,
quotechar=quotechar,
is_quoting=is_quoting,
)
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
# Max number of partitions available
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
splits = cls.partitioned_file(
f,
nrows=nrows,
num_partitions=num_partitions,
quotechar=quotechar,
is_quoting=is_quoting,
)
for start, end in splits:
args.update({"start": start, "end": end})
partition_id = cls.deploy(cls.parse, num_splits + 2, args)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
# pandas has a really weird edge case here.
if kwargs.get("names", None) is not None and skiprows > 1:
new_index = pandas.RangeIndex(skiprows - 1, new_index.stop + skiprows - 1)
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
https://github.com/modin-project/modin/issues/2074
|
col2
one two
three four
col2
0 two
1 four
Traceback (most recent call last):
File "test_read_csv.py", line 47, in <module>
df_equals(df_pd, df_pandas)
File "/localdisk/amyskov/modin2/modin/pandas/test/utils.py", line 499, in df_equals
assert_frame_equal(
File "/localdisk/amyskov/miniconda3_2/envs/modin_test3/lib/python3.8/site-packages/pandas/_testing.py", line 1332, in assert_frame_equal
assert_index_equal(
File "/localdisk/amyskov/miniconda3_2/envs/modin_test3/lib/python3.8/site-packages/pandas/_testing.py", line 679, in assert_index_equal
_testing.assert_almost_equal(
File "pandas/_libs/testing.pyx", line 65, in pandas._libs.testing.assert_almost_equal
File "pandas/_libs/testing.pyx", line 174, in pandas._libs.testing.assert_almost_equal
File "/localdisk/amyskov/miniconda3_2/envs/modin_test3/lib/python3.8/site-packages/pandas/_testing.py", line 915, in raise_assert_detail
raise AssertionError(msg)
AssertionError: DataFrame.index are different
DataFrame.index values are different (100.0 %)
[left]: RangeIndex(start=0, stop=2, step=1)
[right]: Index(['one', 'three'], dtype='object')
|
AssertionError
|
def sort_rows(self, columns, ascending, ignore_index, na_position):
if na_position != "first" and na_position != "last":
raise ValueError(f"Unsupported na_position value '{na_position}'")
if not isinstance(columns, list):
columns = [columns]
columns = [self._find_index_or_col(col) for col in columns]
if isinstance(ascending, list):
if len(ascending) != len(columns):
raise ValueError("ascending list length doesn't match columns list")
else:
if not isinstance(ascending, bool):
raise ValueError("unsupported ascending value")
ascending = [ascending] * len(columns)
if ignore_index:
# If index is ignored then we might need to drop some columns.
# At the same time some of dropped index columns can be used
# for sorting and should be droped after sorting is done.
if self._index_cols is not None:
base = self
drop_index_cols_before = [
col for col in self._index_cols if col not in columns
]
drop_index_cols_after = [col for col in self._index_cols if col in columns]
if not drop_index_cols_after:
drop_index_cols_after = None
if drop_index_cols_before:
exprs = OrderedDict()
index_cols = drop_index_cols_after if drop_index_cols_after else None
for col in drop_index_cols_after:
exprs[col] = base.ref(col)
for col in base.columns:
exprs[col] = base.ref(col)
base = self.__constructor__(
columns=base.columns,
dtypes=self._dtypes_for_exprs(exprs),
op=TransformNode(base, exprs),
index_cols=index_cols,
force_execution_mode=self._force_execution_mode,
)
base = self.__constructor__(
columns=base.columns,
dtypes=base._dtypes,
op=SortNode(base, columns, ascending, na_position),
index_cols=base._index_cols,
force_execution_mode=self._force_execution_mode,
)
if drop_index_cols_after:
exprs = OrderedDict()
for col in base.columns:
exprs[col] = base.ref(col)
base = self.__constructor__(
columns=base.columns,
dtypes=self._dtypes_for_exprs(exprs),
op=TransformNode(base, exprs),
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
return base
else:
return self.__constructor__(
columns=self.columns,
dtypes=self._dtypes,
op=SortNode(self, columns, ascending, na_position),
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
else:
base = self
# If index is preserved and we have no index columns then we
# need to create one using __rowid__ virtual column.
if self._index_cols is None:
base = base._materialize_rowid()
return self.__constructor__(
columns=base.columns,
dtypes=base._dtypes,
op=SortNode(base, columns, ascending, na_position),
index_cols=base._index_cols,
force_execution_mode=self._force_execution_mode,
)
|
def sort_rows(self, columns, ascending, ignore_index, na_position):
if na_position != "first" and na_position != "last":
raise ValueError(f"Unsupported na_position value '{na_position}'")
if not isinstance(columns, list):
columns = [columns]
for col in columns:
if col not in self._table_cols:
raise ValueError(f"Unknown column '{col}'")
if isinstance(ascending, list):
if len(ascending) != len(columns):
raise ValueError("ascending list length doesn't match columns list")
else:
if not isinstance(ascending, bool):
raise ValueError("unsupported ascending value")
ascending = [ascending] * len(columns)
if ignore_index:
# If index is ignored then we might need to drop some columns.
# At the same time some of dropped index columns can be used
# for sorting and should be droped after sorting is done.
if self._index_cols is not None:
base = self
drop_index_cols_before = [
col for col in self._index_cols if col not in columns
]
drop_index_cols_after = [col for col in self._index_cols if col in columns]
if not drop_index_cols_after:
drop_index_cols_after = None
if drop_index_cols_before:
exprs = OrderedDict()
index_cols = drop_index_cols_after if drop_index_cols_after else None
for col in drop_index_cols_after:
exprs[col] = base.ref(col)
for col in base.columns:
exprs[col] = base.ref(col)
base = self.__constructor__(
columns=base.columns,
dtypes=self._dtypes_for_exprs(exprs),
op=TransformNode(base, exprs),
index_cols=index_cols,
force_execution_mode=self._force_execution_mode,
)
base = self.__constructor__(
columns=base.columns,
dtypes=base._dtypes,
op=SortNode(base, columns, ascending, na_position),
index_cols=base._index_cols,
force_execution_mode=self._force_execution_mode,
)
if drop_index_cols_after:
exprs = OrderedDict()
for col in base.columns:
exprs[col] = base.ref(col)
base = self.__constructor__(
columns=base.columns,
dtypes=self._dtypes_for_exprs(exprs),
op=TransformNode(base, exprs),
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
return base
else:
return self.__constructor__(
columns=self.columns,
dtypes=self._dtypes,
op=SortNode(self, columns, ascending, na_position),
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
else:
base = self
# If index is preserved and we have no index columns then we
# need to create one using __rowid__ virtual column.
if self._index_cols is None:
base = base._materialize_rowid()
return self.__constructor__(
columns=base.columns,
dtypes=base._dtypes,
op=SortNode(base, columns, ascending, na_position),
index_cols=base._index_cols,
force_execution_mode=self._force_execution_mode,
)
|
https://github.com/modin-project/modin/issues/2156
|
Traceback (most recent call last):
File "../reproducer.py", line 15, in <module>
md_df = pd.DataFrame(data, index=mi_index)
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 142, in __init__
self._query_compiler = from_pandas(pandas_df)._query_compiler
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/utils.py", line 37, in from_pandas
return DataFrame(query_compiler=EngineDispatcher.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/dispatcher.py", line 97, in from_pandas
return cls.__engine._from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/factories.py", line 71, in _from_pandas
return cls.io_cls.from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/io/io.py", line 31, in from_pandas
return cls.query_compiler_cls.from_pandas(df, cls.frame_cls)
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/backends/omnisci/query_compiler.py", line 42, in from_pandas
return cls(data_cls.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/engines/omnisci_on_ray/frame/data.py", line 1142, in from_pandas
df = df.reset_index()
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 4854, in reset_index
new_obj.insert(0, name, level_values)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 3623, in insert
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/internals/managers.py", line 1177, in insert
raise ValueError(f"cannot insert {item}, already exists")
ValueError: cannot insert __index__, already exists
|
ValueError
|
def reset_index(self, drop):
if drop:
exprs = OrderedDict()
for c in self.columns:
exprs[c] = self.ref(c)
return self.__constructor__(
columns=self.columns,
dtypes=self._dtypes_for_exprs(exprs),
op=TransformNode(self, exprs),
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
else:
if self._index_cols is None:
raise NotImplementedError(
"default index reset with no drop is not supported"
)
# Need to demangle index names.
exprs = OrderedDict()
for i, c in enumerate(self._index_cols):
name = self._index_name(c)
if name is None:
name = f"level_{i}"
if name in exprs:
raise ValueError(f"cannot insert {name}, already exists")
exprs[name] = self.ref(c)
for c in self.columns:
if c in exprs:
raise ValueError(f"cannot insert {c}, already exists")
exprs[c] = self.ref(c)
new_columns = Index.__new__(Index, data=exprs.keys(), dtype="O")
return self.__constructor__(
columns=new_columns,
dtypes=self._dtypes_for_exprs(exprs),
op=TransformNode(self, exprs),
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
|
def reset_index(self, drop):
if drop:
exprs = OrderedDict()
for c in self.columns:
exprs[c] = self.ref(c)
return self.__constructor__(
columns=self.columns,
dtypes=self._dtypes_for_exprs(exprs),
op=TransformNode(self, exprs),
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
else:
if self._index_cols is None:
raise NotImplementedError(
"default index reset with no drop is not supported"
)
new_columns = Index.__new__(Index, data=self._table_cols, dtype="O")
return self.__constructor__(
columns=new_columns,
dtypes=self._dtypes_for_cols(None, new_columns),
op=self._op,
index_cols=None,
force_execution_mode=self._force_execution_mode,
)
|
https://github.com/modin-project/modin/issues/2156
|
Traceback (most recent call last):
File "../reproducer.py", line 15, in <module>
md_df = pd.DataFrame(data, index=mi_index)
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 142, in __init__
self._query_compiler = from_pandas(pandas_df)._query_compiler
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/utils.py", line 37, in from_pandas
return DataFrame(query_compiler=EngineDispatcher.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/dispatcher.py", line 97, in from_pandas
return cls.__engine._from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/factories.py", line 71, in _from_pandas
return cls.io_cls.from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/io/io.py", line 31, in from_pandas
return cls.query_compiler_cls.from_pandas(df, cls.frame_cls)
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/backends/omnisci/query_compiler.py", line 42, in from_pandas
return cls(data_cls.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/engines/omnisci_on_ray/frame/data.py", line 1142, in from_pandas
df = df.reset_index()
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 4854, in reset_index
new_obj.insert(0, name, level_values)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 3623, in insert
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/internals/managers.py", line 1177, in insert
raise ValueError(f"cannot insert {item}, already exists")
ValueError: cannot insert __index__, already exists
|
ValueError
|
def _index_name(self, col):
if col == "__index__":
return None
match = re.search("__index__\\d+_(.*)", col)
if match:
name = match.group(1)
if name == "__None__":
return None
return name
return col
|
def _index_name(self, col):
if col.startswith("__index__"):
return None
return col
|
https://github.com/modin-project/modin/issues/2156
|
Traceback (most recent call last):
File "../reproducer.py", line 15, in <module>
md_df = pd.DataFrame(data, index=mi_index)
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 142, in __init__
self._query_compiler = from_pandas(pandas_df)._query_compiler
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/utils.py", line 37, in from_pandas
return DataFrame(query_compiler=EngineDispatcher.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/dispatcher.py", line 97, in from_pandas
return cls.__engine._from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/factories.py", line 71, in _from_pandas
return cls.io_cls.from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/io/io.py", line 31, in from_pandas
return cls.query_compiler_cls.from_pandas(df, cls.frame_cls)
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/backends/omnisci/query_compiler.py", line 42, in from_pandas
return cls(data_cls.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/engines/omnisci_on_ray/frame/data.py", line 1142, in from_pandas
df = df.reset_index()
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 4854, in reset_index
new_obj.insert(0, name, level_values)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 3623, in insert
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/internals/managers.py", line 1177, in insert
raise ValueError(f"cannot insert {item}, already exists")
ValueError: cannot insert __index__, already exists
|
ValueError
|
def from_pandas(cls, df):
new_index = df.index
new_columns = df.columns
# If there is non-trivial index, we put it into columns.
# That's what we usually have for arrow tables and execution
# result. Unnamed index is renamed to __index__. Also all
# columns get 'F_' prefix to handle names unsupported in
# OmniSci.
if cls._is_trivial_index(df.index):
index_cols = None
else:
orig_index_names = df.index.names
orig_df = df
index_cols = [
f"__index__{i}_{'__None__' if n is None else n}"
for i, n in enumerate(df.index.names)
]
df.index.names = index_cols
df = df.reset_index()
orig_df.index.names = orig_index_names
new_dtypes = df.dtypes
df = df.add_prefix("F_")
new_parts, new_lengths, new_widths = cls._frame_mgr_cls.from_pandas(df, True)
return cls(
new_parts,
new_index,
new_columns,
new_lengths,
new_widths,
dtypes=new_dtypes,
index_cols=index_cols,
)
|
def from_pandas(cls, df):
new_index = df.index
new_columns = df.columns
# If there is non-trivial index, we put it into columns.
# That's what we usually have for arrow tables and execution
# result. Unnamed index is renamed to __index__. Also all
# columns get 'F_' prefix to handle names unsupported in
# OmniSci.
if cls._is_trivial_index(df.index):
index_cols = None
else:
orig_index_names = df.index.names
orig_df = df
index_cols = ["__index__" if n is None else n for n in df.index.names]
df.index.names = index_cols
df = df.reset_index()
orig_df.index.names = orig_index_names
new_dtypes = df.dtypes
df = df.add_prefix("F_")
new_parts, new_lengths, new_widths = cls._frame_mgr_cls.from_pandas(df, True)
return cls(
new_parts,
new_index,
new_columns,
new_lengths,
new_widths,
dtypes=new_dtypes,
index_cols=index_cols,
)
|
https://github.com/modin-project/modin/issues/2156
|
Traceback (most recent call last):
File "../reproducer.py", line 15, in <module>
md_df = pd.DataFrame(data, index=mi_index)
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 142, in __init__
self._query_compiler = from_pandas(pandas_df)._query_compiler
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/utils.py", line 37, in from_pandas
return DataFrame(query_compiler=EngineDispatcher.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/dispatcher.py", line 97, in from_pandas
return cls.__engine._from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/data_management/factories/factories.py", line 71, in _from_pandas
return cls.io_cls.from_pandas(df)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/io/io.py", line 31, in from_pandas
return cls.query_compiler_cls.from_pandas(df, cls.frame_cls)
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/backends/omnisci/query_compiler.py", line 42, in from_pandas
return cls(data_cls.from_pandas(df))
File "/localdisk/dchigare/repos/modin_bp/modin/experimental/engines/omnisci_on_ray/frame/data.py", line 1142, in from_pandas
df = df.reset_index()
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 4854, in reset_index
new_obj.insert(0, name, level_values)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/frame.py", line 3623, in insert
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
File "/localdisk/dchigare/miniconda3/envs/modin_on_omnisci/lib/python3.7/site-packages/pandas/core/internals/managers.py", line 1177, in insert
raise ValueError(f"cannot insert {item}, already exists")
ValueError: cannot insert __index__, already exists
|
ValueError
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : boolean
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
if all(o.axes[axis].equals(self.axes[axis]) for o in other):
return (
self._partitions,
[self._simple_shuffle(axis, o) for o in other],
self.axes[axis].copy(),
)
index_other_obj = [o.axes[axis] for o in other]
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.axes[axis]
right_old_idxes = index_other_obj
# Start with this and we'll repartition the first time, and then not again.
if not left_old_idx.equals(joined_index) or force_repartition:
reindexed_self = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, lambda df: df.reindex(joined_index, axis=axis)
)
else:
reindexed_self = self._partitions
reindexed_other_list = []
for i in range(len(other)):
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindexed_other = other[i]._partitions
else:
reindexed_other = other[i]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
lambda df: df.reindex(joined_index, axis=axis),
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : boolean
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_other_obj = [o.axes[axis] for o in other]
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.axes[axis]
right_old_idxes = index_other_obj
# Start with this and we'll repartition the first time, and then not again.
if not left_old_idx.equals(joined_index) or force_repartition:
reindexed_self = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, lambda df: df.reindex(joined_index, axis=axis)
)
else:
reindexed_self = self._partitions
reindexed_other_list = []
for i in range(len(other)):
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindexed_other = other[i]._partitions
else:
reindexed_other = other[i]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
lambda df: df.reindex(joined_index, axis=axis),
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
https://github.com/modin-project/modin/issues/2134
|
UserWarning: Distributing <class 'list'> object. This may take some time.
In [4]: df
Out[4]: ---------------------------------------------------------------------------
Exception Traceback (most recent call last)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/IPython/lib/pretty.py in pretty(self, obj)
400 if cls is not object \
401 and callable(cls.__dict__.get('__repr__')):
--> 402 return _repr_pprint(obj, self, cycle)
403
404 return _default_pprint(obj, self, cycle)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
695 """A pprint that just redirects to the normal repr function."""
696 # Find newlines and replace them with p.break_()
--> 697 output = repr(obj)
698 for idx,output_line in enumerate(output.splitlines()):
699 if idx:
~/software_builds/modin/modin/pandas/dataframe.py in __repr__(self)
168
169 num_cols += len(self.columns) - i
--> 170 result = repr(self._build_repr_df(num_rows, num_cols))
171 if len(self.index) > num_rows or len(self.columns) > num_cols:
172 # The split here is so that we don't repr pandas row lengths.
~/software_builds/modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
117 else:
118 indexer = row_indexer
--> 119 return self.iloc[indexer]._query_compiler.to_pandas()
120
121 def _update_inplace(self, new_query_compiler):
~/software_builds/modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
193
194 def to_pandas(self):
--> 195 return self._modin_frame.to_pandas()
196
197 @classmethod
~/software_builds/modin/modin/engines/base/frame/data.py in to_pandas(self)
1816 ErrorMessage.catch_bugs_and_request_email(
1817 not df.index.equals(self.index) or not df.columns.equals(self.columns),
-> 1818 "Internal and external indices do not match.",
1819 )
1820 df.index = self.index
~/software_builds/modin/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition, extra_log)
52 "Internal Error. "
53 "Please email bug_reports@modin.org with the traceback and command that"
---> 54 " caused this error.\n{}".format(extra_log)
55 )
56
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def _read(cls, filepath_or_buffer, **kwargs):
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
nrows = kwargs.pop("nrows", None)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer,
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
).columns
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols = kwargs.get("usecols", None)
usecols_md = _validate_usecols_arg(usecols)
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
cls.file_open(filepath_or_buffer, "rb"),
**dict(kwargs, nrows=0, skipfooter=0),
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
cls.offset(
f,
nrows=skiprows,
quotechar=quotechar,
is_quoting=is_quoting,
)
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
# Max number of partitions available
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
splits = cls.partitioned_file(
f,
nrows=nrows,
num_partitions=num_partitions,
quotechar=quotechar,
is_quoting=is_quoting,
)
for start, end in splits:
args.update({"start": start, "end": end})
partition_id = cls.deploy(cls.parse, num_splits + 2, args)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
# pandas has a really weird edge case here.
if kwargs.get("names", None) is not None and skiprows > 1:
new_index = pandas.RangeIndex(skiprows - 1, new_index.stop + skiprows - 1)
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
def _read(cls, filepath_or_buffer, **kwargs):
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
nrows = kwargs.pop("nrows", None)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer,
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
).columns
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols = kwargs.get("usecols", None)
usecols_md = _validate_usecols_arg(usecols)
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
cls.file_open(filepath_or_buffer, "rb"),
**dict(kwargs, nrows=0, skipfooter=0),
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
cls.offset(
f,
nrows=skiprows,
quotechar=quotechar,
is_quoting=is_quoting,
)
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
# Max number of partitions available
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
splits = cls.partitioned_file(
f,
nrows=nrows,
num_partitions=num_partitions,
quotechar=quotechar,
is_quoting=is_quoting,
)
for start, end in splits:
args.update({"start": start, "end": end})
partition_id = cls.deploy(cls.parse, num_splits + 2, args)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
# pandas has a really weird edge case here.
if kwargs.get("names", None) is not None and skiprows > 1:
new_index = pandas.RangeIndex(skiprows - 1, new_index.stop + skiprows - 1)
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids)
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
https://github.com/modin-project/modin/issues/1386
|
Traceback (most recent call last):
File "cat_test.py", line 4, in <module>
df1 = pd.read_csv("categories.json", dtype={"one": "int64", "two": "category"})
File "/localdisk/gashiman/modin/modin/pandas/io.py", line 111, in parser_func
return _read(**kwargs)
File "/localdisk/gashiman/modin/modin/pandas/io.py", line 124, in _read
pd_obj = BaseFactory.read_csv(**kwargs)
File "/localdisk/gashiman/modin/modin/data_management/factories.py", line 69, in read_csv
return cls._determine_engine()._read_csv(**kwargs)
File "/localdisk/gashiman/modin/modin/data_management/factories.py", line 73, in _read_csv
return cls.io_cls.read_csv(**kwargs)
File "/localdisk/gashiman/modin/modin/engines/base/io/file_reader.py", line 29, in read
query_compiler = cls._read(*args, **kwargs)
File "/localdisk/gashiman/modin/modin/engines/base/io/text/csv_reader.py", line 199, in _read
dtypes = cls.get_dtypes(dtypes_ids)
File "/localdisk/gashiman/modin/modin/backends/pandas/parsers.py", line 61, in get_dtypes
pandas.concat(cls.materialize(dtypes_ids), axis=1)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/reshape/concat.py", line 281, in concat
sort=sort,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/reshape/concat.py", line 329, in __init__
raise ValueError("No objects to concatenate")
ValueError: No objects to concatenate
|
ValueError
|
def groupby_reduce(cls, axis, partitions, by, map_func, reduce_func):
mapped_partitions = cls.broadcast_apply(
axis, map_func, left=partitions, right=by, other_name="other"
)
return cls.map_axis_partitions(axis, mapped_partitions, reduce_func)
|
def groupby_reduce(cls, axis, partitions, by, map_func, reduce_func):
by_parts = np.squeeze(by)
if len(by_parts.shape) == 0:
by_parts = np.array([by_parts.item()])
[obj.drain_call_queue() for obj in by_parts]
new_partitions = np.array(
[
[
part.add_to_apply_calls(
map_func,
other=by_parts[col_idx].get() if axis else by_parts[row_idx].get(),
)
for col_idx, part in enumerate(partitions[row_idx])
]
for row_idx in range(len(partitions))
]
)
return cls.map_axis_partitions(axis, new_partitions, reduce_func)
|
https://github.com/modin-project/modin/issues/1376
|
import modin.pandas as pd
import glob
files = glob.glob('/path/to/files/*.parquet')
df = pd.concat([pd.read_parquet(file) for file in files])
df.reset_index(inplace=True)
df.drop(columns=['index', 'DATE', 'STATUS', 'ID', 'ID2],inplace=True)
df.groupby(['REPORTING_DATE', 'ENTITY', 'CURRENCY']).sum()
UserWarning: Multi-column groupby is a new feature. Please report any bugs/issues to bug_reports@modin.org.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-15-081b1819b7d6> in <module>
----> 1 df.groupby(['DATA_SALDA', 'BANK', 'WALUTA_KON']).sum()
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in sum(self, **kwargs)
323
324 def sum(self, **kwargs):
--> 325 return self._groupby_reduce(lambda df: df.sum(**kwargs), None)
326
327 def describe(self, **kwargs):
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in _groupby_reduce(self, map_func, reduce_func, drop, numeric_only, **kwargs)
479 reduce_args=kwargs,
480 numeric_only=numeric_only,
--> 481 drop=self._drop,
482 )
483 )
~/eksperyment/lib64/python3.6/site-packages/modin/backends/pandas/query_compiler.py in groupby_reduce(self, by, axis, groupby_args, map_func, map_args, reduce_func, reduce_args, numeric_only, drop)
1236 _reduce,
1237 new_columns=new_columns,
-> 1238 new_index=new_index,
1239 )
1240 result = self.__constructor__(new_modin_frame)
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/data.py in groupby_reduce(self, axis, by, map_func, reduce_func, new_index, new_columns)
1148 """
1149 new_partitions = self._frame_mgr_cls.groupby_reduce(
-> 1150 axis, self._partitions, by._partitions, map_func, reduce_func
1151 )
1152 if new_columns is None:
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in groupby_reduce(cls, axis, partitions, by, map_func, reduce_func)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
AttributeError: 'numpy.ndarray' object has no attribute 'drain_call_queue'
|
AttributeError
|
def broadcast_apply(cls, axis, apply_func, left, right, other_name="r"):
"""Broadcast the right partitions to left and apply a function.
Note: This will often be overridden by implementations. It materializes the
entire partitions of the right and applies them to the left through `apply`.
Parameters
----------
axis: The axis to apply and broadcast over.
apply_func: The function to apply.
left: The left partitions.
right: The right partitions.
other_name: Name of key-value argument for `apply_func` that
obtains `right`. (optional, by default it's `"r"`)
Returns
-------
A new `np.array` of partition objects.
"""
[obj.drain_call_queue() for row in right for obj in row]
new_right = np.empty(shape=right.shape[axis], dtype=object)
if axis:
right = right.T
for i in range(len(right)):
new_right[i] = pandas.concat(
[right[i][j].get() for j in range(len(right[i]))], axis=axis ^ 1
)
right = new_right.T if axis else new_right
new_partitions = np.array(
[
[
part.add_to_apply_calls(
apply_func,
**{other_name: right[col_idx] if axis else right[row_idx]},
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
return new_partitions
|
def broadcast_apply(cls, axis, apply_func, left, right):
"""Broadcast the right partitions to left and apply a function.
Note: This will often be overridden by implementations. It materializes the
entire partitions of the right and applies them to the left through `apply`.
Args:
axis: The axis to apply and broadcast over.
apply_func: The function to apply.
left: The left partitions.
right: The right partitions.
Returns:
A new `np.array` of partition objects.
"""
if right.shape == (1, 1):
right_parts = right[0]
else:
right_parts = np.squeeze(right)
[obj.drain_call_queue() for obj in right_parts]
return np.array(
[
[
part.add_to_apply_calls(
apply_func,
r=right_parts[col_idx].get()
if axis
else right_parts[row_idx].get(),
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
|
https://github.com/modin-project/modin/issues/1376
|
import modin.pandas as pd
import glob
files = glob.glob('/path/to/files/*.parquet')
df = pd.concat([pd.read_parquet(file) for file in files])
df.reset_index(inplace=True)
df.drop(columns=['index', 'DATE', 'STATUS', 'ID', 'ID2],inplace=True)
df.groupby(['REPORTING_DATE', 'ENTITY', 'CURRENCY']).sum()
UserWarning: Multi-column groupby is a new feature. Please report any bugs/issues to bug_reports@modin.org.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-15-081b1819b7d6> in <module>
----> 1 df.groupby(['DATA_SALDA', 'BANK', 'WALUTA_KON']).sum()
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in sum(self, **kwargs)
323
324 def sum(self, **kwargs):
--> 325 return self._groupby_reduce(lambda df: df.sum(**kwargs), None)
326
327 def describe(self, **kwargs):
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in _groupby_reduce(self, map_func, reduce_func, drop, numeric_only, **kwargs)
479 reduce_args=kwargs,
480 numeric_only=numeric_only,
--> 481 drop=self._drop,
482 )
483 )
~/eksperyment/lib64/python3.6/site-packages/modin/backends/pandas/query_compiler.py in groupby_reduce(self, by, axis, groupby_args, map_func, map_args, reduce_func, reduce_args, numeric_only, drop)
1236 _reduce,
1237 new_columns=new_columns,
-> 1238 new_index=new_index,
1239 )
1240 result = self.__constructor__(new_modin_frame)
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/data.py in groupby_reduce(self, axis, by, map_func, reduce_func, new_index, new_columns)
1148 """
1149 new_partitions = self._frame_mgr_cls.groupby_reduce(
-> 1150 axis, self._partitions, by._partitions, map_func, reduce_func
1151 )
1152 if new_columns is None:
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in groupby_reduce(cls, axis, partitions, by, map_func, reduce_func)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
AttributeError: 'numpy.ndarray' object has no attribute 'drain_call_queue'
|
AttributeError
|
def deploy_func(df, apply_func, call_queue_df=None, call_queues_other=None, *others):
if call_queue_df is not None and len(call_queue_df) > 0:
for call, kwargs in call_queue_df:
if isinstance(call, bytes):
call = pkl.loads(call)
if isinstance(kwargs, bytes):
kwargs = pkl.loads(kwargs)
df = call(df, **kwargs)
new_others = np.empty(shape=len(others), dtype=object)
for i, call_queue_other in enumerate(call_queues_other):
other = others[i]
if call_queue_other is not None and len(call_queue_other) > 0:
for call, kwargs in call_queue_other:
if isinstance(call, bytes):
call = pkl.loads(call)
if isinstance(kwargs, bytes):
kwargs = pkl.loads(kwargs)
other = call(other, **kwargs)
new_others[i] = other
if isinstance(apply_func, bytes):
apply_func = pkl.loads(apply_func)
return apply_func(df, new_others)
|
def deploy_func(df, other, apply_func, call_queue_df=None, call_queue_other=None):
if call_queue_df is not None and len(call_queue_df) > 0:
for call, kwargs in call_queue_df:
if isinstance(call, bytes):
call = pkl.loads(call)
if isinstance(kwargs, bytes):
kwargs = pkl.loads(kwargs)
df = call(df, **kwargs)
if call_queue_other is not None and len(call_queue_other) > 0:
for call, kwargs in call_queue_other:
if isinstance(call, bytes):
call = pkl.loads(call)
if isinstance(kwargs, bytes):
kwargs = pkl.loads(kwargs)
other = call(other, **kwargs)
if isinstance(apply_func, bytes):
apply_func = pkl.loads(apply_func)
return apply_func(df, other)
|
https://github.com/modin-project/modin/issues/1376
|
import modin.pandas as pd
import glob
files = glob.glob('/path/to/files/*.parquet')
df = pd.concat([pd.read_parquet(file) for file in files])
df.reset_index(inplace=True)
df.drop(columns=['index', 'DATE', 'STATUS', 'ID', 'ID2],inplace=True)
df.groupby(['REPORTING_DATE', 'ENTITY', 'CURRENCY']).sum()
UserWarning: Multi-column groupby is a new feature. Please report any bugs/issues to bug_reports@modin.org.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-15-081b1819b7d6> in <module>
----> 1 df.groupby(['DATA_SALDA', 'BANK', 'WALUTA_KON']).sum()
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in sum(self, **kwargs)
323
324 def sum(self, **kwargs):
--> 325 return self._groupby_reduce(lambda df: df.sum(**kwargs), None)
326
327 def describe(self, **kwargs):
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in _groupby_reduce(self, map_func, reduce_func, drop, numeric_only, **kwargs)
479 reduce_args=kwargs,
480 numeric_only=numeric_only,
--> 481 drop=self._drop,
482 )
483 )
~/eksperyment/lib64/python3.6/site-packages/modin/backends/pandas/query_compiler.py in groupby_reduce(self, by, axis, groupby_args, map_func, map_args, reduce_func, reduce_args, numeric_only, drop)
1236 _reduce,
1237 new_columns=new_columns,
-> 1238 new_index=new_index,
1239 )
1240 result = self.__constructor__(new_modin_frame)
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/data.py in groupby_reduce(self, axis, by, map_func, reduce_func, new_index, new_columns)
1148 """
1149 new_partitions = self._frame_mgr_cls.groupby_reduce(
-> 1150 axis, self._partitions, by._partitions, map_func, reduce_func
1151 )
1152 if new_columns is None:
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in groupby_reduce(cls, axis, partitions, by, map_func, reduce_func)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
AttributeError: 'numpy.ndarray' object has no attribute 'drain_call_queue'
|
AttributeError
|
def broadcast_apply(cls, axis, apply_func, left, right, other_name="r"):
def mapper(df, others):
other = pandas.concat(others, axis=axis ^ 1)
return apply_func(df, **{other_name: other})
client = _get_global_client()
return np.array(
[
[
PandasOnDaskFramePartition(
client.submit(
deploy_func,
part.future,
mapper,
part.call_queue,
[obj[col_idx].call_queue for obj in right]
if axis
else [obj.call_queue for obj in right[row_idx]],
*(
[obj[col_idx].future for obj in right]
if axis
else [obj.future for obj in right[row_idx]]
),
pure=False,
)
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
|
def broadcast_apply(cls, axis, apply_func, left, right):
client = _get_global_client()
right_parts = np.squeeze(right)
if len(right_parts.shape) == 0:
right_parts = np.array([right_parts.item()])
assert len(right_parts.shape) == 1, (
"Invalid broadcast partitions shape {}\n{}".format(
right_parts.shape, [[i.get() for i in j] for j in right_parts]
)
)
return np.array(
[
[
PandasOnDaskFramePartition(
client.submit(
deploy_func,
part.future,
right_parts[col_idx].future
if axis
else right_parts[row_idx].future,
apply_func,
part.call_queue,
right_parts[col_idx].call_queue
if axis
else right_parts[row_idx].call_queue,
pure=False,
)
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
|
https://github.com/modin-project/modin/issues/1376
|
import modin.pandas as pd
import glob
files = glob.glob('/path/to/files/*.parquet')
df = pd.concat([pd.read_parquet(file) for file in files])
df.reset_index(inplace=True)
df.drop(columns=['index', 'DATE', 'STATUS', 'ID', 'ID2],inplace=True)
df.groupby(['REPORTING_DATE', 'ENTITY', 'CURRENCY']).sum()
UserWarning: Multi-column groupby is a new feature. Please report any bugs/issues to bug_reports@modin.org.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-15-081b1819b7d6> in <module>
----> 1 df.groupby(['DATA_SALDA', 'BANK', 'WALUTA_KON']).sum()
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in sum(self, **kwargs)
323
324 def sum(self, **kwargs):
--> 325 return self._groupby_reduce(lambda df: df.sum(**kwargs), None)
326
327 def describe(self, **kwargs):
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in _groupby_reduce(self, map_func, reduce_func, drop, numeric_only, **kwargs)
479 reduce_args=kwargs,
480 numeric_only=numeric_only,
--> 481 drop=self._drop,
482 )
483 )
~/eksperyment/lib64/python3.6/site-packages/modin/backends/pandas/query_compiler.py in groupby_reduce(self, by, axis, groupby_args, map_func, map_args, reduce_func, reduce_args, numeric_only, drop)
1236 _reduce,
1237 new_columns=new_columns,
-> 1238 new_index=new_index,
1239 )
1240 result = self.__constructor__(new_modin_frame)
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/data.py in groupby_reduce(self, axis, by, map_func, reduce_func, new_index, new_columns)
1148 """
1149 new_partitions = self._frame_mgr_cls.groupby_reduce(
-> 1150 axis, self._partitions, by._partitions, map_func, reduce_func
1151 )
1152 if new_columns is None:
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in groupby_reduce(cls, axis, partitions, by, map_func, reduce_func)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
AttributeError: 'numpy.ndarray' object has no attribute 'drain_call_queue'
|
AttributeError
|
def func(df, apply_func, call_queue_df=None, call_queues_other=None, *others):
if call_queue_df is not None and len(call_queue_df) > 0:
for call, kwargs in call_queue_df:
if isinstance(call, ray.ObjectID):
call = ray.get(call)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
df = call(df, **kwargs)
new_others = np.empty(shape=len(others), dtype=object)
for i, call_queue_other in enumerate(call_queues_other):
other = others[i]
if call_queue_other is not None and len(call_queue_other) > 0:
for call, kwargs in call_queue_other:
if isinstance(call, ray.ObjectID):
call = ray.get(call)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
other = call(other, **kwargs)
new_others[i] = other
return apply_func(df, new_others)
|
def func(df, other, apply_func, call_queue_df=None, call_queue_other=None):
if call_queue_df is not None and len(call_queue_df) > 0:
for call, kwargs in call_queue_df:
if isinstance(call, ray.ObjectID):
call = ray.get(call)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
df = call(df, **kwargs)
if call_queue_other is not None and len(call_queue_other) > 0:
for call, kwargs in call_queue_other:
if isinstance(call, ray.ObjectID):
call = ray.get(call)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
other = call(other, **kwargs)
return apply_func(df, other)
|
https://github.com/modin-project/modin/issues/1376
|
import modin.pandas as pd
import glob
files = glob.glob('/path/to/files/*.parquet')
df = pd.concat([pd.read_parquet(file) for file in files])
df.reset_index(inplace=True)
df.drop(columns=['index', 'DATE', 'STATUS', 'ID', 'ID2],inplace=True)
df.groupby(['REPORTING_DATE', 'ENTITY', 'CURRENCY']).sum()
UserWarning: Multi-column groupby is a new feature. Please report any bugs/issues to bug_reports@modin.org.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-15-081b1819b7d6> in <module>
----> 1 df.groupby(['DATA_SALDA', 'BANK', 'WALUTA_KON']).sum()
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in sum(self, **kwargs)
323
324 def sum(self, **kwargs):
--> 325 return self._groupby_reduce(lambda df: df.sum(**kwargs), None)
326
327 def describe(self, **kwargs):
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in _groupby_reduce(self, map_func, reduce_func, drop, numeric_only, **kwargs)
479 reduce_args=kwargs,
480 numeric_only=numeric_only,
--> 481 drop=self._drop,
482 )
483 )
~/eksperyment/lib64/python3.6/site-packages/modin/backends/pandas/query_compiler.py in groupby_reduce(self, by, axis, groupby_args, map_func, map_args, reduce_func, reduce_args, numeric_only, drop)
1236 _reduce,
1237 new_columns=new_columns,
-> 1238 new_index=new_index,
1239 )
1240 result = self.__constructor__(new_modin_frame)
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/data.py in groupby_reduce(self, axis, by, map_func, reduce_func, new_index, new_columns)
1148 """
1149 new_partitions = self._frame_mgr_cls.groupby_reduce(
-> 1150 axis, self._partitions, by._partitions, map_func, reduce_func
1151 )
1152 if new_columns is None:
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in groupby_reduce(cls, axis, partitions, by, map_func, reduce_func)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
AttributeError: 'numpy.ndarray' object has no attribute 'drain_call_queue'
|
AttributeError
|
def broadcast_apply(cls, axis, apply_func, left, right, other_name="r"):
def mapper(df, others):
other = pandas.concat(others, axis=axis ^ 1)
return apply_func(df, **{other_name: other})
mapper = ray.put(mapper)
new_partitions = np.array(
[
[
PandasOnRayFramePartition(
func.remote(
part.oid,
mapper,
part.call_queue,
[obj[col_idx].call_queue for obj in right]
if axis
else [obj.call_queue for obj in right[row_idx]],
*(
[obj[col_idx].oid for obj in right]
if axis
else [obj.oid for obj in right[row_idx]]
),
)
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
return new_partitions
|
def broadcast_apply(cls, axis, apply_func, left, right):
map_func = ray.put(apply_func)
right_parts = np.squeeze(right)
if len(right_parts.shape) == 0:
right_parts = np.array([right_parts.item()])
assert len(right_parts.shape) == 1, (
"Invalid broadcast partitions shape {}\n{}".format(
right_parts.shape, [[i.get() for i in j] for j in right_parts]
)
)
return np.array(
[
[
PandasOnRayFramePartition(
func.remote(
part.oid,
right_parts[col_idx].oid if axis else right_parts[row_idx].oid,
map_func,
part.call_queue,
right_parts[col_idx].call_queue
if axis
else right_parts[row_idx].call_queue,
)
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
|
https://github.com/modin-project/modin/issues/1376
|
import modin.pandas as pd
import glob
files = glob.glob('/path/to/files/*.parquet')
df = pd.concat([pd.read_parquet(file) for file in files])
df.reset_index(inplace=True)
df.drop(columns=['index', 'DATE', 'STATUS', 'ID', 'ID2],inplace=True)
df.groupby(['REPORTING_DATE', 'ENTITY', 'CURRENCY']).sum()
UserWarning: Multi-column groupby is a new feature. Please report any bugs/issues to bug_reports@modin.org.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-15-081b1819b7d6> in <module>
----> 1 df.groupby(['DATA_SALDA', 'BANK', 'WALUTA_KON']).sum()
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in sum(self, **kwargs)
323
324 def sum(self, **kwargs):
--> 325 return self._groupby_reduce(lambda df: df.sum(**kwargs), None)
326
327 def describe(self, **kwargs):
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in _groupby_reduce(self, map_func, reduce_func, drop, numeric_only, **kwargs)
479 reduce_args=kwargs,
480 numeric_only=numeric_only,
--> 481 drop=self._drop,
482 )
483 )
~/eksperyment/lib64/python3.6/site-packages/modin/backends/pandas/query_compiler.py in groupby_reduce(self, by, axis, groupby_args, map_func, map_args, reduce_func, reduce_args, numeric_only, drop)
1236 _reduce,
1237 new_columns=new_columns,
-> 1238 new_index=new_index,
1239 )
1240 result = self.__constructor__(new_modin_frame)
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/data.py in groupby_reduce(self, axis, by, map_func, reduce_func, new_index, new_columns)
1148 """
1149 new_partitions = self._frame_mgr_cls.groupby_reduce(
-> 1150 axis, self._partitions, by._partitions, map_func, reduce_func
1151 )
1152 if new_columns is None:
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in groupby_reduce(cls, axis, partitions, by, map_func, reduce_func)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
AttributeError: 'numpy.ndarray' object has no attribute 'drain_call_queue'
|
AttributeError
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to drop the data column before doing the
# groupby. The typical pandas behavior is to drop when the data came from this
# dataframe. When a string, Series directly from this dataframe, or list of
# strings is passed in, the data used for the groupby is dropped before the
# groupby takes place.
drop = False
if (
not isinstance(by, (pandas.Series, Series))
and is_list_like(by)
and len(by) == 1
):
by = by[0]
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
drop = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if (
not isinstance(by, Series)
and axis == 0
and all(
(
(isinstance(o, str) and (o in self))
or (isinstance(o, Series) and (o._parent is self))
)
for o in by
)
):
# We can just revert Series back to names because the parent is
# this dataframe:
by = [o.name if isinstance(o, Series) else o for o in by]
by = self.__getitem__(by)._query_compiler
drop = True
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
isinstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch and any(
isinstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if isinstance(o, Series) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
)
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to drop the data column before doing the
# groupby. The typical pandas behavior is to drop when the data came from this
# dataframe. When a string, Series directly from this dataframe, or list of
# strings is passed in, the data used for the groupby is dropped before the
# groupby takes place.
drop = False
if (
not isinstance(by, (pandas.Series, Series))
and is_list_like(by)
and len(by) == 1
):
by = by[0]
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
drop = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if (
not isinstance(by, Series)
and axis == 0
and all(
(
(isinstance(o, str) and (o in self))
or (isinstance(o, Series) and (o._parent is self))
)
for o in by
)
):
# We can just revert Series back to names because the parent is
# this dataframe:
by = [o.name if isinstance(o, Series) else o for o in by]
warnings.warn(
"Multi-column groupby is a new feature. "
"Please report any bugs/issues to bug_reports@modin.org."
)
by = self.__getitem__(by)._query_compiler
drop = True
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
isinstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch and any(
isinstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if isinstance(o, Series) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
)
|
https://github.com/modin-project/modin/issues/1376
|
import modin.pandas as pd
import glob
files = glob.glob('/path/to/files/*.parquet')
df = pd.concat([pd.read_parquet(file) for file in files])
df.reset_index(inplace=True)
df.drop(columns=['index', 'DATE', 'STATUS', 'ID', 'ID2],inplace=True)
df.groupby(['REPORTING_DATE', 'ENTITY', 'CURRENCY']).sum()
UserWarning: Multi-column groupby is a new feature. Please report any bugs/issues to bug_reports@modin.org.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-15-081b1819b7d6> in <module>
----> 1 df.groupby(['DATA_SALDA', 'BANK', 'WALUTA_KON']).sum()
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in sum(self, **kwargs)
323
324 def sum(self, **kwargs):
--> 325 return self._groupby_reduce(lambda df: df.sum(**kwargs), None)
326
327 def describe(self, **kwargs):
~/eksperyment/lib64/python3.6/site-packages/modin/pandas/groupby.py in _groupby_reduce(self, map_func, reduce_func, drop, numeric_only, **kwargs)
479 reduce_args=kwargs,
480 numeric_only=numeric_only,
--> 481 drop=self._drop,
482 )
483 )
~/eksperyment/lib64/python3.6/site-packages/modin/backends/pandas/query_compiler.py in groupby_reduce(self, by, axis, groupby_args, map_func, map_args, reduce_func, reduce_args, numeric_only, drop)
1236 _reduce,
1237 new_columns=new_columns,
-> 1238 new_index=new_index,
1239 )
1240 result = self.__constructor__(new_modin_frame)
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/data.py in groupby_reduce(self, axis, by, map_func, reduce_func, new_index, new_columns)
1148 """
1149 new_partitions = self._frame_mgr_cls.groupby_reduce(
-> 1150 axis, self._partitions, by._partitions, map_func, reduce_func
1151 )
1152 if new_columns is None:
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in groupby_reduce(cls, axis, partitions, by, map_func, reduce_func)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
~/eksperyment/lib64/python3.6/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
66 if len(by_parts.shape) == 0:
67 by_parts = np.array([by_parts.item()])
---> 68 [obj.drain_call_queue() for obj in by_parts]
69 new_partitions = np.array(
70 [
AttributeError: 'numpy.ndarray' object has no attribute 'drain_call_queue'
|
AttributeError
|
def concat(cls, axis, left_parts, right_parts):
"""Concatenate the blocks with another set of blocks.
Note: Assumes that the blocks are already the same shape on the
dimension being concatenated. A ValueError will be thrown if this
condition is not met.
Args:
axis: The axis to concatenate to.
right_parts: the other blocks to be concatenated. This is a
BaseFrameManager object.
Returns:
A new BaseFrameManager object, the type of object that called this.
"""
if type(right_parts) is list:
# `np.array` with partitions of empty ModinFrame has a shape (0,)
# but `np.concatenate` can concatenate arrays only if its shapes at
# specified axis are equals, so filtering empty frames to avoid concat error
right_parts = [o for o in right_parts if o.size != 0]
return np.concatenate([left_parts] + right_parts, axis=axis)
else:
return np.append(left_parts, right_parts, axis=axis)
|
def concat(cls, axis, left_parts, right_parts):
"""Concatenate the blocks with another set of blocks.
Note: Assumes that the blocks are already the same shape on the
dimension being concatenated. A ValueError will be thrown if this
condition is not met.
Args:
axis: The axis to concatenate to.
right_parts: the other blocks to be concatenated. This is a
BaseFrameManager object.
Returns:
A new BaseFrameManager object, the type of object that called this.
"""
if type(right_parts) is list:
return np.concatenate([left_parts] + right_parts, axis=axis)
else:
return np.append(left_parts, right_parts, axis=axis)
|
https://github.com/modin-project/modin/issues/1679
|
Traceback (most recent call last):
File "test_setitem6.py", line 4, in <module>
df1.loc["row1"]["col1"] = 11
File "/localdisk/gashiman/modin/modin/pandas/series.py", line 335, in __setitem__
self._parent.loc[self.name] = self
File "/localdisk/gashiman/modin/modin/pandas/indexing.py", line 279, in __setitem__
super(_LocIndexer, self).__setitem__(row_lookup, col_lookup, item)
File "/localdisk/gashiman/modin/modin/pandas/indexing.py", line 162, in __setitem__
new_qc = self.qc.setitem(1, self.qc.index[row_lookup[0]], item)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1285, in setitem
value._modin_frame._concat(0, [mask], "inner", False)
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1273, in _concat
new_partitions = self._frame_mgr_cls.concat(axis, left_parts, right_parts)
File "/localdisk/gashiman/modin/modin/engines/base/frame/partition_manager.py", line 223, in concat
return np.concatenate([left_parts] + right_parts, axis=axis)
File "<__array_function__ internals>", line 5, in concatenate
ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s)
|
ValueError
|
def make_wrapped_class(local_cls: type, rpyc_wrapper_name: str):
"""
Replaces given local class in its module with a replacement class
which has __new__ defined (a dual-nature class).
This new class is instantiated differently depending on
whether this is done in remote or local context.
In local context we effectively get the same behaviour, but in remote
context the created class is actually of separate type which
proxies most requests to a remote end.
Parameters
----------
local_cls: class
The class to replace with a dual-nature class
rpyc_wrapper_name: str
The function *name* to make a proxy class type.
Note that this is specifically taken as string to not import
"rpyc_proxy" module in top-level, as it requires RPyC to be
installed, and not all users of Modin (even in experimental mode)
need remote context.
"""
# get a copy of local_cls attributes' dict but skip _very_ special attributes,
# because copying them to a different type leads to them not working.
# Python should create new descriptors automatically for us instead.
namespace = {
name: value
for name, value in local_cls.__dict__.items()
if not isinstance(value, types.GetSetDescriptorType)
}
namespace["__real_cls__"] = None
namespace["__new__"] = None
# define a new class the same way original was defined but with replaced
# metaclass and a few more attributes in namespace
result = RemoteMeta(local_cls.__name__, local_cls.__bases__, namespace)
def make_new(__class__):
"""
Define a __new__() with a __class__ that is closure-bound, needed for super() to work
"""
def __new__(cls, *a, **kw):
if cls is result and cls.__real_cls__ is not result:
return cls.__real_cls__(*a, **kw)
return super().__new__(cls)
__class__.__new__ = __new__
make_new(result)
setattr(sys.modules[local_cls.__module__], local_cls.__name__, result)
_KNOWN_DUALS[local_cls] = result
def update_class(_):
if execution_engine.get() == "Cloudray":
from . import rpyc_proxy
result.__real_cls__ = getattr(rpyc_proxy, rpyc_wrapper_name)(result)
else:
result.__real_cls__ = result
execution_engine.subscribe(update_class)
|
def make_wrapped_class(local_cls: type, rpyc_wrapper_name: str):
"""
Replaces given local class in its module with a descendant class
which has __new__ overridden (a dual-nature class).
This new class is instantiated differently depending o
whether this is done in remote context or local.
In local context we effectively get the same behaviour, but in remote
context the created class is actually of separate type which
proxies most requests to a remote end.
Parameters
----------
local_cls: class
The class to replace with a dual-nature class
rpyc_wrapper_name: str
The function *name* to make a proxy class type.
Note that this is specifically taken as string to not import
"rpyc_proxy" module in top-level, as it requires RPyC to be
installed, and not all users of Modin (even in experimental mode)
need remote context.
"""
namespace = {
"__real_cls__": None,
"__new__": None,
"__module__": local_cls.__module__,
}
result = RemoteMeta(local_cls.__name__, (local_cls,), namespace)
def make_new(__class__):
"""
Define a __new__() with a __class__ that is closure-bound, needed for super() to work
"""
def __new__(cls, *a, **kw):
if cls is result and cls.__real_cls__ is not result:
return cls.__real_cls__(*a, **kw)
return super().__new__(cls)
__class__.__new__ = __new__
make_new(result)
setattr(sys.modules[local_cls.__module__], local_cls.__name__, result)
_KNOWN_DUALS[local_cls] = result
def update_class(_):
if execution_engine.get() == "Cloudray":
from . import rpyc_proxy
result.__real_cls__ = getattr(rpyc_proxy, rpyc_wrapper_name)(result)
else:
result.__real_cls__ = result
execution_engine.subscribe(update_class)
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def _binary_op(self, op, other, **kwargs):
# _axis indicates the operator will use the default axis
if kwargs.pop("_axis", None) is None:
if kwargs.get("axis", None) is not None:
kwargs["axis"] = axis = self._get_axis_number(kwargs.get("axis", None))
else:
kwargs["axis"] = axis = 1
else:
axis = 0
if kwargs.get("level", None) is not None:
# Broadcast is an internally used argument
kwargs.pop("broadcast", None)
return self._default_to_pandas(
getattr(getattr(pandas, type(self).__name__), op), other, **kwargs
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
return self._create_or_update_from_compiler(new_query_compiler)
|
def _binary_op(self, op, other, **kwargs):
# _axis indicates the operator will use the default axis
if kwargs.pop("_axis", None) is None:
if kwargs.get("axis", None) is not None:
kwargs["axis"] = axis = self._get_axis_number(kwargs.get("axis", None))
else:
kwargs["axis"] = axis = 1
else:
axis = 0
if kwargs.get("level", None) is not None:
# Broadcast is an internally used argument
kwargs.pop("broadcast", None)
return self._default_to_pandas(
getattr(getattr(pandas, self.__name__), op), other, **kwargs
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
return self._create_or_update_from_compiler(new_query_compiler)
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def _default_to_pandas(self, op, *args, **kwargs):
"""Helper method to use default pandas function"""
empty_self_str = "" if not self.empty else " for empty DataFrame"
ErrorMessage.default_to_pandas(
"`{}.{}`{}".format(
type(self).__name__,
op if isinstance(op, str) else op.__name__,
empty_self_str,
)
)
args = try_cast_to_pandas(args)
kwargs = try_cast_to_pandas(kwargs)
pandas_obj = self._to_pandas()
if callable(op):
result = op(pandas_obj, *args, **kwargs)
elif isinstance(op, str):
# The inner `getattr` is ensuring that we are treating this object (whether
# it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr`
# will get the operation (`op`) from the pandas version of the class and run
# it on the object after we have converted it to pandas.
result = getattr(getattr(pandas, type(self).__name__), op)(
pandas_obj, *args, **kwargs
)
else:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=True,
extra_log="{} is an unsupported operation".format(op),
)
# SparseDataFrames cannot be serialized by arrow and cause problems for Modin.
# For now we will use pandas.
if isinstance(result, type(self)) and not isinstance(
result, (pandas.SparseDataFrame, pandas.SparseSeries)
):
return self._create_or_update_from_compiler(
result, inplace=kwargs.get("inplace", False)
)
elif isinstance(result, pandas.DataFrame):
from .dataframe import DataFrame
return DataFrame(result)
elif isinstance(result, pandas.Series):
from .series import Series
return Series(result)
# inplace
elif result is None:
import modin.pandas as pd
return self._create_or_update_from_compiler(
getattr(pd, type(pandas_obj).__name__)(pandas_obj)._query_compiler,
inplace=True,
)
else:
try:
if (
isinstance(result, (list, tuple))
and len(result) == 2
and isinstance(result[0], pandas.DataFrame)
):
# Some operations split the DataFrame into two (e.g. align). We need to wrap
# both of the returned results
if isinstance(result[1], pandas.DataFrame):
second = self.__constructor__(result[1])
else:
second = result[1]
return self.__constructor__(result[0]), second
else:
return result
except TypeError:
return result
|
def _default_to_pandas(self, op, *args, **kwargs):
"""Helper method to use default pandas function"""
empty_self_str = "" if not self.empty else " for empty DataFrame"
ErrorMessage.default_to_pandas(
"`{}.{}`{}".format(
self.__name__,
op if isinstance(op, str) else op.__name__,
empty_self_str,
)
)
args = try_cast_to_pandas(args)
kwargs = try_cast_to_pandas(kwargs)
pandas_obj = self._to_pandas()
if callable(op):
result = op(pandas_obj, *args, **kwargs)
elif isinstance(op, str):
# The inner `getattr` is ensuring that we are treating this object (whether
# it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr`
# will get the operation (`op`) from the pandas version of the class and run
# it on the object after we have converted it to pandas.
result = getattr(getattr(pandas, self.__name__), op)(
pandas_obj, *args, **kwargs
)
else:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=True,
extra_log="{} is an unsupported operation".format(op),
)
# SparseDataFrames cannot be serialized by arrow and cause problems for Modin.
# For now we will use pandas.
if isinstance(result, type(self)) and not isinstance(
result, (pandas.SparseDataFrame, pandas.SparseSeries)
):
return self._create_or_update_from_compiler(
result, inplace=kwargs.get("inplace", False)
)
elif isinstance(result, pandas.DataFrame):
from .dataframe import DataFrame
return DataFrame(result)
elif isinstance(result, pandas.Series):
from .series import Series
return Series(result)
# inplace
elif result is None:
import modin.pandas as pd
return self._create_or_update_from_compiler(
getattr(pd, type(pandas_obj).__name__)(pandas_obj)._query_compiler,
inplace=True,
)
else:
try:
if (
isinstance(result, (list, tuple))
and len(result) == 2
and isinstance(result[0], pandas.DataFrame)
):
# Some operations split the DataFrame into two (e.g. align). We need to wrap
# both of the returned results
if isinstance(result[1], pandas.DataFrame):
second = self.__constructor__(result[1])
else:
second = result[1]
return self.__constructor__(result[0]), second
else:
return result
except TypeError:
return result
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def _get_axis_number(self, axis):
return (
getattr(pandas, type(self).__name__)()._get_axis_number(axis)
if axis is not None
else 0
)
|
def _get_axis_number(self, axis):
return (
getattr(pandas, self.__name__)()._get_axis_number(axis)
if axis is not None
else 0
)
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = self._get_axis_number(axis)
if bool_only and axis == 0:
if hasattr(self, "dtype"):
raise NotImplementedError(
"{}.{} does not implement numeric_only.".format(
type(self).__name__, "all"
)
)
data_for_compute = self[self.columns[self.dtypes == np.bool]]
return data_for_compute.all(
axis=axis, bool_only=False, skipna=skipna, level=level, **kwargs
)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._handle_level_agg(axis, level, "all", skipna=skipna, **kwargs)
return self._reduce_dimension(
self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
# Reduce to a scalar if axis is None.
if level is not None:
return self._handle_level_agg(axis, level, "all", skipna=skipna, **kwargs)
else:
result = self._reduce_dimension(
self._query_compiler.all(
axis=0,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
)
if isinstance(result, BasePandasDataset):
return result.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
return result
|
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = self._get_axis_number(axis)
if bool_only and axis == 0:
if hasattr(self, "dtype"):
raise NotImplementedError(
"{}.{} does not implement numeric_only.".format(
self.__name__, "all"
)
)
data_for_compute = self[self.columns[self.dtypes == np.bool]]
return data_for_compute.all(
axis=axis, bool_only=False, skipna=skipna, level=level, **kwargs
)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._handle_level_agg(axis, level, "all", skipna=skipna, **kwargs)
return self._reduce_dimension(
self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
# Reduce to a scalar if axis is None.
if level is not None:
return self._handle_level_agg(axis, level, "all", skipna=skipna, **kwargs)
else:
result = self._reduce_dimension(
self._query_compiler.all(
axis=0,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
)
if isinstance(result, BasePandasDataset):
return result.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
return result
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = self._get_axis_number(axis)
if bool_only and axis == 0:
if hasattr(self, "dtype"):
raise NotImplementedError(
"{}.{} does not implement numeric_only.".format(
type(self).__name__, "all"
)
)
data_for_compute = self[self.columns[self.dtypes == np.bool]]
return data_for_compute.any(
axis=axis, bool_only=False, skipna=skipna, level=level, **kwargs
)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._handle_level_agg(axis, level, "any", skipna=skipna, **kwargs)
return self._reduce_dimension(
self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
# Reduce to a scalar if axis is None.
if level is not None:
return self._handle_level_agg(axis, level, "any", skipna=skipna, **kwargs)
else:
result = self._reduce_dimension(
self._query_compiler.any(
axis=0,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
)
if isinstance(result, BasePandasDataset):
return result.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
return result
|
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = self._get_axis_number(axis)
if bool_only and axis == 0:
if hasattr(self, "dtype"):
raise NotImplementedError(
"{}.{} does not implement numeric_only.".format(
self.__name__, "all"
)
)
data_for_compute = self[self.columns[self.dtypes == np.bool]]
return data_for_compute.any(
axis=axis, bool_only=False, skipna=skipna, level=level, **kwargs
)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._handle_level_agg(axis, level, "any", skipna=skipna, **kwargs)
return self._reduce_dimension(
self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
# Reduce to a scalar if axis is None.
if level is not None:
return self._handle_level_agg(axis, level, "any", skipna=skipna, **kwargs)
else:
result = self._reduce_dimension(
self._query_compiler.any(
axis=0,
bool_only=bool_only,
skipna=skipna,
level=level,
**kwargs,
)
)
if isinstance(result, BasePandasDataset):
return result.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
return result
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def rename_axis(
self, mapper=None, index=None, columns=None, axis=None, copy=True, inplace=False
):
kwargs = {
"index": index,
"columns": columns,
"axis": axis,
"copy": copy,
"inplace": inplace,
}
axes, kwargs = getattr(
pandas, type(self).__name__
)()._construct_axes_from_arguments((), kwargs, sentinel=sentinel)
if axis is not None:
axis = self._get_axis_number(axis)
else:
axis = 0
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not None:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns is specified
result = self if inplace else self.copy(deep=copy)
for axis in axes:
if axes[axis] is None:
continue
v = axes[axis]
axis = self._get_axis_number(axis)
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
def _get_rename_function(mapper):
if isinstance(mapper, (dict, BasePandasDataset)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
f = _get_rename_function(v)
curnames = self.index.names if axis == 0 else self.columns.names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
|
def rename_axis(
self, mapper=None, index=None, columns=None, axis=None, copy=True, inplace=False
):
kwargs = {
"index": index,
"columns": columns,
"axis": axis,
"copy": copy,
"inplace": inplace,
}
axes, kwargs = getattr(pandas, self.__name__)()._construct_axes_from_arguments(
(), kwargs, sentinel=sentinel
)
if axis is not None:
axis = self._get_axis_number(axis)
else:
axis = 0
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not None:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns is specified
result = self if inplace else self.copy(deep=copy)
for axis in axes:
if axes[axis] is None:
continue
v = axes[axis]
axis = self._get_axis_number(axis)
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
def _get_rename_function(mapper):
if isinstance(mapper, (dict, BasePandasDataset)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
f = _get_rename_function(v)
curnames = self.index.names if axis == 0 else self.columns.names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __getitem__(self, key):
if len(self) == 0:
return self._default_to_pandas("__getitem__", key)
# see if we can slice the rows
# This lets us reuse code in Pandas to error check
indexer = convert_to_index_sliceable(
getattr(pandas, type(self).__name__)(index=self.index), key
)
if indexer is not None:
return self._getitem_slice(indexer)
else:
return self._getitem(key)
|
def __getitem__(self, key):
if len(self) == 0:
return self._default_to_pandas("__getitem__", key)
# see if we can slice the rows
# This lets us reuse code in Pandas to error check
indexer = convert_to_index_sliceable(
getattr(pandas, self.__name__)(index=self.index), key
)
if indexer is not None:
return self._getitem_slice(indexer)
else:
return self._getitem(key)
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, type(self).__name__)(**init_kwargs).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if isinstance(result, Series):
if axis == 0 and result.name == self.index[0] or result.name == 0:
result.name = None
elif axis == 1 and result.name == self.columns[0] or result.name == 0:
result.name = None
return result
|
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `self.__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, self.__name__)(**init_kwargs).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = self.__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if isinstance(result, Series):
if axis == 0 and result.name == self.index[0] or result.name == 0:
result.name = None
elif axis == 1 and result.name == self.columns[0] or result.name == 0:
result.name = None
return result
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
pandas.DataFrame(columns=self.columns).astype(self.dtypes).eval(expr, **kwargs)
).__name__
if return_type == type(self).__name__:
return self._create_or_update_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
return getattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
|
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
pandas.DataFrame(columns=self.columns).astype(self.dtypes).eval(expr, **kwargs)
).__name__
if return_type == self.__name__:
return self._create_or_update_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
return getattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def apply(self, func, convert_dtype=True, args=(), **kwds):
# apply and aggregate have slightly different behaviors, so we have to use
# each one separately to determine the correct return type. In the case of
# `agg`, the axis is set, but it is not required for the computation, so we use
# it to determine which function to run.
if kwds.pop("axis", None) is not None:
apply_func = "agg"
else:
apply_func = "apply"
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
# Because a `Series` cannot be empty in pandas, we create a "dummy" `Series` to
# do the error checking and determining the return type.
try:
return_type = type(
getattr(pandas.Series([""], index=self.index[:1]), apply_func)(
func, *args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if (
isinstance(func, str)
or is_list_like(func)
or return_type not in ["DataFrame", "Series"]
):
query_compiler = super(Series, self).apply(func, *args, **kwds)
else:
# handle ufuncs and lambdas
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
with np.errstate(all="ignore"):
if isinstance(f, np.ufunc):
return f(self)
query_compiler = self.map(f)._query_compiler
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if result.name == self.index[0]:
result.name = None
return result
|
def apply(self, func, convert_dtype=True, args=(), **kwds):
# apply and aggregate have slightly different behaviors, so we have to use
# each one separately to determine the correct return type. In the case of
# `agg`, the axis is set, but it is not required for the computation, so we use
# it to determine which function to run.
if kwds.pop("axis", None) is not None:
apply_func = "agg"
else:
apply_func = "apply"
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `self.__name__` for the return
# type.
# Because a `Series` cannot be empty in pandas, we create a "dummy" `Series` to
# do the error checking and determining the return type.
try:
return_type = type(
getattr(pandas.Series([""], index=self.index[:1]), apply_func)(
func, *args, **kwds
)
).__name__
except Exception:
return_type = self.__name__
if (
isinstance(func, str)
or is_list_like(func)
or return_type not in ["DataFrame", "Series"]
):
query_compiler = super(Series, self).apply(func, *args, **kwds)
else:
# handle ufuncs and lambdas
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
with np.errstate(all="ignore"):
if isinstance(f, np.ufunc):
return f(self)
query_compiler = self.map(f)._query_compiler
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if result.name == self.index[0]:
result.name = None
return result
|
https://github.com/modin-project/modin/issues/1872
|
Traceback (most recent call last):
File "1.py", line 14, in <module>
ref = df["a"].add(df["b"])
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
File "/localdisk/ilyaenko/modin/modin/pandas/series.py", line 376, in add
return super(Series, new_self).add(
[Previous line repeated 978 more times]
...
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __repr__(self):
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
temp_df = self._build_repr_df(num_rows, num_cols)
if isinstance(temp_df, pandas.DataFrame) and not temp_df.empty:
temp_df = temp_df.iloc[:, 0]
temp_str = repr(temp_df)
if self.name is not None:
name_str = "Name: {}, ".format(str(self.name))
else:
name_str = ""
if len(self.index) > num_rows:
len_str = "Length: {}, ".format(len(self.index))
else:
len_str = ""
dtype_str = "dtype: {}".format(
str(self.dtype) + ")" if temp_df.empty else temp_str.rsplit("dtype: ", 1)[-1]
)
if len(self) == 0:
return "Series([], {}{}".format(name_str, dtype_str)
return temp_str.rsplit("\nName:", 1)[0] + "\n{}{}{}".format(
name_str, len_str, dtype_str
)
|
def __repr__(self):
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
temp_df = self._build_repr_df(num_rows, num_cols)
if isinstance(temp_df, pandas.DataFrame):
temp_df = temp_df.iloc[:, 0]
temp_str = repr(temp_df)
if self.name is not None:
name_str = "Name: {}, ".format(str(self.name))
else:
name_str = ""
if len(self.index) > num_rows:
len_str = "Length: {}, ".format(len(self.index))
else:
len_str = ""
dtype_str = "dtype: {}".format(temp_str.rsplit("dtype: ", 1)[-1])
if len(self) == 0:
return "Series([], {}{}".format(name_str, dtype_str)
return temp_str.rsplit("\nName:", 1)[0] + "\n{}{}{}".format(
name_str, len_str, dtype_str
)
|
https://github.com/modin-project/modin/issues/1647
|
Traceback (most recent call last):
File "t2.py", line 5, in <module>
print(s)
File "C:\Users\dchigare\Desktop\REPOS\modin\modin\pandas\base.py", line 3471, in __str__
return repr(self)
File "C:\Users\dchigare\Desktop\REPOS\modin\modin\pandas\series.py", line 298, in __repr__
temp_df = temp_df.iloc[:, 0]
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1994, in _validate_key
self._validate_integer(key, axis)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2063, in _validate_integer
raise IndexError("single positional indexer is out-of-bounds")
IndexError: single positional indexer is out-of-bounds
|
IndexError
|
def __getattribute__(self, name):
if name in _LOCAL_ATTRS:
# never proxy special attributes, always get them from the class type
return super().__getattribute__(name)
else:
try:
# Go for proxying class-level attributes first;
# make sure to check for attribute in self.__dict__ to get the class-level
# attribute from the class itself, not from some of its parent classes.
res = super().__getattribute__("__dict__")[name]
except KeyError:
# Class-level attribute not found in the class itself; it might be present
# in its parents, but we must first see if we should go to a remote
# end, because in "remote context" local attributes are only those which
# are explicitly allowed by being defined in the class itself.
frame = sys._getframe()
try:
is_inspect = frame.f_back.f_code.co_filename == inspect.__file__
except AttributeError:
is_inspect = False
finally:
del frame
if is_inspect:
# be always-local for inspect.* functions
return super().__getattribute__(name)
else:
try:
remote = self.__real_cls__.__wrapper_remote__
except AttributeError:
# running in local mode, fall back
return super().__getattribute__(name)
return getattr(remote, name)
else:
try:
# note that any attribute might be in fact a data descriptor,
# account for that; we only need it for attributes we get from __dict__[],
# because other cases are handled by super().__getattribute__ for us
getter = res.__get__
except AttributeError:
return res
return getter(None, self)
|
def __getattribute__(self, name):
if name in _LOCAL_ATTRS:
# never proxy special attributes, always get them from the class type
res = object.__getattribute__(self, name)
else:
try:
# Go for proxying class-level attributes first;
# make sure to check for attribute in self.__dict__ to get the class-level
# attribute from the class itself, not from some of its parent classes.
# Also note we use object.__getattribute__() to skip any potential
# class-level __getattr__
res = object.__getattribute__(self, "__dict__")[name]
except KeyError:
try:
res = object.__getattribute__(self, name)
except AttributeError:
frame = sys._getframe()
try:
is_inspect = frame.f_back.f_code.co_filename == inspect.__file__
except AttributeError:
is_inspect = False
finally:
del frame
if is_inspect:
# be always-local for inspect.* functions
res = super().__getattribute__(name)
else:
try:
remote = object.__getattribute__(
object.__getattribute__(self, "__real_cls__"),
"__wrapper_remote__",
)
except AttributeError:
# running in local mode, fall back
res = super().__getattribute__(name)
else:
res = getattr(remote, name)
try:
# note that any attribute might be in fact a data descriptor,
# account for that
getter = res.__get__
except AttributeError:
return res
return getter(None, self)
|
https://github.com/modin-project/modin/issues/1873
|
Traceback (most recent call last):
File "2.py", line 15, in <module>
ref = df.groupby("b").size()
File "/localdisk/ilyaenko/modin/modin/pandas/groupby.py", line 420, in size
work_object = SeriesGroupBy(
TypeError: type.__new__(SeriesGroupBy): SeriesGroupBy is not a subtype of type
|
TypeError
|
def make_proxy_cls(
remote_cls: netref.BaseNetref,
origin_cls: type,
override: type,
cls_name: str = None,
):
"""
Makes a new class type which inherits from <origin_cls> (for isinstance() and issubtype()),
takes methods from <override> as-is and proxy all requests for other members to <remote_cls>.
Note that origin_cls and remote_cls are assumed to be the same class types, but one is local
and other is obtained from RPyC.
Effectively implements subclassing, but without subclassing. This is needed because it is
impossible to subclass a remote-obtained class, something in the very internals of RPyC bugs out.
Parameters
----------
remote_cls: netref.BaseNetref
Type obtained from RPyC connection, expected to mirror origin_cls
origin_cls: type
The class to prepare a proxying wrapping for
override: type
The mixin providing methods and attributes to overlay on top of remote values and methods.
cls_name: str, optional
The name to give to the resulting class.
Returns
-------
type
New wrapper that takes attributes from override and relays requests to all other
attributes to remote_cls
"""
class ProxyMeta(RemoteMeta):
"""
This metaclass deals with printing a telling repr() to assist in debugging,
and to actually implement the "subclass without subclassing" thing by
directly adding references to attributes of "override" and by making proxy methods
for other functions of origin_cls. Class-level attributes being proxied is managed
by RemoteMeta parent.
Do note that we cannot do the same for certain special members like __getitem__
because CPython for optimization doesn't do a lookup of "type(obj).__getitem__(foo)" when
"obj[foo]" is called, but it effectively does "type(obj).__dict__['__getitem__'](foo)"
(but even without checking for __dict__), so all present methods must be declared
beforehand.
"""
def __repr__(self):
return f"<proxy for {origin_cls.__module__}.{origin_cls.__name__}:{cls_name or origin_cls.__name__}>"
def __prepare__(*args, **kw):
"""
Cooks the __dict__ of the type being constructed. Takes attributes from <override> as is
and adds proxying wrappers for other attributes of <origin_cls>.
This "manual inheritance" is needed for RemoteMeta.__getattribute__ which first looks into
type(obj).__dict__ (EXCLUDING parent classes) and then goes to proxy type.
"""
namespace = type.__prepare__(*args, **kw)
namespace["__remote_methods__"] = {}
# try computing overridden differently to allow subclassing one override from another
no_override = set(_NO_OVERRIDE)
for base in override.__mro__:
if base == object:
continue
for attr_name, attr_value in base.__dict__.items():
if (
attr_name not in namespace
and attr_name not in no_override
and getattr(object, attr_name, None) != attr_value
):
namespace[attr_name] = (
attr_value # force-inherit an attribute manually
)
no_override.add(attr_name)
for base in origin_cls.__mro__:
if base == object:
continue
# try unwrapping a dual-nature class first
while True:
try:
sub_base = object.__getattribute__(base, "__real_cls__")
except AttributeError:
break
if sub_base is base:
break
base = sub_base
for name, entry in base.__dict__.items():
if (
name not in namespace
and name not in no_override
and isinstance(entry, types.FunctionType)
):
def method(_self, *_args, __method_name__=name, **_kw):
try:
remote = _self.__remote_methods__[__method_name__]
except KeyError:
# use remote_cls.__getattr__ to force RPyC return us
# a proxy for remote method call instead of its local wrapper
_self.__remote_methods__[__method_name__] = remote = (
remote_cls.__getattr__(__method_name__)
)
return remote(_self.__remote_end__, *_args, **_kw)
method.__name__ = name
namespace[name] = method
return namespace
class Wrapper(override, origin_cls, metaclass=ProxyMeta):
"""
Subclass origin_cls replacing attributes with what is defined in override while
relaying requests for all other attributes to remote_cls.
"""
__name__ = cls_name or origin_cls.__name__
__wrapper_remote__ = remote_cls
def __init__(self, *a, __remote_end__=None, **kw):
if __remote_end__ is None:
__remote_end__ = remote_cls(*a, **kw)
while True:
# unwrap the object if it's a wrapper
try:
__remote_end__ = object.__getattribute__(
__remote_end__, "__remote_end__"
)
except AttributeError:
break
object.__setattr__(self, "__remote_end__", __remote_end__)
@classmethod
def from_remote_end(cls, remote_inst):
return cls(__remote_end__=remote_inst)
def __getattribute__(self, name):
"""
Implement "default" resolution order to override whatever __getattribute__
a parent being wrapped may have defined, but only look up on own __dict__
without looking into ancestors' ones, because we copy them in __prepare__.
Effectively, any attributes not currently known to Wrapper (i.e. not defined here
or in override class) will be retrieved from the remote end.
Algorithm (mimicking default Python behaviour):
1) check if type(self).__dict__[name] exists and is a get/set data descriptor
2) check if self.__dict__[name] exists
3) check if type(self).__dict__[name] is a non-data descriptor
4) check if type(self).__dict__[name] exists
5) pass through to remote end
"""
if name == "__class__":
return object.__getattribute__(self, "__class__")
dct = object.__getattribute__(self, "__dict__")
if name == "__dict__":
return dct
cls_dct = object.__getattribute__(type(self), "__dict__")
try:
cls_attr, has_cls_attr = cls_dct[name], True
except KeyError:
has_cls_attr = False
else:
oget = None
try:
oget = object.__getattribute__(cls_attr, "__get__")
object.__getattribute__(cls_attr, "__set__")
except AttributeError:
pass # not a get/set data descriptor, go next
else:
return oget(self, type(self))
# type(self).name is not a get/set data descriptor
try:
return dct[name]
except KeyError:
# instance doesn't have an attribute
if has_cls_attr:
# type(self) has this attribute, but it's not a get/set descriptor
if oget:
# this attribute is a get data descriptor
return oget(self, type(self))
return cls_attr # not a data descriptor whatsoever
# this instance/class does not have this attribute, pass it through to remote end
return getattr(dct["__remote_end__"], name)
if override.__setattr__ == object.__setattr__:
# no custom attribute setting, define our own relaying to remote end
def __setattr__(self, name, value):
if name not in _PROXY_LOCAL_ATTRS:
setattr(self.__remote_end__, name, value)
else:
object.__setattr__(self, name, value)
if override.__delattr__ == object.__delattr__:
# no custom __delattr__, define our own
def __delattr__(self, name):
if name not in _PROXY_LOCAL_ATTRS:
delattr(self.__remote_end__, name)
return Wrapper
|
def make_proxy_cls(
remote_cls: netref.BaseNetref,
origin_cls: type,
override: type,
cls_name: str = None,
):
"""
Makes a new class type which inherits from <origin_cls> (for isinstance() and issubtype()),
takes methods from <override> as-is and proxy all requests for other members to <remote_cls>.
Note that origin_cls and remote_cls are assumed to be the same class types, but one is local
and other is obtained from RPyC.
Effectively implements subclassing, but without subclassing. This is needed because it is
impossible to subclass a remote-obtained class, something in the very internals of RPyC bugs out.
Parameters
----------
remote_cls: netref.BaseNetref
Type obtained from RPyC connection, expected to mirror origin_cls
origin_cls: type
The class to prepare a proxying wrapping for
override: type
The mixin providing methods and attributes to overlay on top of remote values and methods.
cls_name: str, optional
The name to give to the resulting class.
Returns
-------
type
New wrapper that takes attributes from override and relays requests to all other
attributes to remote_cls
"""
class ProxyMeta(RemoteMeta):
"""
This metaclass deals with printing a telling repr() to assist in debugging,
and to actually implement the "subclass without subclassing" thing by
directly adding references to attributes of "override" and by making proxy methods
for other functions of origin_cls. Class-level attributes being proxied is managed
by RemoteMeta parent.
Do note that we cannot do the same for certain special members like __getitem__
because CPython for optimization doesn't do a lookup of "type(obj).__getitem__(foo)" when
"obj[foo]" is called, but it effectively does "type(obj).__dict__['__getitem__'](foo)"
(but even without checking for __dict__), so all present methods must be declared
beforehand.
"""
def __repr__(self):
return f"<proxy for {origin_cls.__module__}.{origin_cls.__name__}:{cls_name or origin_cls.__name__}>"
def __prepare__(*args, **kw):
"""
Cooks the __dict__ of the type being constructed. Takes attributes from <override> as is
and adds proxying wrappers for other attributes of <origin_cls>.
This "manual inheritance" is needed for RemoteMeta.__getattribute__ which first looks into
type(obj).__dict__ (EXCLUDING parent classes) and then goes to proxy type.
"""
namespace = type.__prepare__(*args, **kw)
namespace["__remote_methods__"] = {}
# try computing overridden differently to allow subclassing one override from another
no_override = set(_NO_OVERRIDE)
for base in override.__mro__:
if base == object:
continue
for attr_name, attr_value in base.__dict__.items():
if (
attr_name not in namespace
and attr_name not in no_override
and getattr(object, attr_name, None) != attr_value
):
namespace[attr_name] = (
attr_value # force-inherit an attribute manually
)
no_override.add(attr_name)
for base in origin_cls.__mro__:
if base == object:
continue
# try unwrapping a dual-nature class first
while True:
try:
sub_base = object.__getattribute__(base, "__real_cls__")
except AttributeError:
break
if sub_base is base:
break
base = sub_base
for name, entry in base.__dict__.items():
if (
name not in namespace
and name not in no_override
and isinstance(entry, types.FunctionType)
):
def method(_self, *_args, __method_name__=name, **_kw):
try:
remote = _self.__remote_methods__[__method_name__]
except KeyError:
# use remote_cls.__getattr__ to force RPyC return us
# a proxy for remote method call instead of its local wrapper
_self.__remote_methods__[__method_name__] = remote = (
remote_cls.__getattr__(__method_name__)
)
return remote(_self.__remote_end__, *_args, **_kw)
method.__name__ = name
namespace[name] = method
return namespace
class Wrapper(override, origin_cls, metaclass=ProxyMeta):
"""
Subclass origin_cls replacing attributes with what is defined in override while
relaying requests for all other attributes to remote_cls.
"""
__name__ = cls_name or origin_cls.__name__
__wrapper_remote__ = remote_cls
def __new__(cls, *a, **kw):
return override.__new__(cls)
def __init__(self, *a, __remote_end__=None, **kw):
if __remote_end__ is None:
__remote_end__ = remote_cls(*a, **kw)
while True:
# unwrap the object if it's a wrapper
try:
__remote_end__ = object.__getattribute__(
__remote_end__, "__remote_end__"
)
except AttributeError:
break
object.__setattr__(self, "__remote_end__", __remote_end__)
@classmethod
def from_remote_end(cls, remote_inst):
return cls(__remote_end__=remote_inst)
def __getattribute__(self, name):
"""
Implement "default" resolution order to override whatever __getattribute__
a parent being wrapped may have defined, but only look up on own __dict__
without looking into ancestors' ones, because we copy them in __prepare__.
Effectively, any attributes not currently known to Wrapper (i.e. not defined here
or in override class) will be retrieved from the remote end.
Algorithm (mimicking default Python behaviour):
1) check if type(self).__dict__[name] exists and is a get/set data descriptor
2) check if self.__dict__[name] exists
3) check if type(self).__dict__[name] is a non-data descriptor
4) check if type(self).__dict__[name] exists
5) pass through to remote end
"""
dct = object.__getattribute__(self, "__dict__")
if name == "__dict__":
return dct
cls_dct = object.__getattribute__(type(self), "__dict__")
try:
cls_attr, has_cls_attr = cls_dct[name], True
except KeyError:
has_cls_attr = False
else:
oget = None
try:
oget = object.__getattribute__(cls_attr, "__get__")
object.__getattribute__(cls_attr, "__set__")
except AttributeError:
pass # not a get/set data descriptor, go next
else:
return oget(self, type(self))
# type(self).name is not a get/set data descriptor
try:
return dct[name]
except KeyError:
# instance doesn't have an attribute
if has_cls_attr:
# type(self) has this attribute, but it's not a get/set descriptor
if oget:
# this attribute is a get data descriptor
return oget(self, type(self))
return cls_attr # not a data descriptor whatsoever
# this instance/class does not have this attribute, pass it through to remote end
return getattr(dct["__remote_end__"], name)
if override.__setattr__ == object.__setattr__:
# no custom attribute setting, define our own relaying to remote end
def __setattr__(self, name, value):
if name not in _PROXY_LOCAL_ATTRS:
setattr(self.__remote_end__, name, value)
else:
object.__setattr__(self, name, value)
if override.__delattr__ == object.__delattr__:
# no custom __delattr__, define our own
def __delattr__(self, name):
if name not in _PROXY_LOCAL_ATTRS:
delattr(self.__remote_end__, name)
return Wrapper
|
https://github.com/modin-project/modin/issues/1873
|
Traceback (most recent call last):
File "2.py", line 15, in <module>
ref = df.groupby("b").size()
File "/localdisk/ilyaenko/modin/modin/pandas/groupby.py", line 420, in size
work_object = SeriesGroupBy(
TypeError: type.__new__(SeriesGroupBy): SeriesGroupBy is not a subtype of type
|
TypeError
|
def __getattribute__(self, name):
"""
Implement "default" resolution order to override whatever __getattribute__
a parent being wrapped may have defined, but only look up on own __dict__
without looking into ancestors' ones, because we copy them in __prepare__.
Effectively, any attributes not currently known to Wrapper (i.e. not defined here
or in override class) will be retrieved from the remote end.
Algorithm (mimicking default Python behaviour):
1) check if type(self).__dict__[name] exists and is a get/set data descriptor
2) check if self.__dict__[name] exists
3) check if type(self).__dict__[name] is a non-data descriptor
4) check if type(self).__dict__[name] exists
5) pass through to remote end
"""
if name == "__class__":
return object.__getattribute__(self, "__class__")
dct = object.__getattribute__(self, "__dict__")
if name == "__dict__":
return dct
cls_dct = object.__getattribute__(type(self), "__dict__")
try:
cls_attr, has_cls_attr = cls_dct[name], True
except KeyError:
has_cls_attr = False
else:
oget = None
try:
oget = object.__getattribute__(cls_attr, "__get__")
object.__getattribute__(cls_attr, "__set__")
except AttributeError:
pass # not a get/set data descriptor, go next
else:
return oget(self, type(self))
# type(self).name is not a get/set data descriptor
try:
return dct[name]
except KeyError:
# instance doesn't have an attribute
if has_cls_attr:
# type(self) has this attribute, but it's not a get/set descriptor
if oget:
# this attribute is a get data descriptor
return oget(self, type(self))
return cls_attr # not a data descriptor whatsoever
# this instance/class does not have this attribute, pass it through to remote end
return getattr(dct["__remote_end__"], name)
|
def __getattribute__(self, name):
"""
Implement "default" resolution order to override whatever __getattribute__
a parent being wrapped may have defined, but only look up on own __dict__
without looking into ancestors' ones, because we copy them in __prepare__.
Effectively, any attributes not currently known to Wrapper (i.e. not defined here
or in override class) will be retrieved from the remote end.
Algorithm (mimicking default Python behaviour):
1) check if type(self).__dict__[name] exists and is a get/set data descriptor
2) check if self.__dict__[name] exists
3) check if type(self).__dict__[name] is a non-data descriptor
4) check if type(self).__dict__[name] exists
5) pass through to remote end
"""
dct = object.__getattribute__(self, "__dict__")
if name == "__dict__":
return dct
cls_dct = object.__getattribute__(type(self), "__dict__")
try:
cls_attr, has_cls_attr = cls_dct[name], True
except KeyError:
has_cls_attr = False
else:
oget = None
try:
oget = object.__getattribute__(cls_attr, "__get__")
object.__getattribute__(cls_attr, "__set__")
except AttributeError:
pass # not a get/set data descriptor, go next
else:
return oget(self, type(self))
# type(self).name is not a get/set data descriptor
try:
return dct[name]
except KeyError:
# instance doesn't have an attribute
if has_cls_attr:
# type(self) has this attribute, but it's not a get/set descriptor
if oget:
# this attribute is a get data descriptor
return oget(self, type(self))
return cls_attr # not a data descriptor whatsoever
# this instance/class does not have this attribute, pass it through to remote end
return getattr(dct["__remote_end__"], name)
|
https://github.com/modin-project/modin/issues/1873
|
Traceback (most recent call last):
File "2.py", line 15, in <module>
ref = df.groupby("b").size()
File "/localdisk/ilyaenko/modin/modin/pandas/groupby.py", line 420, in size
work_object = SeriesGroupBy(
TypeError: type.__new__(SeriesGroupBy): SeriesGroupBy is not a subtype of type
|
TypeError
|
def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
# if any of args contain modin object, we should
# convert it to pandas
args = try_cast_to_pandas(args)
kwargs = try_cast_to_pandas(kwargs)
if isinstance(func, str):
return self._apply_text_func_elementwise(func, axis, *args, **kwargs)
elif callable(func):
return self._callable_func(func, axis, *args, **kwargs)
elif isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs)
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs)
else:
pass
|
def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if isinstance(func, str):
return self._apply_text_func_elementwise(func, axis, *args, **kwargs)
elif callable(func):
return self._callable_func(func, axis, *args, **kwargs)
elif isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs)
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs)
else:
pass
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _apply_text_func_elementwise(self, func, axis, *args, **kwargs):
"""Apply func passed as str across given axis in elementwise manner.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
assert isinstance(func, str)
kwargs["axis"] = axis
new_modin_frame = self._modin_frame._apply_full_axis(
axis, lambda df: df.apply(func, *args, **kwargs)
)
return self.__constructor__(new_modin_frame)
|
def _apply_text_func_elementwise(self, func, axis, *args, **kwargs):
"""Apply func passed as str across given axis in elementwise manner.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
assert isinstance(func, str)
kwargs["axis"] = axis
new_modin_frame = self._modin_frame._apply_full_axis(
axis, lambda df: getattr(df, func)(**kwargs)
)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _dict_func(self, func, axis, *args, **kwargs):
"""Apply function to certain indices across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if "axis" not in kwargs:
kwargs["axis"] = axis
def dict_apply_builder(df, func_dict={}):
# Sometimes `apply` can return a `Series`, but we require that internally
# all objects are `DataFrame`s.
return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))
func = {k: wrap_udf_function(v) if callable(v) else v for k, v in func.items()}
return self.__constructor__(
self._modin_frame._apply_full_axis_select_indices(
axis, dict_apply_builder, func, keep_remaining=False
)
)
|
def _dict_func(self, func, axis, *args, **kwargs):
"""Apply function to certain indices across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if "axis" not in kwargs:
kwargs["axis"] = axis
def dict_apply_builder(df, func_dict={}):
# Sometimes `apply` can return a `Series`, but we require that internally
# all objects are `DataFrame`s.
return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))
return self.__constructor__(
self._modin_frame._apply_full_axis_select_indices(
axis, dict_apply_builder, func, keep_remaining=False
)
)
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _list_like_func(self, func, axis, *args, **kwargs):
"""Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
# When the function is list-like, the function names become the index/columns
new_index = (
[f if isinstance(f, str) else f.__name__ for f in func]
if axis == 0
else self.index
)
new_columns = (
[f if isinstance(f, str) else f.__name__ for f in func]
if axis == 1
else self.columns
)
func = [wrap_udf_function(f) if callable(f) else f for f in func]
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
|
def _list_like_func(self, func, axis, *args, **kwargs):
"""Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
# When the function is list-like, the function names become the index/columns
new_index = (
[f if isinstance(f, str) else f.__name__ for f in func]
if axis == 0
else self.index
)
new_columns = (
[f if isinstance(f, str) else f.__name__ for f in func]
if axis == 1
else self.columns
)
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _callable_func(self, func, axis, *args, **kwargs):
"""Apply callable functions across given axis.
Args:
func: The functions to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
func = wrap_udf_function(func)
new_modin_frame = self._modin_frame._apply_full_axis(
axis, lambda df: df.apply(func, axis=axis, *args, **kwargs)
)
return self.__constructor__(new_modin_frame)
|
def _callable_func(self, func, axis, *args, **kwargs):
"""Apply callable functions across given axis.
Args:
func: The functions to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if isinstance(pandas.DataFrame().apply(func), pandas.Series):
new_modin_frame = self._modin_frame._fold_reduce(
axis, lambda df: df.apply(func, axis=axis, *args, **kwargs)
)
else:
new_modin_frame = self._modin_frame._apply_full_axis(
axis, lambda df: df.apply(func, axis=axis, *args, **kwargs)
)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _set_axis(self, axis, new_axis, cache_only=False):
"""Replaces the current labels at the specified axis with the new one
Parameters
----------
axis : int,
Axis to set labels along
new_axis : Index,
The replacement labels
cache_only : bool,
Whether to change only external indices, or propagate it
into partitions
"""
if axis:
if not cache_only:
self._set_columns(new_axis)
else:
self._columns_cache = ensure_index(new_axis)
else:
if not cache_only:
self._set_index(new_axis)
else:
self._index_cache = ensure_index(new_axis)
|
def _set_axis(self, axis, new_axis):
"""Replaces the current labels at the specified axis with the new one
Parameters
----------
axis : int,
Axis to set labels along
new_axis : Index,
The replacement labels
"""
if axis:
self._set_columns(new_axis)
else:
self._set_index(new_axis)
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _validate_axis_equality(self, axis: int, force: bool = False):
"""
Validates internal and external indices of modin_frame at the specified axis.
Parameters
----------
axis : int,
Axis to validate indices along
force : bool,
Whether to update external indices with internal if their lengths
do not match or raise an exception in that case.
"""
internal_axis = self._frame_mgr_cls.get_indices(
axis, self._partitions, lambda df: df.axes[axis]
)
is_equals = self.axes[axis].equals(internal_axis)
is_lenghts_matches = len(self.axes[axis]) == len(internal_axis)
if not is_equals:
if force:
new_axis = self.axes[axis] if is_lenghts_matches else internal_axis
self._set_axis(axis, new_axis, cache_only=not is_lenghts_matches)
else:
self._set_axis(
axis,
self.axes[axis],
)
|
def _validate_axis_equality(self, axis: int):
"""
Validates internal and external indices of modin_frame at the specified axis.
Parameters
----------
axis : int,
Axis to validate indices along
"""
internal_axis = self._frame_mgr_cls.get_indices(
axis, self._partitions, lambda df: df.axes[axis]
)
is_equals = self.axes[axis].equals(internal_axis)
if not is_equals:
self._set_axis(axis, self.axes[axis])
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _validate_internal_indices(self, mode=None, **kwargs):
"""
Validates and optionally updates internal and external indices
of modin_frame in specified mode. There is 3 modes supported:
1. "reduced" - force validates on that axes
where external indices is ["__reduced__"]
2. "all" - validates indices at all axes, optionally force
if `force` parameter specified in kwargs
3. "custom" - validation follows arguments specified in kwargs.
Parameters
----------
mode : str or bool, default None
validate_index : bool, (optional, could be specified via `mode`)
validate_columns : bool, (optional, could be specified via `mode`)
force : bool (optional, could be specified via `mode`)
Whether to update external indices with internal if their lengths
do not match or raise an exception in that case.
"""
if isinstance(mode, bool):
is_force = mode
mode = "all"
else:
is_force = kwargs.get("force", False)
reduced_sample = pandas.Index(["__reduced__"])
args_dict = {
"custom": kwargs,
"reduced": {
"validate_index": self.index.equals(reduced_sample),
"validate_columns": self.columns.equals(reduced_sample),
"force": True,
},
"all": {
"validate_index": True,
"validate_columns": True,
"force": is_force,
},
}
args = args_dict.get(mode, args_dict["custom"])
if args.get("validate_index", True):
self._validate_axis_equality(axis=0)
if args.get("validate_columns", True):
self._validate_axis_equality(axis=1)
|
def _validate_internal_indices(self, mode=None, **kwargs):
"""
Validates and optionally updates internal and external indices
of modin_frame in specified mode. There is 3 modes supported:
1. "reduced" - validates and updates indices on that axes
where external indices is ["__reduced__"]
2. "all" - validates indices at all axes, optionally updates
internal indices if `update` parameter specified in kwargs
3. "custom" - validation follows arguments specified in kwargs.
Parameters
----------
mode : str or bool, default None
validate_index : bool, (optional, could be specified via `mode`)
validate_columns : bool, (optional, could be specified via `mode`)
"""
if isinstance(mode, bool):
mode = "all"
reduced_sample = pandas.Index(["__reduced__"])
args_dict = {
"custom": kwargs,
"reduced": {
"validate_index": self.index.equals(reduced_sample),
"validate_columns": self.columns.equals(reduced_sample),
},
"all": {"validate_index": True, "validate_columns": True},
}
args = args_dict.get(mode, args_dict["custom"])
if args.get("validate_index", True):
self._validate_axis_equality(axis=0)
if args.get("validate_columns", True):
self._validate_axis_equality(axis=1)
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def apply(
self,
func,
axis=0,
broadcast=None,
raw=False,
reduce=None,
result_type=None,
convert_dtype=True,
args=(),
**kwds,
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = self._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, str):
result = self._query_compiler.apply(
func,
axis=axis,
raw=raw,
result_type=result_type,
*args,
**kwds,
)
if isinstance(result, BasePandasDataset):
return result._query_compiler
return result
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", 'occurred at index {0}'".format(
self.index[0]
)
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif not callable(func) and not is_list_like(func):
raise TypeError("{} object is not callable".format(type(func)))
query_compiler = self._query_compiler.apply(
func,
axis,
args=args,
raw=raw,
result_type=result_type,
**kwds,
)
return query_compiler
|
def apply(
self,
func,
axis=0,
broadcast=None,
raw=False,
reduce=None,
result_type=None,
convert_dtype=True,
args=(),
**kwds,
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = self._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, str):
result = self._query_compiler.apply(func, axis=axis, *args, **kwds)
if isinstance(result, BasePandasDataset):
return result._query_compiler
return result
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", 'occurred at index {0}'".format(
self.index[0]
)
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif not callable(func) and not is_list_like(func):
raise TypeError("{} object is not callable".format(type(func)))
query_compiler = self._query_compiler.apply(func, axis, args=args, **kwds)
return query_compiler
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _apply_agg_function(self, f, drop=True, *args, **kwargs):
"""Perform aggregation and combine stages based on a given function.
Args:
f: The function to apply to each group.
Returns:
A new combined DataFrame with the result of all groups.
"""
assert callable(f), "'{0}' object is not callable".format(type(f))
f = wrap_udf_function(f)
if self._is_multi_by:
return self._default_to_pandas(f, *args, **kwargs)
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze()
else:
by = self._by
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop and self._drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
new_manager = groupby_qc.groupby_agg(
by, self._axis, f, self._kwargs, kwargs, drop=self._drop
)
if self._idx_name is not None and self._as_index:
new_manager.index.name = self._idx_name
result = type(self._df)(query_compiler=new_manager)
if self._kwargs.get("squeeze", False):
return result.squeeze()
return result
|
def _apply_agg_function(self, f, drop=True, *args, **kwargs):
"""Perform aggregation and combine stages based on a given function.
Args:
f: The function to apply to each group.
Returns:
A new combined DataFrame with the result of all groups.
"""
assert callable(f), "'{0}' object is not callable".format(type(f))
if self._is_multi_by:
return self._default_to_pandas(f, *args, **kwargs)
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze()
else:
by = self._by
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop and self._drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
new_manager = groupby_qc.groupby_agg(
by, self._axis, f, self._kwargs, kwargs, drop=self._drop
)
if self._idx_name is not None and self._as_index:
new_manager.index.name = self._idx_name
result = type(self._df)(query_compiler=new_manager)
if self._kwargs.get("squeeze", False):
return result.squeeze()
return result
|
https://github.com/modin-project/modin/issues/1154
|
FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
UserWarning: User-defined function verification is still under development in Modin. The function provided is not verified.
empty !
Traceback (most recent call last):
File "./test2.py", line 24, in <module>
rn = r.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/pandas/base.py", line 2962, in to_numpy
arr = self._query_compiler.to_numpy()
File "~/.local/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 169, in to_numpy
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
File "~/.local/lib/python3.7/site-packages/modin/error_message.py", line 40, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def concat(
objs: Union[
Iterable[FrameOrSeriesUnion], Mapping[Optional[Hashable], FrameOrSeriesUnion]
],
axis=0,
join="outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
copy: bool = True,
) -> FrameOrSeriesUnion:
if isinstance(objs, (pandas.Series, Series, DataFrame, str, pandas.DataFrame)):
raise TypeError(
"first argument must be an iterable of pandas "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
axis = pandas.DataFrame()._get_axis_number(axis)
if isinstance(objs, dict):
list_of_objs = list(objs.values())
else:
list_of_objs = list(objs)
if len(list_of_objs) == 0:
raise ValueError("No objects to concatenate")
list_of_objs = [obj for obj in list_of_objs if obj is not None]
if len(list_of_objs) == 0:
raise ValueError("All objects passed were None")
try:
type_check = next(
obj
for obj in list_of_objs
if not isinstance(obj, (pandas.Series, Series, pandas.DataFrame, DataFrame))
)
except StopIteration:
type_check = None
if type_check is not None:
raise ValueError(
'cannot concatenate object of type "{0}"; only '
"modin.pandas.Series "
"and modin.pandas.DataFrame objs are "
"valid",
type(type_check),
)
all_series = all(isinstance(obj, Series) for obj in list_of_objs)
if all_series and axis == 0:
return Series(
query_compiler=list_of_objs[0]._query_compiler.concat(
axis,
[o._query_compiler for o in list_of_objs[1:]],
join=join,
join_axes=None,
ignore_index=ignore_index,
keys=None,
levels=None,
names=None,
verify_integrity=False,
copy=True,
sort=sort,
)
)
if join not in ["inner", "outer"]:
raise ValueError(
"Only can inner (intersect) or outer (union) join the other axis"
)
# We have the weird Series and axis check because, when concatenating a
# dataframe to a series on axis=0, pandas ignores the name of the series,
# and this check aims to mirror that (possibly buggy) functionality
list_of_objs = [
obj
if isinstance(obj, DataFrame)
else DataFrame(obj.rename())
if isinstance(obj, (pandas.Series, Series)) and axis == 0
else DataFrame(obj)
for obj in list_of_objs
]
list_of_objs = [
obj._query_compiler
for obj in list_of_objs
if len(obj.index) or len(obj.columns)
]
if keys is not None:
if all_series:
new_idx = keys
else:
list_of_objs = [
list_of_objs[i] for i in range(min(len(list_of_objs), len(keys)))
]
new_idx_labels = {
k: v.index if axis == 0 else v.columns
for k, v in zip(keys, list_of_objs)
}
tuples = [
(k, *o) if isinstance(o, tuple) else (k, o)
for k, obj in new_idx_labels.items()
for o in obj
]
new_idx = pandas.MultiIndex.from_tuples(tuples)
if names is not None:
new_idx.names = names
else:
old_name = _determine_name(list_of_objs, axis)
if old_name is not None:
new_idx.names = [None] + old_name
elif isinstance(objs, dict):
new_idx = pandas.concat(
{k: pandas.Series(index=obj.axes[axis]) for k, obj in objs.items()}
).index
else:
new_idx = None
new_query_compiler = list_of_objs[0].concat(
axis,
list_of_objs[1:],
join=join,
join_axes=None,
ignore_index=ignore_index,
keys=None,
levels=None,
names=None,
verify_integrity=False,
copy=True,
sort=sort,
)
result_df = DataFrame(query_compiler=new_query_compiler)
if new_idx is not None:
if axis == 0:
result_df.index = new_idx
else:
result_df.columns = new_idx
return result_df
|
def concat(
objs: Union[
Iterable[FrameOrSeriesUnion], Mapping[Optional[Hashable], FrameOrSeriesUnion]
],
axis=0,
join="outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
copy: bool = True,
) -> FrameOrSeriesUnion:
if isinstance(objs, (pandas.Series, Series, DataFrame, str, pandas.DataFrame)):
raise TypeError(
"first argument must be an iterable of pandas "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
axis = pandas.DataFrame()._get_axis_number(axis)
objs = list(objs)
if len(objs) == 0:
raise ValueError("No objects to concatenate")
objs = [obj for obj in objs if obj is not None]
if len(objs) == 0:
raise ValueError("All objects passed were None")
try:
type_check = next(
obj
for obj in objs
if not isinstance(obj, (pandas.Series, Series, pandas.DataFrame, DataFrame))
)
except StopIteration:
type_check = None
if type_check is not None:
raise ValueError(
'cannot concatenate object of type "{0}"; only '
"pandas.Series, pandas.DataFrame, "
"and modin.pandas.DataFrame objs are "
"valid",
type(type_check),
)
all_series = all(isinstance(obj, Series) for obj in objs)
if all_series and axis == 0:
return Series(
query_compiler=objs[0]._query_compiler.concat(
axis,
[o._query_compiler for o in objs[1:]],
join=join,
join_axes=None,
ignore_index=ignore_index,
keys=None,
levels=None,
names=None,
verify_integrity=False,
copy=True,
sort=sort,
)
)
if isinstance(objs, dict):
raise NotImplementedError("Obj as dicts not implemented.")
if join not in ["inner", "outer"]:
raise ValueError(
"Only can inner (intersect) or outer (union) join the other axis"
)
# We have the weird Series and axis check because, when concatenating a
# dataframe to a series on axis=0, pandas ignores the name of the series,
# and this check aims to mirror that (possibly buggy) functionality
objs = [
obj
if isinstance(obj, DataFrame)
else DataFrame(obj.rename())
if isinstance(obj, (pandas.Series, Series)) and axis == 0
else DataFrame(obj)
for obj in objs
]
objs = [obj._query_compiler for obj in objs if len(obj.index) or len(obj.columns)]
if keys is not None:
if all_series:
new_idx = keys
else:
objs = [objs[i] for i in range(min(len(objs), len(keys)))]
new_idx_labels = {
k: v.index if axis == 0 else v.columns for k, v in zip(keys, objs)
}
tuples = [
(k, *o) if isinstance(o, tuple) else (k, o)
for k, obj in new_idx_labels.items()
for o in obj
]
new_idx = pandas.MultiIndex.from_tuples(tuples)
if names is not None:
new_idx.names = names
else:
old_name = _determine_name(objs, axis)
if old_name is not None:
new_idx.names = [None] + old_name
else:
new_idx = None
new_query_compiler = objs[0].concat(
axis,
objs[1:],
join=join,
join_axes=None,
ignore_index=ignore_index,
keys=None,
levels=None,
names=None,
verify_integrity=False,
copy=True,
sort=sort,
)
result_df = DataFrame(query_compiler=new_query_compiler)
if new_idx is not None:
if axis == 0:
result_df.index = new_idx
else:
result_df.columns = new_idx
return result_df
|
https://github.com/modin-project/modin/issues/1631
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-1-0ed75db51eaa> in <module>
6 surveys = {'1': df_wave1, '2': df_wave2, '3': df_wave3, }
7
----> 8 df_all = pd.concat(surveys, sort=False)
~/.virtualenvs/learningenv/lib/python3.7/site-packages/modin/pandas/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy)
64 "and modin.pandas.DataFrame objs are "
65 "valid",
---> 66 type(type_check),
67 )
68 all_series = all(isinstance(obj, Series) for obj in objs)
ValueError: ('cannot concatenate object of type "{0}"; only pandas.Series, pandas.DataFrame, and modin.pandas.DataFrame objs are valid', <class 'str'>)
|
ValueError
|
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
num_cols = pandas.get_option("display.max_columns") or 20
if pandas.get_option("display.max_columns") is None and pandas.get_option(
"display.expand_frame_repr"
):
width, _ = console.get_console_size()
width = min(width, len(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i += 1
num_cols = i
i = len(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i -= 1
num_cols += len(self.columns) - i
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
|
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
num_cols = pandas.get_option("display.max_columns") or 20
if pandas.get_option("display.max_columns") is None and pandas.get_option(
"display.expand_frame_repr"
):
width, _ = console.get_console_size()
col_counter = 0
i = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i += 1
num_cols = i
i = len(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i -= 1
num_cols += len(self.columns) - i
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
|
https://github.com/modin-project/modin/issues/1774
|
Python 3.8.3 (default, Jun 9 2020, 17:49:41)
[GCC 8.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import modin.pandas as pandas
pandas.DataFrame()
UserWarning: Distributing <class 'NoneType'> object. This may take some time.
Empty DataFrame
Columns: []
Index: []
pandas.set_option("display.max_rows", None)
pandas.DataFrame()
UserWarning: Distributing <class 'NoneType'> object. This may take some time.
Empty DataFrame
Columns: []
Index: []
pandas.set_option("display.max_columns", None)
pandas.DataFrame()
UserWarning: Distributing <class 'NoneType'> object. This may take some time.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/src/modin/modin/pandas/dataframe.py", line 151, in __repr__
col_counter += len(str(self.columns[i])) + 1
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 3930, in __getitem__
return getitem(key)
IndexError: index 0 is out of bounds for axis 0 with size 0
```
|
IndexError
|
def __getitem__(self, key):
if callable(key):
return self.__getitem__(key(self.df))
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
if isinstance(row_loc, slice) and row_loc == slice(None):
# If we're only slicing columns, handle the case with `__getitem__`
if not isinstance(col_loc, slice):
# Boolean indexers can just be sliced into the columns object and
# then passed to `__getitem__`
if is_boolean_array(col_loc):
col_loc = self.df.columns[col_loc]
return self.df.__getitem__(col_loc)
else:
result_slice = self.df.columns.slice_locs(col_loc.start, col_loc.stop)
return self.df.iloc[:, slice(*result_slice)]
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
if any(i == -1 for i in row_lookup) or any(i == -1 for i in col_lookup):
raise KeyError(
"Passing list-likes to .loc or [] with any missing labels is no longer "
"supported, see https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike"
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
if isinstance(result, Series):
result._parent = self.df
result._parent_axis = 0
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and not isinstance(col_loc, slice)
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
def __getitem__(self, key):
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
if isinstance(row_loc, slice) and row_loc == slice(None):
# If we're only slicing columns, handle the case with `__getitem__`
if not isinstance(col_loc, slice):
# Boolean indexers can just be sliced into the columns object and
# then passed to `__getitem__`
if is_boolean_array(col_loc):
col_loc = self.df.columns[col_loc]
return self.df.__getitem__(col_loc)
else:
result_slice = self.df.columns.slice_locs(col_loc.start, col_loc.stop)
return self.df.iloc[:, slice(*result_slice)]
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
if any(i == -1 for i in row_lookup) or any(i == -1 for i in col_lookup):
raise KeyError(
"Passing list-likes to .loc or [] with any missing labels is no longer "
"supported, see https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike"
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
if isinstance(result, Series):
result._parent = self.df
result._parent_axis = 0
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and not isinstance(col_loc, slice)
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
https://github.com/modin-project/modin/issues/1775
|
Python 3.8.3 (default, Jun 9 2020, 17:49:41)
[GCC 8.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import modin.pandas as pandas
foo = pandas.DataFrame({"col": [1,2,3,4]})
UserWarning: Distributing <class 'dict'> object. This may take some time.
foo
col
0 1
1 2
2 3
3 4
foo.loc[lambda val: val.col%2==0]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/src/modin/modin/pandas/indexing.py", line 252, in __getitem__
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/src/modin/modin/pandas/indexing.py", line 345, in _compute_lookup
row_lookup = self.qc.index.get_indexer_for(row_loc)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 4502, in get_indexer_for
return self.get_indexer(target, **kwargs)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/range.py", line 358, in get_indexer
return super().get_indexer(
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 2709, in get_indexer
target = ensure_index(target)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 5358, in ensure_index
return Index(index_like)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 438, in __new__
subarr = com.asarray_tuplesafe(data, dtype=object)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/common.py", line 222, in asarray_tuplesafe
values = list(values)
TypeError: 'function' object is not iterable
|
TypeError
|
def __getitem__(self, key):
if callable(key):
return self.__getitem__(key(self.df))
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
self._check_dtypes(row_loc)
self._check_dtypes(col_loc)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
result = super(_iLocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
if isinstance(result, Series):
result._parent = self.df
result._parent_axis = 0
return result
|
def __getitem__(self, key):
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
self._check_dtypes(row_loc)
self._check_dtypes(col_loc)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
result = super(_iLocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
if isinstance(result, Series):
result._parent = self.df
result._parent_axis = 0
return result
|
https://github.com/modin-project/modin/issues/1775
|
Python 3.8.3 (default, Jun 9 2020, 17:49:41)
[GCC 8.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import modin.pandas as pandas
foo = pandas.DataFrame({"col": [1,2,3,4]})
UserWarning: Distributing <class 'dict'> object. This may take some time.
foo
col
0 1
1 2
2 3
3 4
foo.loc[lambda val: val.col%2==0]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/src/modin/modin/pandas/indexing.py", line 252, in __getitem__
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/src/modin/modin/pandas/indexing.py", line 345, in _compute_lookup
row_lookup = self.qc.index.get_indexer_for(row_loc)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 4502, in get_indexer_for
return self.get_indexer(target, **kwargs)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/range.py", line 358, in get_indexer
return super().get_indexer(
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 2709, in get_indexer
target = ensure_index(target)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 5358, in ensure_index
return Index(index_like)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 438, in __new__
subarr = com.asarray_tuplesafe(data, dtype=object)
File "/root/.cache/pypoetry/virtualenvs/onepassword-etl-NcjVOHQt-py3.8/lib/python3.8/site-packages/pandas/core/common.py", line 222, in asarray_tuplesafe
values = list(values)
TypeError: 'function' object is not iterable
|
TypeError
|
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
how = kwargs.get("how", "inner")
on = kwargs.get("on", None)
left_on = kwargs.get("left_on", None)
right_on = kwargs.get("right_on", None)
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
sort = kwargs.get("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_pandas()
kwargs["sort"] = False
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_self = self.__constructor__(self._modin_frame._apply_full_axis(1, map_func))
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in new_self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(left_on.append(right_on))
if is_reset_index
else new_self.sort_index(axis=0, level=left_on.append(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reset_index
else new_self.sort_index(axis=0, level=on)
)
return new_self.reset_index(drop=True) if is_reset_index else new_self
else:
return self.default_to_pandas(pandas.DataFrame.merge, right, **kwargs)
|
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
right = right.to_pandas()
sort = kwargs.get("sort")
kwargs["sort"] = not sort if sort else sort
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_modin_frame = self._modin_frame._apply_full_axis(1, map_func)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1771
|
Traceback (most recent call last):
File "merge_test.py", line 6, in <module>
df3 = df1.merge(df2, on='name', how='inner')
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 1540, in merge
return result.reset_index(drop=True) if is_reset_index else result
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 2301, in reset_index
new_query_compiler = self._query_compiler.reset_index(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 442, in reset_index
new_self = self.copy()
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 217, in copy
return self.__constructor__(self._modin_frame.copy())
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 532, in copy
return self.__constructor__(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 68, in __init__
ErrorMessage.catch_bugs_and_request_email(
File "/localdisk/gashiman/modin/modin/error_message.py", line 50, in catch_bugs_and_request_email
raise Exception(
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Column widths: 0 != 23
|
Exception
|
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
QueryCompiler containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
level = kwargs.pop("level", None)
sort_remaining = kwargs.pop("sort_remaining", True)
kwargs["inplace"] = False
if level is not None or (
(axis == 0 and isinstance(self.index, pandas.MultiIndex))
or (axis == 1 and isinstance(self.columns, pandas.MultiIndex))
):
return self.default_to_pandas(
pandas.DataFrame.sort_index,
level=level,
sort_remaining=sort_remaining,
**kwargs,
)
# sort_index can have ascending be None and behaves as if it is False.
# sort_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.sort_index(
axis=axis, level=level, sort_remaining=sort_remaining, **kwargs
),
new_index,
new_columns,
dtypes="copy" if axis == 0 else None,
)
return self.__constructor__(new_modin_frame)
|
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
QueryCompiler containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
# sort_index can have ascending be None and behaves as if it is False.
# sort_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.sort_index(axis=axis, **kwargs),
new_index,
new_columns,
dtypes="copy" if axis == 0 else None,
)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1771
|
Traceback (most recent call last):
File "merge_test.py", line 6, in <module>
df3 = df1.merge(df2, on='name', how='inner')
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 1540, in merge
return result.reset_index(drop=True) if is_reset_index else result
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 2301, in reset_index
new_query_compiler = self._query_compiler.reset_index(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 442, in reset_index
new_self = self.copy()
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 217, in copy
return self.__constructor__(self._modin_frame.copy())
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 532, in copy
return self.__constructor__(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 68, in __init__
ErrorMessage.catch_bugs_and_request_email(
File "/localdisk/gashiman/modin/modin/error_message.py", line 50, in catch_bugs_and_request_email
raise Exception(
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Column widths: 0 != 23
|
Exception
|
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
|
def map_func(df, n=n, keep=keep, columns=columns):
if columns is None:
return pandas.DataFrame(
getattr(pandas.Series, sort_type)(df.squeeze(axis=1), n=n, keep=keep)
)
return getattr(pandas.DataFrame, sort_type)(df, n=n, columns=columns, keep=keep)
|
https://github.com/modin-project/modin/issues/1771
|
Traceback (most recent call last):
File "merge_test.py", line 6, in <module>
df3 = df1.merge(df2, on='name', how='inner')
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 1540, in merge
return result.reset_index(drop=True) if is_reset_index else result
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 2301, in reset_index
new_query_compiler = self._query_compiler.reset_index(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 442, in reset_index
new_self = self.copy()
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 217, in copy
return self.__constructor__(self._modin_frame.copy())
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 532, in copy
return self.__constructor__(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 68, in __init__
ErrorMessage.catch_bugs_and_request_email(
File "/localdisk/gashiman/modin/modin/error_message.py", line 50, in catch_bugs_and_request_email
raise Exception(
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Column widths: 0 != 23
|
Exception
|
def length_fn_pandas(df):
assert isinstance(df, pandas.DataFrame)
return len(df) if len(df) > 0 else 0
|
def length_fn_pandas(df):
assert isinstance(df, pandas.DataFrame)
return len(df) if len(df.columns) > 0 else 0
|
https://github.com/modin-project/modin/issues/1771
|
Traceback (most recent call last):
File "merge_test.py", line 6, in <module>
df3 = df1.merge(df2, on='name', how='inner')
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 1540, in merge
return result.reset_index(drop=True) if is_reset_index else result
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 2301, in reset_index
new_query_compiler = self._query_compiler.reset_index(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 442, in reset_index
new_self = self.copy()
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 217, in copy
return self.__constructor__(self._modin_frame.copy())
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 532, in copy
return self.__constructor__(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 68, in __init__
ErrorMessage.catch_bugs_and_request_email(
File "/localdisk/gashiman/modin/modin/error_message.py", line 50, in catch_bugs_and_request_email
raise Exception(
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Column widths: 0 != 23
|
Exception
|
def width_fn_pandas(df):
assert isinstance(df, pandas.DataFrame)
return len(df.columns) if len(df.columns) > 0 else 0
|
def width_fn_pandas(df):
assert isinstance(df, pandas.DataFrame)
return len(df.columns) if len(df) > 0 else 0
|
https://github.com/modin-project/modin/issues/1771
|
Traceback (most recent call last):
File "merge_test.py", line 6, in <module>
df3 = df1.merge(df2, on='name', how='inner')
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 1540, in merge
return result.reset_index(drop=True) if is_reset_index else result
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 2301, in reset_index
new_query_compiler = self._query_compiler.reset_index(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 442, in reset_index
new_self = self.copy()
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 217, in copy
return self.__constructor__(self._modin_frame.copy())
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 532, in copy
return self.__constructor__(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 68, in __init__
ErrorMessage.catch_bugs_and_request_email(
File "/localdisk/gashiman/modin/modin/error_message.py", line 50, in catch_bugs_and_request_email
raise Exception(
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Column widths: 0 != 23
|
Exception
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
ignore_index: bool = False,
):
"""Sort a DataFrame by one of the indices (columns or index).
Args:
axis: The axis to sort over.
level: The MultiIndex level to sort over.
ascending: Ascending or descending
inplace: Whether or not to update this DataFrame inplace.
kind: How to perform the sort.
na_position: Where to position NA on the sort.
sort_remaining: On Multilevel Index sort based on all levels.
by: (Deprecated) argument to pass to sort_values.
Returns:
A sorted DataFrame
"""
axis = self._get_axis_number(axis)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
ignore_index=ignore_index,
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
ignore_index: bool = False,
):
"""Sort a DataFrame by one of the indices (columns or index).
Args:
axis: The axis to sort over.
level: The MultiIndex level to sort over.
ascending: Ascending or descending
inplace: Whether or not to update this DataFrame inplace.
kind: How to perform the sort.
na_position: Where to position NA on the sort.
sort_remaining: On Multilevel Index sort based on all levels.
by: (Deprecated) argument to pass to sort_values.
Returns:
A sorted DataFrame
"""
axis = self._get_axis_number(axis)
if level is not None or (
(axis == 0 and isinstance(self.index, pandas.MultiIndex))
or (axis == 1 and isinstance(self.columns, pandas.MultiIndex))
):
new_query_compiler = self._default_to_pandas(
"sort_index",
axis=axis,
level=level,
ascending=ascending,
inplace=False,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
)._query_compiler
return self._create_or_update_from_compiler(new_query_compiler, inplace)
new_query_compiler = self._query_compiler.sort_index(
axis=axis, ascending=ascending, kind=kind, na_position=na_position
)
if inplace:
self._update_inplace(new_query_compiler=new_query_compiler)
else:
return self.__constructor__(query_compiler=new_query_compiler)
|
https://github.com/modin-project/modin/issues/1771
|
Traceback (most recent call last):
File "merge_test.py", line 6, in <module>
df3 = df1.merge(df2, on='name', how='inner')
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 1540, in merge
return result.reset_index(drop=True) if is_reset_index else result
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 2301, in reset_index
new_query_compiler = self._query_compiler.reset_index(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 442, in reset_index
new_self = self.copy()
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 217, in copy
return self.__constructor__(self._modin_frame.copy())
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 532, in copy
return self.__constructor__(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 68, in __init__
ErrorMessage.catch_bugs_and_request_email(
File "/localdisk/gashiman/modin/modin/error_message.py", line 50, in catch_bugs_and_request_email
raise Exception(
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Column widths: 0 != 23
|
Exception
|
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on columns,
the DataFrame indexes will be ignored. Otherwise if joining indexes on indexes or
indexes on a column or columns, the index will be passed on.
Parameters
----------
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
- left: use only keys from left frame,
similar to a SQL left outer join; preserve key order.
- right: use only keys from right frame,
similar to a SQL right outer join; preserve key order.
- outer: use union of keys from both frames,
similar to a SQL full outer join; sort keys lexicographically.
- inner: use intersection of keys from both frames,
similar to a SQL inner join; preserve the order of the left keys.
on : label or list
Column or index level names to join on.
These must be found in both DataFrames. If on is None and not merging on indexes
then this defaults to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame.
Can also be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame.
Can also be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s).
If it is a MultiIndex, the number of keys in the other DataFrame
(either the index or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame.
If False, the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right side, respectively.
To raise an exception on overlapping columns use (False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with information
on the source of each row. If string, column with information on source of each row
will be added to output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right' DataFrame,
and "both" if the observation’s merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
- 'one_to_one' or '1:1': check if merge keys are unique in both left and right datasets.
- 'one_to_many' or '1:m': check if merge keys are unique in left dataset.
- 'many_to_one' or 'm:1': check if merge keys are unique in right dataset.
- 'many_to_many' or 'm:m': allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
"""
if isinstance(right, Series):
if right.name is None:
raise ValueError("Cannot merge a Series without a name")
else:
right = right.to_frame()
if not isinstance(right, DataFrame):
raise TypeError(
f"Can only merge Series or DataFrame objects, a {type(right)} was passed"
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
return self.__constructor__(
query_compiler=self._query_compiler.merge(
right._query_compiler,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
)
|
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on columns,
the DataFrame indexes will be ignored. Otherwise if joining indexes on indexes or
indexes on a column or columns, the index will be passed on.
Parameters
----------
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
- left: use only keys from left frame,
similar to a SQL left outer join; preserve key order.
- right: use only keys from right frame,
similar to a SQL right outer join; preserve key order.
- outer: use union of keys from both frames,
similar to a SQL full outer join; sort keys lexicographically.
- inner: use intersection of keys from both frames,
similar to a SQL inner join; preserve the order of the left keys.
on : label or list
Column or index level names to join on.
These must be found in both DataFrames. If on is None and not merging on indexes
then this defaults to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame.
Can also be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame.
Can also be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s).
If it is a MultiIndex, the number of keys in the other DataFrame
(either the index or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame.
If False, the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right side, respectively.
To raise an exception on overlapping columns use (False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with information
on the source of each row. If string, column with information on source of each row
will be added to output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right' DataFrame,
and "both" if the observation’s merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
- 'one_to_one' or '1:1': check if merge keys are unique in both left and right datasets.
- 'one_to_many' or '1:m': check if merge keys are unique in left dataset.
- 'many_to_one' or 'm:1': check if merge keys are unique in right dataset.
- 'many_to_many' or 'm:m': allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
"""
if isinstance(right, Series):
if right.name is None:
raise ValueError("Cannot merge a Series without a name")
else:
right = right.to_frame()
if not isinstance(right, DataFrame):
raise TypeError(
f"Can only merge Series or DataFrame objects, a {type(right)} was passed"
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
if how in ["left", "inner"] and left_index is False and right_index is False:
result = self.__constructor__(
query_compiler=self._query_compiler.merge(
right._query_compiler,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
)
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
result = (
result.sort_values(left_on.append(right_on))
if is_reset_index
else result.sort_index(axis=0, level=left_on.append(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in self.index.names and o in right.index.names for o in on
)
if sort:
result = (
result.sort_values(on)
if is_reset_index
else result.sort_index(axis=0, level=on)
)
return result.reset_index(drop=True) if is_reset_index else result
return self.__constructor__(
query_compiler=self._query_compiler.default_to_pandas(
pandas.DataFrame.merge,
right._query_compiler,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
)
|
https://github.com/modin-project/modin/issues/1771
|
Traceback (most recent call last):
File "merge_test.py", line 6, in <module>
df3 = df1.merge(df2, on='name', how='inner')
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 1540, in merge
return result.reset_index(drop=True) if is_reset_index else result
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 2301, in reset_index
new_query_compiler = self._query_compiler.reset_index(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 442, in reset_index
new_self = self.copy()
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 217, in copy
return self.__constructor__(self._modin_frame.copy())
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 532, in copy
return self.__constructor__(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 68, in __init__
ErrorMessage.catch_bugs_and_request_email(
File "/localdisk/gashiman/modin/modin/error_message.py", line 50, in catch_bugs_and_request_email
raise Exception(
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Column widths: 0 != 23
|
Exception
|
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(func_type, funcs["increasing"])
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(is_monotonic_map, is_monotonic_reduce, axis=0)(
self
)
|
def _is_monotonic(self, type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(type, funcs["increasing"])
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(is_monotonic_map, is_monotonic_reduce)(self)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
|
def is_monotonic_decreasing(self):
return self._is_monotonic(type="decreasing")
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def nsmallest(self, *args, **kwargs):
return self.nsort(sort_type="nsmallest", *args, **kwargs)
|
def nsmallest(self, n, columns=None, keep="first"):
def map_func(df, n=n, keep=keep, columns=columns):
if columns is None:
return pandas.DataFrame(
pandas.Series.nsmallest(df.squeeze(axis=1), n=n, keep=keep)
)
return pandas.DataFrame.nsmallest(df, n=n, columns=columns, keep=keep)
if columns is None:
new_columns = ["__reduced__"]
else:
new_columns = self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def map_func(df, n=n, keep=keep, columns=columns):
if columns is None:
return pandas.DataFrame(
getattr(pandas.Series, sort_type)(df.squeeze(axis=1), n=n, keep=keep)
)
return getattr(pandas.DataFrame, sort_type)(df, n=n, columns=columns, keep=keep)
|
def map_func(df, n=n, keep=keep, columns=columns):
if columns is None:
return pandas.DataFrame(
pandas.Series.nsmallest(df.squeeze(axis=1), n=n, keep=keep)
)
return pandas.DataFrame.nsmallest(df, n=n, columns=columns, keep=keep)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def call(cls, func, *call_args, **call_kwds):
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
preserve_labels=call_kwds.get("preserve_labels", False),
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
def call(cls, func, *call_args, **call_kwds):
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
preserve_labels=call_kwds.get("preserve_labels", False),
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
|
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def call(cls, map_func, reduce_func, *call_args, **call_kwds):
def caller(
query_compiler,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
assert isinstance(by, type(query_compiler)), (
"Can only use groupby reduce with another Query Compiler"
)
assert axis == 0, "Can only groupby reduce with axis=0"
if numeric_only:
qc = query_compiler.getitem_column_array(
query_compiler._modin_frame._numeric_columns(True)
)
else:
qc = query_compiler
as_index = groupby_args.get("as_index", True)
def _map(df, other):
def compute_map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(
df.groupby(by=other, axis=axis, **groupby_args), **map_args
)
# The _modin_groupby_ prefix indicates that this is the first partition,
# and since we may need to insert the grouping data in the reduce phase
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format(
"_modin_groupby_", result.index.name
)
return result
try:
return compute_map(df, other)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_map(df.copy(), other.copy())
def _reduce(df):
def compute_reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
if isinstance(by_part, str) and by_part in result.columns:
if "_modin_groupby_" in by_part and drop:
col_name = by_part[len("_modin_groupby_") :]
new_result = result.drop(columns=col_name)
new_result.columns = [
col_name if "_modin_groupby_" in c else c
for c in new_result.columns
]
return new_result
else:
return result.drop(columns=by_part)
return result
try:
return compute_reduce(df)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_reduce(df.copy())
# TODO: try to precompute `new_index` and `new_columns`
new_modin_frame = qc._modin_frame.groupby_reduce(
axis, by._modin_frame, _map, _reduce
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
def call(cls, map_func, reduce_func, *call_args, **call_kwds):
def caller(
query_compiler,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
assert isinstance(by, type(query_compiler)), (
"Can only use groupby reduce with another Query Compiler"
)
assert axis == 0, "Can only groupby reduce with axis=0"
if numeric_only:
qc = query_compiler.getitem_column_array(
query_compiler._modin_frame._numeric_columns(True)
)
else:
qc = query_compiler
as_index = groupby_args.get("as_index", True)
def _map(df, other):
def compute_map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(
df.groupby(by=other, axis=axis, **groupby_args), **map_args
)
# The _modin_groupby_ prefix indicates that this is the first partition,
# and since we may need to insert the grouping data in the reduce phase
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format(
"_modin_groupby_", result.index.name
)
return result
try:
return compute_map(df, other)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_map(df.copy(), other.copy())
def _reduce(df):
def compute_reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
if isinstance(by_part, str) and by_part in result.columns:
if "_modin_groupby_" in by_part and drop:
col_name = by_part[len("_modin_groupby_") :]
new_result = result.drop(columns=col_name)
new_result.columns = [
col_name if "_modin_groupby_" in c else c
for c in new_result.columns
]
return new_result
else:
return result.drop(columns=by_part)
return result
try:
return compute_reduce(df)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_reduce(df.copy())
if axis == 0:
new_columns = qc.columns
new_index = None
else:
new_index = query_compiler.index
new_columns = None
new_modin_frame = qc._modin_frame.groupby_reduce(
axis,
by._modin_frame,
_map,
_reduce,
new_columns=new_columns,
new_index=new_index,
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def caller(
query_compiler,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
assert isinstance(by, type(query_compiler)), (
"Can only use groupby reduce with another Query Compiler"
)
assert axis == 0, "Can only groupby reduce with axis=0"
if numeric_only:
qc = query_compiler.getitem_column_array(
query_compiler._modin_frame._numeric_columns(True)
)
else:
qc = query_compiler
as_index = groupby_args.get("as_index", True)
def _map(df, other):
def compute_map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(
df.groupby(by=other, axis=axis, **groupby_args), **map_args
)
# The _modin_groupby_ prefix indicates that this is the first partition,
# and since we may need to insert the grouping data in the reduce phase
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
try:
return compute_map(df, other)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_map(df.copy(), other.copy())
def _reduce(df):
def compute_reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
if isinstance(by_part, str) and by_part in result.columns:
if "_modin_groupby_" in by_part and drop:
col_name = by_part[len("_modin_groupby_") :]
new_result = result.drop(columns=col_name)
new_result.columns = [
col_name if "_modin_groupby_" in c else c
for c in new_result.columns
]
return new_result
else:
return result.drop(columns=by_part)
return result
try:
return compute_reduce(df)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_reduce(df.copy())
# TODO: try to precompute `new_index` and `new_columns`
new_modin_frame = qc._modin_frame.groupby_reduce(
axis, by._modin_frame, _map, _reduce
)
return query_compiler.__constructor__(new_modin_frame)
|
def caller(
query_compiler,
by,
axis,
groupby_args,
map_args,
reduce_args=None,
numeric_only=True,
drop=False,
):
assert isinstance(by, type(query_compiler)), (
"Can only use groupby reduce with another Query Compiler"
)
assert axis == 0, "Can only groupby reduce with axis=0"
if numeric_only:
qc = query_compiler.getitem_column_array(
query_compiler._modin_frame._numeric_columns(True)
)
else:
qc = query_compiler
as_index = groupby_args.get("as_index", True)
def _map(df, other):
def compute_map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(
df.groupby(by=other, axis=axis, **groupby_args), **map_args
)
# The _modin_groupby_ prefix indicates that this is the first partition,
# and since we may need to insert the grouping data in the reduce phase
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
try:
return compute_map(df, other)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_map(df.copy(), other.copy())
def _reduce(df):
def compute_reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
if isinstance(by_part, str) and by_part in result.columns:
if "_modin_groupby_" in by_part and drop:
col_name = by_part[len("_modin_groupby_") :]
new_result = result.drop(columns=col_name)
new_result.columns = [
col_name if "_modin_groupby_" in c else c
for c in new_result.columns
]
return new_result
else:
return result.drop(columns=by_part)
return result
try:
return compute_reduce(df)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_reduce(df.copy())
if axis == 0:
new_columns = qc.columns
new_index = None
else:
new_index = query_compiler.index
new_columns = None
new_modin_frame = qc._modin_frame.groupby_reduce(
axis,
by._modin_frame,
_map,
_reduce,
new_columns=new_columns,
new_index=new_index,
)
return query_compiler.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def __init__(
self,
partitions,
index,
columns,
row_lengths=None,
column_widths=None,
dtypes=None,
validate_axes: Union[bool, str] = False,
):
"""Initialize a dataframe.
Parameters
----------
partitions : A 2D NumPy array of partitions. Must contain partition objects.
index : The index object for the dataframe. Converts to a pandas.Index.
columns : The columns object for the dataframe. Converts to a pandas.Index.
row_lengths : (optional) The lengths of each partition in the rows. The
"height" of each of the block partitions. Is computed if not provided.
column_widths : (optional) The width of each partition in the columns. The
"width" of each of the block partitions. Is computed if not provided.
dtypes : (optional) The data types for the dataframe.
validate_axes : (optional) Whether or not validate for equality
internal indices of partitions and passed `index` and `columns`.
"""
self._partitions = partitions
self._index_cache = ensure_index(index)
self._columns_cache = ensure_index(columns)
if row_lengths is not None and len(self.index) > 0:
ErrorMessage.catch_bugs_and_request_email(
sum(row_lengths) != len(self._index_cache),
"Row lengths: {} != {}".format(sum(row_lengths), len(self._index_cache)),
)
self._row_lengths_cache = row_lengths
if column_widths is not None and len(self.columns) > 0:
ErrorMessage.catch_bugs_and_request_email(
sum(column_widths) != len(self._columns_cache),
"Column widths: {} != {}".format(
sum(column_widths), len(self._columns_cache)
),
)
self._column_widths_cache = column_widths
self._dtypes = dtypes
self._filter_empties()
if validate_axes is not False:
self._validate_internal_indices(mode=validate_axes)
|
def __init__(
self,
partitions,
index,
columns,
row_lengths=None,
column_widths=None,
dtypes=None,
):
"""Initialize a dataframe.
Args:
partitions: A 2D NumPy array of partitions. Must contain partition objects.
index: The index object for the dataframe. Converts to a pandas.Index.
columns: The columns object for the dataframe. Converts to a pandas.Index.
row_lengths: (optional) The lengths of each partition in the rows. The
"height" of each of the block partitions. Is computed if not provided.
column_widths: (optional) The width of each partition in the columns. The
"width" of each of the block partitions. Is computed if not provided.
dtypes: (optional) The data types for the dataframe.
"""
self._partitions = partitions
self._index_cache = ensure_index(index)
self._columns_cache = ensure_index(columns)
if row_lengths is not None and len(self.index) > 0:
ErrorMessage.catch_bugs_and_request_email(
sum(row_lengths) != len(self._index_cache),
"Row lengths: {} != {}".format(sum(row_lengths), len(self._index_cache)),
)
self._row_lengths_cache = row_lengths
if column_widths is not None and len(self.columns) > 0:
ErrorMessage.catch_bugs_and_request_email(
sum(column_widths) != len(self._columns_cache),
"Column widths: {} != {}".format(
sum(column_widths), len(self._columns_cache)
),
)
self._column_widths_cache = column_widths
self._dtypes = dtypes
self._filter_empties()
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def _compute_map_reduce_metadata(self, axis, new_parts):
if axis == 0:
columns = self.columns
index = ["__reduced__"]
new_lengths = [1]
new_widths = self._column_widths
new_dtypes = self._dtypes
else:
columns = ["__reduced__"]
index = self.index
new_lengths = self._row_lengths
new_widths = [1]
if self._dtypes is not None:
new_dtypes = pandas.Series(
np.full(1, find_common_type(self.dtypes.values)),
index=["__reduced__"],
)
else:
new_dtypes = self._dtypes
return self.__constructor__(
new_parts,
index,
columns,
new_lengths,
new_widths,
new_dtypes,
validate_axes="reduced",
)
|
def _compute_map_reduce_metadata(self, axis, new_parts):
if axis == 0:
columns = self.columns
index = ["__reduced__"]
new_lengths = [1]
new_widths = self._column_widths
new_dtypes = self._dtypes
else:
columns = ["__reduced__"]
index = self.index
new_lengths = self._row_lengths
new_widths = [1]
if self._dtypes is not None:
new_dtypes = pandas.Series(
np.full(1, find_common_type(self.dtypes.values)),
index=["__reduced__"],
)
else:
new_dtypes = self._dtypes
return self.__constructor__(
new_parts, index, columns, new_lengths, new_widths, new_dtypes
)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def _map_reduce(self, axis, map_func, reduce_func=None, preserve_index=True):
"""
Apply function that will reduce the data to a Pandas Series.
Parameters
----------
axis : 0 or 1
0 for columns and 1 for rows.
map_func : callable
Callable function to map the dataframe.
reduce_func : callable
Callable function to reduce the dataframe.
If none, then apply map_func twice. Default is None.
preserve_index : boolean
The flag to preserve index for default behavior
map and reduce operations. Default is True.
Returns
-------
BasePandasFrame
A new dataframe.
"""
map_func = self._build_mapreduce_func(axis, map_func)
if reduce_func is None:
reduce_func = map_func
else:
reduce_func = self._build_mapreduce_func(axis, reduce_func)
map_parts = self._frame_mgr_cls.map_partitions(self._partitions, map_func)
reduce_parts = self._frame_mgr_cls.map_axis_partitions(axis, map_parts, reduce_func)
if preserve_index:
return self._compute_map_reduce_metadata(axis, reduce_parts)
else:
if axis == 0:
new_index = ["__reduced__"]
new_columns = self._frame_mgr_cls.get_indices(
1, reduce_parts, lambda df: df.columns
)
else:
new_index = self._frame_mgr_cls.get_indices(
0, reduce_parts, lambda df: df.index
)
new_columns = ["__reduced__"]
return self.__constructor__(
reduce_parts, new_index, new_columns, validate_axes="reduced"
)
|
def _map_reduce(self, axis, map_func, reduce_func=None, preserve_index=True):
"""
Apply function that will reduce the data to a Pandas Series.
Parameters
----------
axis : 0 or 1
0 for columns and 1 for rows.
map_func : callable
Callable function to map the dataframe.
reduce_func : callable
Callable function to reduce the dataframe.
If none, then apply map_func twice. Default is None.
preserve_index : boolean
The flag to preserve index for default behavior
map and reduce operations. Default is True.
Returns
-------
BasePandasFrame
A new dataframe.
"""
map_func = self._build_mapreduce_func(axis, map_func)
if reduce_func is None:
reduce_func = map_func
else:
reduce_func = self._build_mapreduce_func(axis, reduce_func)
map_parts = self._frame_mgr_cls.map_partitions(self._partitions, map_func)
reduce_parts = self._frame_mgr_cls.map_axis_partitions(axis, map_parts, reduce_func)
if preserve_index:
return self._compute_map_reduce_metadata(axis, reduce_parts)
else:
if axis == 0:
new_index = ["__reduced__"]
new_columns = self._frame_mgr_cls.get_indices(
1, reduce_parts, lambda df: df.columns
)
else:
new_index = self._frame_mgr_cls.get_indices(
0, reduce_parts, lambda df: df.index
)
new_columns = ["__reduced__"]
return self.__constructor__(reduce_parts, new_index, new_columns)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def _apply_full_axis(
self,
axis,
func,
new_index=None,
new_columns=None,
dtypes=None,
):
"""
Perform a function across an entire axis.
Parameters
----------
axis : 0 or 1
The axis to apply over (0 - rows, 1 - columns).
func : callable
The function to apply.
new_index : list-like (optional)
The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns : list-like (optional)
The columns of the result. We may know this in
advance, and if not provided it must be computed.
dtypes : list-like (optional)
The data types of the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
Returns
-------
BasePandasFrame
A new dataframe.
Notes
-----
The data shape may change as a result of the function.
"""
new_partitions = self._frame_mgr_cls.map_axis_partitions(
axis,
self._partitions,
self._build_mapreduce_func(axis, func),
keep_partitioning=True,
)
# Index objects for new object creation. This is shorter than if..else
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series([np.dtype(dtypes)] * len(new_columns), index=new_columns)
return self.__constructor__(
new_partitions,
new_index,
new_columns,
None,
None,
dtypes,
validate_axes="reduced",
)
|
def _apply_full_axis(
self,
axis,
func,
new_index=None,
new_columns=None,
dtypes=None,
):
"""
Perform a function across an entire axis.
Parameters
----------
axis : 0 or 1
The axis to apply over (0 - rows, 1 - columns).
func : callable
The function to apply.
new_index : list-like (optional)
The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns : list-like (optional)
The columns of the result. We may know this in
advance, and if not provided it must be computed.
dtypes : list-like (optional)
The data types of the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
Returns
-------
BasePandasFrame
A new dataframe.
Notes
-----
The data shape may change as a result of the function.
"""
new_partitions = self._frame_mgr_cls.map_axis_partitions(
axis,
self._partitions,
self._build_mapreduce_func(axis, func),
keep_partitioning=True,
)
# Index objects for new object creation. This is shorter than if..else
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series([np.dtype(dtypes)] * len(new_columns), index=new_columns)
return self.__constructor__(
new_partitions, new_index, new_columns, None, None, dtypes
)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def broadcast_apply(self, axis, func, other, preserve_labels=True, dtypes=None):
"""Broadcast partitions of other dataframe partitions and apply a function.
Args:
axis: The axis to broadcast over.
func: The function to apply.
other: The Modin DataFrame to broadcast.
preserve_labels: Whether or not to keep labels from this Modin DataFrame.
dtypes: "copy" or None. Whether to keep old dtypes or infer new dtypes from
data.
Returns:
A new Modin DataFrame
"""
# Only sort the indices if they do not match
left_parts, right_parts, joined_index = self._copartition(
axis, other, "left", sort=not self.axes[axis].equals(other.axes[axis])
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.broadcast_apply(axis, func, left_parts, right_parts)
if dtypes == "copy":
dtypes = self._dtypes
new_index = self.index
new_columns = self.columns
if not preserve_labels:
if axis == 1:
new_columns = joined_index
else:
new_index = joined_index
return self.__constructor__(
new_frame, new_index, new_columns, None, None, dtypes=dtypes
)
|
def broadcast_apply(self, axis, func, other, preserve_labels=True, dtypes=None):
"""Broadcast partitions of other dataframe partitions and apply a function.
Args:
axis: The axis to broadcast over.
func: The function to apply.
other: The Modin DataFrame to broadcast.
preserve_labels: Whether or not to keep labels from this Modin DataFrame.
dtypes: "copy" or None. Whether to keep old dtypes or infer new dtypes from
data.
Returns:
A new Modin DataFrame
"""
assert preserve_labels, "`preserve_labels=False` Not Yet Implemented"
# Only sort the indices if they do not match
left_parts, right_parts, joined_index = self._copartition(
axis, other, "left", sort=not self.axes[axis].equals(other.axes[axis])
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.broadcast_apply(axis, func, left_parts, right_parts)
if dtypes == "copy":
dtypes = self._dtypes
new_index = self.index
new_columns = self.columns
return self.__constructor__(
new_frame, new_index, new_columns, None, None, dtypes=dtypes
)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame.
"""
df = self._frame_mgr_cls.to_pandas(self._partitions)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
not df.index.equals(self.index) or not df.columns.equals(self.columns),
"Internal and external indices do not match.",
)
df.index = self.index
df.columns = self.columns
return df
|
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame.
"""
df = self._frame_mgr_cls.to_pandas(self._partitions)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
df.index.name = self.index.name
return df
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def to_sql(cls, qc, **kwargs):
"""Write records stored in a DataFrame to a SQL database.
Args:
qc: the query compiler of the DF that we want to run to_sql on
kwargs: parameters for pandas.to_sql(**kwargs)
"""
# we first insert an empty DF in order to create the full table in the database
# This also helps to validate the input against pandas
# we would like to_sql() to complete only when all rows have been inserted into the database
# since the mapping operation is non-blocking, each partition will return an empty DF
# so at the end, the blocking operation will be this empty DF to_pandas
empty_df = qc.getitem_row_array([0]).to_pandas().head(0)
empty_df.to_sql(**kwargs)
# so each partition will append its respective DF
kwargs["if_exists"] = "append"
columns = qc.columns
def func(df):
df.columns = columns
df.to_sql(**kwargs)
return pandas.DataFrame()
result = qc._modin_frame._apply_full_axis(1, func, new_index=[], new_columns=[])
# blocking operation
result.to_pandas()
|
def to_sql(cls, qc, **kwargs):
"""Write records stored in a DataFrame to a SQL database.
Args:
qc: the query compiler of the DF that we want to run to_sql on
kwargs: parameters for pandas.to_sql(**kwargs)
"""
# we first insert an empty DF in order to create the full table in the database
# This also helps to validate the input against pandas
# we would like to_sql() to complete only when all rows have been inserted into the database
# since the mapping operation is non-blocking, each partition will return an empty DF
# so at the end, the blocking operation will be this empty DF to_pandas
empty_df = qc.getitem_row_array([0]).to_pandas().head(0)
empty_df.to_sql(**kwargs)
# so each partition will append its respective DF
kwargs["if_exists"] = "append"
columns = qc.columns
def func(df):
df.columns = columns
df.to_sql(**kwargs)
return pandas.DataFrame()
result = qc._modin_frame._fold_reduce(1, func)
# blocking operation
result.to_pandas()
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def __getitem__(self, key):
kwargs = self._kwargs.copy()
# Most of time indexing DataFrameGroupBy results in another DataFrameGroupBy object unless circumstances are
# special in which case SeriesGroupBy has to be returned. Such circumstances are when key equals to a single
# column name and is not a list of column names or list of one column name.
make_dataframe = True
if self._drop and self._as_index:
if not isinstance(key, list):
key = [key]
kwargs["squeeze"] = True
make_dataframe = False
# When `as_index` is False, pandas will always convert to a `DataFrame`, we
# convert to a list here so that the result will be a `DataFrame`.
elif not self._as_index and not isinstance(key, list):
# Sometimes `__getitem__` doesn't only get the item, it also gets the `by`
# column. This logic is here to ensure that we also get the `by` data so
# that it is there for `as_index=False`.
if (
isinstance(self._by, type(self._query_compiler))
and all(c in self._columns for c in self._by.columns)
and self._drop
):
key = list(self._by.columns) + [key]
else:
key = [key]
if isinstance(key, list) and (make_dataframe or not self._as_index):
return DataFrameGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=self._drop,
**kwargs,
)
return SeriesGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=False,
**kwargs,
)
|
def __getitem__(self, key):
kwargs = self._kwargs.copy()
# Most of time indexing DataFrameGroupBy results in another DataFrameGroupBy object unless circumstances are
# special in which case SeriesGroupBy has to be returned. Such circumstances are when key equals to a single
# column name and is not a list of column names or list of one column name.
make_dataframe = True
if self._drop and self._as_index:
if not isinstance(key, list):
key = [key]
kwargs["squeeze"] = True
make_dataframe = False
# When `as_index` is False, pandas will always convert to a `DataFrame`, we
# convert to a list here so that the result will be a `DataFrame`.
elif not self._as_index and not isinstance(key, list):
# Sometimes `__getitem__` doesn't only get the item, it also gets the `by`
# column. This logic is here to ensure that we also get the `by` data so
# that it is there for `as_index=False`.
if (
isinstance(self._by, type(self._query_compiler))
and all(c in self._columns for c in self._by.columns)
and self._drop
):
key = [key] + list(self._by.columns)
else:
key = [key]
if isinstance(key, list) and (make_dataframe or not self._as_index):
return DataFrameGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=self._drop,
**kwargs,
)
return SeriesGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=False,
**kwargs,
)
|
https://github.com/modin-project/modin/issues/1734
|
pd_res:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res_partitions:
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
md_res:
distributed.worker - WARNING - Compute Failed
Function: apply_list_of_funcs
args: ([[b"\x80\x04\x95\xb0\x03\x00\x00\x00\x00\x00\x00\x8c\x17cloudpickle.cloudpickle\x94\x8c\x0e_fill_function\x94\x93\x94(h\x00\x8c\x0f_make_skel_func\x94\x93\x94h\x00\x8c\r_builtin_type\x94\x93\x94\x8c\x08CodeType\x94\x85\x94R\x94(K\x01K\x00K\x01K\x05K\x13C\x14t\x00\xa0\x01|\x00j\x02\x88\x01\x88\x00f\x02\x19\x00\xa1\x01S\x00\x94N\x85\x94\x8c\x06pandas\x94\x8c\tDataFrame\x94\x8c\x04iloc\x94\x87\x94\x8c\x02df\x94\x85\x94\x8cZc:\\users\\dchigare\\desktop\\repos\\modin\\modin\\engines\\dask\\pandas_on_dask\\frame\\partition.py\x94\x8c\x08<lambda>\x94KcC\x00\x94\x8c\x0bcol_indices\x94\x8c\x0brow_indices\x94\x86\x94)t\x94R\x94K\x02}\x94(\x8c\x0b__package__\x94\x8c'modin.engines.dask.pandas_on_dask.frame\x94\x8c\x08__name__\x94\x8c1modin.engines.dask.pandas_on_dask.frame.partition\x94\x8c\x08__file__\x94h\x12u\x87\x94R\x94}\x94(\x8c\x07globals\x94}\x94h\x0ch\x00\x8c\tsubimport\x94\x93\x94h\x0c\x85\x94R\x94s\x8c\x08defaults\x94N\x8c\x04dict\x94}\x94\x8c\x0eclosure_values\x94]\x94(\x8c\x15numpy.c
kwargs: {}
Exception: IndexError('positional indexers are out-of-bounds')
Traceback (most recent call last):
File "C:\Users\dchigare\Desktop\REPOS\TESTS\reprod.py", line 39, in <module>
print("\nmd_res:\n", md_res, sep="") # Exception: IndexError('positional indexers are out-of-bounds')
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 3465, in __str__
return repr(self)
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\dataframe.py", line 162, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\users\dchigare\desktop\repos\modin\modin\pandas\base.py", line 100, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\backends\pandas\query_compiler.py", line 188, in to_pandas
return self._modin_frame.to_pandas()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\data.py", line 1350, in to_pandas
df = self._frame_mgr_cls.to_pandas(self._partitions)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in to_pandas
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\base\frame\partition_manager.py", line 258, in <listcomp>
retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 127, in to_pandas
dataframe = self.get()
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 63, in get
return self.future.result()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\distributed\client.py", line 218, in result
raise exc.with_traceback(tb)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 27, in apply_list_of_funcs
df = func(df, **kwargs)
File "c:\users\dchigare\desktop\repos\modin\modin\engines\dask\pandas_on_dask\frame\partition.py", line 99, in <lambda>
lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 1762, in __getitem__
return self._getitem_tuple(key)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2067, in _getitem_tuple
self._has_valid_tuple(tup)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 703, in _has_valid_tuple
self._validate_key(k, i)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\pandas\core\indexing.py", line 2009, in _validate_key
raise IndexError("positional indexers are out-of-bounds")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""Returns a random sample of items from an axis of object.
Args:
n: Number of items from axis to return. Cannot be used with frac.
Default = 1 if frac = None.
frac: Fraction of axis items to return. Cannot be used with n.
replace: Sample with or without replacement. Default = False.
weights: Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index.
Index values in weights not found in sampled object will be
ignored and index values in sampled object not in weights will
be assigned weights of zero. If called on a DataFrame, will
accept the name of a column when axis = 0. Unless weights are
a Series, weights must be same length as axis being sampled.
If weights do not sum to 1, they will be normalized to sum
to 1. Missing values in the weights column will be treated as
zero. inf and -inf values not allowed.
random_state: Seed for the random number generator (if int), or
numpy RandomState object.
axis: Axis to sample. Accepts axis number or name.
Returns:
A new Dataframe
"""
axis = self._get_axis_number(axis) if axis is not None else 0
if axis:
axis_labels = self.columns
axis_length = len(axis_labels)
else:
# Getting rows requires indices instead of labels. RangeIndex provides this.
axis_labels = pandas.RangeIndex(len(self.index))
axis_length = len(axis_labels)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, BasePandasDataset):
weights = weights.reindex(self.axes[axis])
# If weights arg is a string, the weights used for sampling will
# the be values in the column corresponding to that string
if isinstance(weights, str):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a valid column")
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
weights = pandas.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# weights cannot be NaN when sampling, so we must set all nan
# values to 0
weights = weights.fillna(0)
# If passed in weights are not equal to 1, renormalize them
# otherwise numpy sampling function will error
weights_sum = weights.sum()
if weights_sum != 1:
if weights_sum != 0:
weights = weights / weights_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
if n is None and frac is None:
# default to n = 1 if n and frac are both None (in accordance with
# Pandas specification)
n = 1
elif n is not None and frac is None and n % 1 != 0:
# n must be an integer
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
# compute the number of samples based on frac
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
# Pandas specification does not allow both n and frac to be passed
# in
raise ValueError("Please enter a value for `frac` OR `n`, not both")
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
if n == 0:
# This returns an empty object, and since it is a weird edge case that
# doesn't need to be distributed, we default to pandas for n=0.
return self._default_to_pandas(
"sample",
n=n,
frac=frac,
replace=replace,
weights=weights,
random_state=random_state,
axis=axis,
)
if random_state is not None:
# Get a random number generator depending on the type of
# random_state that is passed in
if isinstance(random_state, int):
random_num_gen = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
random_num_gen = random_state
else:
# random_state must be an int or a numpy RandomState object
raise ValueError(
"Please enter an `int` OR a np.random.RandomState for random_state"
)
# choose random numbers and then get corresponding labels from
# chosen axis
sample_indices = random_num_gen.choice(
np.arange(0, axis_length), size=n, replace=replace, p=weights
)
samples = axis_labels[sample_indices]
else:
# randomly select labels from chosen axis
samples = np.random.choice(a=axis_labels, size=n, replace=replace, p=weights)
if axis:
query_compiler = self._query_compiler.getitem_column_array(samples)
return self.__constructor__(query_compiler=query_compiler)
else:
query_compiler = self._query_compiler.getitem_row_array(samples)
return self.__constructor__(query_compiler=query_compiler)
|
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""Returns a random sample of items from an axis of object.
Args:
n: Number of items from axis to return. Cannot be used with frac.
Default = 1 if frac = None.
frac: Fraction of axis items to return. Cannot be used with n.
replace: Sample with or without replacement. Default = False.
weights: Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index.
Index values in weights not found in sampled object will be
ignored and index values in sampled object not in weights will
be assigned weights of zero. If called on a DataFrame, will
accept the name of a column when axis = 0. Unless weights are
a Series, weights must be same length as axis being sampled.
If weights do not sum to 1, they will be normalized to sum
to 1. Missing values in the weights column will be treated as
zero. inf and -inf values not allowed.
random_state: Seed for the random number generator (if int), or
numpy RandomState object.
axis: Axis to sample. Accepts axis number or name.
Returns:
A new Dataframe
"""
axis = self._get_axis_number(axis) if axis is not None else 0
if axis:
axis_labels = self.columns
axis_length = len(axis_labels)
else:
# Getting rows requires indices instead of labels. RangeIndex provides this.
axis_labels = pandas.RangeIndex(len(self.index))
axis_length = len(axis_labels)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, BasePandasDataset):
weights = weights.reindex(self.axes[axis])
# If weights arg is a string, the weights used for sampling will
# the be values in the column corresponding to that string
if isinstance(weights, str):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a valid column")
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
weights = pandas.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# weights cannot be NaN when sampling, so we must set all nan
# values to 0
weights = weights.fillna(0)
# If passed in weights are not equal to 1, renormalize them
# otherwise numpy sampling function will error
weights_sum = weights.sum()
if weights_sum != 1:
if weights_sum != 0:
weights = weights / weights_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
if n is None and frac is None:
# default to n = 1 if n and frac are both None (in accordance with
# Pandas specification)
n = 1
elif n is not None and frac is None and n % 1 != 0:
# n must be an integer
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
# compute the number of samples based on frac
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
# Pandas specification does not allow both n and frac to be passed
# in
raise ValueError("Please enter a value for `frac` OR `n`, not both")
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
if n == 0:
# This returns an empty object, and since it is a weird edge case that
# doesn't need to be distributed, we default to pandas for n=0.
return self._default_to_pandas(
"sample",
n=n,
frac=frac,
replace=replace,
weights=weights,
random_state=random_state,
axis=axis,
)
if random_state is not None:
# Get a random number generator depending on the type of
# random_state that is passed in
if isinstance(random_state, int):
random_num_gen = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.randomState):
random_num_gen = random_state
else:
# random_state must be an int or a numpy RandomState object
raise ValueError(
"Please enter an `int` OR a np.random.RandomState for random_state"
)
# choose random numbers and then get corresponding labels from
# chosen axis
sample_indices = random_num_gen.choice(
np.arange(0, axis_length), size=n, replace=replace, p=weights
)
samples = axis_labels[sample_indices]
else:
# randomly select labels from chosen axis
samples = np.random.choice(a=axis_labels, size=n, replace=replace, p=weights)
if axis:
query_compiler = self._query_compiler.getitem_column_array(samples)
return self.__constructor__(query_compiler=query_compiler)
else:
query_compiler = self._query_compiler.getitem_row_array(samples)
return self.__constructor__(query_compiler=query_compiler)
|
https://github.com/modin-project/modin/issues/1692
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-667e79aa734e> in <module>
1 rng = np.random.RandomState(42)
2 df = mpd.DataFrame(np.random.rand(100, 3))
----> 3 df = df.sample(n=50, random_state=rng)
/export/home/software/anaconda3/lib/python3.7/site-packages/modin/pandas/base.py in sample(self, n, frac, replace, weights, random_state, axis)
2481 if isinstance(random_state, int):
2482 random_num_gen = np.random.RandomState(random_state)
-> 2483 elif isinstance(random_state, np.random.randomState):
2484 random_num_gen = random_state
2485 else:
AttributeError: module 'numpy.random' has no attribute 'randomState'
|
AttributeError
|
def __getitem__(self, key):
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
if isinstance(row_loc, slice) and row_loc == slice(None):
# If we're only slicing columns, handle the case with `__getitem__`
if not isinstance(col_loc, slice):
# Boolean indexers can just be sliced into the columns object and
# then passed to `__getitem__`
if is_boolean_array(col_loc):
col_loc = self.df.columns[col_loc]
return self.df.__getitem__(col_loc)
else:
result_slice = self.df.columns.slice_locs(col_loc.start, col_loc.stop)
return self.df.iloc[:, slice(*result_slice)]
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
if any(i == -1 for i in row_lookup) or any(i == -1 for i in col_lookup):
raise KeyError(
"Passing list-likes to .loc or [] with any missing labels is no longer "
"supported, see https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike"
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and not isinstance(col_loc, slice)
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
def __getitem__(self, key):
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
if isinstance(row_loc, slice) and row_loc == slice(None):
# If we're only slicing columns, handle the case with `__getitem__`
if not isinstance(col_loc, slice):
# Boolean indexers can just be sliced into the columns object and
# then passed to `__getitem__`
if is_boolean_array(col_loc):
col_loc = self.df.columns[col_loc]
return self.df.__getitem__(col_loc)
else:
result_slice = self.df.columns.slice_locs(col_loc.start, col_loc.stop)
return self.df.iloc[:, slice(*result_slice)]
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
if any(i == -1 for i in row_lookup) or any(i == -1 for i in col_lookup):
raise KeyError(
"Passing list-likes to .loc or [] with any missing labels is no longer "
"supported, see https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike"
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
https://github.com/modin-project/modin/issues/1456
|
TypeError Traceback (most recent call last)
<ipython-input-2-ad6eab4f4683> in <module>()
4 np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
5 df = pd.DataFrame(np.random.randn(8, 4), index=arrays).T
----> 6 df.loc[df.index>2,:]
/usr/local/lib/python3.6/dist-packages/modin/pandas/indexing.py in __getitem__(self, key)
249 hasattr(result, "columns")
250 and isinstance(result.columns, pandas.MultiIndex)
--> 251 and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
252 ):
253 result.columns = result.columns.droplevel(list(range(len(col_loc))))
TypeError: object of type 'slice' has no len()
|
TypeError
|
def copy_df_for_func(func):
"""
Create a function that copies the dataframe, likely because `func` is inplace.
Parameters
----------
func : callable
The function, usually updates a dataframe inplace.
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
|
def copy_df_for_func(func):
"""Create a function that copies the dataframe, likely because `func` is inplace.
Args:
func: The function, usually updates a dataframe inplace.
Returns:
A callable function to be applied in the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def call(cls, func, *call_args, **call_kwds):
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
def call(cls, func, *call_args, **call_kwds):
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
)
)
else:
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
|
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
)
)
else:
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Parameters
----------
axis : int
The axis to copartition along.
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : boolean
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_other_obj = [o.axes[axis] for o in other]
joined_index = self._join_index_objects(axis ^ 1, index_other_obj, how, sort)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.axes[axis]
right_old_idxes = index_other_obj
# Start with this and we'll repartition the first time, and then not again.
if not left_old_idx.equals(joined_index) or force_repartition:
reindexed_self = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, lambda df: df.reindex(joined_index, axis=axis)
)
else:
reindexed_self = self._partitions
reindexed_other_list = []
for i in range(len(other)):
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindexed_other = other[i]._partitions
else:
reindexed_other = other[i]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
lambda df: df.reindex(joined_index, axis=axis),
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""Copartition two dataframes.
Args:
axis: The axis to copartition along.
other: The other dataframes(s) to copartition against.
how: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_obj = [o.axes[axis] for o in other]
joined_index = self._join_index_objects(axis ^ 1, index_obj, how, sort)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.axes[axis]
right_old_idxes = index_obj
# Start with this and we'll repartition the first time, and then not again.
if not left_old_idx.equals(joined_index) or force_repartition:
reindexed_self = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, lambda df: df.reindex(joined_index, axis=axis)
)
else:
reindexed_self = self._partitions
reindexed_other_list = []
for i in range(len(other)):
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindexed_other = other[i]._partitions
else:
reindexed_other = other[i]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
lambda df: df.reindex(joined_index, axis=axis),
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def _binary_op(self, op, right_frame, join_type="outer"):
"""
Perform an operation that requires joining with another dataframe.
Parameters
----------
op : callable
The function to apply after the join.
right_frame : BasePandasFrame
The dataframe to join with.
join_type : str (optional)
The type of join to apply.
Returns
-------
BasePandasFrame
A new dataframe.
"""
left_parts, right_parts, joined_index = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.binary_operation(
1, left_parts, lambda l, r: op(l, r), right_parts
)
new_columns = self.columns.join(right_frame.columns, how=join_type)
return self.__constructor__(new_frame, self.index, new_columns, None, None)
|
def _binary_op(self, op, right_frame, join_type="outer"):
"""Perform an operation that requires joining with another dataframe.
Args:
op: The function to apply after the join.
right_frame: The dataframe to join with.
join_type: (optional) The type of join to apply.
Returns:
A new dataframe.
"""
left_parts, right_parts, joined_index = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.binary_operation(
1, left_parts, lambda l, r: op(l, r), right_parts
)
new_columns = self.columns.join(right_frame.columns, how=join_type)
return self.__constructor__(new_frame, self.index, new_columns, None, None)
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def binary_operation(cls, axis, left, func, right):
"""
Apply a function that requires two BasePandasFrame objects.
Parameters
----------
axis : int
The axis to apply the function over (0 - rows, 1 - columns)
left : NumPy array
The partitions of left Modin Frame
func : callable
The function to apply
right : NumPy array
The partitions of right Modin Frame.
Returns
-------
NumPy array
A new BasePandasFrame object, the type of object that called this.
"""
if axis:
left_partitions = cls.row_partitions(left)
right_partitions = cls.row_partitions(right)
else:
left_partitions = cls.column_partitions(left)
right_partitions = cls.column_partitions(right)
func = cls.preprocess_func(func)
result = np.array(
[
left_partitions[i].apply(
func,
num_splits=cls._compute_num_partitions(),
other_axis_partition=right_partitions[i],
)
for i in range(len(left_partitions))
]
)
return result if axis else result.T
|
def binary_operation(cls, axis, left, func, right):
"""Apply a function that requires two BaseFrameManager objects.
Args:
axis: The axis to apply the function over (0 - rows, 1 - columns)
func: The function to apply
other: The other BaseFrameManager object to apply func to.
Returns:
A new BaseFrameManager object, the type of object that called this.
"""
if axis:
left_partitions = cls.row_partitions(left)
right_partitions = cls.row_partitions(right)
else:
left_partitions = cls.column_partitions(left)
right_partitions = cls.column_partitions(right)
func = cls.preprocess_func(func)
result = np.array(
[
left_partitions[i].apply(
func,
num_splits=cls._compute_num_partitions(),
other_axis_partition=right_partitions[i],
)
for i in range(len(left_partitions))
]
)
return result if axis else result.T
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def update(self, other, join="left", overwrite=True, filter_func=None, errors="ignore"):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
"""
if not isinstance(other, DataFrame):
other = DataFrame(other)
query_compiler = self._query_compiler.df_update(
other._query_compiler,
join=join,
overwrite=overwrite,
filter_func=filter_func,
errors=errors,
)
self._update_inplace(new_query_compiler=query_compiler)
|
def update(self, other, join="left", overwrite=True, filter_func=None, errors="ignore"):
"""Modify DataFrame in place using non-NA values from other.
Args:
other: DataFrame, or object coercible into a DataFrame
join: {'left'}, default 'left'
overwrite: If True then overwrite values for common keys in frame
filter_func: Can choose to replace values other than NA.
raise_conflict: If True, will raise an error if the DataFrame and
other both contain data in the same place.
Returns:
None
"""
if errors == "raise":
return self._default_to_pandas(
pandas.DataFrame.update,
other,
join=join,
overwrite=overwrite,
filter_func=filter_func,
errors=errors,
)
if not isinstance(other, DataFrame):
other = DataFrame(other)
query_compiler = self._query_compiler.update(
other._query_compiler,
join=join,
overwrite=overwrite,
filter_func=filter_func,
errors=errors,
)
self._update_inplace(new_query_compiler=query_compiler)
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.